diff options
Diffstat (limited to 'arch/arm')
34 files changed, 248 insertions, 1133 deletions
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index f15f82bf3a50..e968a52e4881 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug | |||
@@ -356,15 +356,15 @@ choice | |||
356 | is nothing connected to read from the DCC. | 356 | is nothing connected to read from the DCC. |
357 | 357 | ||
358 | config DEBUG_SEMIHOSTING | 358 | config DEBUG_SEMIHOSTING |
359 | bool "Kernel low-level debug output via semihosting I" | 359 | bool "Kernel low-level debug output via semihosting I/O" |
360 | help | 360 | help |
361 | Semihosting enables code running on an ARM target to use | 361 | Semihosting enables code running on an ARM target to use |
362 | the I/O facilities on a host debugger/emulator through a | 362 | the I/O facilities on a host debugger/emulator through a |
363 | simple SVC calls. The host debugger or emulator must have | 363 | simple SVC call. The host debugger or emulator must have |
364 | semihosting enabled for the special svc call to be trapped | 364 | semihosting enabled for the special svc call to be trapped |
365 | otherwise the kernel will crash. | 365 | otherwise the kernel will crash. |
366 | 366 | ||
367 | This is known to work with OpenOCD, as wellas | 367 | This is known to work with OpenOCD, as well as |
368 | ARM's Fast Models, or any other controlling environment | 368 | ARM's Fast Models, or any other controlling environment |
369 | that implements semihosting. | 369 | that implements semihosting. |
370 | 370 | ||
diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 210c923025b1..74381a31ee42 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile | |||
@@ -283,10 +283,10 @@ zImage Image xipImage bootpImage uImage: vmlinux | |||
283 | zinstall uinstall install: vmlinux | 283 | zinstall uinstall install: vmlinux |
284 | $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@ | 284 | $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@ |
285 | 285 | ||
286 | %.dtb: | 286 | %.dtb: scripts |
287 | $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ | 287 | $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ |
288 | 288 | ||
289 | dtbs: | 289 | dtbs: scripts |
290 | $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ | 290 | $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ |
291 | 291 | ||
292 | # We use MRPROPER_FILES and CLEAN_FILES now | 292 | # We use MRPROPER_FILES and CLEAN_FILES now |
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index b8c64b80bafc..81769c1341fa 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S | |||
@@ -659,10 +659,14 @@ __armv7_mmu_cache_on: | |||
659 | #ifdef CONFIG_CPU_ENDIAN_BE8 | 659 | #ifdef CONFIG_CPU_ENDIAN_BE8 |
660 | orr r0, r0, #1 << 25 @ big-endian page tables | 660 | orr r0, r0, #1 << 25 @ big-endian page tables |
661 | #endif | 661 | #endif |
662 | mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg | ||
662 | orrne r0, r0, #1 @ MMU enabled | 663 | orrne r0, r0, #1 @ MMU enabled |
663 | movne r1, #0xfffffffd @ domain 0 = client | 664 | movne r1, #0xfffffffd @ domain 0 = client |
665 | bic r6, r6, #1 << 31 @ 32-bit translation system | ||
666 | bic r6, r6, #3 << 0 @ use only ttbr0 | ||
664 | mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer | 667 | mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer |
665 | mcrne p15, 0, r1, c3, c0, 0 @ load domain access control | 668 | mcrne p15, 0, r1, c3, c0, 0 @ load domain access control |
669 | mcrne p15, 0, r6, c2, c0, 2 @ load ttb control | ||
666 | #endif | 670 | #endif |
667 | mcr p15, 0, r0, c7, c5, 4 @ ISB | 671 | mcr p15, 0, r0, c7, c5, 4 @ ISB |
668 | mcr p15, 0, r0, c1, c0, 0 @ load control register | 672 | mcr p15, 0, r0, c1, c0, 0 @ load control register |
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig index db2245353f0f..0d6bb738c6de 100644 --- a/arch/arm/configs/tegra_defconfig +++ b/arch/arm/configs/tegra_defconfig | |||
@@ -145,6 +145,8 @@ CONFIG_MMC_SDHCI_TEGRA=y | |||
145 | CONFIG_RTC_CLASS=y | 145 | CONFIG_RTC_CLASS=y |
146 | CONFIG_RTC_DRV_EM3027=y | 146 | CONFIG_RTC_DRV_EM3027=y |
147 | CONFIG_RTC_DRV_TEGRA=y | 147 | CONFIG_RTC_DRV_TEGRA=y |
148 | CONFIG_DMADEVICES=y | ||
149 | CONFIG_TEGRA20_APB_DMA=y | ||
148 | CONFIG_STAGING=y | 150 | CONFIG_STAGING=y |
149 | CONFIG_SENSORS_ISL29018=y | 151 | CONFIG_SENSORS_ISL29018=y |
150 | CONFIG_SENSORS_ISL29028=y | 152 | CONFIG_SENSORS_ISL29028=y |
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 03fb93621d0d..5c8b3bf4d825 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h | |||
@@ -320,4 +320,12 @@ | |||
320 | .size \name , . - \name | 320 | .size \name , . - \name |
321 | .endm | 321 | .endm |
322 | 322 | ||
323 | .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req | ||
324 | #ifndef CONFIG_CPU_USE_DOMAINS | ||
325 | adds \tmp, \addr, #\size - 1 | ||
326 | sbcccs \tmp, \tmp, \limit | ||
327 | bcs \bad | ||
328 | #endif | ||
329 | .endm | ||
330 | |||
323 | #endif /* __ASM_ASSEMBLER_H__ */ | 331 | #endif /* __ASM_ASSEMBLER_H__ */ |
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index e965f1b560f1..5f6ddcc56452 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h | |||
@@ -187,6 +187,7 @@ static inline unsigned long __phys_to_virt(unsigned long x) | |||
187 | #define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) | 187 | #define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) |
188 | #endif | 188 | #endif |
189 | #endif | 189 | #endif |
190 | #endif /* __ASSEMBLY__ */ | ||
190 | 191 | ||
191 | #ifndef PHYS_OFFSET | 192 | #ifndef PHYS_OFFSET |
192 | #ifdef PLAT_PHYS_OFFSET | 193 | #ifdef PLAT_PHYS_OFFSET |
@@ -196,6 +197,8 @@ static inline unsigned long __phys_to_virt(unsigned long x) | |||
196 | #endif | 197 | #endif |
197 | #endif | 198 | #endif |
198 | 199 | ||
200 | #ifndef __ASSEMBLY__ | ||
201 | |||
199 | /* | 202 | /* |
200 | * PFNs are used to describe any physical page; this means | 203 | * PFNs are used to describe any physical page; this means |
201 | * PFN 0 == physical address 0. | 204 | * PFN 0 == physical address 0. |
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index 314d4664eae7..99a19512ee26 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h | |||
@@ -199,6 +199,9 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, | |||
199 | { | 199 | { |
200 | pgtable_page_dtor(pte); | 200 | pgtable_page_dtor(pte); |
201 | 201 | ||
202 | #ifdef CONFIG_ARM_LPAE | ||
203 | tlb_add_flush(tlb, addr); | ||
204 | #else | ||
202 | /* | 205 | /* |
203 | * With the classic ARM MMU, a pte page has two corresponding pmd | 206 | * With the classic ARM MMU, a pte page has two corresponding pmd |
204 | * entries, each covering 1MB. | 207 | * entries, each covering 1MB. |
@@ -206,6 +209,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, | |||
206 | addr &= PMD_MASK; | 209 | addr &= PMD_MASK; |
207 | tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE); | 210 | tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE); |
208 | tlb_add_flush(tlb, addr + SZ_1M); | 211 | tlb_add_flush(tlb, addr + SZ_1M); |
212 | #endif | ||
209 | 213 | ||
210 | tlb_remove_page(tlb, pte); | 214 | tlb_remove_page(tlb, pte); |
211 | } | 215 | } |
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 479a6352e0b5..77bd79f2ffdb 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h | |||
@@ -101,28 +101,39 @@ extern int __get_user_1(void *); | |||
101 | extern int __get_user_2(void *); | 101 | extern int __get_user_2(void *); |
102 | extern int __get_user_4(void *); | 102 | extern int __get_user_4(void *); |
103 | 103 | ||
104 | #define __get_user_x(__r2,__p,__e,__s,__i...) \ | 104 | #define __GUP_CLOBBER_1 "lr", "cc" |
105 | #ifdef CONFIG_CPU_USE_DOMAINS | ||
106 | #define __GUP_CLOBBER_2 "ip", "lr", "cc" | ||
107 | #else | ||
108 | #define __GUP_CLOBBER_2 "lr", "cc" | ||
109 | #endif | ||
110 | #define __GUP_CLOBBER_4 "lr", "cc" | ||
111 | |||
112 | #define __get_user_x(__r2,__p,__e,__l,__s) \ | ||
105 | __asm__ __volatile__ ( \ | 113 | __asm__ __volatile__ ( \ |
106 | __asmeq("%0", "r0") __asmeq("%1", "r2") \ | 114 | __asmeq("%0", "r0") __asmeq("%1", "r2") \ |
115 | __asmeq("%3", "r1") \ | ||
107 | "bl __get_user_" #__s \ | 116 | "bl __get_user_" #__s \ |
108 | : "=&r" (__e), "=r" (__r2) \ | 117 | : "=&r" (__e), "=r" (__r2) \ |
109 | : "0" (__p) \ | 118 | : "0" (__p), "r" (__l) \ |
110 | : __i, "cc") | 119 | : __GUP_CLOBBER_##__s) |
111 | 120 | ||
112 | #define get_user(x,p) \ | 121 | #define __get_user_check(x,p) \ |
113 | ({ \ | 122 | ({ \ |
123 | unsigned long __limit = current_thread_info()->addr_limit - 1; \ | ||
114 | register const typeof(*(p)) __user *__p asm("r0") = (p);\ | 124 | register const typeof(*(p)) __user *__p asm("r0") = (p);\ |
115 | register unsigned long __r2 asm("r2"); \ | 125 | register unsigned long __r2 asm("r2"); \ |
126 | register unsigned long __l asm("r1") = __limit; \ | ||
116 | register int __e asm("r0"); \ | 127 | register int __e asm("r0"); \ |
117 | switch (sizeof(*(__p))) { \ | 128 | switch (sizeof(*(__p))) { \ |
118 | case 1: \ | 129 | case 1: \ |
119 | __get_user_x(__r2, __p, __e, 1, "lr"); \ | 130 | __get_user_x(__r2, __p, __e, __l, 1); \ |
120 | break; \ | 131 | break; \ |
121 | case 2: \ | 132 | case 2: \ |
122 | __get_user_x(__r2, __p, __e, 2, "r3", "lr"); \ | 133 | __get_user_x(__r2, __p, __e, __l, 2); \ |
123 | break; \ | 134 | break; \ |
124 | case 4: \ | 135 | case 4: \ |
125 | __get_user_x(__r2, __p, __e, 4, "lr"); \ | 136 | __get_user_x(__r2, __p, __e, __l, 4); \ |
126 | break; \ | 137 | break; \ |
127 | default: __e = __get_user_bad(); break; \ | 138 | default: __e = __get_user_bad(); break; \ |
128 | } \ | 139 | } \ |
@@ -130,42 +141,57 @@ extern int __get_user_4(void *); | |||
130 | __e; \ | 141 | __e; \ |
131 | }) | 142 | }) |
132 | 143 | ||
144 | #define get_user(x,p) \ | ||
145 | ({ \ | ||
146 | might_fault(); \ | ||
147 | __get_user_check(x,p); \ | ||
148 | }) | ||
149 | |||
133 | extern int __put_user_1(void *, unsigned int); | 150 | extern int __put_user_1(void *, unsigned int); |
134 | extern int __put_user_2(void *, unsigned int); | 151 | extern int __put_user_2(void *, unsigned int); |
135 | extern int __put_user_4(void *, unsigned int); | 152 | extern int __put_user_4(void *, unsigned int); |
136 | extern int __put_user_8(void *, unsigned long long); | 153 | extern int __put_user_8(void *, unsigned long long); |
137 | 154 | ||
138 | #define __put_user_x(__r2,__p,__e,__s) \ | 155 | #define __put_user_x(__r2,__p,__e,__l,__s) \ |
139 | __asm__ __volatile__ ( \ | 156 | __asm__ __volatile__ ( \ |
140 | __asmeq("%0", "r0") __asmeq("%2", "r2") \ | 157 | __asmeq("%0", "r0") __asmeq("%2", "r2") \ |
158 | __asmeq("%3", "r1") \ | ||
141 | "bl __put_user_" #__s \ | 159 | "bl __put_user_" #__s \ |
142 | : "=&r" (__e) \ | 160 | : "=&r" (__e) \ |
143 | : "0" (__p), "r" (__r2) \ | 161 | : "0" (__p), "r" (__r2), "r" (__l) \ |
144 | : "ip", "lr", "cc") | 162 | : "ip", "lr", "cc") |
145 | 163 | ||
146 | #define put_user(x,p) \ | 164 | #define __put_user_check(x,p) \ |
147 | ({ \ | 165 | ({ \ |
166 | unsigned long __limit = current_thread_info()->addr_limit - 1; \ | ||
148 | register const typeof(*(p)) __r2 asm("r2") = (x); \ | 167 | register const typeof(*(p)) __r2 asm("r2") = (x); \ |
149 | register const typeof(*(p)) __user *__p asm("r0") = (p);\ | 168 | register const typeof(*(p)) __user *__p asm("r0") = (p);\ |
169 | register unsigned long __l asm("r1") = __limit; \ | ||
150 | register int __e asm("r0"); \ | 170 | register int __e asm("r0"); \ |
151 | switch (sizeof(*(__p))) { \ | 171 | switch (sizeof(*(__p))) { \ |
152 | case 1: \ | 172 | case 1: \ |
153 | __put_user_x(__r2, __p, __e, 1); \ | 173 | __put_user_x(__r2, __p, __e, __l, 1); \ |
154 | break; \ | 174 | break; \ |
155 | case 2: \ | 175 | case 2: \ |
156 | __put_user_x(__r2, __p, __e, 2); \ | 176 | __put_user_x(__r2, __p, __e, __l, 2); \ |
157 | break; \ | 177 | break; \ |
158 | case 4: \ | 178 | case 4: \ |
159 | __put_user_x(__r2, __p, __e, 4); \ | 179 | __put_user_x(__r2, __p, __e, __l, 4); \ |
160 | break; \ | 180 | break; \ |
161 | case 8: \ | 181 | case 8: \ |
162 | __put_user_x(__r2, __p, __e, 8); \ | 182 | __put_user_x(__r2, __p, __e, __l, 8); \ |
163 | break; \ | 183 | break; \ |
164 | default: __e = __put_user_bad(); break; \ | 184 | default: __e = __put_user_bad(); break; \ |
165 | } \ | 185 | } \ |
166 | __e; \ | 186 | __e; \ |
167 | }) | 187 | }) |
168 | 188 | ||
189 | #define put_user(x,p) \ | ||
190 | ({ \ | ||
191 | might_fault(); \ | ||
192 | __put_user_check(x,p); \ | ||
193 | }) | ||
194 | |||
169 | #else /* CONFIG_MMU */ | 195 | #else /* CONFIG_MMU */ |
170 | 196 | ||
171 | /* | 197 | /* |
@@ -219,6 +245,7 @@ do { \ | |||
219 | unsigned long __gu_addr = (unsigned long)(ptr); \ | 245 | unsigned long __gu_addr = (unsigned long)(ptr); \ |
220 | unsigned long __gu_val; \ | 246 | unsigned long __gu_val; \ |
221 | __chk_user_ptr(ptr); \ | 247 | __chk_user_ptr(ptr); \ |
248 | might_fault(); \ | ||
222 | switch (sizeof(*(ptr))) { \ | 249 | switch (sizeof(*(ptr))) { \ |
223 | case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \ | 250 | case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \ |
224 | case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \ | 251 | case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \ |
@@ -300,6 +327,7 @@ do { \ | |||
300 | unsigned long __pu_addr = (unsigned long)(ptr); \ | 327 | unsigned long __pu_addr = (unsigned long)(ptr); \ |
301 | __typeof__(*(ptr)) __pu_val = (x); \ | 328 | __typeof__(*(ptr)) __pu_val = (x); \ |
302 | __chk_user_ptr(ptr); \ | 329 | __chk_user_ptr(ptr); \ |
330 | might_fault(); \ | ||
303 | switch (sizeof(*(ptr))) { \ | 331 | switch (sizeof(*(ptr))) { \ |
304 | case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \ | 332 | case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \ |
305 | case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \ | 333 | case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \ |
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index ba386bd94107..281bf3301241 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c | |||
@@ -159,6 +159,12 @@ static int debug_arch_supported(void) | |||
159 | arch >= ARM_DEBUG_ARCH_V7_1; | 159 | arch >= ARM_DEBUG_ARCH_V7_1; |
160 | } | 160 | } |
161 | 161 | ||
162 | /* Can we determine the watchpoint access type from the fsr? */ | ||
163 | static int debug_exception_updates_fsr(void) | ||
164 | { | ||
165 | return 0; | ||
166 | } | ||
167 | |||
162 | /* Determine number of WRP registers available. */ | 168 | /* Determine number of WRP registers available. */ |
163 | static int get_num_wrp_resources(void) | 169 | static int get_num_wrp_resources(void) |
164 | { | 170 | { |
@@ -604,13 +610,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) | |||
604 | /* Aligned */ | 610 | /* Aligned */ |
605 | break; | 611 | break; |
606 | case 1: | 612 | case 1: |
607 | /* Allow single byte watchpoint. */ | ||
608 | if (info->ctrl.len == ARM_BREAKPOINT_LEN_1) | ||
609 | break; | ||
610 | case 2: | 613 | case 2: |
611 | /* Allow halfword watchpoints and breakpoints. */ | 614 | /* Allow halfword watchpoints and breakpoints. */ |
612 | if (info->ctrl.len == ARM_BREAKPOINT_LEN_2) | 615 | if (info->ctrl.len == ARM_BREAKPOINT_LEN_2) |
613 | break; | 616 | break; |
617 | case 3: | ||
618 | /* Allow single byte watchpoint. */ | ||
619 | if (info->ctrl.len == ARM_BREAKPOINT_LEN_1) | ||
620 | break; | ||
614 | default: | 621 | default: |
615 | ret = -EINVAL; | 622 | ret = -EINVAL; |
616 | goto out; | 623 | goto out; |
@@ -619,18 +626,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) | |||
619 | info->address &= ~alignment_mask; | 626 | info->address &= ~alignment_mask; |
620 | info->ctrl.len <<= offset; | 627 | info->ctrl.len <<= offset; |
621 | 628 | ||
622 | /* | 629 | if (!bp->overflow_handler) { |
623 | * Currently we rely on an overflow handler to take | 630 | /* |
624 | * care of single-stepping the breakpoint when it fires. | 631 | * Mismatch breakpoints are required for single-stepping |
625 | * In the case of userspace breakpoints on a core with V7 debug, | 632 | * breakpoints. |
626 | * we can use the mismatch feature as a poor-man's hardware | 633 | */ |
627 | * single-step, but this only works for per-task breakpoints. | 634 | if (!core_has_mismatch_brps()) |
628 | */ | 635 | return -EINVAL; |
629 | if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) || | 636 | |
630 | !core_has_mismatch_brps() || !bp->hw.bp_target)) { | 637 | /* We don't allow mismatch breakpoints in kernel space. */ |
631 | pr_warning("overflow handler required but none found\n"); | 638 | if (arch_check_bp_in_kernelspace(bp)) |
632 | ret = -EINVAL; | 639 | return -EPERM; |
640 | |||
641 | /* | ||
642 | * Per-cpu breakpoints are not supported by our stepping | ||
643 | * mechanism. | ||
644 | */ | ||
645 | if (!bp->hw.bp_target) | ||
646 | return -EINVAL; | ||
647 | |||
648 | /* | ||
649 | * We only support specific access types if the fsr | ||
650 | * reports them. | ||
651 | */ | ||
652 | if (!debug_exception_updates_fsr() && | ||
653 | (info->ctrl.type == ARM_BREAKPOINT_LOAD || | ||
654 | info->ctrl.type == ARM_BREAKPOINT_STORE)) | ||
655 | return -EINVAL; | ||
633 | } | 656 | } |
657 | |||
634 | out: | 658 | out: |
635 | return ret; | 659 | return ret; |
636 | } | 660 | } |
@@ -706,10 +730,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, | |||
706 | goto unlock; | 730 | goto unlock; |
707 | 731 | ||
708 | /* Check that the access type matches. */ | 732 | /* Check that the access type matches. */ |
709 | access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W : | 733 | if (debug_exception_updates_fsr()) { |
710 | HW_BREAKPOINT_R; | 734 | access = (fsr & ARM_FSR_ACCESS_MASK) ? |
711 | if (!(access & hw_breakpoint_type(wp))) | 735 | HW_BREAKPOINT_W : HW_BREAKPOINT_R; |
712 | goto unlock; | 736 | if (!(access & hw_breakpoint_type(wp))) |
737 | goto unlock; | ||
738 | } | ||
713 | 739 | ||
714 | /* We have a winner. */ | 740 | /* We have a winner. */ |
715 | info->trigger = addr; | 741 | info->trigger = addr; |
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index f7945218b8c6..b0179b89a04c 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
@@ -420,20 +420,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) | |||
420 | #endif | 420 | #endif |
421 | instr = *(u32 *) pc; | 421 | instr = *(u32 *) pc; |
422 | } else if (thumb_mode(regs)) { | 422 | } else if (thumb_mode(regs)) { |
423 | get_user(instr, (u16 __user *)pc); | 423 | if (get_user(instr, (u16 __user *)pc)) |
424 | goto die_sig; | ||
424 | if (is_wide_instruction(instr)) { | 425 | if (is_wide_instruction(instr)) { |
425 | unsigned int instr2; | 426 | unsigned int instr2; |
426 | get_user(instr2, (u16 __user *)pc+1); | 427 | if (get_user(instr2, (u16 __user *)pc+1)) |
428 | goto die_sig; | ||
427 | instr <<= 16; | 429 | instr <<= 16; |
428 | instr |= instr2; | 430 | instr |= instr2; |
429 | } | 431 | } |
430 | } else { | 432 | } else if (get_user(instr, (u32 __user *)pc)) { |
431 | get_user(instr, (u32 __user *)pc); | 433 | goto die_sig; |
432 | } | 434 | } |
433 | 435 | ||
434 | if (call_undef_hook(regs, instr) == 0) | 436 | if (call_undef_hook(regs, instr) == 0) |
435 | return; | 437 | return; |
436 | 438 | ||
439 | die_sig: | ||
437 | #ifdef CONFIG_DEBUG_USER | 440 | #ifdef CONFIG_DEBUG_USER |
438 | if (user_debug & UDBG_UNDEFINED) { | 441 | if (user_debug & UDBG_UNDEFINED) { |
439 | printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n", | 442 | printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n", |
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c index d6dacc69254e..395d5fbb8fa2 100644 --- a/arch/arm/lib/delay.c +++ b/arch/arm/lib/delay.c | |||
@@ -59,6 +59,7 @@ void __init init_current_timer_delay(unsigned long freq) | |||
59 | { | 59 | { |
60 | pr_info("Switching to timer-based delay loop\n"); | 60 | pr_info("Switching to timer-based delay loop\n"); |
61 | lpj_fine = freq / HZ; | 61 | lpj_fine = freq / HZ; |
62 | loops_per_jiffy = lpj_fine; | ||
62 | arm_delay_ops.delay = __timer_delay; | 63 | arm_delay_ops.delay = __timer_delay; |
63 | arm_delay_ops.const_udelay = __timer_const_udelay; | 64 | arm_delay_ops.const_udelay = __timer_const_udelay; |
64 | arm_delay_ops.udelay = __timer_udelay; | 65 | arm_delay_ops.udelay = __timer_udelay; |
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S index 11093a7c3e32..9b06bb41fca6 100644 --- a/arch/arm/lib/getuser.S +++ b/arch/arm/lib/getuser.S | |||
@@ -16,8 +16,9 @@ | |||
16 | * __get_user_X | 16 | * __get_user_X |
17 | * | 17 | * |
18 | * Inputs: r0 contains the address | 18 | * Inputs: r0 contains the address |
19 | * r1 contains the address limit, which must be preserved | ||
19 | * Outputs: r0 is the error code | 20 | * Outputs: r0 is the error code |
20 | * r2, r3 contains the zero-extended value | 21 | * r2 contains the zero-extended value |
21 | * lr corrupted | 22 | * lr corrupted |
22 | * | 23 | * |
23 | * No other registers must be altered. (see <asm/uaccess.h> | 24 | * No other registers must be altered. (see <asm/uaccess.h> |
@@ -27,33 +28,39 @@ | |||
27 | * Note also that it is intended that __get_user_bad is not global. | 28 | * Note also that it is intended that __get_user_bad is not global. |
28 | */ | 29 | */ |
29 | #include <linux/linkage.h> | 30 | #include <linux/linkage.h> |
31 | #include <asm/assembler.h> | ||
30 | #include <asm/errno.h> | 32 | #include <asm/errno.h> |
31 | #include <asm/domain.h> | 33 | #include <asm/domain.h> |
32 | 34 | ||
33 | ENTRY(__get_user_1) | 35 | ENTRY(__get_user_1) |
36 | check_uaccess r0, 1, r1, r2, __get_user_bad | ||
34 | 1: TUSER(ldrb) r2, [r0] | 37 | 1: TUSER(ldrb) r2, [r0] |
35 | mov r0, #0 | 38 | mov r0, #0 |
36 | mov pc, lr | 39 | mov pc, lr |
37 | ENDPROC(__get_user_1) | 40 | ENDPROC(__get_user_1) |
38 | 41 | ||
39 | ENTRY(__get_user_2) | 42 | ENTRY(__get_user_2) |
40 | #ifdef CONFIG_THUMB2_KERNEL | 43 | check_uaccess r0, 2, r1, r2, __get_user_bad |
41 | 2: TUSER(ldrb) r2, [r0] | 44 | #ifdef CONFIG_CPU_USE_DOMAINS |
42 | 3: TUSER(ldrb) r3, [r0, #1] | 45 | rb .req ip |
46 | 2: ldrbt r2, [r0], #1 | ||
47 | 3: ldrbt rb, [r0], #0 | ||
43 | #else | 48 | #else |
44 | 2: TUSER(ldrb) r2, [r0], #1 | 49 | rb .req r0 |
45 | 3: TUSER(ldrb) r3, [r0] | 50 | 2: ldrb r2, [r0] |
51 | 3: ldrb rb, [r0, #1] | ||
46 | #endif | 52 | #endif |
47 | #ifndef __ARMEB__ | 53 | #ifndef __ARMEB__ |
48 | orr r2, r2, r3, lsl #8 | 54 | orr r2, r2, rb, lsl #8 |
49 | #else | 55 | #else |
50 | orr r2, r3, r2, lsl #8 | 56 | orr r2, rb, r2, lsl #8 |
51 | #endif | 57 | #endif |
52 | mov r0, #0 | 58 | mov r0, #0 |
53 | mov pc, lr | 59 | mov pc, lr |
54 | ENDPROC(__get_user_2) | 60 | ENDPROC(__get_user_2) |
55 | 61 | ||
56 | ENTRY(__get_user_4) | 62 | ENTRY(__get_user_4) |
63 | check_uaccess r0, 4, r1, r2, __get_user_bad | ||
57 | 4: TUSER(ldr) r2, [r0] | 64 | 4: TUSER(ldr) r2, [r0] |
58 | mov r0, #0 | 65 | mov r0, #0 |
59 | mov pc, lr | 66 | mov pc, lr |
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S index 7db25990c589..3d73dcb959b0 100644 --- a/arch/arm/lib/putuser.S +++ b/arch/arm/lib/putuser.S | |||
@@ -16,6 +16,7 @@ | |||
16 | * __put_user_X | 16 | * __put_user_X |
17 | * | 17 | * |
18 | * Inputs: r0 contains the address | 18 | * Inputs: r0 contains the address |
19 | * r1 contains the address limit, which must be preserved | ||
19 | * r2, r3 contains the value | 20 | * r2, r3 contains the value |
20 | * Outputs: r0 is the error code | 21 | * Outputs: r0 is the error code |
21 | * lr corrupted | 22 | * lr corrupted |
@@ -27,16 +28,19 @@ | |||
27 | * Note also that it is intended that __put_user_bad is not global. | 28 | * Note also that it is intended that __put_user_bad is not global. |
28 | */ | 29 | */ |
29 | #include <linux/linkage.h> | 30 | #include <linux/linkage.h> |
31 | #include <asm/assembler.h> | ||
30 | #include <asm/errno.h> | 32 | #include <asm/errno.h> |
31 | #include <asm/domain.h> | 33 | #include <asm/domain.h> |
32 | 34 | ||
33 | ENTRY(__put_user_1) | 35 | ENTRY(__put_user_1) |
36 | check_uaccess r0, 1, r1, ip, __put_user_bad | ||
34 | 1: TUSER(strb) r2, [r0] | 37 | 1: TUSER(strb) r2, [r0] |
35 | mov r0, #0 | 38 | mov r0, #0 |
36 | mov pc, lr | 39 | mov pc, lr |
37 | ENDPROC(__put_user_1) | 40 | ENDPROC(__put_user_1) |
38 | 41 | ||
39 | ENTRY(__put_user_2) | 42 | ENTRY(__put_user_2) |
43 | check_uaccess r0, 2, r1, ip, __put_user_bad | ||
40 | mov ip, r2, lsr #8 | 44 | mov ip, r2, lsr #8 |
41 | #ifdef CONFIG_THUMB2_KERNEL | 45 | #ifdef CONFIG_THUMB2_KERNEL |
42 | #ifndef __ARMEB__ | 46 | #ifndef __ARMEB__ |
@@ -60,12 +64,14 @@ ENTRY(__put_user_2) | |||
60 | ENDPROC(__put_user_2) | 64 | ENDPROC(__put_user_2) |
61 | 65 | ||
62 | ENTRY(__put_user_4) | 66 | ENTRY(__put_user_4) |
67 | check_uaccess r0, 4, r1, ip, __put_user_bad | ||
63 | 4: TUSER(str) r2, [r0] | 68 | 4: TUSER(str) r2, [r0] |
64 | mov r0, #0 | 69 | mov r0, #0 |
65 | mov pc, lr | 70 | mov pc, lr |
66 | ENDPROC(__put_user_4) | 71 | ENDPROC(__put_user_4) |
67 | 72 | ||
68 | ENTRY(__put_user_8) | 73 | ENTRY(__put_user_8) |
74 | check_uaccess r0, 8, r1, ip, __put_user_bad | ||
69 | #ifdef CONFIG_THUMB2_KERNEL | 75 | #ifdef CONFIG_THUMB2_KERNEL |
70 | 5: TUSER(str) r2, [r0] | 76 | 5: TUSER(str) r2, [r0] |
71 | 6: TUSER(str) r3, [r0, #4] | 77 | 6: TUSER(str) r3, [r0, #4] |
diff --git a/arch/arm/mach-imx/clk-imx25.c b/arch/arm/mach-imx/clk-imx25.c index fdd8cc87c9fe..4431a62fff5b 100644 --- a/arch/arm/mach-imx/clk-imx25.c +++ b/arch/arm/mach-imx/clk-imx25.c | |||
@@ -222,10 +222,8 @@ int __init mx25_clocks_init(void) | |||
222 | clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx-fb.0"); | 222 | clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx-fb.0"); |
223 | clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx-fb.0"); | 223 | clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx-fb.0"); |
224 | clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0"); | 224 | clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0"); |
225 | clk_register_clkdev(clk[ssi1_ipg_per], "per", "imx-ssi.0"); | 225 | clk_register_clkdev(clk[ssi1_ipg], NULL, "imx-ssi.0"); |
226 | clk_register_clkdev(clk[ssi1_ipg], "ipg", "imx-ssi.0"); | 226 | clk_register_clkdev(clk[ssi2_ipg], NULL, "imx-ssi.1"); |
227 | clk_register_clkdev(clk[ssi2_ipg_per], "per", "imx-ssi.1"); | ||
228 | clk_register_clkdev(clk[ssi2_ipg], "ipg", "imx-ssi.1"); | ||
229 | clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0"); | 227 | clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0"); |
230 | clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0"); | 228 | clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0"); |
231 | clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0"); | 229 | clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0"); |
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c index c6422fb10bae..65fb8bcd86cb 100644 --- a/arch/arm/mach-imx/clk-imx35.c +++ b/arch/arm/mach-imx/clk-imx35.c | |||
@@ -230,10 +230,8 @@ int __init mx35_clocks_init() | |||
230 | clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb"); | 230 | clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb"); |
231 | clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1"); | 231 | clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1"); |
232 | clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma"); | 232 | clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma"); |
233 | clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.0"); | 233 | clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0"); |
234 | clk_register_clkdev(clk[ssi1_div_post], "per", "imx-ssi.0"); | 234 | clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1"); |
235 | clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.1"); | ||
236 | clk_register_clkdev(clk[ssi2_div_post], "per", "imx-ssi.1"); | ||
237 | /* i.mx35 has the i.mx21 type uart */ | 235 | /* i.mx35 has the i.mx21 type uart */ |
238 | clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0"); | 236 | clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0"); |
239 | clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0"); | 237 | clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0"); |
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index fcd4e85c4ddc..346fd26f3aa6 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig | |||
@@ -232,10 +232,11 @@ config MACH_OMAP3_PANDORA | |||
232 | select OMAP_PACKAGE_CBB | 232 | select OMAP_PACKAGE_CBB |
233 | select REGULATOR_FIXED_VOLTAGE if REGULATOR | 233 | select REGULATOR_FIXED_VOLTAGE if REGULATOR |
234 | 234 | ||
235 | config MACH_OMAP3_TOUCHBOOK | 235 | config MACH_TOUCHBOOK |
236 | bool "OMAP3 Touch Book" | 236 | bool "OMAP3 Touch Book" |
237 | depends on ARCH_OMAP3 | 237 | depends on ARCH_OMAP3 |
238 | default y | 238 | default y |
239 | select OMAP_PACKAGE_CBB | ||
239 | 240 | ||
240 | config MACH_OMAP_3430SDP | 241 | config MACH_OMAP_3430SDP |
241 | bool "OMAP 3430 SDP board" | 242 | bool "OMAP 3430 SDP board" |
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index eb203ec193d0..7706fdfd0252 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile | |||
@@ -235,7 +235,7 @@ obj-$(CONFIG_MACH_OMAP_3630SDP) += board-zoom-display.o | |||
235 | obj-$(CONFIG_MACH_CM_T35) += board-cm-t35.o | 235 | obj-$(CONFIG_MACH_CM_T35) += board-cm-t35.o |
236 | obj-$(CONFIG_MACH_CM_T3517) += board-cm-t3517.o | 236 | obj-$(CONFIG_MACH_CM_T3517) += board-cm-t3517.o |
237 | obj-$(CONFIG_MACH_IGEP0020) += board-igep0020.o | 237 | obj-$(CONFIG_MACH_IGEP0020) += board-igep0020.o |
238 | obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK) += board-omap3touchbook.o | 238 | obj-$(CONFIG_MACH_TOUCHBOOK) += board-omap3touchbook.o |
239 | obj-$(CONFIG_MACH_OMAP_4430SDP) += board-4430sdp.o | 239 | obj-$(CONFIG_MACH_OMAP_4430SDP) += board-4430sdp.o |
240 | obj-$(CONFIG_MACH_OMAP4_PANDA) += board-omap4panda.o | 240 | obj-$(CONFIG_MACH_OMAP4_PANDA) += board-omap4panda.o |
241 | 241 | ||
diff --git a/arch/arm/mach-omap2/clock33xx_data.c b/arch/arm/mach-omap2/clock33xx_data.c index 7aa5ecaee5a2..8e06de665b14 100644 --- a/arch/arm/mach-omap2/clock33xx_data.c +++ b/arch/arm/mach-omap2/clock33xx_data.c | |||
@@ -1036,13 +1036,13 @@ static struct omap_clk am33xx_clks[] = { | |||
1036 | CLK(NULL, "mmu_fck", &mmu_fck, CK_AM33XX), | 1036 | CLK(NULL, "mmu_fck", &mmu_fck, CK_AM33XX), |
1037 | CLK(NULL, "smartreflex0_fck", &smartreflex0_fck, CK_AM33XX), | 1037 | CLK(NULL, "smartreflex0_fck", &smartreflex0_fck, CK_AM33XX), |
1038 | CLK(NULL, "smartreflex1_fck", &smartreflex1_fck, CK_AM33XX), | 1038 | CLK(NULL, "smartreflex1_fck", &smartreflex1_fck, CK_AM33XX), |
1039 | CLK(NULL, "gpt1_fck", &timer1_fck, CK_AM33XX), | 1039 | CLK(NULL, "timer1_fck", &timer1_fck, CK_AM33XX), |
1040 | CLK(NULL, "gpt2_fck", &timer2_fck, CK_AM33XX), | 1040 | CLK(NULL, "timer2_fck", &timer2_fck, CK_AM33XX), |
1041 | CLK(NULL, "gpt3_fck", &timer3_fck, CK_AM33XX), | 1041 | CLK(NULL, "timer3_fck", &timer3_fck, CK_AM33XX), |
1042 | CLK(NULL, "gpt4_fck", &timer4_fck, CK_AM33XX), | 1042 | CLK(NULL, "timer4_fck", &timer4_fck, CK_AM33XX), |
1043 | CLK(NULL, "gpt5_fck", &timer5_fck, CK_AM33XX), | 1043 | CLK(NULL, "timer5_fck", &timer5_fck, CK_AM33XX), |
1044 | CLK(NULL, "gpt6_fck", &timer6_fck, CK_AM33XX), | 1044 | CLK(NULL, "timer6_fck", &timer6_fck, CK_AM33XX), |
1045 | CLK(NULL, "gpt7_fck", &timer7_fck, CK_AM33XX), | 1045 | CLK(NULL, "timer7_fck", &timer7_fck, CK_AM33XX), |
1046 | CLK(NULL, "usbotg_fck", &usbotg_fck, CK_AM33XX), | 1046 | CLK(NULL, "usbotg_fck", &usbotg_fck, CK_AM33XX), |
1047 | CLK(NULL, "ieee5000_fck", &ieee5000_fck, CK_AM33XX), | 1047 | CLK(NULL, "ieee5000_fck", &ieee5000_fck, CK_AM33XX), |
1048 | CLK(NULL, "wdt1_fck", &wdt1_fck, CK_AM33XX), | 1048 | CLK(NULL, "wdt1_fck", &wdt1_fck, CK_AM33XX), |
diff --git a/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c b/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c index a0d68dbecfa3..f99e65cfb862 100644 --- a/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c +++ b/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c | |||
@@ -241,6 +241,52 @@ static void omap3_clkdm_deny_idle(struct clockdomain *clkdm) | |||
241 | _clkdm_del_autodeps(clkdm); | 241 | _clkdm_del_autodeps(clkdm); |
242 | } | 242 | } |
243 | 243 | ||
244 | static int omap3xxx_clkdm_clk_enable(struct clockdomain *clkdm) | ||
245 | { | ||
246 | bool hwsup = false; | ||
247 | |||
248 | if (!clkdm->clktrctrl_mask) | ||
249 | return 0; | ||
250 | |||
251 | hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs, | ||
252 | clkdm->clktrctrl_mask); | ||
253 | |||
254 | if (hwsup) { | ||
255 | /* Disable HW transitions when we are changing deps */ | ||
256 | _disable_hwsup(clkdm); | ||
257 | _clkdm_add_autodeps(clkdm); | ||
258 | _enable_hwsup(clkdm); | ||
259 | } else { | ||
260 | if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP) | ||
261 | omap3_clkdm_wakeup(clkdm); | ||
262 | } | ||
263 | |||
264 | return 0; | ||
265 | } | ||
266 | |||
267 | static int omap3xxx_clkdm_clk_disable(struct clockdomain *clkdm) | ||
268 | { | ||
269 | bool hwsup = false; | ||
270 | |||
271 | if (!clkdm->clktrctrl_mask) | ||
272 | return 0; | ||
273 | |||
274 | hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs, | ||
275 | clkdm->clktrctrl_mask); | ||
276 | |||
277 | if (hwsup) { | ||
278 | /* Disable HW transitions when we are changing deps */ | ||
279 | _disable_hwsup(clkdm); | ||
280 | _clkdm_del_autodeps(clkdm); | ||
281 | _enable_hwsup(clkdm); | ||
282 | } else { | ||
283 | if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP) | ||
284 | omap3_clkdm_sleep(clkdm); | ||
285 | } | ||
286 | |||
287 | return 0; | ||
288 | } | ||
289 | |||
244 | struct clkdm_ops omap2_clkdm_operations = { | 290 | struct clkdm_ops omap2_clkdm_operations = { |
245 | .clkdm_add_wkdep = omap2_clkdm_add_wkdep, | 291 | .clkdm_add_wkdep = omap2_clkdm_add_wkdep, |
246 | .clkdm_del_wkdep = omap2_clkdm_del_wkdep, | 292 | .clkdm_del_wkdep = omap2_clkdm_del_wkdep, |
@@ -267,6 +313,6 @@ struct clkdm_ops omap3_clkdm_operations = { | |||
267 | .clkdm_wakeup = omap3_clkdm_wakeup, | 313 | .clkdm_wakeup = omap3_clkdm_wakeup, |
268 | .clkdm_allow_idle = omap3_clkdm_allow_idle, | 314 | .clkdm_allow_idle = omap3_clkdm_allow_idle, |
269 | .clkdm_deny_idle = omap3_clkdm_deny_idle, | 315 | .clkdm_deny_idle = omap3_clkdm_deny_idle, |
270 | .clkdm_clk_enable = omap2_clkdm_clk_enable, | 316 | .clkdm_clk_enable = omap3xxx_clkdm_clk_enable, |
271 | .clkdm_clk_disable = omap2_clkdm_clk_disable, | 317 | .clkdm_clk_disable = omap3xxx_clkdm_clk_disable, |
272 | }; | 318 | }; |
diff --git a/arch/arm/mach-omap2/cm-regbits-34xx.h b/arch/arm/mach-omap2/cm-regbits-34xx.h index 766338fe4d34..975f6bda0e0b 100644 --- a/arch/arm/mach-omap2/cm-regbits-34xx.h +++ b/arch/arm/mach-omap2/cm-regbits-34xx.h | |||
@@ -67,6 +67,7 @@ | |||
67 | #define OMAP3430_EN_IVA2_DPLL_MASK (0x7 << 0) | 67 | #define OMAP3430_EN_IVA2_DPLL_MASK (0x7 << 0) |
68 | 68 | ||
69 | /* CM_IDLEST_IVA2 */ | 69 | /* CM_IDLEST_IVA2 */ |
70 | #define OMAP3430_ST_IVA2_SHIFT 0 | ||
70 | #define OMAP3430_ST_IVA2_MASK (1 << 0) | 71 | #define OMAP3430_ST_IVA2_MASK (1 << 0) |
71 | 72 | ||
72 | /* CM_IDLEST_PLL_IVA2 */ | 73 | /* CM_IDLEST_PLL_IVA2 */ |
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c index b54427dec2a3..ecaad7d371ee 100644 --- a/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/arch/arm/mach-omap2/omap-wakeupgen.c | |||
@@ -47,7 +47,7 @@ | |||
47 | static void __iomem *wakeupgen_base; | 47 | static void __iomem *wakeupgen_base; |
48 | static void __iomem *sar_base; | 48 | static void __iomem *sar_base; |
49 | static DEFINE_SPINLOCK(wakeupgen_lock); | 49 | static DEFINE_SPINLOCK(wakeupgen_lock); |
50 | static unsigned int irq_target_cpu[NR_IRQS]; | 50 | static unsigned int irq_target_cpu[MAX_IRQS]; |
51 | static unsigned int irq_banks = MAX_NR_REG_BANKS; | 51 | static unsigned int irq_banks = MAX_NR_REG_BANKS; |
52 | static unsigned int max_irqs = MAX_IRQS; | 52 | static unsigned int max_irqs = MAX_IRQS; |
53 | static unsigned int omap_secure_apis; | 53 | static unsigned int omap_secure_apis; |
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 80b7359500f1..3615e0d9ee3c 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
@@ -1889,6 +1889,7 @@ static int _enable(struct omap_hwmod *oh) | |||
1889 | _enable_sysc(oh); | 1889 | _enable_sysc(oh); |
1890 | } | 1890 | } |
1891 | } else { | 1891 | } else { |
1892 | _omap4_disable_module(oh); | ||
1892 | _disable_clocks(oh); | 1893 | _disable_clocks(oh); |
1893 | pr_debug("omap_hwmod: %s: _wait_target_ready: %d\n", | 1894 | pr_debug("omap_hwmod: %s: _wait_target_ready: %d\n", |
1894 | oh->name, r); | 1895 | oh->name, r); |
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index a1df9d4690f7..b1675e6214d3 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | |||
@@ -100,9 +100,9 @@ static struct omap_hwmod omap3xxx_mpu_hwmod = { | |||
100 | 100 | ||
101 | /* IVA2 (IVA2) */ | 101 | /* IVA2 (IVA2) */ |
102 | static struct omap_hwmod_rst_info omap3xxx_iva_resets[] = { | 102 | static struct omap_hwmod_rst_info omap3xxx_iva_resets[] = { |
103 | { .name = "logic", .rst_shift = 0 }, | 103 | { .name = "logic", .rst_shift = 0, .st_shift = 8 }, |
104 | { .name = "seq0", .rst_shift = 1 }, | 104 | { .name = "seq0", .rst_shift = 1, .st_shift = 9 }, |
105 | { .name = "seq1", .rst_shift = 2 }, | 105 | { .name = "seq1", .rst_shift = 2, .st_shift = 10 }, |
106 | }; | 106 | }; |
107 | 107 | ||
108 | static struct omap_hwmod omap3xxx_iva_hwmod = { | 108 | static struct omap_hwmod omap3xxx_iva_hwmod = { |
@@ -112,6 +112,15 @@ static struct omap_hwmod omap3xxx_iva_hwmod = { | |||
112 | .rst_lines = omap3xxx_iva_resets, | 112 | .rst_lines = omap3xxx_iva_resets, |
113 | .rst_lines_cnt = ARRAY_SIZE(omap3xxx_iva_resets), | 113 | .rst_lines_cnt = ARRAY_SIZE(omap3xxx_iva_resets), |
114 | .main_clk = "iva2_ck", | 114 | .main_clk = "iva2_ck", |
115 | .prcm = { | ||
116 | .omap2 = { | ||
117 | .module_offs = OMAP3430_IVA2_MOD, | ||
118 | .prcm_reg_id = 1, | ||
119 | .module_bit = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT, | ||
120 | .idlest_reg_id = 1, | ||
121 | .idlest_idle_bit = OMAP3430_ST_IVA2_SHIFT, | ||
122 | } | ||
123 | }, | ||
115 | }; | 124 | }; |
116 | 125 | ||
117 | /* timer class */ | 126 | /* timer class */ |
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index f033f950a232..f9bcb24cd515 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c | |||
@@ -4209,7 +4209,7 @@ static struct omap_hwmod_ocp_if omap44xx_dsp__iva = { | |||
4209 | }; | 4209 | }; |
4210 | 4210 | ||
4211 | /* dsp -> sl2if */ | 4211 | /* dsp -> sl2if */ |
4212 | static struct omap_hwmod_ocp_if omap44xx_dsp__sl2if = { | 4212 | static struct omap_hwmod_ocp_if __maybe_unused omap44xx_dsp__sl2if = { |
4213 | .master = &omap44xx_dsp_hwmod, | 4213 | .master = &omap44xx_dsp_hwmod, |
4214 | .slave = &omap44xx_sl2if_hwmod, | 4214 | .slave = &omap44xx_sl2if_hwmod, |
4215 | .clk = "dpll_iva_m5x2_ck", | 4215 | .clk = "dpll_iva_m5x2_ck", |
@@ -4827,7 +4827,7 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iss = { | |||
4827 | }; | 4827 | }; |
4828 | 4828 | ||
4829 | /* iva -> sl2if */ | 4829 | /* iva -> sl2if */ |
4830 | static struct omap_hwmod_ocp_if omap44xx_iva__sl2if = { | 4830 | static struct omap_hwmod_ocp_if __maybe_unused omap44xx_iva__sl2if = { |
4831 | .master = &omap44xx_iva_hwmod, | 4831 | .master = &omap44xx_iva_hwmod, |
4832 | .slave = &omap44xx_sl2if_hwmod, | 4832 | .slave = &omap44xx_sl2if_hwmod, |
4833 | .clk = "dpll_iva_m5x2_ck", | 4833 | .clk = "dpll_iva_m5x2_ck", |
@@ -5361,7 +5361,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__scrm = { | |||
5361 | }; | 5361 | }; |
5362 | 5362 | ||
5363 | /* l3_main_2 -> sl2if */ | 5363 | /* l3_main_2 -> sl2if */ |
5364 | static struct omap_hwmod_ocp_if omap44xx_l3_main_2__sl2if = { | 5364 | static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l3_main_2__sl2if = { |
5365 | .master = &omap44xx_l3_main_2_hwmod, | 5365 | .master = &omap44xx_l3_main_2_hwmod, |
5366 | .slave = &omap44xx_sl2if_hwmod, | 5366 | .slave = &omap44xx_sl2if_hwmod, |
5367 | .clk = "l3_div_ck", | 5367 | .clk = "l3_div_ck", |
@@ -6031,7 +6031,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = { | |||
6031 | &omap44xx_l4_abe__dmic, | 6031 | &omap44xx_l4_abe__dmic, |
6032 | &omap44xx_l4_abe__dmic_dma, | 6032 | &omap44xx_l4_abe__dmic_dma, |
6033 | &omap44xx_dsp__iva, | 6033 | &omap44xx_dsp__iva, |
6034 | &omap44xx_dsp__sl2if, | 6034 | /* &omap44xx_dsp__sl2if, */ |
6035 | &omap44xx_l4_cfg__dsp, | 6035 | &omap44xx_l4_cfg__dsp, |
6036 | &omap44xx_l3_main_2__dss, | 6036 | &omap44xx_l3_main_2__dss, |
6037 | &omap44xx_l4_per__dss, | 6037 | &omap44xx_l4_per__dss, |
@@ -6067,7 +6067,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = { | |||
6067 | &omap44xx_l4_per__i2c4, | 6067 | &omap44xx_l4_per__i2c4, |
6068 | &omap44xx_l3_main_2__ipu, | 6068 | &omap44xx_l3_main_2__ipu, |
6069 | &omap44xx_l3_main_2__iss, | 6069 | &omap44xx_l3_main_2__iss, |
6070 | &omap44xx_iva__sl2if, | 6070 | /* &omap44xx_iva__sl2if, */ |
6071 | &omap44xx_l3_main_2__iva, | 6071 | &omap44xx_l3_main_2__iva, |
6072 | &omap44xx_l4_wkup__kbd, | 6072 | &omap44xx_l4_wkup__kbd, |
6073 | &omap44xx_l4_cfg__mailbox, | 6073 | &omap44xx_l4_cfg__mailbox, |
@@ -6098,7 +6098,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = { | |||
6098 | &omap44xx_l4_cfg__cm_core, | 6098 | &omap44xx_l4_cfg__cm_core, |
6099 | &omap44xx_l4_wkup__prm, | 6099 | &omap44xx_l4_wkup__prm, |
6100 | &omap44xx_l4_wkup__scrm, | 6100 | &omap44xx_l4_wkup__scrm, |
6101 | &omap44xx_l3_main_2__sl2if, | 6101 | /* &omap44xx_l3_main_2__sl2if, */ |
6102 | &omap44xx_l4_abe__slimbus1, | 6102 | &omap44xx_l4_abe__slimbus1, |
6103 | &omap44xx_l4_abe__slimbus1_dma, | 6103 | &omap44xx_l4_abe__slimbus1_dma, |
6104 | &omap44xx_l4_per__slimbus2, | 6104 | &omap44xx_l4_per__slimbus2, |
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index e17cf974d16c..5214d5bfba27 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c | |||
@@ -262,6 +262,7 @@ static u32 notrace dmtimer_read_sched_clock(void) | |||
262 | return 0; | 262 | return 0; |
263 | } | 263 | } |
264 | 264 | ||
265 | #ifdef CONFIG_OMAP_32K_TIMER | ||
265 | /* Setup free-running counter for clocksource */ | 266 | /* Setup free-running counter for clocksource */ |
266 | static int __init omap2_sync32k_clocksource_init(void) | 267 | static int __init omap2_sync32k_clocksource_init(void) |
267 | { | 268 | { |
@@ -301,6 +302,12 @@ static int __init omap2_sync32k_clocksource_init(void) | |||
301 | 302 | ||
302 | return ret; | 303 | return ret; |
303 | } | 304 | } |
305 | #else | ||
306 | static inline int omap2_sync32k_clocksource_init(void) | ||
307 | { | ||
308 | return -ENODEV; | ||
309 | } | ||
310 | #endif | ||
304 | 311 | ||
305 | static void __init omap2_gptimer_clocksource_init(int gptimer_id, | 312 | static void __init omap2_gptimer_clocksource_init(int gptimer_id, |
306 | const char *fck_source) | 313 | const char *fck_source) |
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig index b3226f80c985..5f3c03b61f8e 100644 --- a/arch/arm/mach-tegra/Kconfig +++ b/arch/arm/mach-tegra/Kconfig | |||
@@ -110,13 +110,6 @@ config TEGRA_DEBUG_UART_AUTO_SCRATCH | |||
110 | 110 | ||
111 | endchoice | 111 | endchoice |
112 | 112 | ||
113 | config TEGRA_SYSTEM_DMA | ||
114 | bool "Enable system DMA driver for NVIDIA Tegra SoCs" | ||
115 | default y | ||
116 | help | ||
117 | Adds system DMA functionality for NVIDIA Tegra SoCs, used by | ||
118 | several Tegra device drivers | ||
119 | |||
120 | config TEGRA_EMC_SCALING_ENABLE | 113 | config TEGRA_EMC_SCALING_ENABLE |
121 | bool "Enable scaling the memory frequency" | 114 | bool "Enable scaling the memory frequency" |
122 | 115 | ||
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile index 56065acbd816..0974ace45558 100644 --- a/arch/arm/mach-tegra/Makefile +++ b/arch/arm/mach-tegra/Makefile | |||
@@ -18,7 +18,6 @@ obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra30_clocks.o | |||
18 | obj-$(CONFIG_SMP) += platsmp.o headsmp.o | 18 | obj-$(CONFIG_SMP) += platsmp.o headsmp.o |
19 | obj-$(CONFIG_SMP) += reset.o | 19 | obj-$(CONFIG_SMP) += reset.o |
20 | obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o | 20 | obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o |
21 | obj-$(CONFIG_TEGRA_SYSTEM_DMA) += dma.o | ||
22 | obj-$(CONFIG_CPU_FREQ) += cpu-tegra.o | 21 | obj-$(CONFIG_CPU_FREQ) += cpu-tegra.o |
23 | obj-$(CONFIG_TEGRA_PCI) += pcie.o | 22 | obj-$(CONFIG_TEGRA_PCI) += pcie.o |
24 | obj-$(CONFIG_USB_SUPPORT) += usb_phy.o | 23 | obj-$(CONFIG_USB_SUPPORT) += usb_phy.o |
diff --git a/arch/arm/mach-tegra/apbio.c b/arch/arm/mach-tegra/apbio.c index 643a37809a15..b5015d0f1912 100644 --- a/arch/arm/mach-tegra/apbio.c +++ b/arch/arm/mach-tegra/apbio.c | |||
@@ -28,7 +28,7 @@ | |||
28 | 28 | ||
29 | #include "apbio.h" | 29 | #include "apbio.h" |
30 | 30 | ||
31 | #if defined(CONFIG_TEGRA_SYSTEM_DMA) || defined(CONFIG_TEGRA20_APB_DMA) | 31 | #if defined(CONFIG_TEGRA20_APB_DMA) |
32 | static DEFINE_MUTEX(tegra_apb_dma_lock); | 32 | static DEFINE_MUTEX(tegra_apb_dma_lock); |
33 | static u32 *tegra_apb_bb; | 33 | static u32 *tegra_apb_bb; |
34 | static dma_addr_t tegra_apb_bb_phys; | 34 | static dma_addr_t tegra_apb_bb_phys; |
@@ -37,121 +37,6 @@ static DECLARE_COMPLETION(tegra_apb_wait); | |||
37 | static u32 tegra_apb_readl_direct(unsigned long offset); | 37 | static u32 tegra_apb_readl_direct(unsigned long offset); |
38 | static void tegra_apb_writel_direct(u32 value, unsigned long offset); | 38 | static void tegra_apb_writel_direct(u32 value, unsigned long offset); |
39 | 39 | ||
40 | #if defined(CONFIG_TEGRA_SYSTEM_DMA) | ||
41 | static struct tegra_dma_channel *tegra_apb_dma; | ||
42 | |||
43 | bool tegra_apb_init(void) | ||
44 | { | ||
45 | struct tegra_dma_channel *ch; | ||
46 | |||
47 | mutex_lock(&tegra_apb_dma_lock); | ||
48 | |||
49 | /* Check to see if we raced to setup */ | ||
50 | if (tegra_apb_dma) | ||
51 | goto out; | ||
52 | |||
53 | ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT | | ||
54 | TEGRA_DMA_SHARED); | ||
55 | |||
56 | if (!ch) | ||
57 | goto out_fail; | ||
58 | |||
59 | tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32), | ||
60 | &tegra_apb_bb_phys, GFP_KERNEL); | ||
61 | if (!tegra_apb_bb) { | ||
62 | pr_err("%s: can not allocate bounce buffer\n", __func__); | ||
63 | tegra_dma_free_channel(ch); | ||
64 | goto out_fail; | ||
65 | } | ||
66 | |||
67 | tegra_apb_dma = ch; | ||
68 | out: | ||
69 | mutex_unlock(&tegra_apb_dma_lock); | ||
70 | return true; | ||
71 | |||
72 | out_fail: | ||
73 | mutex_unlock(&tegra_apb_dma_lock); | ||
74 | return false; | ||
75 | } | ||
76 | |||
77 | static void apb_dma_complete(struct tegra_dma_req *req) | ||
78 | { | ||
79 | complete(&tegra_apb_wait); | ||
80 | } | ||
81 | |||
82 | static u32 tegra_apb_readl_using_dma(unsigned long offset) | ||
83 | { | ||
84 | struct tegra_dma_req req; | ||
85 | int ret; | ||
86 | |||
87 | if (!tegra_apb_dma && !tegra_apb_init()) | ||
88 | return tegra_apb_readl_direct(offset); | ||
89 | |||
90 | mutex_lock(&tegra_apb_dma_lock); | ||
91 | req.complete = apb_dma_complete; | ||
92 | req.to_memory = 1; | ||
93 | req.dest_addr = tegra_apb_bb_phys; | ||
94 | req.dest_bus_width = 32; | ||
95 | req.dest_wrap = 1; | ||
96 | req.source_addr = offset; | ||
97 | req.source_bus_width = 32; | ||
98 | req.source_wrap = 4; | ||
99 | req.req_sel = TEGRA_DMA_REQ_SEL_CNTR; | ||
100 | req.size = 4; | ||
101 | |||
102 | INIT_COMPLETION(tegra_apb_wait); | ||
103 | |||
104 | tegra_dma_enqueue_req(tegra_apb_dma, &req); | ||
105 | |||
106 | ret = wait_for_completion_timeout(&tegra_apb_wait, | ||
107 | msecs_to_jiffies(50)); | ||
108 | |||
109 | if (WARN(ret == 0, "apb read dma timed out")) { | ||
110 | tegra_dma_dequeue_req(tegra_apb_dma, &req); | ||
111 | *(u32 *)tegra_apb_bb = 0; | ||
112 | } | ||
113 | |||
114 | mutex_unlock(&tegra_apb_dma_lock); | ||
115 | return *((u32 *)tegra_apb_bb); | ||
116 | } | ||
117 | |||
118 | static void tegra_apb_writel_using_dma(u32 value, unsigned long offset) | ||
119 | { | ||
120 | struct tegra_dma_req req; | ||
121 | int ret; | ||
122 | |||
123 | if (!tegra_apb_dma && !tegra_apb_init()) { | ||
124 | tegra_apb_writel_direct(value, offset); | ||
125 | return; | ||
126 | } | ||
127 | |||
128 | mutex_lock(&tegra_apb_dma_lock); | ||
129 | *((u32 *)tegra_apb_bb) = value; | ||
130 | req.complete = apb_dma_complete; | ||
131 | req.to_memory = 0; | ||
132 | req.dest_addr = offset; | ||
133 | req.dest_wrap = 4; | ||
134 | req.dest_bus_width = 32; | ||
135 | req.source_addr = tegra_apb_bb_phys; | ||
136 | req.source_bus_width = 32; | ||
137 | req.source_wrap = 1; | ||
138 | req.req_sel = TEGRA_DMA_REQ_SEL_CNTR; | ||
139 | req.size = 4; | ||
140 | |||
141 | INIT_COMPLETION(tegra_apb_wait); | ||
142 | |||
143 | tegra_dma_enqueue_req(tegra_apb_dma, &req); | ||
144 | |||
145 | ret = wait_for_completion_timeout(&tegra_apb_wait, | ||
146 | msecs_to_jiffies(50)); | ||
147 | |||
148 | if (WARN(ret == 0, "apb write dma timed out")) | ||
149 | tegra_dma_dequeue_req(tegra_apb_dma, &req); | ||
150 | |||
151 | mutex_unlock(&tegra_apb_dma_lock); | ||
152 | } | ||
153 | |||
154 | #else | ||
155 | static struct dma_chan *tegra_apb_dma_chan; | 40 | static struct dma_chan *tegra_apb_dma_chan; |
156 | static struct dma_slave_config dma_sconfig; | 41 | static struct dma_slave_config dma_sconfig; |
157 | 42 | ||
@@ -279,7 +164,6 @@ static void tegra_apb_writel_using_dma(u32 value, unsigned long offset) | |||
279 | pr_err("error in writing offset 0x%08lx using dma\n", offset); | 164 | pr_err("error in writing offset 0x%08lx using dma\n", offset); |
280 | mutex_unlock(&tegra_apb_dma_lock); | 165 | mutex_unlock(&tegra_apb_dma_lock); |
281 | } | 166 | } |
282 | #endif | ||
283 | #else | 167 | #else |
284 | #define tegra_apb_readl_using_dma tegra_apb_readl_direct | 168 | #define tegra_apb_readl_using_dma tegra_apb_readl_direct |
285 | #define tegra_apb_writel_using_dma tegra_apb_writel_direct | 169 | #define tegra_apb_writel_using_dma tegra_apb_writel_direct |
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c deleted file mode 100644 index 29c5114d607c..000000000000 --- a/arch/arm/mach-tegra/dma.c +++ /dev/null | |||
@@ -1,823 +0,0 @@ | |||
1 | /* | ||
2 | * arch/arm/mach-tegra/dma.c | ||
3 | * | ||
4 | * System DMA driver for NVIDIA Tegra SoCs | ||
5 | * | ||
6 | * Copyright (c) 2008-2009, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
21 | */ | ||
22 | |||
23 | #include <linux/io.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/spinlock.h> | ||
27 | #include <linux/err.h> | ||
28 | #include <linux/irq.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/clk.h> | ||
31 | #include <mach/dma.h> | ||
32 | #include <mach/irqs.h> | ||
33 | #include <mach/iomap.h> | ||
34 | #include <mach/suspend.h> | ||
35 | |||
36 | #include "apbio.h" | ||
37 | |||
38 | #define APB_DMA_GEN 0x000 | ||
39 | #define GEN_ENABLE (1<<31) | ||
40 | |||
41 | #define APB_DMA_CNTRL 0x010 | ||
42 | |||
43 | #define APB_DMA_IRQ_MASK 0x01c | ||
44 | |||
45 | #define APB_DMA_IRQ_MASK_SET 0x020 | ||
46 | |||
47 | #define APB_DMA_CHAN_CSR 0x000 | ||
48 | #define CSR_ENB (1<<31) | ||
49 | #define CSR_IE_EOC (1<<30) | ||
50 | #define CSR_HOLD (1<<29) | ||
51 | #define CSR_DIR (1<<28) | ||
52 | #define CSR_ONCE (1<<27) | ||
53 | #define CSR_FLOW (1<<21) | ||
54 | #define CSR_REQ_SEL_SHIFT 16 | ||
55 | #define CSR_WCOUNT_SHIFT 2 | ||
56 | #define CSR_WCOUNT_MASK 0xFFFC | ||
57 | |||
58 | #define APB_DMA_CHAN_STA 0x004 | ||
59 | #define STA_BUSY (1<<31) | ||
60 | #define STA_ISE_EOC (1<<30) | ||
61 | #define STA_HALT (1<<29) | ||
62 | #define STA_PING_PONG (1<<28) | ||
63 | #define STA_COUNT_SHIFT 2 | ||
64 | #define STA_COUNT_MASK 0xFFFC | ||
65 | |||
66 | #define APB_DMA_CHAN_AHB_PTR 0x010 | ||
67 | |||
68 | #define APB_DMA_CHAN_AHB_SEQ 0x014 | ||
69 | #define AHB_SEQ_INTR_ENB (1<<31) | ||
70 | #define AHB_SEQ_BUS_WIDTH_SHIFT 28 | ||
71 | #define AHB_SEQ_BUS_WIDTH_MASK (0x7<<AHB_SEQ_BUS_WIDTH_SHIFT) | ||
72 | #define AHB_SEQ_BUS_WIDTH_8 (0<<AHB_SEQ_BUS_WIDTH_SHIFT) | ||
73 | #define AHB_SEQ_BUS_WIDTH_16 (1<<AHB_SEQ_BUS_WIDTH_SHIFT) | ||
74 | #define AHB_SEQ_BUS_WIDTH_32 (2<<AHB_SEQ_BUS_WIDTH_SHIFT) | ||
75 | #define AHB_SEQ_BUS_WIDTH_64 (3<<AHB_SEQ_BUS_WIDTH_SHIFT) | ||
76 | #define AHB_SEQ_BUS_WIDTH_128 (4<<AHB_SEQ_BUS_WIDTH_SHIFT) | ||
77 | #define AHB_SEQ_DATA_SWAP (1<<27) | ||
78 | #define AHB_SEQ_BURST_MASK (0x7<<24) | ||
79 | #define AHB_SEQ_BURST_1 (4<<24) | ||
80 | #define AHB_SEQ_BURST_4 (5<<24) | ||
81 | #define AHB_SEQ_BURST_8 (6<<24) | ||
82 | #define AHB_SEQ_DBL_BUF (1<<19) | ||
83 | #define AHB_SEQ_WRAP_SHIFT 16 | ||
84 | #define AHB_SEQ_WRAP_MASK (0x7<<AHB_SEQ_WRAP_SHIFT) | ||
85 | |||
86 | #define APB_DMA_CHAN_APB_PTR 0x018 | ||
87 | |||
88 | #define APB_DMA_CHAN_APB_SEQ 0x01c | ||
89 | #define APB_SEQ_BUS_WIDTH_SHIFT 28 | ||
90 | #define APB_SEQ_BUS_WIDTH_MASK (0x7<<APB_SEQ_BUS_WIDTH_SHIFT) | ||
91 | #define APB_SEQ_BUS_WIDTH_8 (0<<APB_SEQ_BUS_WIDTH_SHIFT) | ||
92 | #define APB_SEQ_BUS_WIDTH_16 (1<<APB_SEQ_BUS_WIDTH_SHIFT) | ||
93 | #define APB_SEQ_BUS_WIDTH_32 (2<<APB_SEQ_BUS_WIDTH_SHIFT) | ||
94 | #define APB_SEQ_BUS_WIDTH_64 (3<<APB_SEQ_BUS_WIDTH_SHIFT) | ||
95 | #define APB_SEQ_BUS_WIDTH_128 (4<<APB_SEQ_BUS_WIDTH_SHIFT) | ||
96 | #define APB_SEQ_DATA_SWAP (1<<27) | ||
97 | #define APB_SEQ_WRAP_SHIFT 16 | ||
98 | #define APB_SEQ_WRAP_MASK (0x7<<APB_SEQ_WRAP_SHIFT) | ||
99 | |||
100 | #define TEGRA_SYSTEM_DMA_CH_NR 16 | ||
101 | #define TEGRA_SYSTEM_DMA_AVP_CH_NUM 4 | ||
102 | #define TEGRA_SYSTEM_DMA_CH_MIN 0 | ||
103 | #define TEGRA_SYSTEM_DMA_CH_MAX \ | ||
104 | (TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1) | ||
105 | |||
106 | #define NV_DMA_MAX_TRASFER_SIZE 0x10000 | ||
107 | |||
108 | static const unsigned int ahb_addr_wrap_table[8] = { | ||
109 | 0, 32, 64, 128, 256, 512, 1024, 2048 | ||
110 | }; | ||
111 | |||
112 | static const unsigned int apb_addr_wrap_table[8] = { | ||
113 | 0, 1, 2, 4, 8, 16, 32, 64 | ||
114 | }; | ||
115 | |||
116 | static const unsigned int bus_width_table[5] = { | ||
117 | 8, 16, 32, 64, 128 | ||
118 | }; | ||
119 | |||
120 | #define TEGRA_DMA_NAME_SIZE 16 | ||
121 | struct tegra_dma_channel { | ||
122 | struct list_head list; | ||
123 | int id; | ||
124 | spinlock_t lock; | ||
125 | char name[TEGRA_DMA_NAME_SIZE]; | ||
126 | void __iomem *addr; | ||
127 | int mode; | ||
128 | int irq; | ||
129 | int req_transfer_count; | ||
130 | }; | ||
131 | |||
132 | #define NV_DMA_MAX_CHANNELS 32 | ||
133 | |||
134 | static bool tegra_dma_initialized; | ||
135 | static DEFINE_MUTEX(tegra_dma_lock); | ||
136 | static DEFINE_SPINLOCK(enable_lock); | ||
137 | |||
138 | static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS); | ||
139 | static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS]; | ||
140 | |||
141 | static void tegra_dma_update_hw(struct tegra_dma_channel *ch, | ||
142 | struct tegra_dma_req *req); | ||
143 | static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch, | ||
144 | struct tegra_dma_req *req); | ||
145 | static void tegra_dma_stop(struct tegra_dma_channel *ch); | ||
146 | |||
147 | void tegra_dma_flush(struct tegra_dma_channel *ch) | ||
148 | { | ||
149 | } | ||
150 | EXPORT_SYMBOL(tegra_dma_flush); | ||
151 | |||
152 | void tegra_dma_dequeue(struct tegra_dma_channel *ch) | ||
153 | { | ||
154 | struct tegra_dma_req *req; | ||
155 | |||
156 | if (tegra_dma_is_empty(ch)) | ||
157 | return; | ||
158 | |||
159 | req = list_entry(ch->list.next, typeof(*req), node); | ||
160 | |||
161 | tegra_dma_dequeue_req(ch, req); | ||
162 | return; | ||
163 | } | ||
164 | |||
165 | static void tegra_dma_stop(struct tegra_dma_channel *ch) | ||
166 | { | ||
167 | u32 csr; | ||
168 | u32 status; | ||
169 | |||
170 | csr = readl(ch->addr + APB_DMA_CHAN_CSR); | ||
171 | csr &= ~CSR_IE_EOC; | ||
172 | writel(csr, ch->addr + APB_DMA_CHAN_CSR); | ||
173 | |||
174 | csr &= ~CSR_ENB; | ||
175 | writel(csr, ch->addr + APB_DMA_CHAN_CSR); | ||
176 | |||
177 | status = readl(ch->addr + APB_DMA_CHAN_STA); | ||
178 | if (status & STA_ISE_EOC) | ||
179 | writel(status, ch->addr + APB_DMA_CHAN_STA); | ||
180 | } | ||
181 | |||
182 | static int tegra_dma_cancel(struct tegra_dma_channel *ch) | ||
183 | { | ||
184 | unsigned long irq_flags; | ||
185 | |||
186 | spin_lock_irqsave(&ch->lock, irq_flags); | ||
187 | while (!list_empty(&ch->list)) | ||
188 | list_del(ch->list.next); | ||
189 | |||
190 | tegra_dma_stop(ch); | ||
191 | |||
192 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
193 | return 0; | ||
194 | } | ||
195 | |||
196 | static unsigned int get_channel_status(struct tegra_dma_channel *ch, | ||
197 | struct tegra_dma_req *req, bool is_stop_dma) | ||
198 | { | ||
199 | void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE); | ||
200 | unsigned int status; | ||
201 | |||
202 | if (is_stop_dma) { | ||
203 | /* | ||
204 | * STOP the DMA and get the transfer count. | ||
205 | * Getting the transfer count is tricky. | ||
206 | * - Globally disable DMA on all channels | ||
207 | * - Read the channel's status register to know the number | ||
208 | * of pending bytes to be transfered. | ||
209 | * - Stop the dma channel | ||
210 | * - Globally re-enable DMA to resume other transfers | ||
211 | */ | ||
212 | spin_lock(&enable_lock); | ||
213 | writel(0, addr + APB_DMA_GEN); | ||
214 | udelay(20); | ||
215 | status = readl(ch->addr + APB_DMA_CHAN_STA); | ||
216 | tegra_dma_stop(ch); | ||
217 | writel(GEN_ENABLE, addr + APB_DMA_GEN); | ||
218 | spin_unlock(&enable_lock); | ||
219 | if (status & STA_ISE_EOC) { | ||
220 | pr_err("Got Dma Int here clearing"); | ||
221 | writel(status, ch->addr + APB_DMA_CHAN_STA); | ||
222 | } | ||
223 | req->status = TEGRA_DMA_REQ_ERROR_ABORTED; | ||
224 | } else { | ||
225 | status = readl(ch->addr + APB_DMA_CHAN_STA); | ||
226 | } | ||
227 | return status; | ||
228 | } | ||
229 | |||
230 | /* should be called with the channel lock held */ | ||
231 | static unsigned int dma_active_count(struct tegra_dma_channel *ch, | ||
232 | struct tegra_dma_req *req, unsigned int status) | ||
233 | { | ||
234 | unsigned int to_transfer; | ||
235 | unsigned int req_transfer_count; | ||
236 | unsigned int bytes_transferred; | ||
237 | |||
238 | to_transfer = ((status & STA_COUNT_MASK) >> STA_COUNT_SHIFT) + 1; | ||
239 | req_transfer_count = ch->req_transfer_count + 1; | ||
240 | bytes_transferred = req_transfer_count; | ||
241 | if (status & STA_BUSY) | ||
242 | bytes_transferred -= to_transfer; | ||
243 | /* | ||
244 | * In continuous transfer mode, DMA only tracks the count of the | ||
245 | * half DMA buffer. So, if the DMA already finished half the DMA | ||
246 | * then add the half buffer to the completed count. | ||
247 | */ | ||
248 | if (ch->mode & TEGRA_DMA_MODE_CONTINOUS) { | ||
249 | if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) | ||
250 | bytes_transferred += req_transfer_count; | ||
251 | if (status & STA_ISE_EOC) | ||
252 | bytes_transferred += req_transfer_count; | ||
253 | } | ||
254 | bytes_transferred *= 4; | ||
255 | return bytes_transferred; | ||
256 | } | ||
257 | |||
258 | int tegra_dma_dequeue_req(struct tegra_dma_channel *ch, | ||
259 | struct tegra_dma_req *_req) | ||
260 | { | ||
261 | unsigned int status; | ||
262 | struct tegra_dma_req *req = NULL; | ||
263 | int found = 0; | ||
264 | unsigned long irq_flags; | ||
265 | int stop = 0; | ||
266 | |||
267 | spin_lock_irqsave(&ch->lock, irq_flags); | ||
268 | |||
269 | if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req) | ||
270 | stop = 1; | ||
271 | |||
272 | list_for_each_entry(req, &ch->list, node) { | ||
273 | if (req == _req) { | ||
274 | list_del(&req->node); | ||
275 | found = 1; | ||
276 | break; | ||
277 | } | ||
278 | } | ||
279 | if (!found) { | ||
280 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
281 | return 0; | ||
282 | } | ||
283 | |||
284 | if (!stop) | ||
285 | goto skip_stop_dma; | ||
286 | |||
287 | status = get_channel_status(ch, req, true); | ||
288 | req->bytes_transferred = dma_active_count(ch, req, status); | ||
289 | |||
290 | if (!list_empty(&ch->list)) { | ||
291 | /* if the list is not empty, queue the next request */ | ||
292 | struct tegra_dma_req *next_req; | ||
293 | next_req = list_entry(ch->list.next, | ||
294 | typeof(*next_req), node); | ||
295 | tegra_dma_update_hw(ch, next_req); | ||
296 | } | ||
297 | |||
298 | skip_stop_dma: | ||
299 | req->status = -TEGRA_DMA_REQ_ERROR_ABORTED; | ||
300 | |||
301 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
302 | |||
303 | /* Callback should be called without any lock */ | ||
304 | req->complete(req); | ||
305 | return 0; | ||
306 | } | ||
307 | EXPORT_SYMBOL(tegra_dma_dequeue_req); | ||
308 | |||
309 | bool tegra_dma_is_empty(struct tegra_dma_channel *ch) | ||
310 | { | ||
311 | unsigned long irq_flags; | ||
312 | bool is_empty; | ||
313 | |||
314 | spin_lock_irqsave(&ch->lock, irq_flags); | ||
315 | if (list_empty(&ch->list)) | ||
316 | is_empty = true; | ||
317 | else | ||
318 | is_empty = false; | ||
319 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
320 | return is_empty; | ||
321 | } | ||
322 | EXPORT_SYMBOL(tegra_dma_is_empty); | ||
323 | |||
324 | bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch, | ||
325 | struct tegra_dma_req *_req) | ||
326 | { | ||
327 | unsigned long irq_flags; | ||
328 | struct tegra_dma_req *req; | ||
329 | |||
330 | spin_lock_irqsave(&ch->lock, irq_flags); | ||
331 | list_for_each_entry(req, &ch->list, node) { | ||
332 | if (req == _req) { | ||
333 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
334 | return true; | ||
335 | } | ||
336 | } | ||
337 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
338 | return false; | ||
339 | } | ||
340 | EXPORT_SYMBOL(tegra_dma_is_req_inflight); | ||
341 | |||
342 | int tegra_dma_enqueue_req(struct tegra_dma_channel *ch, | ||
343 | struct tegra_dma_req *req) | ||
344 | { | ||
345 | unsigned long irq_flags; | ||
346 | struct tegra_dma_req *_req; | ||
347 | int start_dma = 0; | ||
348 | |||
349 | if (req->size > NV_DMA_MAX_TRASFER_SIZE || | ||
350 | req->source_addr & 0x3 || req->dest_addr & 0x3) { | ||
351 | pr_err("Invalid DMA request for channel %d\n", ch->id); | ||
352 | return -EINVAL; | ||
353 | } | ||
354 | |||
355 | spin_lock_irqsave(&ch->lock, irq_flags); | ||
356 | |||
357 | list_for_each_entry(_req, &ch->list, node) { | ||
358 | if (req == _req) { | ||
359 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
360 | return -EEXIST; | ||
361 | } | ||
362 | } | ||
363 | |||
364 | req->bytes_transferred = 0; | ||
365 | req->status = 0; | ||
366 | req->buffer_status = 0; | ||
367 | if (list_empty(&ch->list)) | ||
368 | start_dma = 1; | ||
369 | |||
370 | list_add_tail(&req->node, &ch->list); | ||
371 | |||
372 | if (start_dma) | ||
373 | tegra_dma_update_hw(ch, req); | ||
374 | |||
375 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
376 | |||
377 | return 0; | ||
378 | } | ||
379 | EXPORT_SYMBOL(tegra_dma_enqueue_req); | ||
380 | |||
381 | struct tegra_dma_channel *tegra_dma_allocate_channel(int mode) | ||
382 | { | ||
383 | int channel; | ||
384 | struct tegra_dma_channel *ch = NULL; | ||
385 | |||
386 | if (!tegra_dma_initialized) | ||
387 | return NULL; | ||
388 | |||
389 | mutex_lock(&tegra_dma_lock); | ||
390 | |||
391 | /* first channel is the shared channel */ | ||
392 | if (mode & TEGRA_DMA_SHARED) { | ||
393 | channel = TEGRA_SYSTEM_DMA_CH_MIN; | ||
394 | } else { | ||
395 | channel = find_first_zero_bit(channel_usage, | ||
396 | ARRAY_SIZE(dma_channels)); | ||
397 | if (channel >= ARRAY_SIZE(dma_channels)) | ||
398 | goto out; | ||
399 | } | ||
400 | __set_bit(channel, channel_usage); | ||
401 | ch = &dma_channels[channel]; | ||
402 | ch->mode = mode; | ||
403 | |||
404 | out: | ||
405 | mutex_unlock(&tegra_dma_lock); | ||
406 | return ch; | ||
407 | } | ||
408 | EXPORT_SYMBOL(tegra_dma_allocate_channel); | ||
409 | |||
410 | void tegra_dma_free_channel(struct tegra_dma_channel *ch) | ||
411 | { | ||
412 | if (ch->mode & TEGRA_DMA_SHARED) | ||
413 | return; | ||
414 | tegra_dma_cancel(ch); | ||
415 | mutex_lock(&tegra_dma_lock); | ||
416 | __clear_bit(ch->id, channel_usage); | ||
417 | mutex_unlock(&tegra_dma_lock); | ||
418 | } | ||
419 | EXPORT_SYMBOL(tegra_dma_free_channel); | ||
420 | |||
421 | static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch, | ||
422 | struct tegra_dma_req *req) | ||
423 | { | ||
424 | u32 apb_ptr; | ||
425 | u32 ahb_ptr; | ||
426 | |||
427 | if (req->to_memory) { | ||
428 | apb_ptr = req->source_addr; | ||
429 | ahb_ptr = req->dest_addr; | ||
430 | } else { | ||
431 | apb_ptr = req->dest_addr; | ||
432 | ahb_ptr = req->source_addr; | ||
433 | } | ||
434 | writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR); | ||
435 | writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR); | ||
436 | |||
437 | req->status = TEGRA_DMA_REQ_INFLIGHT; | ||
438 | return; | ||
439 | } | ||
440 | |||
441 | static void tegra_dma_update_hw(struct tegra_dma_channel *ch, | ||
442 | struct tegra_dma_req *req) | ||
443 | { | ||
444 | int ahb_addr_wrap; | ||
445 | int apb_addr_wrap; | ||
446 | int ahb_bus_width; | ||
447 | int apb_bus_width; | ||
448 | int index; | ||
449 | |||
450 | u32 ahb_seq; | ||
451 | u32 apb_seq; | ||
452 | u32 ahb_ptr; | ||
453 | u32 apb_ptr; | ||
454 | u32 csr; | ||
455 | |||
456 | csr = CSR_IE_EOC | CSR_FLOW; | ||
457 | ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1; | ||
458 | apb_seq = 0; | ||
459 | |||
460 | csr |= req->req_sel << CSR_REQ_SEL_SHIFT; | ||
461 | |||
462 | /* One shot mode is always single buffered, | ||
463 | * continuous mode is always double buffered | ||
464 | * */ | ||
465 | if (ch->mode & TEGRA_DMA_MODE_ONESHOT) { | ||
466 | csr |= CSR_ONCE; | ||
467 | ch->req_transfer_count = (req->size >> 2) - 1; | ||
468 | } else { | ||
469 | ahb_seq |= AHB_SEQ_DBL_BUF; | ||
470 | |||
471 | /* In double buffered mode, we set the size to half the | ||
472 | * requested size and interrupt when half the buffer | ||
473 | * is full */ | ||
474 | ch->req_transfer_count = (req->size >> 3) - 1; | ||
475 | } | ||
476 | |||
477 | csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT; | ||
478 | |||
479 | if (req->to_memory) { | ||
480 | apb_ptr = req->source_addr; | ||
481 | ahb_ptr = req->dest_addr; | ||
482 | |||
483 | apb_addr_wrap = req->source_wrap; | ||
484 | ahb_addr_wrap = req->dest_wrap; | ||
485 | apb_bus_width = req->source_bus_width; | ||
486 | ahb_bus_width = req->dest_bus_width; | ||
487 | |||
488 | } else { | ||
489 | csr |= CSR_DIR; | ||
490 | apb_ptr = req->dest_addr; | ||
491 | ahb_ptr = req->source_addr; | ||
492 | |||
493 | apb_addr_wrap = req->dest_wrap; | ||
494 | ahb_addr_wrap = req->source_wrap; | ||
495 | apb_bus_width = req->dest_bus_width; | ||
496 | ahb_bus_width = req->source_bus_width; | ||
497 | } | ||
498 | |||
499 | apb_addr_wrap >>= 2; | ||
500 | ahb_addr_wrap >>= 2; | ||
501 | |||
502 | /* set address wrap for APB size */ | ||
503 | index = 0; | ||
504 | do { | ||
505 | if (apb_addr_wrap_table[index] == apb_addr_wrap) | ||
506 | break; | ||
507 | index++; | ||
508 | } while (index < ARRAY_SIZE(apb_addr_wrap_table)); | ||
509 | BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table)); | ||
510 | apb_seq |= index << APB_SEQ_WRAP_SHIFT; | ||
511 | |||
512 | /* set address wrap for AHB size */ | ||
513 | index = 0; | ||
514 | do { | ||
515 | if (ahb_addr_wrap_table[index] == ahb_addr_wrap) | ||
516 | break; | ||
517 | index++; | ||
518 | } while (index < ARRAY_SIZE(ahb_addr_wrap_table)); | ||
519 | BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table)); | ||
520 | ahb_seq |= index << AHB_SEQ_WRAP_SHIFT; | ||
521 | |||
522 | for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) { | ||
523 | if (bus_width_table[index] == ahb_bus_width) | ||
524 | break; | ||
525 | } | ||
526 | BUG_ON(index == ARRAY_SIZE(bus_width_table)); | ||
527 | ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT; | ||
528 | |||
529 | for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) { | ||
530 | if (bus_width_table[index] == apb_bus_width) | ||
531 | break; | ||
532 | } | ||
533 | BUG_ON(index == ARRAY_SIZE(bus_width_table)); | ||
534 | apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT; | ||
535 | |||
536 | writel(csr, ch->addr + APB_DMA_CHAN_CSR); | ||
537 | writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ); | ||
538 | writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR); | ||
539 | writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ); | ||
540 | writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR); | ||
541 | |||
542 | csr |= CSR_ENB; | ||
543 | writel(csr, ch->addr + APB_DMA_CHAN_CSR); | ||
544 | |||
545 | req->status = TEGRA_DMA_REQ_INFLIGHT; | ||
546 | } | ||
547 | |||
548 | static void handle_oneshot_dma(struct tegra_dma_channel *ch) | ||
549 | { | ||
550 | struct tegra_dma_req *req; | ||
551 | unsigned long irq_flags; | ||
552 | |||
553 | spin_lock_irqsave(&ch->lock, irq_flags); | ||
554 | if (list_empty(&ch->list)) { | ||
555 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
556 | return; | ||
557 | } | ||
558 | |||
559 | req = list_entry(ch->list.next, typeof(*req), node); | ||
560 | if (req) { | ||
561 | int bytes_transferred; | ||
562 | |||
563 | bytes_transferred = ch->req_transfer_count; | ||
564 | bytes_transferred += 1; | ||
565 | bytes_transferred <<= 2; | ||
566 | |||
567 | list_del(&req->node); | ||
568 | req->bytes_transferred = bytes_transferred; | ||
569 | req->status = TEGRA_DMA_REQ_SUCCESS; | ||
570 | |||
571 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
572 | /* Callback should be called without any lock */ | ||
573 | pr_debug("%s: transferred %d bytes\n", __func__, | ||
574 | req->bytes_transferred); | ||
575 | req->complete(req); | ||
576 | spin_lock_irqsave(&ch->lock, irq_flags); | ||
577 | } | ||
578 | |||
579 | if (!list_empty(&ch->list)) { | ||
580 | req = list_entry(ch->list.next, typeof(*req), node); | ||
581 | /* the complete function we just called may have enqueued | ||
582 | another req, in which case dma has already started */ | ||
583 | if (req->status != TEGRA_DMA_REQ_INFLIGHT) | ||
584 | tegra_dma_update_hw(ch, req); | ||
585 | } | ||
586 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
587 | } | ||
588 | |||
/*
 * handle_continuous_dma - half/full-buffer completion handling for a
 * continuous (double buffered) channel, run from the threaded IRQ handler.
 *
 * First interrupt (buffer EMPTY): checks the hardware ping/pong status to
 * detect whether software has fallen behind; if in sync, programs the
 * next request's addresses and fires the ->threshold() callback at the
 * half-full point.  Second interrupt (buffer HALF_FULL): completes the
 * request and fires ->complete().  Both callbacks are invoked with the
 * channel lock dropped.
 */
static void handle_continuous_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list)) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
			bool is_dma_ping_complete;
			/* Hardware ping/pong bit tells which half is active;
			 * the sense is inverted for to-memory transfers. */
			is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA)
						& STA_PING_PONG) ? true : false;
			if (req->to_memory)
				is_dma_ping_complete = !is_dma_ping_complete;
			/* Out of sync - Release current buffer */
			if (!is_dma_ping_complete) {
				int bytes_transferred;

				/* Full double buffer: (count + 1) words * 8. */
				bytes_transferred = ch->req_transfer_count;
				bytes_transferred += 1;
				bytes_transferred <<= 3;
				req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
				req->bytes_transferred = bytes_transferred;
				req->status = TEGRA_DMA_REQ_SUCCESS;
				tegra_dma_stop(ch);

				if (!list_is_last(&req->node, &ch->list)) {
					struct tegra_dma_req *next_req;

					next_req = list_entry(req->node.next,
						typeof(*next_req), node);
					tegra_dma_update_hw(ch, next_req);
				}

				list_del(&req->node);

				/* DMA lock is NOT held when callback is called */
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				req->complete(req);
				return;
			}
			/* Load the next request into the hardware, if available
			 * */
			if (!list_is_last(&req->node, &ch->list)) {
				struct tegra_dma_req *next_req;

				next_req = list_entry(req->node.next,
					typeof(*next_req), node);
				tegra_dma_update_hw_partial(ch, next_req);
			}
			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			/* DMA lock is NOT held when callback is called */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			if (likely(req->threshold))
				req->threshold(req);
			return;

		} else if (req->buffer_status ==
			TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
			/* Callback when the buffer is completely full (i.e on
			 * the second interrupt */
			int bytes_transferred;

			bytes_transferred = ch->req_transfer_count;
			bytes_transferred += 1;
			bytes_transferred <<= 3;

			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
			req->bytes_transferred = bytes_transferred;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			list_del(&req->node);

			/* DMA lock is NOT held when callback is called */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			req->complete(req);
			return;

		} else {
			/* Any other buffer state here is a driver bug. */
			BUG();
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}
678 | |||
679 | static irqreturn_t dma_isr(int irq, void *data) | ||
680 | { | ||
681 | struct tegra_dma_channel *ch = data; | ||
682 | unsigned long status; | ||
683 | |||
684 | status = readl(ch->addr + APB_DMA_CHAN_STA); | ||
685 | if (status & STA_ISE_EOC) | ||
686 | writel(status, ch->addr + APB_DMA_CHAN_STA); | ||
687 | else { | ||
688 | pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id); | ||
689 | return IRQ_HANDLED; | ||
690 | } | ||
691 | return IRQ_WAKE_THREAD; | ||
692 | } | ||
693 | |||
694 | static irqreturn_t dma_thread_fn(int irq, void *data) | ||
695 | { | ||
696 | struct tegra_dma_channel *ch = data; | ||
697 | |||
698 | if (ch->mode & TEGRA_DMA_MODE_ONESHOT) | ||
699 | handle_oneshot_dma(ch); | ||
700 | else | ||
701 | handle_continuous_dma(ch); | ||
702 | |||
703 | |||
704 | return IRQ_HANDLED; | ||
705 | } | ||
706 | |||
707 | int __init tegra_dma_init(void) | ||
708 | { | ||
709 | int ret = 0; | ||
710 | int i; | ||
711 | unsigned int irq; | ||
712 | void __iomem *addr; | ||
713 | struct clk *c; | ||
714 | |||
715 | bitmap_fill(channel_usage, NV_DMA_MAX_CHANNELS); | ||
716 | |||
717 | c = clk_get_sys("tegra-apbdma", NULL); | ||
718 | if (IS_ERR(c)) { | ||
719 | pr_err("Unable to get clock for APB DMA\n"); | ||
720 | ret = PTR_ERR(c); | ||
721 | goto fail; | ||
722 | } | ||
723 | ret = clk_prepare_enable(c); | ||
724 | if (ret != 0) { | ||
725 | pr_err("Unable to enable clock for APB DMA\n"); | ||
726 | goto fail; | ||
727 | } | ||
728 | |||
729 | addr = IO_ADDRESS(TEGRA_APB_DMA_BASE); | ||
730 | writel(GEN_ENABLE, addr + APB_DMA_GEN); | ||
731 | writel(0, addr + APB_DMA_CNTRL); | ||
732 | writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX), | ||
733 | addr + APB_DMA_IRQ_MASK_SET); | ||
734 | |||
735 | for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) { | ||
736 | struct tegra_dma_channel *ch = &dma_channels[i]; | ||
737 | |||
738 | ch->id = i; | ||
739 | snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i); | ||
740 | |||
741 | ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE + | ||
742 | TEGRA_APB_DMA_CH0_SIZE * i); | ||
743 | |||
744 | spin_lock_init(&ch->lock); | ||
745 | INIT_LIST_HEAD(&ch->list); | ||
746 | |||
747 | irq = INT_APB_DMA_CH0 + i; | ||
748 | ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0, | ||
749 | dma_channels[i].name, ch); | ||
750 | if (ret) { | ||
751 | pr_err("Failed to register IRQ %d for DMA %d\n", | ||
752 | irq, i); | ||
753 | goto fail; | ||
754 | } | ||
755 | ch->irq = irq; | ||
756 | |||
757 | __clear_bit(i, channel_usage); | ||
758 | } | ||
759 | /* mark the shared channel allocated */ | ||
760 | __set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage); | ||
761 | |||
762 | tegra_dma_initialized = true; | ||
763 | |||
764 | return 0; | ||
765 | fail: | ||
766 | writel(0, addr + APB_DMA_GEN); | ||
767 | for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) { | ||
768 | struct tegra_dma_channel *ch = &dma_channels[i]; | ||
769 | if (ch->irq) | ||
770 | free_irq(ch->irq, ch); | ||
771 | } | ||
772 | return ret; | ||
773 | } | ||
774 | postcore_initcall(tegra_dma_init); | ||
775 | |||
#ifdef CONFIG_PM
/* Suspend context: 3 global registers plus 5 registers per channel. */
static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];
779 | void tegra_dma_suspend(void) | ||
780 | { | ||
781 | void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE); | ||
782 | u32 *ctx = apb_dma; | ||
783 | int i; | ||
784 | |||
785 | *ctx++ = readl(addr + APB_DMA_GEN); | ||
786 | *ctx++ = readl(addr + APB_DMA_CNTRL); | ||
787 | *ctx++ = readl(addr + APB_DMA_IRQ_MASK); | ||
788 | |||
789 | for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) { | ||
790 | addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE + | ||
791 | TEGRA_APB_DMA_CH0_SIZE * i); | ||
792 | |||
793 | *ctx++ = readl(addr + APB_DMA_CHAN_CSR); | ||
794 | *ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR); | ||
795 | *ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ); | ||
796 | *ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR); | ||
797 | *ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ); | ||
798 | } | ||
799 | } | ||
800 | |||
801 | void tegra_dma_resume(void) | ||
802 | { | ||
803 | void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE); | ||
804 | u32 *ctx = apb_dma; | ||
805 | int i; | ||
806 | |||
807 | writel(*ctx++, addr + APB_DMA_GEN); | ||
808 | writel(*ctx++, addr + APB_DMA_CNTRL); | ||
809 | writel(*ctx++, addr + APB_DMA_IRQ_MASK); | ||
810 | |||
811 | for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) { | ||
812 | addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE + | ||
813 | TEGRA_APB_DMA_CH0_SIZE * i); | ||
814 | |||
815 | writel(*ctx++, addr + APB_DMA_CHAN_CSR); | ||
816 | writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR); | ||
817 | writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ); | ||
818 | writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR); | ||
819 | writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ); | ||
820 | } | ||
821 | } | ||
822 | |||
823 | #endif | ||
diff --git a/arch/arm/mach-tegra/include/mach/dma.h b/arch/arm/mach-tegra/include/mach/dma.h index 9077092812c0..3081cc6dda3b 100644 --- a/arch/arm/mach-tegra/include/mach/dma.h +++ b/arch/arm/mach-tegra/include/mach/dma.h | |||
@@ -51,101 +51,4 @@ | |||
51 | #define TEGRA_DMA_REQ_SEL_OWR 25 | 51 | #define TEGRA_DMA_REQ_SEL_OWR 25 |
52 | #define TEGRA_DMA_REQ_SEL_INVALID 31 | 52 | #define TEGRA_DMA_REQ_SEL_INVALID 31 |
53 | 53 | ||
54 | struct tegra_dma_req; | ||
55 | struct tegra_dma_channel; | ||
56 | |||
57 | enum tegra_dma_mode { | ||
58 | TEGRA_DMA_SHARED = 1, | ||
59 | TEGRA_DMA_MODE_CONTINOUS = 2, | ||
60 | TEGRA_DMA_MODE_ONESHOT = 4, | ||
61 | }; | ||
62 | |||
63 | enum tegra_dma_req_error { | ||
64 | TEGRA_DMA_REQ_SUCCESS = 0, | ||
65 | TEGRA_DMA_REQ_ERROR_ABORTED, | ||
66 | TEGRA_DMA_REQ_INFLIGHT, | ||
67 | }; | ||
68 | |||
69 | enum tegra_dma_req_buff_status { | ||
70 | TEGRA_DMA_REQ_BUF_STATUS_EMPTY = 0, | ||
71 | TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL, | ||
72 | TEGRA_DMA_REQ_BUF_STATUS_FULL, | ||
73 | }; | ||
74 | |||
75 | struct tegra_dma_req { | ||
76 | struct list_head node; | ||
77 | unsigned int modid; | ||
78 | int instance; | ||
79 | |||
80 | /* Called when the req is complete and from the DMA ISR context. | ||
81 | * When this is called the req structure is no longer queued by | ||
82 | * the DMA channel. | ||
83 | * | ||
84 | * State of the DMA depends on the number of req it has. If there are | ||
85 | * no DMA requests queued up, then it will STOP the DMA. If there are | ||
86 | * more requests in the DMA, then it will queue the next request. | ||
87 | */ | ||
88 | void (*complete)(struct tegra_dma_req *req); | ||
89 | |||
90 | /* This is called from the DMA ISR context when the DMA is still in | ||
91 | * progress and is actively filling same buffer. | ||
92 | * | ||
93 | * In case of continuous mode receive, this threshold is 1/2 the buffer | ||
94 | * size. In other cases, this will not even be called as there is no | ||
95 | * hardware support for it. | ||
96 | * | ||
97 | * In the case of continuous mode receive, if there is next req already | ||
98 | * queued, DMA programs the HW to use that req when this req is | ||
99 | * completed. If there is no "next req" queued, then DMA ISR doesn't do | ||
100 | * anything before calling this callback. | ||
101 | * | ||
102 | * This is mainly used by the cases, where the clients has queued | ||
103 | * only one req and want to get some sort of DMA threshold | ||
104 | * callback to program the next buffer. | ||
105 | * | ||
106 | */ | ||
107 | void (*threshold)(struct tegra_dma_req *req); | ||
108 | |||
109 | /* 1 to copy to memory. | ||
110 | * 0 to copy from the memory to device FIFO */ | ||
111 | int to_memory; | ||
112 | |||
113 | void *virt_addr; | ||
114 | |||
115 | unsigned long source_addr; | ||
116 | unsigned long dest_addr; | ||
117 | unsigned long dest_wrap; | ||
118 | unsigned long source_wrap; | ||
119 | unsigned long source_bus_width; | ||
120 | unsigned long dest_bus_width; | ||
121 | unsigned long req_sel; | ||
122 | unsigned int size; | ||
123 | |||
124 | /* Updated by the DMA driver on the completion of the request. */ | ||
125 | int bytes_transferred; | ||
126 | int status; | ||
127 | |||
128 | /* DMA completion tracking information */ | ||
129 | int buffer_status; | ||
130 | |||
131 | /* Client specific data */ | ||
132 | void *dev; | ||
133 | }; | ||
134 | |||
135 | int tegra_dma_enqueue_req(struct tegra_dma_channel *ch, | ||
136 | struct tegra_dma_req *req); | ||
137 | int tegra_dma_dequeue_req(struct tegra_dma_channel *ch, | ||
138 | struct tegra_dma_req *req); | ||
139 | void tegra_dma_dequeue(struct tegra_dma_channel *ch); | ||
140 | void tegra_dma_flush(struct tegra_dma_channel *ch); | ||
141 | |||
142 | bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch, | ||
143 | struct tegra_dma_req *req); | ||
144 | bool tegra_dma_is_empty(struct tegra_dma_channel *ch); | ||
145 | |||
146 | struct tegra_dma_channel *tegra_dma_allocate_channel(int mode); | ||
147 | void tegra_dma_free_channel(struct tegra_dma_channel *ch); | ||
148 | |||
149 | int __init tegra_dma_init(void); | ||
150 | |||
151 | #endif | 54 | #endif |
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index 119bc52ab93e..4e07eec1270d 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c | |||
@@ -63,10 +63,11 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd, | |||
63 | pid = task_pid_nr(thread->task) << ASID_BITS; | 63 | pid = task_pid_nr(thread->task) << ASID_BITS; |
64 | asm volatile( | 64 | asm volatile( |
65 | " mrc p15, 0, %0, c13, c0, 1\n" | 65 | " mrc p15, 0, %0, c13, c0, 1\n" |
66 | " bfi %1, %0, #0, %2\n" | 66 | " and %0, %0, %2\n" |
67 | " mcr p15, 0, %1, c13, c0, 1\n" | 67 | " orr %0, %0, %1\n" |
68 | " mcr p15, 0, %0, c13, c0, 1\n" | ||
68 | : "=r" (contextidr), "+r" (pid) | 69 | : "=r" (contextidr), "+r" (pid) |
69 | : "I" (ASID_BITS)); | 70 | : "I" (~ASID_MASK)); |
70 | isb(); | 71 | isb(); |
71 | 72 | ||
72 | return NOTIFY_OK; | 73 | return NOTIFY_OK; |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 051204fc4617..e59c4ab71bcb 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -489,7 +489,7 @@ static bool __in_atomic_pool(void *start, size_t size) | |||
489 | void *pool_start = pool->vaddr; | 489 | void *pool_start = pool->vaddr; |
490 | void *pool_end = pool->vaddr + pool->size; | 490 | void *pool_end = pool->vaddr + pool->size; |
491 | 491 | ||
492 | if (start < pool_start || start > pool_end) | 492 | if (start < pool_start || start >= pool_end) |
493 | return false; | 493 | return false; |
494 | 494 | ||
495 | if (end <= pool_end) | 495 | if (end <= pool_end) |
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index a7a9e41fa2c2..18144e6a3115 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -990,8 +990,8 @@ void __init sanity_check_meminfo(void) | |||
990 | * Check whether this memory bank would partially overlap | 990 | * Check whether this memory bank would partially overlap |
991 | * the vmalloc area. | 991 | * the vmalloc area. |
992 | */ | 992 | */ |
993 | if (__va(bank->start + bank->size) > vmalloc_min || | 993 | if (__va(bank->start + bank->size - 1) >= vmalloc_min || |
994 | __va(bank->start + bank->size) < __va(bank->start)) { | 994 | __va(bank->start + bank->size - 1) <= __va(bank->start)) { |
995 | unsigned long newsize = vmalloc_min - __va(bank->start); | 995 | unsigned long newsize = vmalloc_min - __va(bank->start); |
996 | printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " | 996 | printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " |
997 | "to -%.8llx (vmalloc region overlap).\n", | 997 | "to -%.8llx (vmalloc region overlap).\n", |
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c index d861aa73299d..28acb383e7df 100644 --- a/arch/arm/plat-omap/sram.c +++ b/arch/arm/plat-omap/sram.c | |||
@@ -67,6 +67,7 @@ | |||
67 | 67 | ||
68 | static unsigned long omap_sram_start; | 68 | static unsigned long omap_sram_start; |
69 | static void __iomem *omap_sram_base; | 69 | static void __iomem *omap_sram_base; |
70 | static unsigned long omap_sram_skip; | ||
70 | static unsigned long omap_sram_size; | 71 | static unsigned long omap_sram_size; |
71 | static void __iomem *omap_sram_ceil; | 72 | static void __iomem *omap_sram_ceil; |
72 | 73 | ||
@@ -105,6 +106,7 @@ static int is_sram_locked(void) | |||
105 | */ | 106 | */ |
106 | static void __init omap_detect_sram(void) | 107 | static void __init omap_detect_sram(void) |
107 | { | 108 | { |
109 | omap_sram_skip = SRAM_BOOTLOADER_SZ; | ||
108 | if (cpu_class_is_omap2()) { | 110 | if (cpu_class_is_omap2()) { |
109 | if (is_sram_locked()) { | 111 | if (is_sram_locked()) { |
110 | if (cpu_is_omap34xx()) { | 112 | if (cpu_is_omap34xx()) { |
@@ -112,6 +114,7 @@ static void __init omap_detect_sram(void) | |||
112 | if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) || | 114 | if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) || |
113 | (omap_type() == OMAP2_DEVICE_TYPE_SEC)) { | 115 | (omap_type() == OMAP2_DEVICE_TYPE_SEC)) { |
114 | omap_sram_size = 0x7000; /* 28K */ | 116 | omap_sram_size = 0x7000; /* 28K */ |
117 | omap_sram_skip += SZ_16K; | ||
115 | } else { | 118 | } else { |
116 | omap_sram_size = 0x8000; /* 32K */ | 119 | omap_sram_size = 0x8000; /* 32K */ |
117 | } | 120 | } |
@@ -174,8 +177,10 @@ static void __init omap_map_sram(void) | |||
174 | return; | 177 | return; |
175 | 178 | ||
176 | #ifdef CONFIG_OMAP4_ERRATA_I688 | 179 | #ifdef CONFIG_OMAP4_ERRATA_I688 |
180 | if (cpu_is_omap44xx()) { | ||
177 | omap_sram_start += PAGE_SIZE; | 181 | omap_sram_start += PAGE_SIZE; |
178 | omap_sram_size -= SZ_16K; | 182 | omap_sram_size -= SZ_16K; |
183 | } | ||
179 | #endif | 184 | #endif |
180 | if (cpu_is_omap34xx()) { | 185 | if (cpu_is_omap34xx()) { |
181 | /* | 186 | /* |
@@ -202,8 +207,8 @@ static void __init omap_map_sram(void) | |||
202 | * Looks like we need to preserve some bootloader code at the | 207 | * Looks like we need to preserve some bootloader code at the |
203 | * beginning of SRAM for jumping to flash for reboot to work... | 208 | * beginning of SRAM for jumping to flash for reboot to work... |
204 | */ | 209 | */ |
205 | memset_io(omap_sram_base + SRAM_BOOTLOADER_SZ, 0, | 210 | memset_io(omap_sram_base + omap_sram_skip, 0, |
206 | omap_sram_size - SRAM_BOOTLOADER_SZ); | 211 | omap_sram_size - omap_sram_skip); |
207 | } | 212 | } |
208 | 213 | ||
209 | /* | 214 | /* |
@@ -217,7 +222,7 @@ void *omap_sram_push_address(unsigned long size) | |||
217 | { | 222 | { |
218 | unsigned long available, new_ceil = (unsigned long)omap_sram_ceil; | 223 | unsigned long available, new_ceil = (unsigned long)omap_sram_ceil; |
219 | 224 | ||
220 | available = omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ); | 225 | available = omap_sram_ceil - (omap_sram_base + omap_sram_skip); |
221 | 226 | ||
222 | if (size > available) { | 227 | if (size > available) { |
223 | pr_err("Not enough space in SRAM\n"); | 228 | pr_err("Not enough space in SRAM\n"); |