Diffstat (limited to 'arch/powerpc/lib')
-rw-r--r--  arch/powerpc/lib/copyuser_64.S     | 17
-rw-r--r--  arch/powerpc/lib/dma-noncoherent.c | 25
-rw-r--r--  arch/powerpc/lib/memcpy_64.S       | 16
3 files changed, 41 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index 25ec5378afa4..70693a5c12a1 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -26,11 +26,24 @@ _GLOBAL(__copy_tofrom_user)
 	andi.	r6,r6,7
 	PPC_MTOCRF	0x01,r5
 	blt	cr1,.Lshort_copy
+/* Below we want to nop out the bne if we're on a CPU that has the
+ * CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
+ * cleared.
+ * At the time of writing the only CPU that has this combination of bits
+ * set is Power6.
+ */
+BEGIN_FTR_SECTION
+	nop
+FTR_SECTION_ELSE
 	bne	.Ldst_unaligned
+ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
+		    CPU_FTR_UNALIGNED_LD_STD)
 .Ldst_aligned:
-	andi.	r0,r4,7
 	addi	r3,r3,-16
+BEGIN_FTR_SECTION
+	andi.	r0,r4,7
 	bne	.Lsrc_unaligned
+END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 	srdi	r7,r5,4
 20:	ld	r9,0(r4)
 	addi	r4,r4,-8
@@ -138,7 +151,7 @@ _GLOBAL(__copy_tofrom_user)
 	PPC_MTOCRF	0x01,r6		/* put #bytes to 8B bdry into cr7 */
 	subf	r5,r6,r5
 	li	r7,0
-	cmpldi	r1,r5,16
+	cmpldi	cr1,r5,16
 	bf	cr7*4+3,1f
 35:	lbz	r0,0(r4)
 81:	stb	r0,0(r3)
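The copyuser_64.S change relies on the CPU feature-section macros: the nop between BEGIN_FTR_SECTION and FTR_SECTION_ELSE is the code that survives when the feature test encoded by ALT_FTR_SECTION_END passes, and the bne in the else branch is patched back in at boot otherwise. As a rough illustration only (not the kernel's fixup code, and with placeholder feature bit values), the mask/value test reduces to something like:

/* Sketch only: placeholder bit positions, not the kernel's definitions. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CPU_FTR_UNALIGNED_LD_STD (1ULL << 0)
#define CPU_FTR_CP_USE_DCBTZ     (1ULL << 1)

/* ALT_FTR_SECTION_END(msk, val): keep the primary body (the nop) when
 * (features & msk) == val, otherwise patch in the FTR_SECTION_ELSE body. */
static bool keep_primary(uint64_t features, uint64_t msk, uint64_t val)
{
	return (features & msk) == val;
}

int main(void)
{
	uint64_t msk = CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ;
	uint64_t val = CPU_FTR_UNALIGNED_LD_STD;
	uint64_t power6_like = CPU_FTR_UNALIGNED_LD_STD;	/* DCBTZ bit clear */
	uint64_t other_cpu = CPU_FTR_CP_USE_DCBTZ;

	printf("Power6-like CPU keeps the nop: %d\n", keep_primary(power6_like, msk, val));
	printf("other CPU gets the bne back:  %d\n", !keep_primary(other_cpu, msk, val));
	return 0;
}

On a Power6-style CPU, which sets CPU_FTR_UNALIGNED_LD_STD but not CPU_FTR_CP_USE_DCBTZ, the test passes and the branch to .Ldst_unaligned is effectively nop'ed out, matching the comment in the diff.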
diff --git a/arch/powerpc/lib/dma-noncoherent.c b/arch/powerpc/lib/dma-noncoherent.c
index 31734c0969cd..b7dc4c19f582 100644
--- a/arch/powerpc/lib/dma-noncoherent.c
+++ b/arch/powerpc/lib/dma-noncoherent.c
@@ -77,26 +77,26 @@ static DEFINE_SPINLOCK(consistent_lock);
  * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
  * would have to initialise this each time prior to calling vm_region_alloc().
  */
-struct vm_region {
+struct ppc_vm_region {
 	struct list_head	vm_list;
 	unsigned long		vm_start;
 	unsigned long		vm_end;
 };
 
-static struct vm_region consistent_head = {
+static struct ppc_vm_region consistent_head = {
 	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
 	.vm_start	= CONSISTENT_BASE,
 	.vm_end		= CONSISTENT_END,
 };
 
-static struct vm_region *
-vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
+static struct ppc_vm_region *
+ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
 {
 	unsigned long addr = head->vm_start, end = head->vm_end - size;
 	unsigned long flags;
-	struct vm_region *c, *new;
+	struct ppc_vm_region *c, *new;
 
-	new = kmalloc(sizeof(struct vm_region), gfp);
+	new = kmalloc(sizeof(struct ppc_vm_region), gfp);
 	if (!new)
 		goto out;
 
@@ -130,9 +130,9 @@ vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
 	return NULL;
 }
 
-static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
+static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
 {
-	struct vm_region *c;
+	struct ppc_vm_region *c;
 
 	list_for_each_entry(c, &head->vm_list, vm_list) {
 		if (c->vm_start == addr)
@@ -151,7 +151,7 @@ void *
 __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 {
 	struct page *page;
-	struct vm_region *c;
+	struct ppc_vm_region *c;
 	unsigned long order;
 	u64 mask = 0x00ffffff, limit; /* ISA default */
 
@@ -191,7 +191,7 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 	/*
 	 * Allocate a virtual address in the consistent mapping region.
 	 */
-	c = vm_region_alloc(&consistent_head, size,
+	c = ppc_vm_region_alloc(&consistent_head, size,
 			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
 	if (c) {
 		unsigned long vaddr = c->vm_start;
@@ -239,7 +239,7 @@ EXPORT_SYMBOL(__dma_alloc_coherent);
  */
 void __dma_free_coherent(size_t size, void *vaddr)
 {
-	struct vm_region *c;
+	struct ppc_vm_region *c;
 	unsigned long flags, addr;
 	pte_t *ptep;
 
@@ -247,7 +247,7 @@ void __dma_free_coherent(size_t size, void *vaddr)
 
 	spin_lock_irqsave(&consistent_lock, flags);
 
-	c = vm_region_find(&consistent_head, (unsigned long)vaddr);
+	c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
 	if (!c)
 		goto no_area;
 
@@ -320,7 +320,6 @@ static int __init dma_alloc_init(void)
 		ret = -ENOMEM;
 		break;
 	}
-	WARN_ON(!pmd_none(*pmd));
 
 	pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
 	if (!pte) {
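The dma-noncoherent.c hunks are a mechanical rename of the local vm_region allocator to ppc_vm_region (plus dropping one WARN_ON), so the structure being renamed is easiest to see in isolation: regions carved out of the consistent area live on a list kept sorted by address, and an allocation takes the first gap large enough for the requested size. Below is a simplified userspace sketch of that first-fit walk, under my own naming; it is an illustration, not the kernel code.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct ppc_vm_region: [start, end) on a list sorted by address.
 * The head node only bounds the whole area, much like consistent_head. */
struct region {
	struct region *next;	/* the kernel uses a list_head; singly linked here */
	unsigned long start, end;
};

static struct region *region_alloc(struct region *head, unsigned long size)
{
	unsigned long addr = head->start;
	struct region **link = &head->next, *c, *new;

	new = malloc(sizeof(*new));
	if (!new)
		return NULL;

	/* Walk the sorted list until a gap of at least 'size' opens up. */
	for (c = head->next; c; c = c->next) {
		if (addr + size <= c->start)
			break;
		addr = c->end;
		link = &c->next;
	}

	if (addr + size > head->end) {	/* ran off the end of the area */
		free(new);
		return NULL;
	}

	new->start = addr;
	new->end   = addr + size;
	new->next  = c;			/* insert here to keep the list sorted */
	*link      = new;
	return new;
}

int main(void)
{
	struct region head = { .next = NULL, .start = 0x1000, .end = 0x9000 };
	struct region *a = region_alloc(&head, 0x2000);
	struct region *b = region_alloc(&head, 0x1000);

	if (a && b)
		printf("a: %#lx-%#lx, b: %#lx-%#lx\n", a->start, a->end, b->start, b->end);
	return 0;
}

The real allocator additionally takes consistent_lock around the walk, and freeing looks the region up again by address with ppc_vm_region_find, as the __dma_free_coherent hunk above shows.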
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index 3f131129d1c1..fe2d34e5332d 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -18,11 +18,23 @@ _GLOBAL(memcpy)
 	andi.	r6,r6,7
 	dcbt	0,r4
 	blt	cr1,.Lshort_copy
+/* Below we want to nop out the bne if we're on a CPU that has the
+   CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
+   cleared.
+   At the time of writing the only CPU that has this combination of bits
+   set is Power6. */
+BEGIN_FTR_SECTION
+	nop
+FTR_SECTION_ELSE
 	bne	.Ldst_unaligned
+ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
+		    CPU_FTR_UNALIGNED_LD_STD)
 .Ldst_aligned:
-	andi.	r0,r4,7
 	addi	r3,r3,-16
+BEGIN_FTR_SECTION
+	andi.	r0,r4,7
 	bne	.Lsrc_unaligned
+END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 	srdi	r7,r5,4
 	ld	r9,0(r4)
 	addi	r4,r4,-8
@@ -131,7 +143,7 @@ _GLOBAL(memcpy)
 	PPC_MTOCRF	0x01,r6		# put #bytes to 8B bdry into cr7
 	subf	r5,r6,r5
 	li	r7,0
-	cmpldi	r1,r5,16
+	cmpldi	cr1,r5,16
 	bf	cr7*4+3,1f
 	lbz	r0,0(r4)
 	stb	r0,0(r3)
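Both copy routines fix the same compare, spelling the condition-register operand of the cmpldi as cr1. The surrounding code is the alignment fix-up path: PPC_MTOCRF 0x01,r6 moves the low bits of the byte count into cr7 (the "put #bytes to 8B bdry into cr7" comment), and each bf cr7*4+N then skips one power-of-two chunk when the corresponding bit of the count is clear. A hedged C sketch of that dispatch pattern, purely as illustration and not a translation of the assembly:

#include <stdio.h>
#include <string.h>

/* 'n' plays the role of r6: the 0-7 bytes needed to reach an 8-byte boundary.
 * Each test mirrors one "bf cr7*4+N,label" that skips a chunk when the
 * corresponding bit of the count is clear. */
static void copy_align_chunks(char **dst, const char **src, unsigned long n)
{
	if (n & 1) {			/* the lbz/stb pair guarded by bf cr7*4+3 */
		**dst = **src;
		(*dst)++; (*src)++;
	}
	if (n & 2) {			/* 2-byte chunk */
		memcpy(*dst, *src, 2);
		*dst += 2; *src += 2;
	}
	if (n & 4) {			/* 4-byte chunk */
		memcpy(*dst, *src, 4);
		*dst += 4; *src += 4;
	}
}

int main(void)
{
	const char in[8] = "abcdefg";
	char out[8] = { 0 };
	const char *s = in;
	char *d = out;

	copy_align_chunks(&d, &s, 7);	/* 1 + 2 + 4 = 7 bytes copied */
	printf("%s\n", out);
	return 0;
}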