Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/kernel/iommu.c           | 17 ++++++++++++++---
-rw-r--r--  arch/powerpc/mm/slb.c                 | 10 ++--------
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c |  1 -
-rw-r--r--  arch/sparc/kernel/time.c              |  9 ++++++++-
-rw-r--r--  arch/sparc64/kernel/ktlb.S            |  6 ++++++
5 files changed, 29 insertions(+), 14 deletions(-)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 2d0c9ef555e9..79a85d656871 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -278,6 +278,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 	unsigned long flags;
 	struct scatterlist *s, *outs, *segstart;
 	int outcount, incount, i;
+	unsigned int align;
 	unsigned long handle;
 
 	BUG_ON(direction == DMA_NONE);
@@ -309,7 +310,12 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 		/* Allocate iommu entries for that segment */
 		vaddr = (unsigned long) sg_virt(s);
 		npages = iommu_num_pages(vaddr, slen);
-		entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);
+		align = 0;
+		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
+		    (vaddr & ~PAGE_MASK) == 0)
+			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+		entry = iommu_range_alloc(tbl, npages, &handle,
+					  mask >> IOMMU_PAGE_SHIFT, align);
 
 		DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
 
@@ -572,7 +578,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
 {
 	dma_addr_t dma_handle = DMA_ERROR_CODE;
 	unsigned long uaddr;
-	unsigned int npages;
+	unsigned int npages, align;
 
 	BUG_ON(direction == DMA_NONE);
 
@@ -580,8 +586,13 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
 	npages = iommu_num_pages(uaddr, size);
 
 	if (tbl) {
+		align = 0;
+		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
+		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
+			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+
 		dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
-					 mask >> IOMMU_PAGE_SHIFT, 0);
+					 mask >> IOMMU_PAGE_SHIFT, align);
 		if (dma_handle == DMA_ERROR_CODE) {
 			if (printk_ratelimit()) {
 				printk(KERN_INFO "iommu_alloc failed, "
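
Note on the iommu.c hunks above: when the kernel page size exceeds the IOMMU page size (e.g. 64K kernel pages over a 4K-page IOMMU), a buffer that starts on a kernel page boundary and spans at least one full kernel page now gets its IOMMU entries allocated with kernel-page alignment, so the resulting DMA address stays page-aligned whenever the CPU address is, which callers may rely on. A minimal sketch of the computation shared by both hunks; the helper name is hypothetical, the macros are the ones used in the patch:

	/* Hypothetical helper restating the align logic added to both
	 * iommu_map_sg() and iommu_map_single().  The return value is the
	 * log2 alignment, in IOMMU pages, passed down to
	 * iommu_range_alloc()/iommu_alloc().
	 */
	static unsigned int iommu_alloc_order(unsigned long vaddr, size_t len)
	{
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && len >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			return PAGE_SHIFT - IOMMU_PAGE_SHIFT; /* kernel-page align */
		return 0; /* default: IOMMU-page alignment only */
	}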
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index a282bc212e80..50d7372bc2ce 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -82,14 +82,6 @@ static inline void slb_shadow_clear(unsigned long entry)
 	get_slb_shadow()->save_area[entry].esid = 0;
 }
 
-void slb_shadow_clear_all(void)
-{
-	int i;
-
-	for (i = 0; i < SLB_NUM_BOLTED; i++)
-		slb_shadow_clear(i);
-}
-
 static inline void create_shadowed_slbe(unsigned long ea, int ssize,
 					unsigned long flags,
 					unsigned long entry)
@@ -300,6 +292,8 @@ void slb_initialize(void)
 
 	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 
+	slb_shadow_clear(2);
+
 	/* We don't bolt the stack for the time being - we're in boot,
 	 * so the stack is in the bolted segment. By the time it goes
 	 * elsewhere, we'll call _switch() which will bolt in the new
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 34317aa148a8..9a455d46379d 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -272,7 +272,6 @@ void vpa_init(int cpu)
 	 */
 	addr = __pa(&slb_shadow[cpu]);
 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
-		slb_shadow_clear_all();
 		ret = register_slb_shadow(hwcpu, addr);
 		if (ret)
 			printk(KERN_ERR
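
Note on the slb.c and lpar.c changes above: slb_shadow_clear_all() existed only so that vpa_init() could scrub stale shadow entries before registering the buffer with the hypervisor. The patch moves the clearing into slb_initialize() instead: entries 0 and 1 are immediately rewritten by create_shadowed_slbe() for the linear and vmalloc mappings, so only entry 2 (the not-yet-bolted stack slot) needs an explicit clear, and the lpar.c call site disappears. A sketch of the resulting initialization order; the entry-0 line reflects the surrounding function and is an assumption here, not part of the hunk:

	/* Tail of slb_initialize(), bolted-entry layout on 64-bit powerpc:
	 * 0 = kernel linear mapping, 1 = vmalloc, 2 = stack (bolted later
	 * by _switch()).
	 */
	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
	slb_shadow_clear(2); /* stack slot stays invalid until bolted */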
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 45cb7c5286d7..00b393c3a4a0 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -436,7 +436,14 @@ void __init time_init(void)
 
 static inline unsigned long do_gettimeoffset(void)
 {
-	return (*master_l10_counter >> 10) & 0x1fffff;
+	unsigned long val = *master_l10_counter;
+	unsigned long usec = (val >> 10) & 0x1fffff;
+
+	/* Limit hit? */
+	if (val & 0x80000000)
+		usec += 1000000 / HZ;
+
+	return usec;
 }
 
 /* Ok, my cute asm atomicity trick doesn't work anymore.
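
Note on the hunk above: the code treats bits 10-30 of the level-10 counter as the microsecond count and bit 31 as the limit bit, which the hardware sets once the counter reaches the limit register, i.e. a full timer tick has elapsed but its interrupt has not been serviced yet. Sampling the register once into val and crediting one tick's worth of microseconds when the limit bit is set keeps the returned offset from jumping backwards across the wrap. Restated in C; the function name is illustrative, the constants are the hunk's own:

	/* Microsecond offset since the last tick, from a single sample of
	 * the level-10 counter.
	 */
	static unsigned long l10_offset_usec(unsigned long val)
	{
		unsigned long usec = (val >> 10) & 0x1fffff;

		if (val & 0x80000000)		/* limit bit: tick pending */
			usec += 1000000 / HZ;	/* credit the elapsed tick */
		return usec;
	}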
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index 964527d2ffa0..cef8defcd7a9 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -1,6 +1,6 @@
 /* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
  *
- * Copyright (C) 1995, 1997, 2005 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
  * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
  * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
  * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
@@ -226,6 +226,7 @@ kvmap_dtlb_load:
 	ba,pt		%xcc, sun4v_dtlb_load
 	 mov		%g5, %g3
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
 kvmap_vmemmap:
 	sub		%g4, %g5, %g5
 	srlx		%g5, 22, %g5
@@ -234,6 +235,7 @@ kvmap_vmemmap:
 	or		%g1, %lo(vmemmap_table), %g1
 	ba,pt		%xcc, kvmap_dtlb_load
 	 ldx		[%g1 + %g5], %g5
+#endif
 
 kvmap_dtlb_nonlinear:
 	/* Catch kernel NULL pointer derefs. */
@@ -242,12 +244,14 @@ kvmap_dtlb_nonlinear:
 	bleu,pn		%xcc, kvmap_dtlb_longpath
 	 nop
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
 	/* Do not use the TSB for vmemmap. */
 	mov		(VMEMMAP_BASE >> 24), %g5
 	sllx		%g5, 24, %g5
 	cmp		%g4,%g5
 	bgeu,pn		%xcc, kvmap_vmemmap
 	 nop
+#endif
 
 	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
 
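
Note on the ktlb.S hunks above: the vmemmap miss handler and the range check that branches to it are now assembled only when CONFIG_SPARSEMEM_VMEMMAP is set, since vmemmap_table and VMEMMAP_BASE do not exist otherwise and the extra compare in the DTLB miss path would be dead weight. What the assembly computes, restated as C; the function name is illustrative, vmemmap_table and VMEMMAP_BASE are the symbols used above:

	/* kvmap_vmemmap in C: each vmemmap_table entry holds a ready-made
	 * TTE covering a 4MB (1 << 22) chunk of the virtual memmap, so a
	 * miss resolves with one subtract, one shift, and one load.
	 */
	static unsigned long vmemmap_tte(unsigned long fault_addr)
	{
		unsigned long idx = (fault_addr - VMEMMAP_BASE) >> 22;

		return vmemmap_table[idx]; /* handed to kvmap_dtlb_load */
	}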