Diffstat (limited to 'arch/sparc64')
-rw-r--r--  arch/sparc64/kernel/iommu.c         12
-rw-r--r--  arch/sparc64/kernel/iommu_common.h  18
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c     12
-rw-r--r--  arch/sparc64/mm/tlb.c                7
4 files changed, 31 insertions(+), 18 deletions(-)
diff --git a/arch/sparc64/kernel/iommu.c b/arch/sparc64/kernel/iommu.c
index b781d3d54fb8..756fa24eeefa 100644
--- a/arch/sparc64/kernel/iommu.c
+++ b/arch/sparc64/kernel/iommu.c
@@ -516,9 +516,11 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 	unsigned long flags, handle, prot, ctx;
 	dma_addr_t dma_next = 0, dma_addr;
 	unsigned int max_seg_size;
+	unsigned long seg_boundary_size;
 	int outcount, incount, i;
 	struct strbuf *strbuf;
 	struct iommu *iommu;
+	unsigned long base_shift;
 
 	BUG_ON(direction == DMA_NONE);
 
@@ -549,8 +551,11 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 	outs->dma_length = 0;
 
 	max_seg_size = dma_get_max_seg_size(dev);
+	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
+	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
 	for_each_sg(sglist, s, nelems, i) {
-		unsigned long paddr, npages, entry, slen;
+		unsigned long paddr, npages, entry, out_entry = 0, slen;
 		iopte_t *base;
 
 		slen = s->length;
@@ -593,7 +598,9 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 		 * - allocated dma_addr isn't contiguous to previous allocation
 		 */
 		if ((dma_addr != dma_next) ||
-		    (outs->dma_length + s->length > max_seg_size)) {
+		    (outs->dma_length + s->length > max_seg_size) ||
+		    (is_span_boundary(out_entry, base_shift,
+				      seg_boundary_size, outs, s))) {
 			/* Can't merge: create a new segment */
 			segstart = s;
 			outcount++;
@@ -607,6 +614,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 			/* This is a new segment, fill entries */
 			outs->dma_address = dma_addr;
 			outs->dma_length = slen;
+			out_entry = entry;
 		}
 
 		/* Calculate next page pointer for contiguous check */
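
The extra is_span_boundary() test above keeps dma_4u_map_sg() from merging scatterlist entries into a DMA segment that crosses the device's segment boundary. A rough worked example of the two precomputed values, assuming sparc64's 8 KB IO pages (IO_PAGE_SHIFT of 13) and the common 4 GB default from dma_get_seg_boundary(); the numbers are illustrative only and not part of the patch:

	/* sketch: dma_get_seg_boundary(dev) == 0xffffffffUL assumed */
	seg_boundary_size = ALIGN(0xffffffffUL + 1, IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
		/* = 0x100000000 >> 13 = 0x80000 IOMMU pages per 4 GB window */
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
		/* offsets a page-table entry index to an absolute IO-page number,
		 * so the boundary check operates on bus addresses, not indices */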
diff --git a/arch/sparc64/kernel/iommu_common.h b/arch/sparc64/kernel/iommu_common.h
index 0713bd58499c..f3575a614fa2 100644
--- a/arch/sparc64/kernel/iommu_common.h
+++ b/arch/sparc64/kernel/iommu_common.h
@@ -12,6 +12,7 @@
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 #include <linux/device.h>
+#include <linux/iommu-helper.h>
 
 #include <asm/iommu.h>
 #include <asm/scatterlist.h>
@@ -45,17 +46,16 @@ static inline unsigned long iommu_num_pages(unsigned long vaddr,
 	return npages;
 }
 
-static inline unsigned long calc_npages(struct scatterlist *sglist, int nelems)
+static inline int is_span_boundary(unsigned long entry,
+				   unsigned long shift,
+				   unsigned long boundary_size,
+				   struct scatterlist *outs,
+				   struct scatterlist *sg)
 {
-	unsigned long i, npages = 0;
-	struct scatterlist *sg;
-
-	for_each_sg(sglist, sg, nelems, i) {
-		unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
-		npages += iommu_num_pages(paddr, sg->length);
-	}
-
-	return npages;
+	unsigned long paddr = SG_ENT_PHYS_ADDRESS(outs);
+	int nr = iommu_num_pages(paddr, outs->dma_length + sg->length);
+
+	return iommu_is_span_boundary(entry, nr, shift, boundary_size);
 }
 
 extern unsigned long iommu_range_alloc(struct device *dev,
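
is_span_boundary() defers the real work to the generic helper pulled in via <linux/iommu-helper.h>. Roughly, iommu_is_span_boundary() reports whether nr pages starting at entry (offset by shift) straddle a boundary_size-aligned window. A minimal sketch of that logic, assuming boundary_size is a power of two; it is an approximation, not a verbatim copy of the helper:

	static inline int span_check(unsigned long entry, int nr,
				     unsigned long shift, unsigned long boundary_size)
	{
		unsigned long offset = (shift + entry) & (boundary_size - 1);

		return offset + nr > boundary_size; /* non-zero: would cross the boundary */
	}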
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index ddca6c6c0b49..01839706bd52 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -335,8 +335,10 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	unsigned long flags, handle, prot;
 	dma_addr_t dma_next = 0, dma_addr;
 	unsigned int max_seg_size;
+	unsigned long seg_boundary_size;
 	int outcount, incount, i;
 	struct iommu *iommu;
+	unsigned long base_shift;
 	long err;
 
 	BUG_ON(direction == DMA_NONE);
@@ -362,8 +364,11 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	iommu_batch_start(dev, prot, ~0UL);
 
 	max_seg_size = dma_get_max_seg_size(dev);
+	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
+	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
 	for_each_sg(sglist, s, nelems, i) {
-		unsigned long paddr, npages, entry, slen;
+		unsigned long paddr, npages, entry, out_entry = 0, slen;
 
 		slen = s->length;
 		/* Sanity check */
@@ -406,7 +411,9 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 		 * - allocated dma_addr isn't contiguous to previous allocation
 		 */
 		if ((dma_addr != dma_next) ||
-		    (outs->dma_length + s->length > max_seg_size)) {
+		    (outs->dma_length + s->length > max_seg_size) ||
+		    (is_span_boundary(out_entry, base_shift,
+				      seg_boundary_size, outs, s))) {
 			/* Can't merge: create a new segment */
 			segstart = s;
 			outcount++;
@@ -420,6 +427,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 			/* This is a new segment, fill entries */
 			outs->dma_address = dma_addr;
 			outs->dma_length = slen;
+			out_entry = entry;
 		}
 
 		/* Calculate next page pointer for contiguous check */
diff --git a/arch/sparc64/mm/tlb.c b/arch/sparc64/mm/tlb.c
index a0f000b293de..ae24919cba7c 100644
--- a/arch/sparc64/mm/tlb.c
+++ b/arch/sparc64/mm/tlb.c
@@ -23,11 +23,8 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) = { 0, };
 
 void flush_tlb_pending(void)
 {
-	struct mmu_gather *mp;
+	struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
 
-	preempt_disable();
-
-	mp = &__get_cpu_var(mmu_gathers);
 	if (mp->tlb_nr) {
 		flush_tsb_user(mp);
 
@@ -43,7 +40,7 @@ void flush_tlb_pending(void)
 		mp->tlb_nr = 0;
 	}
 
-	preempt_enable();
+	put_cpu_var(mmu_gathers);
 }
 
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
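
The tlb.c change folds the explicit preempt_disable()/preempt_enable() pair into get_cpu_var()/put_cpu_var(), which bracket the per-cpu access with the same preemption control. Approximate expansions of the two macros, per the percpu.h of this era (shown for illustration; exact definitions may differ):

	#define get_cpu_var(var)	(*({ preempt_disable(); &__get_cpu_var(var); }))
	#define put_cpu_var(var)	preempt_enable()

flush_tlb_pending() therefore still runs its whole batch flush with preemption disabled, just with less boilerplate.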