Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug |  2
-rw-r--r--  lib/ioremap.c     |  1
-rw-r--r--  lib/iov_iter.c    | 54
-rw-r--r--  lib/radix-tree.c  | 11
-rw-r--r--  lib/swiotlb.c     | 58
5 files changed, 90 insertions, 36 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b06848a104e6..eb9e9a7870fa 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -164,7 +164,7 @@ config DEBUG_INFO_REDUCED
 
 config DEBUG_INFO_SPLIT
 	bool "Produce split debuginfo in .dwo files"
-	depends on DEBUG_INFO
+	depends on DEBUG_INFO && !FRV
 	help
 	  Generate debug info into separate .dwo files. This significantly
 	  reduces the build directory size for builds with DEBUG_INFO,
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 86c8911b0e3a..a3e14ce92a56 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -144,4 +144,3 @@ int ioremap_page_range(unsigned long addr,
 
 	return err;
 }
-EXPORT_SYMBOL_GPL(ioremap_page_range);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 25f572303801..e68604ae3ced 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -730,43 +730,50 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 }
 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
 
+static inline void pipe_truncate(struct iov_iter *i)
+{
+	struct pipe_inode_info *pipe = i->pipe;
+	if (pipe->nrbufs) {
+		size_t off = i->iov_offset;
+		int idx = i->idx;
+		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
+		if (off) {
+			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
+			idx = next_idx(idx, pipe);
+			nrbufs++;
+		}
+		while (pipe->nrbufs > nrbufs) {
+			pipe_buf_release(pipe, &pipe->bufs[idx]);
+			idx = next_idx(idx, pipe);
+			pipe->nrbufs--;
+		}
+	}
+}
+
 static void pipe_advance(struct iov_iter *i, size_t size)
 {
 	struct pipe_inode_info *pipe = i->pipe;
-	struct pipe_buffer *buf;
-	int idx = i->idx;
-	size_t off = i->iov_offset, orig_sz;
-
 	if (unlikely(i->count < size))
 		size = i->count;
-	orig_sz = size;
-
 	if (size) {
+		struct pipe_buffer *buf;
+		size_t off = i->iov_offset, left = size;
+		int idx = i->idx;
 		if (off) /* make it relative to the beginning of buffer */
-			size += off - pipe->bufs[idx].offset;
+			left += off - pipe->bufs[idx].offset;
 		while (1) {
 			buf = &pipe->bufs[idx];
-			if (size <= buf->len)
+			if (left <= buf->len)
 				break;
-			size -= buf->len;
+			left -= buf->len;
 			idx = next_idx(idx, pipe);
 		}
-		buf->len = size;
 		i->idx = idx;
-		off = i->iov_offset = buf->offset + size;
-	}
-	if (off)
-		idx = next_idx(idx, pipe);
-	if (pipe->nrbufs) {
-		int unused = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
-		/* [curbuf,unused) is in use. Free [idx,unused) */
-		while (idx != unused) {
-			pipe_buf_release(pipe, &pipe->bufs[idx]);
-			idx = next_idx(idx, pipe);
-			pipe->nrbufs--;
-		}
+		i->iov_offset = buf->offset + left;
 	}
-	i->count -= orig_sz;
+	i->count -= size;
+	/* ... and discard everything past that point */
+	pipe_truncate(i);
 }
 
 void iov_iter_advance(struct iov_iter *i, size_t size)
@@ -826,6 +833,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
 			size_t count)
 {
 	BUG_ON(direction != ITER_PIPE);
+	WARN_ON(pipe->nrbufs == pipe->buffers);
 	i->type = direction;
 	i->pipe = pipe;
 	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
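
Both the new pipe_truncate() and iov_iter_pipe() lean on pipe->buffers being a power of two, so occupancy and wraparound reduce to a bitmask instead of a modulo. A minimal user-space sketch of that ring arithmetic, with plain ints standing in for the pipe fields:

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 16	/* must be a power of two, like pipe->buffers */

/* Index of the first free slot, as computed in iov_iter_pipe(). */
static int first_free(int curbuf, int nrbufs)
{
	return (curbuf + nrbufs) & (RING_SIZE - 1);
}

/* Occupied distance from curbuf to idx, as in pipe_truncate(). */
static int in_use(int curbuf, int idx)
{
	return (idx - curbuf) & (RING_SIZE - 1);
}

int main(void)
{
	/* three buffers starting at slot 14 occupy slots 14, 15 and 0 */
	assert(first_free(14, 3) == 1);
	/* the distance wraps correctly even though 1 < 14 */
	assert(in_use(14, 1) == 3);
	puts("ring arithmetic ok");
	return 0;
}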
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 6f382e07de77..84812a9fb16f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -640,6 +640,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root,
 			update_node(node, private);
 		}
 
+		WARN_ON_ONCE(!list_empty(&node->private_list));
 		radix_tree_node_free(node);
 	}
 }
@@ -666,6 +667,7 @@ static void delete_node(struct radix_tree_root *root,
 			root->rnode = NULL;
 		}
 
+		WARN_ON_ONCE(!list_empty(&node->private_list));
 		radix_tree_node_free(node);
 
 		node = parent;
@@ -767,6 +769,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
 			struct radix_tree_node *old = child;
 			offset = child->offset + 1;
 			child = child->parent;
+			WARN_ON_ONCE(!list_empty(&old->private_list));
 			radix_tree_node_free(old);
 			if (old == entry_to_node(node))
 				return;
@@ -1824,15 +1827,19 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
  * __radix_tree_delete_node - try to free node after clearing a slot
  * @root:		radix tree root
  * @node:		node containing @index
+ * @update_node:	callback for changing leaf nodes
+ * @private:		private data to pass to @update_node
  *
  * After clearing the slot at @index in @node from radix tree
  * rooted at @root, call this function to attempt freeing the
  * node and shrinking the tree.
  */
 void __radix_tree_delete_node(struct radix_tree_root *root,
-			      struct radix_tree_node *node)
+			      struct radix_tree_node *node,
+			      radix_tree_update_node_t update_node,
+			      void *private)
 {
-	delete_node(root, node, NULL, NULL);
+	delete_node(root, node, update_node, private);
 }
 
 /**
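
The extra @update_node/@private pair lets a caller that keeps per-node state run its bookkeeping before the node is freed; the WARN_ON_ONCE(!list_empty(&node->private_list)) checks added above fire exactly when such bookkeeping was skipped. A hedged sketch of a call site, modeled on the page cache's shadow-entry tracking of this era (workingset_update_node and mapping->page_tree are assumptions, not part of this diff):

	/* clear a slot, then let the tree free/shrink nodes, giving
	 * the callback a chance to unlink node->private_list first */
	__radix_tree_delete_node(&mapping->page_tree, node,
				 workingset_update_node, mapping);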
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index cb1b54ee8527..a8d74a733a38 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -53,7 +53,7 @@
  */
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
 
-int swiotlb_force;
+enum swiotlb_force swiotlb_force;
 
 /*
  * Used to do a quick range check in swiotlb_tbl_unmap_single and
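
The enum itself lives in include/linux/swiotlb.h, which is outside this lib/ diffstat. Assuming the definition that accompanies this change, it reads roughly:

enum swiotlb_force {
	SWIOTLB_NORMAL,		/* default: bounce only when the DMA mask requires it */
	SWIOTLB_FORCE,		/* swiotlb=force: bounce every mapping */
	SWIOTLB_NO_FORCE,	/* swiotlb=noforce: never bounce, warn instead */
};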
@@ -83,6 +83,12 @@ static unsigned int *io_tlb_list;
 static unsigned int io_tlb_index;
 
 /*
+ * Max segment that we can provide which (if pages are contiguous) will
+ * not be bounced (unless SWIOTLB_FORCE is set).
+ */
+unsigned int max_segment;
+
+/*
  * We need to save away the original address corresponding to a mapped entry
  * for the sync operations.
  */
@@ -106,8 +112,12 @@ setup_io_tlb_npages(char *str)
 	}
 	if (*str == ',')
 		++str;
-	if (!strcmp(str, "force"))
-		swiotlb_force = 1;
+	if (!strcmp(str, "force")) {
+		swiotlb_force = SWIOTLB_FORCE;
+	} else if (!strcmp(str, "noforce")) {
+		swiotlb_force = SWIOTLB_NO_FORCE;
+		io_tlb_nslabs = 1;
+	}
 
 	return 0;
 }
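
With the new branch the swiotlb= boot parameter accepts a second keyword. Assuming the usual <nslabs>[,force|,noforce] syntax this parser handles:

	swiotlb=65536,force	bounce-buffer every DMA mapping
	swiotlb=,noforce	never bounce; shrink the table to a single slab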
@@ -120,6 +130,20 @@ unsigned long swiotlb_nr_tbl(void)
 }
 EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
 
+unsigned int swiotlb_max_segment(void)
+{
+	return max_segment;
+}
+EXPORT_SYMBOL_GPL(swiotlb_max_segment);
+
+void swiotlb_set_max_segment(unsigned int val)
+{
+	if (swiotlb_force == SWIOTLB_FORCE)
+		max_segment = 1;
+	else
+		max_segment = rounddown(val, PAGE_SIZE);
+}
+
 /* default to 64MB */
 #define IO_TLB_DEFAULT_SIZE (64UL<<20)
 unsigned long swiotlb_size_or_default(void)
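
A driver would use swiotlb_max_segment() to cap its scatter-gather segment size so contiguous runs never exceed what swiotlb can handle without bouncing. A hedged sketch (the i915 sg-table setup was the motivating user; the surrounding logic here is illustrative):

	unsigned int cap = swiotlb_max_segment();

	/* 0 means swiotlb is unused, so no cap applies; under
	 * SWIOTLB_FORCE the helper returns 1, which callers treat as
	 * "never coalesce pages into larger segments" */
	if (!cap)
		cap = rounddown(UINT_MAX, PAGE_SIZE);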
@@ -201,6 +225,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	if (verbose)
 		swiotlb_print_info();
 
+	swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
 	return 0;
 }
 
@@ -279,6 +304,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
 	rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
 	if (rc)
 		free_pages((unsigned long)vstart, order);
+
 	return rc;
 }
 
@@ -333,6 +359,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 
 	late_alloc = 1;
 
+	swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
+
 	return 0;
 
 cleanup4:
@@ -347,6 +375,7 @@ cleanup2:
 	io_tlb_end = 0;
 	io_tlb_start = 0;
 	io_tlb_nslabs = 0;
+	max_segment = 0;
 	return -ENOMEM;
 }
 
@@ -375,6 +404,7 @@ void __init swiotlb_free(void)
 			   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	}
 	io_tlb_nslabs = 0;
+	max_segment = 0;
 }
 
 int is_swiotlb_buffer(phys_addr_t paddr)
@@ -453,11 +483,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 		: 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
 
 	/*
-	 * For mappings greater than a page, we limit the stride (and
-	 * hence alignment) to a page size.
+	 * For mappings greater than or equal to a page, we limit the stride
+	 * (and hence alignment) to a page size.
 	 */
 	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-	if (size > PAGE_SIZE)
+	if (size >= PAGE_SIZE)
 		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
 	else
 		stride = 1;
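
Concretely, with the usual 2 KiB bounce slots (IO_TLB_SHIFT = 11) and 4 KiB pages: an exactly page-sized request gives nslots = 2, and the corrected >= test selects stride = 1 << (12 - 11) = 2, so the search only considers page-aligned slot pairs; the old > test left stride at 1 and could return a misaligned pair. A standalone sketch of the arithmetic, assuming those shift values:

#include <stdio.h>

#define IO_TLB_SHIFT	11	/* 2 KiB slots (typical) */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long size = 4096;	/* one page */
	unsigned long nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	unsigned long stride = size >= PAGE_SIZE
			? (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) : 1;

	printf("nslots=%lu stride=%lu\n", nslots, stride);	/* 2 and 2 */
	return 0;
}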
@@ -543,8 +573,15 @@ static phys_addr_t
 map_single(struct device *hwdev, phys_addr_t phys, size_t size,
 	   enum dma_data_direction dir, unsigned long attrs)
 {
-	dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
+	dma_addr_t start_dma_addr;
 
+	if (swiotlb_force == SWIOTLB_NO_FORCE) {
+		dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
+				     &phys);
+		return SWIOTLB_MAP_ERROR;
+	}
+
+	start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
 	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
 				      dir, attrs);
 }
@@ -721,6 +758,9 @@ static void
 swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
 	     int do_panic)
 {
+	if (swiotlb_force == SWIOTLB_NO_FORCE)
+		return;
+
 	/*
 	 * Ran out of IOMMU space for this operation. This is very bad.
 	 * Unfortunately the drivers cannot handle this operation properly.
@@ -763,7 +803,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
-	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
+	if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
 		return dev_addr;
 
 	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
@@ -904,7 +944,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 		phys_addr_t paddr = sg_phys(sg);
 		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
 
-		if (swiotlb_force ||
+		if (swiotlb_force == SWIOTLB_FORCE ||
 		    !dma_capable(hwdev, dev_addr, sg->length)) {
 			phys_addr_t map = map_single(hwdev, sg_phys(sg),
 						     sg->length, dir, attrs);
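
The page and scatterlist paths now apply the same policy: go direct when the device can reach the address and bouncing is not forced, bounce otherwise. As a compact sketch of that decision (map_single() supplies the bounce path, and under SWIOTLB_NO_FORCE it now refuses with SWIOTLB_MAP_ERROR):

	if (dma_capable(dev, dev_addr, size) &&
	    swiotlb_force != SWIOTLB_FORCE)
		return dev_addr;	/* reachable: map directly */
	/* otherwise bounce through the swiotlb */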