| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-03-09 18:53:03 -0500 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-03-09 18:53:03 -0500 |
| commit | a50243b1ddcdd766d0d17fbfeeb1a22e62fdc461 (patch) | |
| tree | 3dbf847105558eaac3658a46c4934df503c866a2 /lib | |
| parent | 2901752c14b8e1b7dd898d2e5245c93e531aa624 (diff) | |
| parent | fca22e7e595f1799cfbfdfa13e16d48ece0d136c (diff) | |
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
"This has been a slightly more active cycle than normal with ongoing
core changes and quite a lot of collected driver updates.
- Various driver fixes for bnxt_re, cxgb4, hns, mlx5, pvrdma, rxe
- A new data transfer mode for HFI1 giving higher performance
- Significant functional and bug fix update to the mlx5
On-Demand-Paging MR feature
- A chip hang reset recovery system for hns
- Change mm->pinned_vm to an atomic64
- Update bnxt_re to support a new 57500 chip
- A sane netlink 'rdma link add' method for creating rxe devices and
fixing the various unregistration race conditions in rxe's
unregister flow
- Allow looking up objects by an ID over netlink
- Various reworking of the core to driver interface:
    - drivers should not assume umem SGLs are in PAGE_SIZE chunks
    - ucontext is accessed via udata not other means
    - start to make the core code responsible for object memory
      allocation
    - drivers should convert struct device to struct ib_device via a
      helper (see the sketch after this quoted message)
    - drivers have more tools to avoid use after unregister problems"
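
The struct device conversion mentioned above is done with `rdma_device_to_ibdev()` and its `rdma_device_to_drv_device()` wrapper in `include/rdma/ib_verbs.h`. A minimal sketch of the pattern in a sysfs show function; `struct my_drv_dev`, its `hw_rev` field, and the attribute itself are hypothetical stand-ins for a real driver:

```c
/*
 * Sketch only: struct my_drv_dev and hw_rev_show() are hypothetical;
 * rdma_device_to_drv_device() is the helper introduced by this series.
 */
#include <linux/device.h>
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

struct my_drv_dev {
	struct ib_device ibdev;	/* embedded core device */
	u32 hw_rev;
};

static ssize_t hw_rev_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	/* Replaces per-driver dev_get_drvdata()/container_of() open-coding. */
	struct my_drv_dev *dev =
		rdma_device_to_drv_device(device, struct my_drv_dev, ibdev);

	return sprintf(buf, "0x%x\n", dev->hw_rev);
}
static DEVICE_ATTR_RO(hw_rev);
```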
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (280 commits)
net/mlx5: ODP support for XRC transport is not enabled by default in FW
IB/hfi1: Close race condition on user context disable and close
RDMA/umem: Revert broken 'off by one' fix
RDMA/umem: minor bug fix in error handling path
RDMA/hns: Use GFP_ATOMIC in hns_roce_v2_modify_qp
cxgb4: kfree mhp after the debug print
IB/rdmavt: Fix concurrency panics in QP post_send and modify to error
IB/rdmavt: Fix loopback send with invalidate ordering
IB/iser: Fix dma_nents type definition
IB/mlx5: Set correct write permissions for implicit ODP MR
bnxt_re: Clean cq for kernel consumers only
RDMA/uverbs: Don't do double free of allocated PD
RDMA: Handle ucontext allocations by IB/core
RDMA/core: Fix a WARN() message
bnxt_re: fix the regression due to changes in alloc_pbl
IB/mlx4: Increase the timeout for CM cache
IB/core: Abort page fault handler silently during owning process exit
IB/mlx5: Validate correct PD before prefetch MR
IB/mlx5: Protect against prefetch of invalid MR
RDMA/uverbs: Store PR pointer before it is overwritten
...
Diffstat (limited to 'lib')
```
 lib/irq_poll.c    |  2 +-
 lib/scatterlist.c | 26 ++++++++++++++++++++++++++
 2 files changed, 27 insertions(+), 1 deletion(-)
```
```diff
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
index 86a709954f5a..2f17b488d58e 100644
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
@@ -35,7 +35,7 @@ void irq_poll_sched(struct irq_poll *iop)
 
 	local_irq_save(flags);
 	list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
-	__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+	raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(irq_poll_sched);
```
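
This one-liner is what allows `irq_poll_sched()` to be called outside hard-interrupt context: `__raise_softirq_irqoff()` only marks IRQ_POLL_SOFTIRQ pending, while `raise_softirq_irqoff()` additionally wakes ksoftirqd when the caller is not in interrupt context, so the scheduled poll work cannot be left stranded. For orientation, a minimal sketch of how a driver drives this API; all `my_*` names are hypothetical and not from this patch:

```c
#include <linux/interrupt.h>
#include <linux/irq_poll.h>
#include <linux/kernel.h>

struct my_dev {
	struct irq_poll iop;
	/* ... completion-queue state ... */
};

static bool my_reap_one(struct my_dev *dev);	/* hypothetical: handle one completion */

/* Poll callback, run from IRQ_POLL_SOFTIRQ with @budget as the work limit. */
static int my_poll(struct irq_poll *iop, int budget)
{
	struct my_dev *dev = container_of(iop, struct my_dev, iop);
	int done = 0;

	while (done < budget && my_reap_one(dev))
		done++;

	if (done < budget)
		irq_poll_complete(iop);	/* queue drained; stop polling */
	return done;
}

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dev = data;

	irq_poll_sched(&dev->iop);	/* defer the heavy lifting to softirq */
	return IRQ_HANDLED;
}

/* Setup, e.g. in probe: irq_poll_init(&dev->iop, 64, my_poll); */
```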
```diff
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 9ba349e775ef..739dc9fe2c55 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -625,6 +625,32 @@ bool __sg_page_iter_next(struct sg_page_iter *piter)
 }
 EXPORT_SYMBOL(__sg_page_iter_next);
 
+static int sg_dma_page_count(struct scatterlist *sg)
+{
+	return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
+}
+
+bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
+{
+	struct sg_page_iter *piter = &dma_iter->base;
+
+	if (!piter->__nents || !piter->sg)
+		return false;
+
+	piter->sg_pgoffset += piter->__pg_advance;
+	piter->__pg_advance = 1;
+
+	while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
+		piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
+		piter->sg = sg_next(piter->sg);
+		if (!--piter->__nents || !piter->sg)
+			return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(__sg_page_iter_dma_next);
+
 /**
  * sg_miter_start - start mapping iteration over a sg list
  * @miter: sg mapping iter to be started
```
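
`__sg_page_iter_dma_next()` above is the workhorse behind `for_each_sg_dma_page()` from the same series, which walks the DMA-mapped side of a scatterlist in PAGE_SIZE steps using `sg_dma_len()` even when the IOMMU has coalesced entries; this is what lets RDMA drivers stop assuming umem SGLs come in PAGE_SIZE chunks. A minimal usage sketch, where `program_io_page()` and `my_map_and_program()` are hypothetical driver hooks:

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static void program_io_page(dma_addr_t addr);	/* hypothetical HW programming hook */

static int my_map_and_program(struct device *dev, struct sg_table *sgt)
{
	struct sg_dma_page_iter dma_iter;
	int nents;

	nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);
	if (!nents)
		return -ENOMEM;

	/*
	 * Visit DMA addresses one PAGE_SIZE at a time; each step lands in
	 * __sg_page_iter_dma_next() above, which advances across (possibly
	 * coalesced) entries using sg_dma_len() rather than sg->length.
	 */
	for_each_sg_dma_page(sgt->sgl, &dma_iter, nents, 0)
		program_io_page(sg_page_iter_dma_address(&dma_iter));

	return 0;
}
```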
