diff options
author | David S. Miller <davem@davemloft.net> | 2018-03-23 11:24:57 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2018-03-23 11:31:58 -0400 |
commit | 03fe2debbb2771fb90881e4ce8109b09cf772a5c (patch) | |
tree | fbaf8738296b2e9dcba81c6daef2d515b6c4948c /drivers/infiniband/core/cq.c | |
parent | 6686c459e1449a3ee5f3fd313b0a559ace7a700e (diff) | |
parent | f36b7534b83357cf52e747905de6d65b4f7c2512 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Fun set of conflict resolutions here...
For the mac80211 stuff, these were fortunately just parallel
adds. Trivially resolved.
In drivers/net/phy/phy.c we had a bug fix in 'net' that moved the
function phy_disable_interrupts() earlier in the file, whilst in
'net-next' the phy_error() call from this function was removed.
In net/ipv4/xfrm4_policy.c, David Ahern's changes to remove the
'rt_table_id' member of rtable collided with a bug fix in 'net' that
added a new struct member "rt_mtu_locked" which needs to be copied
over here.
The mlxsw driver conflict consisted of net-next separating
the span code and definitions into separate files, whilst
a 'net' bug fix made some changes to that moved code.
The mlx5 infiniband conflict resolution was quite non-trivial,
the RDMA tree's merge commit was used as a guide here, and
here are their notes:
====================
Due to bug fixes found by the syzkaller bot and taken into the for-rc
branch after development for the 4.17 merge window had already started
being taken into the for-next branch, there were fairly non-trivial
merge issues that would need to be resolved between the for-rc branch
and the for-next branch. This merge resolves those conflicts and
provides a unified base upon which ongoing development for 4.17 can
be based.
Conflicts:
drivers/infiniband/hw/mlx5/main.c - Commit 42cea83f9524
(IB/mlx5: Fix cleanup order on unload) added to for-rc and
commit b5ca15ad7e61 (IB/mlx5: Add proper representors support)
added as part of the devel cycle both needed to modify the
init/de-init functions used by mlx5. To support the new
representors, the new functions added by the cleanup patch
needed to be made non-static, and the init/de-init list
added by the representors patch needed to be modified to
match the init/de-init list changes made by the cleanup
patch.
Updates:
drivers/infiniband/hw/mlx5/mlx5_ib.h - Update function
prototypes added by representors patch to reflect new function
names as changed by cleanup patch
drivers/infiniband/hw/mlx5/ib_rep.c - Update init/de-init
stage list to match new order from cleanup patch
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/infiniband/core/cq.c')
-rw-r--r-- | drivers/infiniband/core/cq.c | 21 |
1 file changed, 11 insertions, 10 deletions
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index bc79ca8215d7..af5ad6a56ae4 100644 --- a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c | |||
@@ -17,6 +17,7 @@ | |||
17 | 17 | ||
18 | /* # of WCs to poll for with a single call to ib_poll_cq */ | 18 | /* # of WCs to poll for with a single call to ib_poll_cq */ |
19 | #define IB_POLL_BATCH 16 | 19 | #define IB_POLL_BATCH 16 |
20 | #define IB_POLL_BATCH_DIRECT 8 | ||
20 | 21 | ||
21 | /* # of WCs to iterate over before yielding */ | 22 | /* # of WCs to iterate over before yielding */ |
22 | #define IB_POLL_BUDGET_IRQ 256 | 23 | #define IB_POLL_BUDGET_IRQ 256 |
@@ -25,18 +26,18 @@ | |||
25 | #define IB_POLL_FLAGS \ | 26 | #define IB_POLL_FLAGS \ |
26 | (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) | 27 | (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) |
27 | 28 | ||
28 | static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc) | 29 | static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs, |
30 | int batch) | ||
29 | { | 31 | { |
30 | int i, n, completed = 0; | 32 | int i, n, completed = 0; |
31 | struct ib_wc *wcs = poll_wc ? : cq->wc; | ||
32 | 33 | ||
33 | /* | 34 | /* |
34 | * budget might be (-1) if the caller does not | 35 | * budget might be (-1) if the caller does not |
35 | * want to bound this call, thus we need unsigned | 36 | * want to bound this call, thus we need unsigned |
36 | * minimum here. | 37 | * minimum here. |
37 | */ | 38 | */ |
38 | while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH, | 39 | while ((n = ib_poll_cq(cq, min_t(u32, batch, |
39 | budget - completed), wcs)) > 0) { | 40 | budget - completed), wcs)) > 0) { |
40 | for (i = 0; i < n; i++) { | 41 | for (i = 0; i < n; i++) { |
41 | struct ib_wc *wc = &wcs[i]; | 42 | struct ib_wc *wc = &wcs[i]; |
42 | 43 | ||
@@ -48,8 +49,7 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc) | |||
48 | 49 | ||
49 | completed += n; | 50 | completed += n; |
50 | 51 | ||
51 | if (n != IB_POLL_BATCH || | 52 | if (n != batch || (budget != -1 && completed >= budget)) |
52 | (budget != -1 && completed >= budget)) | ||
53 | break; | 53 | break; |
54 | } | 54 | } |
55 | 55 | ||
@@ -72,9 +72,9 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc) | |||
72 | */ | 72 | */ |
73 | int ib_process_cq_direct(struct ib_cq *cq, int budget) | 73 | int ib_process_cq_direct(struct ib_cq *cq, int budget) |
74 | { | 74 | { |
75 | struct ib_wc wcs[IB_POLL_BATCH]; | 75 | struct ib_wc wcs[IB_POLL_BATCH_DIRECT]; |
76 | 76 | ||
77 | return __ib_process_cq(cq, budget, wcs); | 77 | return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT); |
78 | } | 78 | } |
79 | EXPORT_SYMBOL(ib_process_cq_direct); | 79 | EXPORT_SYMBOL(ib_process_cq_direct); |
80 | 80 | ||
@@ -88,7 +88,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget) | |||
88 | struct ib_cq *cq = container_of(iop, struct ib_cq, iop); | 88 | struct ib_cq *cq = container_of(iop, struct ib_cq, iop); |
89 | int completed; | 89 | int completed; |
90 | 90 | ||
91 | completed = __ib_process_cq(cq, budget, NULL); | 91 | completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH); |
92 | if (completed < budget) { | 92 | if (completed < budget) { |
93 | irq_poll_complete(&cq->iop); | 93 | irq_poll_complete(&cq->iop); |
94 | if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) | 94 | if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) |
@@ -108,7 +108,8 @@ static void ib_cq_poll_work(struct work_struct *work) | |||
108 | struct ib_cq *cq = container_of(work, struct ib_cq, work); | 108 | struct ib_cq *cq = container_of(work, struct ib_cq, work); |
109 | int completed; | 109 | int completed; |
110 | 110 | ||
111 | completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, NULL); | 111 | completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc, |
112 | IB_POLL_BATCH); | ||
112 | if (completed >= IB_POLL_BUDGET_WORKQUEUE || | 113 | if (completed >= IB_POLL_BUDGET_WORKQUEUE || |
113 | ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) | 114 | ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) |
114 | queue_work(ib_comp_wq, &cq->work); | 115 | queue_work(ib_comp_wq, &cq->work); |