author	Linus Torvalds <torvalds@linux-foundation.org>	2016-09-06 15:33:12 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-09-06 15:33:12 -0400
commit	d060e0f603a4156087813d221d818bb39ec91429 (patch)
tree	066f0539bfd26cbb23da4b4643df119520abca6e
parent	46738ab31fe668ea1d4413dd459af2632f6fef8d (diff)
parent	16170d9c102764f76c58aad244e947f4e3f44590 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull rdma fixes from Doug Ledford:
"This is the second pull request for the rdma subsystem. Most of the
patches are small and obvious. I took in two patches that are larger
than I wanted this late in the cycle.
The first is the hfi1 patch that implements a work queue to test the
QSFP read state. I originally rejected the first patch for this
(which would have placed up to 20 seconds' worth of udelays in their
probe routine). They then rewrote it the way I wanted (use delayed
work tasks to wait asynchronously up to 20 seconds for the QSFP to
come alive), so I can't really complain about the size of getting what
I asked for :-/.
The second is large because it switches the locking in the debugfs
code from RCU to SRCU. Since a locking change like this is done all
at once, the size is what it is. It resolves a litany of debug
messages from the kernel, so I pulled it in for -rc.
The rest are all typical -rc worthy patches I think.
There will still be a third -rc pull request from the rdma subsystem
this release. I hope to have that one ready to go by the end of this
week or early next.
Summary:
- a smattering of small fixes across the core, ipoib, i40iw, isert,
cxgb4, and mlx4
- a slightly larger group of fixes to each of mlx5 and hfi1"
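
The hfi1 change described above replaces busy-wait udelays in probe with a
self-rearming delayed work item. What follows is a minimal sketch of that
pattern with hypothetical names (demo_*); the actual driver code is in the
hfi1/chip.c hunk further down:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define DEMO_MAX_RETRIES	20	/* give up after 20 x 500ms = 10s */
#define DEMO_RETRY_WAIT_MS	500

struct demo_port {
	struct delayed_work retry_work;
	unsigned int retries;
};

/* Assumed helper: returns 0 once the device responds. */
static int demo_test_read(struct demo_port *port);

/*
 * Work function: poke the device once; on failure, re-queue itself
 * instead of blocking the probe path with udelay().
 */
static void demo_retry_fn(struct work_struct *work)
{
	struct demo_port *port = container_of(work, struct demo_port,
					      retry_work.work);

	if (!demo_test_read(port)) {
		port->retries = 0;	/* success: proceed with link start */
		return;
	}
	if (port->retries++ < DEMO_MAX_RETRIES)
		schedule_delayed_work(&port->retry_work,
				      msecs_to_jiffies(DEMO_RETRY_WAIT_MS));
}

/*
 * At init time: INIT_DELAYED_WORK(&port->retry_work, demo_retry_fn);
 * at teardown: cancel_delayed_work_sync(&port->retry_work);
 */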
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
IB/hfi1: Rework debugfs to use SRCU
IB/hfi1: Make n_krcvqs be an unsigned long integer
IB/hfi1: Add QSFP sanity pre-check
IB/hfi1: Fix AHG KDETH Intr shift
IB/hfi1: Fix SGE length for misaligned PIO copy
IB/mlx5: Don't return errors from poll_cq
IB/mlx5: Use TIR number based on selector
IB/mlx5: Simplify code by removing return variable
IB/mlx5: Return EINVAL when caller specifies too many SGEs
IB/mlx4: Don't return errors from poll_cq
Revert "IB/mlx4: Return EAGAIN for any error in mlx4_ib_poll_one"
IB/ipoib: Fix memory corruption in ipoib cm mode connect flow
IB/core: Fix use after free in send_leave function
IB/cxgb4: Make _free_qp static to silence build warning
IB/isert: Properly release resources on DEVICE_REMOVAL
IB/hfi1: Fix the size parameter to find_first_bit
IB/mlx5: Fix the size parameter to find_first_bit
IB/hfi1: Clean up type used and casting
i40iw: Receive notification events correctly
i40iw: Update hw_iwarp_state
23 files changed, 226 insertions(+), 177 deletions(-)
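
The debugfs rework below exists because the old code held rcu_read_lock()
across operations that can sleep (GFP_KERNEL allocations, copy_from_user()),
which is what produced the debug messages mentioned in the pull text; SRCU
read-side sections are allowed to sleep. A minimal illustration of that
property, with hypothetical names, not code from the patch:

#include <linux/srcu.h>
#include <linux/slab.h>

DEFINE_STATIC_SRCU(demo_srcu);

static int demo_reader(void)
{
	int idx, ret = 0;
	void *buf;

	idx = srcu_read_lock(&demo_srcu);	/* SRCU read side: may sleep */
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);	/* sleeping allocation: would
						 * trigger warnings under
						 * rcu_read_lock() */
	if (!buf)
		ret = -ENOMEM;
	kfree(buf);
	srcu_read_unlock(&demo_srcu, idx);
	return ret;
}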
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 3a3c5d73bbfc..51c79b2fb0b8 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -106,7 +106,6 @@ struct mcast_group {
 	atomic_t		refcount;
 	enum mcast_group_state	state;
 	struct ib_sa_query	*query;
-	int			query_id;
 	u16			pkey_index;
 	u8			leave_state;
 	int			retries;
@@ -340,11 +339,7 @@ static int send_join(struct mcast_group *group, struct mcast_member *member)
 				       member->multicast.comp_mask,
 				       3000, GFP_KERNEL, join_handler, group,
 				       &group->query);
-	if (ret >= 0) {
-		group->query_id = ret;
-		ret = 0;
-	}
-	return ret;
+	return (ret > 0) ? 0 : ret;
 }
 
 static int send_leave(struct mcast_group *group, u8 leave_state)
@@ -364,11 +359,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)
 				       IB_SA_MCMEMBER_REC_JOIN_STATE,
 				       3000, GFP_KERNEL, leave_handler,
 				       group, &group->query);
-	if (ret >= 0) {
-		group->query_id = ret;
-		ret = 0;
-	}
-	return ret;
+	return (ret > 0) ? 0 : ret;
 }
 
 static void join_group(struct mcast_group *group, struct mcast_member *member,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index edb1172b6f54..690435229be7 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -683,7 +683,7 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
 	return 0;
 }
 
-void _free_qp(struct kref *kref)
+static void _free_qp(struct kref *kref)
 {
 	struct c4iw_qp *qhp;
 
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index b32638d58ae8..cc38004cea42 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -9490,6 +9490,78 @@ static void init_lcb(struct hfi1_devdata *dd)
 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
 }
 
+/*
+ * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
+ * on error.
+ */
+static int test_qsfp_read(struct hfi1_pportdata *ppd)
+{
+	int ret;
+	u8 status;
+
+	/* report success if not a QSFP */
+	if (ppd->port_type != PORT_TYPE_QSFP)
+		return 0;
+
+	/* read byte 2, the status byte */
+	ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
+	if (ret < 0)
+		return ret;
+	if (ret != 1)
+		return -EIO;
+
+	return 0; /* success */
+}
+
+/*
+ * Values for QSFP retry.
+ *
+ * Give up after 10s (20 x 500ms).  The overall timeout was empirically
+ * arrived at from experience on a large cluster.
+ */
+#define MAX_QSFP_RETRIES 20
+#define QSFP_RETRY_WAIT 500 /* msec */
+
+/*
+ * Try a QSFP read.  If it fails, schedule a retry for later.
+ * Called on first link activation after driver load.
+ */
+static void try_start_link(struct hfi1_pportdata *ppd)
+{
+	if (test_qsfp_read(ppd)) {
+		/* read failed */
+		if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
+			dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
+			return;
+		}
+		dd_dev_info(ppd->dd,
+			    "QSFP not responding, waiting and retrying %d\n",
+			    (int)ppd->qsfp_retry_count);
+		ppd->qsfp_retry_count++;
+		queue_delayed_work(ppd->hfi1_wq, &ppd->start_link_work,
+				   msecs_to_jiffies(QSFP_RETRY_WAIT));
+		return;
+	}
+	ppd->qsfp_retry_count = 0;
+
+	/*
+	 * Tune the SerDes to a ballpark setting for optimal signal and bit
+	 * error rate.  Needs to be done before starting the link.
+	 */
+	tune_serdes(ppd);
+	start_link(ppd);
+}
+
+/*
+ * Workqueue function to start the link after a delay.
+ */
+void handle_start_link(struct work_struct *work)
+{
+	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+						  start_link_work.work);
+	try_start_link(ppd);
+}
+
 int bringup_serdes(struct hfi1_pportdata *ppd)
 {
 	struct hfi1_devdata *dd = ppd->dd;
@@ -9525,14 +9597,8 @@ int bringup_serdes(struct hfi1_pportdata *ppd)
 		set_qsfp_int_n(ppd, 1);
 	}
 
-	/*
-	 * Tune the SerDes to a ballpark setting for
-	 * optimal signal and bit error rate
-	 * Needs to be done before starting the link
-	 */
-	tune_serdes(ppd);
-
-	return start_link(ppd);
+	try_start_link(ppd);
+	return 0;
 }
 
 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
@@ -9549,6 +9615,10 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
 	ppd->driver_link_ready = 0;
 	ppd->link_enabled = 0;
 
+	ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
+	flush_delayed_work(&ppd->start_link_work);
+	cancel_delayed_work_sync(&ppd->start_link_work);
+
 	ppd->offline_disabled_reason =
 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
 	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
@@ -12865,7 +12935,7 @@ fail:
  */
 static int set_up_context_variables(struct hfi1_devdata *dd)
 {
-	int num_kernel_contexts;
+	unsigned long num_kernel_contexts;
 	int total_contexts;
 	int ret;
 	unsigned ngroups;
@@ -12894,9 +12964,9 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
 	 */
 	if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
 		dd_dev_err(dd,
-			   "Reducing # kernel rcv contexts to: %d, from %d\n",
+			   "Reducing # kernel rcv contexts to: %d, from %lu\n",
 			   (int)(dd->chip_send_contexts - num_vls - 1),
-			   (int)num_kernel_contexts);
+			   num_kernel_contexts);
 		num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
 	}
 	/*
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index ed11107c50fe..e29573769efc 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -706,6 +706,7 @@ void handle_link_up(struct work_struct *work);
 void handle_link_down(struct work_struct *work);
 void handle_link_downgrade(struct work_struct *work);
 void handle_link_bounce(struct work_struct *work);
+void handle_start_link(struct work_struct *work);
 void handle_sma_message(struct work_struct *work);
 void reset_qsfp(struct hfi1_pportdata *ppd);
 void qsfp_event(struct work_struct *work);
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index a49cc88f08a2..5e9be16f6cd3 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -59,6 +59,40 @@
 
 static struct dentry *hfi1_dbg_root;
 
+/* wrappers to enforce srcu in seq file */
+static ssize_t hfi1_seq_read(
+	struct file *file,
+	char __user *buf,
+	size_t size,
+	loff_t *ppos)
+{
+	struct dentry *d = file->f_path.dentry;
+	int srcu_idx;
+	ssize_t r;
+
+	r = debugfs_use_file_start(d, &srcu_idx);
+	if (likely(!r))
+		r = seq_read(file, buf, size, ppos);
+	debugfs_use_file_finish(srcu_idx);
+	return r;
+}
+
+static loff_t hfi1_seq_lseek(
+	struct file *file,
+	loff_t offset,
+	int whence)
+{
+	struct dentry *d = file->f_path.dentry;
+	int srcu_idx;
+	loff_t r;
+
+	r = debugfs_use_file_start(d, &srcu_idx);
+	if (likely(!r))
+		r = seq_lseek(file, offset, whence);
+	debugfs_use_file_finish(srcu_idx);
+	return r;
+}
+
 #define private2dd(file) (file_inode(file)->i_private)
 #define private2ppd(file) (file_inode(file)->i_private)
 
@@ -87,8 +121,8 @@ static int _##name##_open(struct inode *inode, struct file *s) \
 static const struct file_operations _##name##_file_ops = { \
 	.owner   = THIS_MODULE, \
 	.open    = _##name##_open, \
-	.read    = seq_read, \
-	.llseek  = seq_lseek, \
+	.read    = hfi1_seq_read, \
+	.llseek  = hfi1_seq_lseek, \
 	.release = seq_release \
 }
 
@@ -105,11 +139,9 @@ do { \
 	DEBUGFS_FILE_CREATE(#name, parent, data, &_##name##_file_ops, S_IRUGO)
 
 static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
 	struct hfi1_opcode_stats_perctx *opstats;
 
-	rcu_read_lock();
 	if (*pos >= ARRAY_SIZE(opstats->stats))
 		return NULL;
 	return pos;
@@ -126,9 +158,7 @@ static void *_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void _opcode_stats_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-	rcu_read_unlock();
 }
 
 static int _opcode_stats_seq_show(struct seq_file *s, void *v)
@@ -285,12 +315,10 @@ DEBUGFS_SEQ_FILE_OPEN(qp_stats)
 DEBUGFS_FILE_OPS(qp_stats);
 
 static void *_sdes_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
 	struct hfi1_ibdev *ibd;
 	struct hfi1_devdata *dd;
 
-	rcu_read_lock();
 	ibd = (struct hfi1_ibdev *)s->private;
 	dd = dd_from_dev(ibd);
 	if (!dd->per_sdma || *pos >= dd->num_sdma)
@@ -310,9 +338,7 @@ static void *_sdes_seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void _sdes_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-	rcu_read_unlock();
 }
 
 static int _sdes_seq_show(struct seq_file *s, void *v)
@@ -339,11 +365,9 @@ static ssize_t dev_counters_read(struct file *file, char __user *buf,
 	struct hfi1_devdata *dd;
 	ssize_t rval;
 
-	rcu_read_lock();
 	dd = private2dd(file);
 	avail = hfi1_read_cntrs(dd, NULL, &counters);
 	rval = simple_read_from_buffer(buf, count, ppos, counters, avail);
-	rcu_read_unlock();
 	return rval;
 }
 
@@ -356,11 +380,9 @@ static ssize_t dev_names_read(struct file *file, char __user *buf,
 	struct hfi1_devdata *dd;
 	ssize_t rval;
 
-	rcu_read_lock();
 	dd = private2dd(file);
 	avail = hfi1_read_cntrs(dd, &names, NULL);
 	rval = simple_read_from_buffer(buf, count, ppos, names, avail);
-	rcu_read_unlock();
 	return rval;
 }
 
@@ -383,11 +405,9 @@ static ssize_t portnames_read(struct file *file, char __user *buf,
 	struct hfi1_devdata *dd;
 	ssize_t rval;
 
-	rcu_read_lock();
 	dd = private2dd(file);
 	avail = hfi1_read_portcntrs(dd->pport, &names, NULL);
 	rval = simple_read_from_buffer(buf, count, ppos, names, avail);
-	rcu_read_unlock();
 	return rval;
 }
 
@@ -400,11 +420,9 @@ static ssize_t portcntrs_debugfs_read(struct file *file, char __user *buf,
 	struct hfi1_pportdata *ppd;
 	ssize_t rval;
 
-	rcu_read_lock();
 	ppd = private2ppd(file);
 	avail = hfi1_read_portcntrs(ppd, NULL, &counters);
 	rval = simple_read_from_buffer(buf, count, ppos, counters, avail);
-	rcu_read_unlock();
 	return rval;
 }
 
@@ -434,16 +452,13 @@ static ssize_t asic_flags_read(struct file *file, char __user *buf,
 	int used;
 	int i;
 
-	rcu_read_lock();
 	ppd = private2ppd(file);
 	dd = ppd->dd;
 	size = PAGE_SIZE;
 	used = 0;
 	tmp = kmalloc(size, GFP_KERNEL);
-	if (!tmp) {
-		rcu_read_unlock();
+	if (!tmp)
 		return -ENOMEM;
-	}
 
 	scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
 	used += scnprintf(tmp + used, size - used,
@@ -470,7 +485,6 @@ static ssize_t asic_flags_read(struct file *file, char __user *buf,
 	used += scnprintf(tmp + used, size - used, "Write bits to clear\n");
 
 	ret = simple_read_from_buffer(buf, count, ppos, tmp, used);
-	rcu_read_unlock();
 	kfree(tmp);
 	return ret;
 }
@@ -486,15 +500,12 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf,
 	u64 scratch0;
 	u64 clear;
 
-	rcu_read_lock();
 	ppd = private2ppd(file);
 	dd = ppd->dd;
 
 	buff = kmalloc(count + 1, GFP_KERNEL);
-	if (!buff) {
-		ret = -ENOMEM;
-		goto do_return;
-	}
+	if (!buff)
+		return -ENOMEM;
 
 	ret = copy_from_user(buff, buf, count);
 	if (ret > 0) {
@@ -527,8 +538,6 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf,
 
 do_free:
 	kfree(buff);
-do_return:
-	rcu_read_unlock();
 	return ret;
 }
 
@@ -542,18 +551,14 @@ static ssize_t qsfp_debugfs_dump(struct file *file, char __user *buf,
 	char *tmp;
 	int ret;
 
-	rcu_read_lock();
 	ppd = private2ppd(file);
 	tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!tmp) {
-		rcu_read_unlock();
+	if (!tmp)
 		return -ENOMEM;
-	}
 
 	ret = qsfp_dump(ppd, tmp, PAGE_SIZE);
 	if (ret > 0)
 		ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
-	rcu_read_unlock();
 	kfree(tmp);
 	return ret;
 }
@@ -569,7 +574,6 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
 	int offset;
 	int total_written;
 
-	rcu_read_lock();
 	ppd = private2ppd(file);
 
 	/* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
@@ -577,16 +581,12 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
 	offset = *ppos & 0xffff;
 
 	/* explicitly reject invalid address 0 to catch cp and cat */
-	if (i2c_addr == 0) {
-		ret = -EINVAL;
-		goto _return;
-	}
+	if (i2c_addr == 0)
+		return -EINVAL;
 
 	buff = kmalloc(count, GFP_KERNEL);
-	if (!buff) {
-		ret = -ENOMEM;
-		goto _return;
-	}
+	if (!buff)
+		return -ENOMEM;
 
 	ret = copy_from_user(buff, buf, count);
 	if (ret > 0) {
@@ -606,8 +606,6 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
 
 _free:
 	kfree(buff);
-_return:
-	rcu_read_unlock();
 	return ret;
 }
 
@@ -636,7 +634,6 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
 	int offset;
 	int total_read;
 
-	rcu_read_lock();
 	ppd = private2ppd(file);
 
 	/* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
@@ -644,16 +641,12 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
 	offset = *ppos & 0xffff;
 
 	/* explicitly reject invalid address 0 to catch cp and cat */
-	if (i2c_addr == 0) {
-		ret = -EINVAL;
-		goto _return;
-	}
+	if (i2c_addr == 0)
+		return -EINVAL;
 
 	buff = kmalloc(count, GFP_KERNEL);
-	if (!buff) {
-		ret = -ENOMEM;
-		goto _return;
-	}
+	if (!buff)
+		return -ENOMEM;
 
 	total_read = i2c_read(ppd, target, i2c_addr, offset, buff, count);
 	if (total_read < 0) {
@@ -673,8 +666,6 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
 
 _free:
 	kfree(buff);
-_return:
-	rcu_read_unlock();
 	return ret;
 }
 
@@ -701,26 +692,20 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
 	int ret;
 	int total_written;
 
-	rcu_read_lock();
-	if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */
-		ret = -EINVAL;
-		goto _return;
-	}
+	if (*ppos + count > QSFP_PAGESIZE * 4) /* base page + page00-page03 */
+		return -EINVAL;
 
 	ppd = private2ppd(file);
 
 	buff = kmalloc(count, GFP_KERNEL);
-	if (!buff) {
-		ret = -ENOMEM;
-		goto _return;
-	}
+	if (!buff)
+		return -ENOMEM;
 
 	ret = copy_from_user(buff, buf, count);
 	if (ret > 0) {
 		ret = -EFAULT;
 		goto _free;
 	}
-
 	total_written = qsfp_write(ppd, target, *ppos, buff, count);
 	if (total_written < 0) {
 		ret = total_written;
@@ -733,8 +718,6 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
 
 _free:
 	kfree(buff);
-_return:
-	rcu_read_unlock();
 	return ret;
 }
 
@@ -761,7 +744,6 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
 	int ret;
 	int total_read;
 
-	rcu_read_lock();
 	if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */
 		ret = -EINVAL;
 		goto _return;
@@ -794,7 +776,6 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
 _free:
 	kfree(buff);
 _return:
-	rcu_read_unlock();
 	return ret;
 }
 
@@ -1010,7 +991,6 @@ void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd)
 	debugfs_remove_recursive(ibd->hfi1_ibdev_dbg);
 out:
 	ibd->hfi1_ibdev_dbg = NULL;
-	synchronize_rcu();
 }
 
 /*
@@ -1035,9 +1015,7 @@ static const char * const hfi1_statnames[] = {
 };
 
 static void *_driver_stats_names_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
-	rcu_read_lock();
 	if (*pos >= ARRAY_SIZE(hfi1_statnames))
 		return NULL;
 	return pos;
@@ -1055,9 +1033,7 @@ static void *_driver_stats_names_seq_next(
 }
 
 static void _driver_stats_names_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-	rcu_read_unlock();
 }
 
 static int _driver_stats_names_seq_show(struct seq_file *s, void *v)
@@ -1073,9 +1049,7 @@ DEBUGFS_SEQ_FILE_OPEN(driver_stats_names)
 DEBUGFS_FILE_OPS(driver_stats_names);
 
 static void *_driver_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
-	rcu_read_lock();
 	if (*pos >= ARRAY_SIZE(hfi1_statnames))
 		return NULL;
 	return pos;
@@ -1090,9 +1064,7 @@ static void *_driver_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void _driver_stats_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-	rcu_read_unlock();
 }
 
 static u64 hfi1_sps_ints(void)
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index a021e660d482..325ec211370f 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -605,6 +605,7 @@ struct hfi1_pportdata {
 	struct work_struct freeze_work;
 	struct work_struct link_downgrade_work;
 	struct work_struct link_bounce_work;
+	struct delayed_work start_link_work;
 	/* host link state variables */
 	struct mutex hls_lock;
 	u32 host_link_state;
@@ -659,6 +660,7 @@ struct hfi1_pportdata {
 	u8 linkinit_reason;
 	u8 local_tx_rate;	/* rate given to 8051 firmware */
 	u8 last_pstate;		/* info only */
+	u8 qsfp_retry_count;
 
 	/* placeholders for IB MAD packet settings */
 	u8 overrun_threshold;
@@ -1804,7 +1806,7 @@ extern unsigned int hfi1_max_mtu;
 extern unsigned int hfi1_cu;
 extern unsigned int user_credit_return_threshold;
 extern int num_user_contexts;
-extern unsigned n_krcvqs;
+extern unsigned long n_krcvqs;
 extern uint krcvqs[];
 extern int krcvqsset;
 extern uint kdeth_qp;
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index b7935451093c..384b43d2fd49 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -94,7 +94,7 @@ module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
 MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");
 
 /* computed based on above array */
-unsigned n_krcvqs;
+unsigned long n_krcvqs;
 
 static unsigned hfi1_rcvarr_split = 25;
 module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
@@ -500,6 +500,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
 	INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
 	INIT_WORK(&ppd->sma_message_work, handle_sma_message);
 	INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
+	INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
 	INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
 	INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
 
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index 39e42c373a01..7ffc14f21523 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -2604,7 +2604,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
 	u8 lq, num_vls;
 	u8 res_lli, res_ler;
 	u64 port_mask;
-	unsigned long port_num;
+	u8 port_num;
 	unsigned long vl;
 	u32 vl_select_mask;
 	int vfi;
@@ -2638,9 +2638,9 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
 	 */
 	port_mask = be64_to_cpu(req->port_select_mask[3]);
 	port_num = find_first_bit((unsigned long *)&port_mask,
-				  sizeof(port_mask));
+				  sizeof(port_mask) * 8);
 
-	if ((u8)port_num != port) {
+	if (port_num != port) {
 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 		return reply((struct ib_mad_hdr *)pmp);
 	}
@@ -2842,7 +2842,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
 	 */
 	port_mask = be64_to_cpu(req->port_select_mask[3]);
 	port_num = find_first_bit((unsigned long *)&port_mask,
-				  sizeof(port_mask));
+				  sizeof(port_mask) * 8);
 
 	if (port_num != port) {
 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
@@ -3015,7 +3015,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
 	 */
 	port_mask = be64_to_cpu(req->port_select_mask[3]);
 	port_num = find_first_bit((unsigned long *)&port_mask,
-				  sizeof(port_mask));
+				  sizeof(port_mask) * 8);
 
 	if (port_num != port) {
 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
@@ -3252,7 +3252,7 @@ static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
 	 */
 	port_mask = be64_to_cpu(req->port_select_mask[3]);
 	port_num = find_first_bit((unsigned long *)&port_mask,
-				  sizeof(port_mask));
+				  sizeof(port_mask) * 8);
 
 	if (port_num != port) {
 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c
index 8c25e1b58849..3a1ef3056282 100644
--- a/drivers/infiniband/hw/hfi1/pio_copy.c
+++ b/drivers/infiniband/hw/hfi1/pio_copy.c
@@ -771,6 +771,9 @@ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
 			read_extra_bytes(pbuf, from, to_fill);
 			from += to_fill;
 			nbytes -= to_fill;
+			/* may not be enough valid bytes left to align */
+			if (extra > nbytes)
+				extra = nbytes;
 
 			/* ...now write carry */
 			dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
@@ -798,6 +801,15 @@ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
 			read_low_bytes(pbuf, from, extra);
 			from += extra;
 			nbytes -= extra;
+			/*
+			 * If no bytes are left, return early - we are done.
+			 * NOTE: This short-circuit is *required* because
+			 * "extra" may have been reduced in size and "from"
+			 * is not aligned, as required when leaving this
+			 * if block.
+			 */
+			if (nbytes == 0)
+				return;
 		}
 
 		/* at this point, from is QW aligned */
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 0ecf27903dc2..1694037d1eee 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -114,6 +114,8 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
 #define KDETH_HCRC_LOWER_SHIFT    24
 #define KDETH_HCRC_LOWER_MASK     0xff
 
+#define AHG_KDETH_INTR_SHIFT 12
+
 #define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
 #define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
 
@@ -1480,7 +1482,8 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
 		/* Clear KDETH.SH on last packet */
 		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) {
 			val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset,
-						     INTR) >> 16);
+						     INTR) <<
+					   AHG_KDETH_INTR_SHIFT);
 			val &= cpu_to_le16(~(1U << 13));
 			AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
 		} else {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 3ee0cad96bc6..0c92a40b3e86 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -265,6 +265,7 @@ void i40iw_next_iw_state(struct i40iw_qp *iwqp,
 	info.dont_send_fin = false;
 	if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR))
 		info.reset_tcp_conn = true;
+	iwqp->hw_iwarp_state = state;
 	i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
 }
 
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 0cbbe4038298..445e230d5ff8 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -100,7 +100,7 @@ static struct notifier_block i40iw_net_notifier = {
 	.notifier_call = i40iw_net_event
 };
 
-static int i40iw_notifiers_registered;
+static atomic_t i40iw_notifiers_registered;
 
 /**
  * i40iw_find_i40e_handler - find a handler given a client info
@@ -1342,12 +1342,11 @@ exit:
 */
 static void i40iw_register_notifiers(void)
 {
-	if (!i40iw_notifiers_registered) {
+	if (atomic_inc_return(&i40iw_notifiers_registered) == 1) {
 		register_inetaddr_notifier(&i40iw_inetaddr_notifier);
 		register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
 		register_netevent_notifier(&i40iw_net_notifier);
 	}
-	i40iw_notifiers_registered++;
 }
 
 /**
@@ -1429,8 +1428,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
 		i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
 		/* fallthrough */
 	case INET_NOTIFIER:
-		if (i40iw_notifiers_registered > 0) {
-			i40iw_notifiers_registered--;
+		if (!atomic_dec_return(&i40iw_notifiers_registered)) {
 			unregister_netevent_notifier(&i40iw_net_notifier);
 			unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
 			unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 006db6436e3b..5df63dacaaa3 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -687,12 +687,6 @@ repoll:
 	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
 		MLX4_CQE_OPCODE_ERROR;
 
-	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
-		     is_send)) {
-		pr_warn("Completion for NOP opcode detected!\n");
-		return -EAGAIN;
-	}
-
 	/* Resize CQ in progress */
 	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
 		if (cq->resize_buf) {
@@ -718,12 +712,6 @@ repoll:
 		 */
 		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
 				       be32_to_cpu(cqe->vlan_my_qpn));
-		if (unlikely(!mqp)) {
-			pr_warn("CQ %06x with entry for unknown QPN %06x\n",
-				cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
-			return -EAGAIN;
-		}
-
 		*cur_qp = to_mibqp(mqp);
 	}
 
@@ -736,11 +724,6 @@ repoll:
 		/* SRQ is also in the radix tree */
 		msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
 				       srq_num);
-		if (unlikely(!msrq)) {
-			pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
-				cq->mcq.cqn, srq_num);
-			return -EAGAIN;
-		}
 	}
 
 	if (is_send) {
@@ -891,7 +874,6 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	struct mlx4_ib_qp *cur_qp = NULL;
 	unsigned long flags;
 	int npolled;
-	int err = 0;
 	struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
 
 	spin_lock_irqsave(&cq->lock, flags);
@@ -901,8 +883,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	}
 
 	for (npolled = 0; npolled < num_entries; ++npolled) {
-		err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
-		if (err)
+		if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
 			break;
 	}
 
@@ -911,10 +892,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 out:
 	spin_unlock_irqrestore(&cq->lock, flags);
 
-	if (err == 0 || err == -EAGAIN)
-		return npolled;
-	else
-		return err;
+	return npolled;
 }
 
 int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 308a358e5b46..e4fac9292e4a 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -553,12 +553,6 @@ repoll:
 		 * from the table.
 		 */
 		mqp = __mlx5_qp_lookup(dev->mdev, qpn);
-		if (unlikely(!mqp)) {
-			mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
-				     cq->mcq.cqn, qpn);
-			return -EINVAL;
-		}
-
 		*cur_qp = to_mibqp(mqp);
 	}
 
@@ -619,13 +613,6 @@ repoll:
 		read_lock(&dev->mdev->priv.mkey_table.lock);
 		mmkey = __mlx5_mr_lookup(dev->mdev,
 					 mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
-		if (unlikely(!mmkey)) {
-			read_unlock(&dev->mdev->priv.mkey_table.lock);
-			mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
-				     cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
-			return -EINVAL;
-		}
-
 		mr = to_mibmr(mmkey);
 		get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
 		mr->sig->sig_err_exists = true;
@@ -676,7 +663,6 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	unsigned long flags;
 	int soft_polled = 0;
 	int npolled;
-	int err = 0;
 
 	spin_lock_irqsave(&cq->lock, flags);
 	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -688,8 +674,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 		soft_polled = poll_soft_wc(cq, num_entries, wc);
 
 	for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
-		err = mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled);
-		if (err)
+		if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
 			break;
 	}
 
@@ -698,10 +683,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 out:
 	spin_unlock_irqrestore(&cq->lock, flags);
 
-	if (err == 0 || err == -EAGAIN)
-		return soft_polled + npolled;
-	else
-		return err;
+	return soft_polled + npolled;
 }
 
 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 1b4094baa2de..8150ea372c53 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1849,6 +1849,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 					   int domain)
 {
 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
+	struct mlx5_ib_qp *mqp = to_mqp(qp);
 	struct mlx5_ib_flow_handler *handler = NULL;
 	struct mlx5_flow_destination *dst = NULL;
 	struct mlx5_ib_flow_prio *ft_prio;
@@ -1875,7 +1876,10 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 	}
 
 	dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-	dst->tir_num = to_mqp(qp)->raw_packet_qp.rq.tirn;
+	if (mqp->flags & MLX5_IB_QP_RSS)
+		dst->tir_num = mqp->rss_qp.tirn;
+	else
+		dst->tir_num = mqp->raw_packet_qp.rq.tirn;
 
 	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
 		if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) {
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 40df2cca0609..996b54e366b0 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -71,7 +71,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
 
 	addr = addr >> page_shift;
 	tmp = (unsigned long)addr;
-	m = find_first_bit(&tmp, sizeof(tmp));
+	m = find_first_bit(&tmp, BITS_PER_LONG);
 	skip = 1 << m;
 	mask = skip - 1;
 	i = 0;
@@ -81,7 +81,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
 		for (k = 0; k < len; k++) {
 			if (!(i & mask)) {
 				tmp = (unsigned long)pfn;
-				m = min_t(unsigned long, m, find_first_bit(&tmp, sizeof(tmp)));
+				m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG));
 				skip = 1 << m;
 				mask = skip - 1;
 				base = pfn;
@@ -89,7 +89,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
 			} else {
 				if (base + p != pfn) {
 					tmp = (unsigned long)p;
-					m = find_first_bit(&tmp, sizeof(tmp));
+					m = find_first_bit(&tmp, BITS_PER_LONG);
 					skip = 1 << m;
 					mask = skip - 1;
 					base = pfn;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 372385d0f993..95146f4aa3e3 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -402,6 +402,7 @@ enum mlx5_ib_qp_flags {
 	/* QP uses 1 as its source QP number */
 	MLX5_IB_QP_SQPN_QP1		= 1 << 6,
 	MLX5_IB_QP_CAP_SCATTER_FCS	= 1 << 7,
+	MLX5_IB_QP_RSS			= 1 << 8,
 };
 
 struct mlx5_umr_wr {
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 0dd7d93cac95..affc3f6598ca 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -1449,6 +1449,7 @@ create_tir: | |||
1449 | kvfree(in); | 1449 | kvfree(in); |
1450 | /* qpn is reserved for that QP */ | 1450 | /* qpn is reserved for that QP */ |
1451 | qp->trans_qp.base.mqp.qpn = 0; | 1451 | qp->trans_qp.base.mqp.qpn = 0; |
1452 | qp->flags |= MLX5_IB_QP_RSS; | ||
1452 | return 0; | 1453 | return 0; |
1453 | 1454 | ||
1454 | err: | 1455 | err: |
@@ -3658,12 +3659,8 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
 		     struct ib_send_wr *wr, unsigned *idx,
 		     int *size, int nreq)
 {
-	int err = 0;
-
-	if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
-		err = -ENOMEM;
-		return err;
-	}
+	if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
+		return -ENOMEM;
 
 	*idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
 	*seg = mlx5_get_send_wqe(qp, *idx);
@@ -3679,7 +3676,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
 	*seg += sizeof(**ctrl);
 	*size = sizeof(**ctrl) / 16;
 
-	return err;
+	return 0;
 }
 
 static void finish_wqe(struct mlx5_ib_qp *qp,
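The same simplification in miniature (an illustrative fragment, not driver code): with a single failure site, returning the constant directly drops the err local and leaves an explicit return 0 on the success path.

static int begin_demo(bool ring_full)
{
	if (ring_full)
		return -ENOMEM;	/* report overflow at the point it is seen */

	/* ... set up the WQE segments ... */
	return 0;		/* success path is now unmistakable */
}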
@@ -3758,7 +3755,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		num_sge = wr->num_sge;
 		if (unlikely(num_sge > qp->sq.max_gs)) {
 			mlx5_ib_warn(dev, "\n");
-			err = -ENOMEM;
+			err = -EINVAL;
 			*bad_wr = wr;
 			goto out;
 		}
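The errno swap is not cosmetic: too many SGEs in a work request is a malformed argument (-EINVAL), not resource exhaustion (-ENOMEM), and callers treat the two differently. An illustrative ULP-side sketch (example_post is hypothetical; ib_post_send and the errno semantics are real):

/*
 * A ULP can reasonably retry -ENOMEM (send queue momentarily full),
 * but -EINVAL means the WR itself is broken and reposting cannot help.
 */
static int example_post(struct ib_qp *qp, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr;
	int ret = ib_post_send(qp, wr, &bad_wr);

	if (ret == -ENOMEM)
		return ret;	/* queue full: back off and repost later */
	if (ret == -EINVAL)
		WARN_ONCE(1, "malformed WR (e.g. num_sge > max_gs)\n");
	return ret;
}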
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 4f7d9b48df64..9dbfcc0ab577 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -478,6 +478,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 		struct ipoib_ah *address, u32 qpn);
 void ipoib_reap_ah(struct work_struct *work);
 
+struct ipoib_path *__path_find(struct net_device *dev, void *gid);
 void ipoib_mark_paths_invalid(struct net_device *dev);
 void ipoib_flush_paths(struct net_device *dev);
 int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 951d9abcca8b..4ad297d3de89 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1318,6 +1318,8 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
 	}
 }
 
+#define QPN_AND_OPTIONS_OFFSET 4
+
 static void ipoib_cm_tx_start(struct work_struct *work)
 {
 	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
@@ -1326,6 +1328,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
 	struct ipoib_neigh *neigh;
 	struct ipoib_cm_tx *p;
 	unsigned long flags;
+	struct ipoib_path *path;
 	int ret;
 
 	struct ib_sa_path_rec pathrec;
@@ -1338,7 +1341,19 @@ static void ipoib_cm_tx_start(struct work_struct *work)
 		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
 		list_del_init(&p->list);
 		neigh = p->neigh;
+
 		qpn = IPOIB_QPN(neigh->daddr);
+		/*
+		 * As long as the search is with these 2 locks,
+		 * path existence indicates its validity.
+		 */
+		path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+		if (!path) {
+			pr_info("%s ignore not valid path %pI6\n",
+				__func__,
+				neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+			goto free_neigh;
+		}
 		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
 
 		spin_unlock_irqrestore(&priv->lock, flags);
@@ -1350,6 +1365,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
 		spin_lock_irqsave(&priv->lock, flags);
 
 		if (ret) {
+free_neigh:
 			neigh = p->neigh;
 			if (neigh) {
 				neigh->cm = NULL;
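The corruption being fixed: the cached p->path pointer can be freed by another thread before the worker dereferences it. The fix re-checks the path's existence in the tree under priv->lock, the same lock the free side holds, before copying anything out. The pattern in isolation (illustrative; the identifiers mirror the hunk but this is not the driver's code, and copy_pathrec_if_valid is a hypothetical helper):

/*
 * Re-validate a possibly-stale path under the lock that serializes
 * against its teardown, and copy the record out while still locked.
 */
static bool copy_pathrec_if_valid(struct ipoib_dev_priv *priv,
				  struct net_device *dev,
				  struct ipoib_neigh *neigh,
				  struct ib_sa_path_rec *pathrec)
{
	struct ipoib_path *path;
	unsigned long flags;
	bool valid = false;

	spin_lock_irqsave(&priv->lock, flags);
	path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
	if (path) {
		*pathrec = path->pathrec;	/* safe while lock is held */
		valid = true;
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return valid;	/* false: path torn down, drop this connect */
}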
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 74bcaa064226..cc1c1b062ea5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -485,7 +485,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
 	return -EINVAL;
 }
 
-static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
+struct ipoib_path *__path_find(struct net_device *dev, void *gid)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct rb_node *n = priv->path_tree.rb_node;
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 7914c14478cd..cae9bbcc27e7 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -403,6 +403,7 @@ isert_init_conn(struct isert_conn *isert_conn)
 	INIT_LIST_HEAD(&isert_conn->node);
 	init_completion(&isert_conn->login_comp);
 	init_completion(&isert_conn->login_req_comp);
+	init_waitqueue_head(&isert_conn->rem_wait);
 	kref_init(&isert_conn->kref);
 	mutex_init(&isert_conn->mutex);
 	INIT_WORK(&isert_conn->release_work, isert_release_work);
@@ -578,7 +579,8 @@ isert_connect_release(struct isert_conn *isert_conn)
 	BUG_ON(!device);
 
 	isert_free_rx_descriptors(isert_conn);
-	if (isert_conn->cm_id)
+	if (isert_conn->cm_id &&
+	    !isert_conn->dev_removed)
 		rdma_destroy_id(isert_conn->cm_id);
 
 	if (isert_conn->qp) {
@@ -593,7 +595,10 @@ isert_connect_release(struct isert_conn *isert_conn)
 
 	isert_device_put(device);
 
-	kfree(isert_conn);
+	if (isert_conn->dev_removed)
+		wake_up_interruptible(&isert_conn->rem_wait);
+	else
+		kfree(isert_conn);
 }
 
 static void
@@ -753,6 +758,7 @@ static int
 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
 	struct isert_np *isert_np = cma_id->context;
+	struct isert_conn *isert_conn;
 	int ret = 0;
 
 	isert_info("%s (%d): status %d id %p np %p\n",
@@ -773,10 +779,21 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 		break;
 	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
 	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
-	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
 		ret = isert_disconnected_handler(cma_id, event->event);
 		break;
+	case RDMA_CM_EVENT_DEVICE_REMOVAL:
+		isert_conn = cma_id->qp->qp_context;
+		isert_conn->dev_removed = true;
+		isert_disconnected_handler(cma_id, event->event);
+		wait_event_interruptible(isert_conn->rem_wait,
+					 isert_conn->state == ISER_CONN_DOWN);
+		kfree(isert_conn);
+		/*
+		 * return non-zero from the callback to destroy
+		 * the rdma cm id
+		 */
+		return 1;
 	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
 	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
 	case RDMA_CM_EVENT_CONNECT_ERROR:
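The shape of this fix is a classic ownership handoff: the CM callback must not return from DEVICE_REMOVAL while the release work can still touch the connection, so the final kfree() moves to whichever side runs last, coordinated through the new rem_wait/dev_removed fields. The two halves side by side (fragments from the hunks above, gathered here for readability, not new driver code):

/* release side (isert_connect_release): */
	if (isert_conn->dev_removed)
		wake_up_interruptible(&isert_conn->rem_wait);	/* handler frees */
	else
		kfree(isert_conn);				/* normal teardown */

/* event-handler side (isert_cma_handler, DEVICE_REMOVAL): */
	isert_conn->dev_removed = true;
	isert_disconnected_handler(cma_id, event->event);
	wait_event_interruptible(isert_conn->rem_wait,
				 isert_conn->state == ISER_CONN_DOWN);
	kfree(isert_conn);	/* safe: release work has finished */
	return 1;		/* non-zero tells the CM to destroy cma_id */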
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index fc791efe3a10..c02ada57d7f5 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -158,6 +158,8 @@ struct isert_conn {
 	struct work_struct	release_work;
 	bool			logout_posted;
 	bool			snd_w_inv;
+	wait_queue_head_t	rem_wait;
+	bool			dev_removed;
 };
 
 #define ISERT_MAX_CQ 64