Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--   drivers/infiniband/core/cma.c          5
-rw-r--r--   drivers/infiniband/core/mad.c          5
-rw-r--r--   drivers/infiniband/core/sa_query.c    32
-rw-r--r--   drivers/infiniband/core/uverbs_cmd.c  27
-rw-r--r--   drivers/infiniband/core/verbs.c       43
5 files changed, 62 insertions, 50 deletions
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 944cd90417bc..d2d5d004f16d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1126,10 +1126,7 @@ static bool validate_ipv4_net_dev(struct net_device *net_dev,
 
 	rcu_read_lock();
 	err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
-	if (err)
-		return false;
-
-	ret = FIB_RES_DEV(res) == net_dev;
+	ret = err == 0 && FIB_RES_DEV(res) == net_dev;
 	rcu_read_unlock();
 
 	return ret;
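The hunk above folds the fib_lookup() error check into the computed boolean so that rcu_read_unlock() is reached on every path; the old code returned false while still inside the RCU read-side critical section. Below is a minimal stand-alone sketch of the same pattern, with a pthread rwlock standing in for the kernel's RCU read lock; validate_dev(), lookup_dev() and table_dev are made-up names for illustration, not part of the patch.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static const void *table_dev;                  /* protected by table_lock */

static int lookup_dev(const void **dev)        /* hypothetical lookup helper */
{
	*dev = table_dev;
	return table_dev ? 0 : -1;             /* non-zero means "not found" */
}

static bool validate_dev(const void *net_dev)
{
	const void *found;
	int err;
	bool ret;

	pthread_rwlock_rdlock(&table_lock);
	err = lookup_dev(&found);
	/* No early return here: the unlock below must run on every path. */
	ret = (err == 0 && found == net_dev);
	pthread_rwlock_unlock(&table_lock);

	return ret;
}

int main(void)
{
	int dummy;

	table_dev = &dummy;
	printf("%d\n", validate_dev(&dummy));  /* prints 1 */
	return 0;
}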
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 8d8af7a41a30..2281de122038 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1811,6 +1811,11 @@ static int validate_mad(const struct ib_mad_hdr *mad_hdr,
 		if (qp_num == 0)
 			valid = 1;
 	} else {
+		/* CM attributes other than ClassPortInfo only use Send method */
+		if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
+		    (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
+		    (mad_hdr->method != IB_MGMT_METHOD_SEND))
+			goto out;
 		/* Filter GSI packets sent to QP0 */
 		if (qp_num != 0)
 			valid = 1;
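The added filter can be read as a standalone predicate: on the GSI QP, a CM-class MAD is accepted only with the Send method unless it carries the ClassPortInfo attribute. A simplified user-space sketch follows; the struct and the MGMT_* constants are illustrative stand-ins for ib_mad_hdr and the kernel's IB_MGMT_* definitions, and the numeric values are placeholders, not part of the patch.

#include <stdbool.h>
#include <stdint.h>

#define MGMT_CLASS_CM          0x07    /* stand-in for IB_MGMT_CLASS_CM */
#define MGMT_METHOD_SEND       0x03    /* stand-in for IB_MGMT_METHOD_SEND */
#define CLASSPORTINFO_ATTR_ID  0x0001  /* stand-in for the ClassPortInfo attr id */

struct mad_hdr {                       /* trimmed-down ib_mad_hdr */
	uint8_t  mgmt_class;
	uint8_t  method;
	uint16_t attr_id;              /* host byte order in this sketch */
};

/* True if a CM MAD arriving on the GSI QP should be rejected. */
static bool drop_cm_mad_on_gsi(const struct mad_hdr *hdr)
{
	return hdr->mgmt_class == MGMT_CLASS_CM &&
	       hdr->attr_id != CLASSPORTINFO_ATTR_ID &&
	       hdr->method != MGMT_METHOD_SEND;
}

int main(void)
{
	struct mad_hdr get = { MGMT_CLASS_CM, 0x01 /* not Send */, 0x0010 };

	return drop_cm_mad_on_gsi(&get) ? 0 : 1;   /* rejected: exits 0 */
}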
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 2aba774f835b..a95a32ba596e 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -512,7 +512,7 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
 	return len;
 }
 
-static int ib_nl_send_msg(struct ib_sa_query *query)
+static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
 {
 	struct sk_buff *skb = NULL;
 	struct nlmsghdr *nlh;
@@ -526,7 +526,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
 	if (len <= 0)
 		return -EMSGSIZE;
 
-	skb = nlmsg_new(len, GFP_KERNEL);
+	skb = nlmsg_new(len, gfp_mask);
 	if (!skb)
 		return -ENOMEM;
 
@@ -544,7 +544,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
 	/* Repair the nlmsg header length */
 	nlmsg_end(skb, nlh);
 
-	ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, GFP_KERNEL);
+	ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask);
 	if (!ret)
 		ret = len;
 	else
@@ -553,7 +553,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
 	return ret;
 }
 
-static int ib_nl_make_request(struct ib_sa_query *query)
+static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
 {
 	unsigned long flags;
 	unsigned long delay;
@@ -562,25 +562,27 @@ static int ib_nl_make_request(struct ib_sa_query *query)
 	INIT_LIST_HEAD(&query->list);
 	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
 
+	/* Put the request on the list first.*/
 	spin_lock_irqsave(&ib_nl_request_lock, flags);
-	ret = ib_nl_send_msg(query);
-	if (ret <= 0) {
-		ret = -EIO;
-		goto request_out;
-	} else {
-		ret = 0;
-	}
-
 	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
 	query->timeout = delay + jiffies;
 	list_add_tail(&query->list, &ib_nl_request_list);
 	/* Start the timeout if this is the only request */
 	if (ib_nl_request_list.next == &query->list)
 		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
-
-request_out:
 	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 
+	ret = ib_nl_send_msg(query, gfp_mask);
+	if (ret <= 0) {
+		ret = -EIO;
+		/* Remove the request */
+		spin_lock_irqsave(&ib_nl_request_lock, flags);
+		list_del(&query->list);
+		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
+	} else {
+		ret = 0;
+	}
+
 	return ret;
 }
 
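The reordering above links the request into ib_nl_request_list and arms the timeout before the netlink message is sent, sends with the spinlock dropped, and unlinks the request again only if the send fails. A minimal user-space sketch of that ordering follows; a pthread mutex and a hand-rolled circular list stand in for the spinlock and list_head, send_msg() is a made-up placeholder, and the delayed-work timeout of the real code is omitted.

#include <pthread.h>
#include <stddef.h>

struct req {
	struct req *prev, *next;
};

static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
static struct req req_list = { &req_list, &req_list };

static int send_msg(struct req *r) { (void)r; return 1; } /* placeholder */

static void list_add_tail_req(struct req *r)
{
	r->prev = req_list.prev;
	r->next = &req_list;
	req_list.prev->next = r;
	req_list.prev = r;
}

static void list_del_req(struct req *r)
{
	r->prev->next = r->next;
	r->next->prev = r->prev;
}

static int make_request(struct req *r)
{
	int ret;

	/* Publish the request first so a reply can always find it. */
	pthread_mutex_lock(&req_lock);
	list_add_tail_req(r);
	pthread_mutex_unlock(&req_lock);

	ret = send_msg(r);              /* no lock held while sending */
	if (ret <= 0) {
		pthread_mutex_lock(&req_lock);
		list_del_req(r);        /* undo on failure */
		pthread_mutex_unlock(&req_lock);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct req r;

	return make_request(&r) ? 1 : 0;
}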
@@ -1108,7 +1110,7 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
 
 	if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
 		if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
-			if (!ib_nl_make_request(query))
+			if (!ib_nl_make_request(query, gfp_mask))
 				return id;
 		}
 		ib_sa_disable_local_svc(query);
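The remaining sa_query.c changes are plumbing: the caller's gfp_t now flows from send_mad() through ib_nl_make_request() and ib_nl_send_msg() down to nlmsg_new() and ibnl_multicast(), instead of a hard-coded GFP_KERNEL, so a query issued from a context that cannot sleep can use GFP_ATOMIC. A tiny user-space analogue of the pattern; the flag values and the alloc_buf()/build_msg() names are made up for illustration.

#include <stdlib.h>

typedef unsigned int gfp_t;
#define MY_GFP_KERNEL 0x1u     /* "may sleep"    -- stand-in value */
#define MY_GFP_ATOMIC 0x2u     /* "never sleeps" -- stand-in value */

/* A real allocator would branch on the flag; this one just ignores it. */
static void *alloc_buf(size_t len, gfp_t gfp)
{
	(void)gfp;
	return malloc(len);
}

/*
 * Before the patch the helper chose the flag itself (always "may sleep");
 * after it, the outermost caller decides and the flag is threaded through.
 */
static void *build_msg(size_t len, gfp_t gfp_mask)
{
	return alloc_buf(len, gfp_mask);
}

int main(void)
{
	void *msg = build_msg(64, MY_GFP_ATOMIC);   /* caller picks the flag */

	free(msg);
	return 0;
}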
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 94816aeb95a0..1c02deab068f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -62,9 +62,11 @@ static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
  * The ib_uobject locking scheme is as follows:
  *
  * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
- *   needs to be held during all idr operations.  When an object is
+ *   needs to be held during all idr write operations.  When an object is
  *   looked up, a reference must be taken on the object's kref before
- *   dropping this lock.
+ *   dropping this lock.  For read operations, rcu_read_lock()
+ *   is used instead, but similarly the kref reference is taken
+ *   before rcu_read_unlock().
  *
  * - Each object also has an rwsem.  This rwsem must be held for
  *   reading while an operation that uses the object is performed.
@@ -96,7 +98,7 @@ static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
 
 static void release_uobj(struct kref *kref)
 {
-	kfree(container_of(kref, struct ib_uobject, ref));
+	kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu);
 }
 
 static void put_uobj(struct ib_uobject *uobj)
@@ -145,7 +147,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
 {
 	struct ib_uobject *uobj;
 
-	spin_lock(&ib_uverbs_idr_lock);
+	rcu_read_lock();
 	uobj = idr_find(idr, id);
 	if (uobj) {
 		if (uobj->context == context)
@@ -153,7 +155,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
 		else
 			uobj = NULL;
 	}
-	spin_unlock(&ib_uverbs_idr_lock);
+	rcu_read_unlock();
 
 	return uobj;
 }
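The three hunks above switch uobject lookup to an RCU read-side critical section: readers walk the idr under rcu_read_lock(), take their kref before rcu_read_unlock(), and release_uobj() frees through kfree_rcu() so a concurrent reader can never touch freed memory. A kernel-style sketch of that pattern follows; it is an in-tree fragment rather than a stand-alone program, and my_obj, my_idr and the helper names are illustrative, not part of the patch.

#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	struct kref	ref;
	struct rcu_head	rcu;	/* needed by kfree_rcu() */
};

static DEFINE_IDR(my_idr);	/* writers still serialize idr updates */

static void my_obj_release(struct kref *ref)
{
	/* Defer the actual kfree() until all RCU readers are done. */
	kfree_rcu(container_of(ref, struct my_obj, ref), rcu);
}

static struct my_obj *my_obj_lookup(int id)
{
	struct my_obj *obj;

	rcu_read_lock();
	obj = idr_find(&my_idr, id);
	if (obj)
		kref_get(&obj->ref);	/* pin it before leaving RCU */
	rcu_read_unlock();

	return obj;
}

static void my_obj_put(struct my_obj *obj)
{
	kref_put(&obj->ref, my_obj_release);
}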
@@ -2446,6 +2448,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 	int i, sg_ind;
 	int is_ud;
 	ssize_t ret = -EINVAL;
+	size_t next_size;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
@@ -2490,7 +2493,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 				goto out_put;
 			}
 
-			ud = alloc_wr(sizeof(*ud), user_wr->num_sge);
+			next_size = sizeof(*ud);
+			ud = alloc_wr(next_size, user_wr->num_sge);
 			if (!ud) {
 				ret = -ENOMEM;
 				goto out_put;
@@ -2511,7 +2515,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 			   user_wr->opcode == IB_WR_RDMA_READ) {
 			struct ib_rdma_wr *rdma;
 
-			rdma = alloc_wr(sizeof(*rdma), user_wr->num_sge);
+			next_size = sizeof(*rdma);
+			rdma = alloc_wr(next_size, user_wr->num_sge);
 			if (!rdma) {
 				ret = -ENOMEM;
 				goto out_put;
@@ -2525,7 +2530,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
 			struct ib_atomic_wr *atomic;
 
-			atomic = alloc_wr(sizeof(*atomic), user_wr->num_sge);
+			next_size = sizeof(*atomic);
+			atomic = alloc_wr(next_size, user_wr->num_sge);
 			if (!atomic) {
 				ret = -ENOMEM;
 				goto out_put;
@@ -2540,7 +2546,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 		} else if (user_wr->opcode == IB_WR_SEND ||
 			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
 			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
-			next = alloc_wr(sizeof(*next), user_wr->num_sge);
+			next_size = sizeof(*next);
+			next = alloc_wr(next_size, user_wr->num_sge);
 			if (!next) {
 				ret = -ENOMEM;
 				goto out_put;
@@ -2572,7 +2579,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 
 		if (next->num_sge) {
 			next->sg_list = (void *) next +
-				ALIGN(sizeof *next, sizeof (struct ib_sge));
+				ALIGN(next_size, sizeof(struct ib_sge));
 			if (copy_from_user(next->sg_list,
 					   buf + sizeof cmd +
 					   cmd.wr_count * cmd.wqe_size +
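The next_size bookkeeping matters because each work request is allocated as one chunk, the specific WR struct followed by its scatter/gather array, so sg_list must be offset by the size of the struct that was actually allocated (ud, rdma, atomic or plain send), not by sizeof(*next) as before. A simplified user-space sketch of that layout follows; wr, rdma_wr, sge and build_rdma_wr() are made-up types and names mirroring alloc_wr()'s logic, not part of the patch.

#include <stdint.h>
#include <stdlib.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

struct sge     { uint64_t addr; uint32_t length; uint32_t key; };
struct wr      { struct wr *next; struct sge *sg_list; int num_sge; };
struct rdma_wr { struct wr wr; uint64_t remote_addr; uint32_t rkey; };

/* One allocation holds the WR struct plus its sge array. */
static struct wr *alloc_wr(size_t wr_size, int num_sge)
{
	return calloc(1, ALIGN_UP(wr_size, sizeof(struct sge)) +
			 num_sge * sizeof(struct sge));
}

static struct wr *build_rdma_wr(int num_sge)
{
	size_t next_size = sizeof(struct rdma_wr);	/* size actually allocated */
	struct wr *next = alloc_wr(next_size, num_sge);

	if (!next)
		return NULL;
	next->num_sge = num_sge;
	/* sg array starts right after the rdma_wr, at an sge-aligned offset */
	next->sg_list = (void *)((char *)next + ALIGN_UP(next_size, sizeof(struct sge)));
	return next;
}

int main(void)
{
	struct wr *wr = build_rdma_wr(2);
	int ok = wr != NULL;

	free(wr);
	return ok ? 0 : 1;
}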
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 043a60ee6836..545906dec26d 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1516,7 +1516,7 @@ EXPORT_SYMBOL(ib_map_mr_sg);
  * @sg_nents:      number of entries in sg
  * @set_page:      driver page assignment function pointer
  *
- * Core service helper for drivers to covert the largest
+ * Core service helper for drivers to convert the largest
  * prefix of given sg list to a page vector. The sg list
  * prefix converted is the prefix that meet the requirements
  * of ib_map_mr_sg.
@@ -1533,7 +1533,7 @@ int ib_sg_to_pages(struct ib_mr *mr,
 	u64 last_end_dma_addr = 0, last_page_addr = 0;
 	unsigned int last_page_off = 0;
 	u64 page_mask = ~((u64)mr->page_size - 1);
-	int i;
+	int i, ret;
 
 	mr->iova = sg_dma_address(&sgl[0]);
 	mr->length = 0;
@@ -1544,27 +1544,29 @@ int ib_sg_to_pages(struct ib_mr *mr,
 		u64 end_dma_addr = dma_addr + dma_len;
 		u64 page_addr = dma_addr & page_mask;
 
-		if (i && page_addr != dma_addr) {
-			if (last_end_dma_addr != dma_addr) {
-				/* gap */
-				goto done;
-
-			} else if (last_page_off + dma_len <= mr->page_size) {
-				/* chunk this fragment with the last */
-				mr->length += dma_len;
-				last_end_dma_addr += dma_len;
-				last_page_off += dma_len;
-				continue;
-			} else {
-				/* map starting from the next page */
-				page_addr = last_page_addr + mr->page_size;
-				dma_len -= mr->page_size - last_page_off;
-			}
+		/*
+		 * For the second and later elements, check whether either the
+		 * end of element i-1 or the start of element i is not aligned
+		 * on a page boundary.
+		 */
+		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
+			/* Stop mapping if there is a gap. */
+			if (last_end_dma_addr != dma_addr)
+				break;
+
+			/*
+			 * Coalesce this element with the last. If it is small
+			 * enough just update mr->length. Otherwise start
+			 * mapping from the next page.
+			 */
+			goto next_page;
 		}
 
 		do {
-			if (unlikely(set_page(mr, page_addr)))
-				goto done;
+			ret = set_page(mr, page_addr);
+			if (unlikely(ret < 0))
+				return i ? : ret;
+next_page:
 			page_addr += mr->page_size;
 		} while (page_addr < end_dma_addr);
 
@@ -1574,7 +1576,6 @@ int ib_sg_to_pages(struct ib_mr *mr,
 		last_page_off = end_dma_addr & ~page_mask;
 	}
 
-done:
 	return i;
 }
 EXPORT_SYMBOL(ib_sg_to_pages);
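The rewritten loop in ib_sg_to_pages() walks the scatter list, stops at the first real gap, coalesces an element that starts mid-page with the previous one (skipping the page that was already registered), and on a set_page() failure reports how many elements were mapped so far. A user-space sketch of the same page-collection rules follows; seg, segs_to_pages() and PAGE_SZ are illustrative stand-ins, and the mr->length/iova bookkeeping of the real function is omitted.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096ull                        /* stand-in for mr->page_size */

struct seg { uint64_t addr; uint64_t len; };   /* stand-in for a DMA-mapped sg entry */

/* Returns how many segments were fully processed. */
static int segs_to_pages(const struct seg *sg, int nents,
			 uint64_t *pages, int max_pages)
{
	const uint64_t mask = ~(PAGE_SZ - 1);
	uint64_t last_end = 0, last_off = 0;
	int i, n = 0;

	for (i = 0; i < nents; i++) {
		uint64_t addr = sg[i].addr;
		uint64_t end  = addr + sg[i].len;
		uint64_t page = addr & mask;

		/* Element i-1 ended, or element i starts, off a page boundary? */
		if (i && (last_off != 0 || page != addr)) {
			if (last_end != addr)
				break;              /* real gap: stop mapping here */
			page += PAGE_SZ;            /* contiguous: shared page already emitted */
		}
		for (; page < end; page += PAGE_SZ) {
			if (n == max_pages)
				return i ? i : -1;  /* out of room, like set_page() failing */
			pages[n++] = page;
		}
		last_end = end;
		last_off = end & ~mask;
	}
	return i;
}

int main(void)
{
	const struct seg sg[] = {
		{ 0x10000, 6000 },             /* pages 0x10000 and 0x11000 */
		{ 0x11770, 4096 },             /* contiguous, starts mid-page: adds 0x12000 */
		{ 0x20000, 4096 },             /* gap: mapping stops before this one */
	};
	uint64_t pages[8];
	int used = segs_to_pages(sg, 3, pages, 8);

	printf("segments used: %d\n", used);   /* prints 2 */
	for (int i = 0; i < 3; i++)            /* three pages were collected */
		printf("0x%llx\n", (unsigned long long)pages[i]);
	return 0;
}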