path: root/drivers/infiniband/core/netlink.c
author     Leon Romanovsky <leonro@mellanox.com>  2017-06-08 02:05:12 -0400
committer  Leon Romanovsky <leon@kernel.org>      2017-08-10 06:15:42 -0400
commit     3c3e75d5ff75f9a076cac254fd32476ca80fdffc (patch)
tree       357ac5b8943cda31ce53e0d29e576dc4029ec4f4 /drivers/infiniband/core/netlink.c
parent     64401b69b29164c5731018cc44fc9b144ac9c5ae (diff)
RDMA/netlink: Avoid double pass for RDMA netlink messages
The standard netlink_rcv_skb() function skips messages that lack the NLM_F_REQUEST flag, while the SA netlink client issues exactly such messages. Commit bc10ed7d3d19 ("IB/core: Add rdma netlink helper functions") therefore introduced a local function to allow them, which caused every incoming message to be parsed twice.

This patch unifies that local implementation with netlink_rcv_skb(), so the double pass is no longer needed. As an outcome, the combined function gains a stricter check for the NLM_F_REQUEST flag: messages without it are now allowed for the SA path-query client (RDMA_NL_LS) only.

Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
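For context, RDMA_NL_LS runs the netlink exchange "backward": the kernel sends the request (with NLM_F_REQUEST set) and a userspace responder such as ibacm answers with a reply, i.e. a message without NLM_F_REQUEST. The minimal userspace sketch below illustrates such a reply over NETLINK_RDMA. It is hypothetical: the empty payload and the choice of RDMA_NL_LS_OP_RESOLVE are for illustration only; a real responder would attach path-record attributes and echo the kernel's sequence number.

    #include <linux/netlink.h>
    #include <rdma/rdma_netlink.h>  /* RDMA_NL_LS, RDMA_NL_GET_TYPE */
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Hypothetical sketch: answer a kernel-initiated RDMA_NL_LS query.
     * The reply deliberately omits NLM_F_REQUEST, which is exactly the
     * kind of message the patched receive path must accept, and only
     * for the RDMA_NL_LS client.
     */
    static int send_ls_reply(int fd, __u32 seq)
    {
            struct sockaddr_nl dst = { .nl_family = AF_NETLINK }; /* pid 0 = kernel */
            char buf[NLMSG_SPACE(0)] = {0};
            struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

            nlh->nlmsg_len = NLMSG_LENGTH(0);   /* empty payload for brevity */
            nlh->nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_LS, RDMA_NL_LS_OP_RESOLVE);
            nlh->nlmsg_flags = 0;               /* note: no NLM_F_REQUEST */
            nlh->nlmsg_seq = seq;               /* echo the kernel's sequence */

            return sendto(fd, buf, nlh->nlmsg_len, 0,
                          (struct sockaddr *)&dst, sizeof(dst));
    }

    int main(void)
    {
            int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_RDMA);

            if (fd < 0)
                    return 1;
            /* A real responder would recv() the kernel's request first
             * and parse its attributes; omitted here. */
            send_ls_reply(fd, 1);
            close(fd);
            return 0;
    }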
Diffstat (limited to 'drivers/infiniband/core/netlink.c')
-rw-r--r--  drivers/infiniband/core/netlink.c  62
1 file changed, 44 insertions(+), 18 deletions(-)
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index cd9b7e7b7d2c..826fbd612c7d 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -159,8 +159,8 @@ nla_put_failure:
 }
 EXPORT_SYMBOL(ibnl_put_attr);
 
-static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
-			struct netlink_ext_ack *extack)
+static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
+			   struct netlink_ext_ack *extack)
 {
 	int type = nlh->nlmsg_type;
 	unsigned int index = RDMA_NL_GET_CLIENT(type);
@@ -187,40 +187,66 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
 	return netlink_dump_start(nls, skb, nlh, &c);
 }
 
-static void ibnl_rcv_reply_skb(struct sk_buff *skb)
+/*
+ * This function is similar to netlink_rcv_skb with one exception:
+ * It calls to the callback for the netlink messages without NLM_F_REQUEST
+ * flag. These messages are intended for RDMA_NL_LS consumer, so it is allowed
+ * for that consumer only.
+ */
+static int rdma_nl_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
+						struct nlmsghdr *,
+						struct netlink_ext_ack *))
 {
+	struct netlink_ext_ack extack = {};
 	struct nlmsghdr *nlh;
-	int msglen;
+	int err;
 
-	/*
-	 * Process responses until there is no more message or the first
-	 * request. Generally speaking, it is not recommended to mix responses
-	 * with requests.
-	 */
 	while (skb->len >= nlmsg_total_size(0)) {
+		int msglen;
+
 		nlh = nlmsg_hdr(skb);
+		err = 0;
 
 		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
-			return;
+			return 0;
 
-		/* Handle response only */
-		if (nlh->nlmsg_flags & NLM_F_REQUEST)
-			return;
+		/*
+		 * Generally speaking, the only requests are handled
+		 * by the kernel, but RDMA_NL_LS is different, because it
+		 * runs backward netlink scheme. Kernel initiates messages
+		 * and waits for reply with data to keep pathrecord cache
+		 * in sync.
+		 */
+		if (!(nlh->nlmsg_flags & NLM_F_REQUEST) &&
+		    (RDMA_NL_GET_CLIENT(nlh->nlmsg_type) != RDMA_NL_LS))
+			goto ack;
+
+		/* Skip control messages */
+		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
+			goto ack;
 
-		ibnl_rcv_msg(skb, nlh, NULL);
+		err = cb(skb, nlh, &extack);
+		if (err == -EINTR)
+			goto skip;
 
+ack:
+		if (nlh->nlmsg_flags & NLM_F_ACK || err)
+			netlink_ack(skb, nlh, err, &extack);
+
+skip:
 		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
 		if (msglen > skb->len)
 			msglen = skb->len;
 		skb_pull(skb, msglen);
 	}
+
+	return 0;
 }
 
-static void ibnl_rcv(struct sk_buff *skb)
+static void rdma_nl_rcv(struct sk_buff *skb)
 {
 	mutex_lock(&rdma_nl_mutex);
-	ibnl_rcv_reply_skb(skb);
-	netlink_rcv_skb(skb, &ibnl_rcv_msg);
+	rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg);
 	mutex_unlock(&rdma_nl_mutex);
 }
 
@@ -254,7 +280,7 @@ EXPORT_SYMBOL(ibnl_multicast);
 int __init rdma_nl_init(void)
 {
 	struct netlink_kernel_cfg cfg = {
-		.input = ibnl_rcv,
+		.input = rdma_nl_rcv,
 	};
 
 	nls = netlink_kernel_create(&init_net, NETLINK_RDMA, &cfg);
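The receive loop in rdma_nl_rcv_skb() above hand-rolls, on the skb, the same batch walk that the netlink uapi macros NLMSG_OK() and NLMSG_NEXT() provide to userspace: validate that the header and its declared length fit into the remaining buffer, handle one message, then advance by the NLMSG_ALIGN()ed length. A small self-contained sketch of that walk, assuming a buffer already filled by recv() on a netlink socket:

    #include <linux/netlink.h>
    #include <stdio.h>

    /* Illustrative only: iterate over a batch of netlink messages the
     * way rdma_nl_rcv_skb() iterates over the skb -- bounds check first,
     * then advance by the aligned message length.
     */
    static void walk_batch(void *buf, int len)
    {
            struct nlmsghdr *nlh;

            for (nlh = buf; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len))
                    printf("type=%u flags=%#x len=%u\n",
                           (unsigned)nlh->nlmsg_type,
                           (unsigned)nlh->nlmsg_flags,
                           (unsigned)nlh->nlmsg_len);
    }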