author	Daniel Borkmann <dborkmann@redhat.com>	2013-09-05 11:48:47 -0400
committer	David S. Miller <davem@davemloft.net>	2013-09-06 14:43:48 -0400
commit	5ffd5cddd4d353fe18c01b89397dcad02035bb95 (patch)
tree	517e4941160237ed41c440776ac2986904fbb715
parent	2e032852245b3dcfe5461d7353e34eb6da095ccf (diff)
net: netlink: filter particular protocols from analyzers
Add finer-grained control and let only a whitelist of allowed netlink
protocols pass, in our case those related to networking. If, later on,
other subsystems decide they want to add their protocol to the list of
allowed protocols, they shall simply add it.

While at it, we also need to tell what protocol is in use, otherwise
BPF_S_ANC_PROTOCOL cannot pick it up (as it is not filled out).

Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
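For illustration, a minimal userspace sketch of how an analyzer could
consume the protocol field this patch fills in: it attaches a classic
BPF filter to a packet socket, using the ancillary SKF_AD_PROTOCOL
load (the userspace counterpart of BPF_S_ANC_PROTOCOL) to keep only
NETLINK_ROUTE frames. The packet-socket setup and the omitted bind to
an nlmon device are assumptions made for the example, not part of this
patch.

#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/filter.h>
#include <linux/if_ether.h>
#include <linux/netlink.h>

int main(void)
{
	/* Ancillary load of skb->protocol, which the tap path now
	 * fills with the netlink protocol number; accept frames from
	 * NETLINK_ROUTE sockets only, drop everything else.
	 */
	struct sock_filter code[] = {
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 SKF_AD_OFF + SKF_AD_PROTOCOL),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, NETLINK_ROUTE, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept frame */
		BPF_STMT(BPF_RET | BPF_K, 0),		/* drop frame */
	};
	struct sock_fprog prog = {
		.len	= sizeof(code) / sizeof(code[0]),
		.filter	= code,
	};
	int sock = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (sock < 0 || setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER,
				   &prog, sizeof(prog)) < 0) {
		perror("socket/setsockopt");
		return 1;
	}
	/* ... bind to the nlmon device and recv() netlink frames ... */
	return 0;
}

Note that the ancillary load returns ntohs(skb->protocol), so the
comparison is against the plain netlink protocol number; opening the
packet socket requires CAP_NET_RAW.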
-rw-r--r--	net/netlink/af_netlink.c	30
1 file changed, 30 insertions(+), 0 deletions(-)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a17dda1bbee0..8df7f64c6db3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -168,16 +168,43 @@ int netlink_remove_tap(struct netlink_tap *nt)
 }
 EXPORT_SYMBOL_GPL(netlink_remove_tap);
 
+static bool netlink_filter_tap(const struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+	bool pass = false;
+
+	/* We take the more conservative approach and
+	 * whitelist socket protocols that may pass.
+	 */
+	switch (sk->sk_protocol) {
+	case NETLINK_ROUTE:
+	case NETLINK_USERSOCK:
+	case NETLINK_SOCK_DIAG:
+	case NETLINK_NFLOG:
+	case NETLINK_XFRM:
+	case NETLINK_FIB_LOOKUP:
+	case NETLINK_NETFILTER:
+	case NETLINK_GENERIC:
+		pass = true;
+		break;
+	}
+
+	return pass;
+}
+
 static int __netlink_deliver_tap_skb(struct sk_buff *skb,
 				     struct net_device *dev)
 {
 	struct sk_buff *nskb;
+	struct sock *sk = skb->sk;
 	int ret = -ENOMEM;
 
 	dev_hold(dev);
 	nskb = skb_clone(skb, GFP_ATOMIC);
 	if (nskb) {
 		nskb->dev = dev;
+		nskb->protocol = htons((u16) sk->sk_protocol);
+
 		ret = dev_queue_xmit(nskb);
 		if (unlikely(ret > 0))
 			ret = net_xmit_errno(ret);
@@ -192,6 +219,9 @@ static void __netlink_deliver_tap(struct sk_buff *skb)
 	int ret;
 	struct netlink_tap *tmp;
 
+	if (!netlink_filter_tap(skb))
+		return;
+
 	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
 		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
 		if (unlikely(ret))