author	David S. Miller <davem@davemloft.net>	2010-05-17 01:26:58 -0400
committer	David S. Miller <davem@davemloft.net>	2010-05-17 01:26:58 -0400
commit	6811d58fc148c393f80a9f5a9db49d7e75cdc546 (patch)
tree	c25d5b0e49ec848943d35f819e748d157ccb492e
parent	c4949f074332a64baeb2ead6ab9319ca37642f96 (diff)
parent	c02db8c6290bb992442fec1407643c94cc414375 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	include/linux/if_link.h
-rw-r--r--	drivers/vhost/vhost.c	  7
-rw-r--r--	include/linux/if_link.h	 23
-rw-r--r--	include/net/tcp.h	 21
-rw-r--r--	net/core/rtnetlink.c	159
-rw-r--r--	net/ipv4/tcp.c	 34
-rw-r--r--	net/sctp/transport.c	  4
6 files changed, 166 insertions, 82 deletions
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 5c9c657ab753..750effe0f98b 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1036,7 +1036,12 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
 /* This actually signals the guest, using eventfd. */
 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
-	__u16 flags = 0;
+	__u16 flags;
+	/* Flush out used index updates. This is paired
+	 * with the barrier that the Guest executes when enabling
+	 * interrupts. */
+	smp_mb();
+
 	if (get_user(flags, &vq->avail->flags)) {
 		vq_err(vq, "Failed to get flags");
 		return;
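
The comment added above relies on the usual virtio publish/check pairing. A rough sketch of the two sides (illustrative only, not part of this patch; VRING_AVAIL_F_NO_INTERRUPT comes from include/linux/virtio_ring.h):

	/* host side (vhost), as in vhost_signal() above */
	/* ... publish buffers to the used ring and update the used index ... */
	smp_mb();	/* order the used-index update before reading avail->flags */
	if (!(flags & VRING_AVAIL_F_NO_INTERRUPT) && vq->call_ctx)
		eventfd_signal(vq->call_ctx, 1);	/* signal the guest */

	/* guest side, when re-enabling callbacks (sketch) */
	vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	mb();		/* order the flags write before re-checking the used index */
	/* re-check the used index for buffers that raced with the flag write */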
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index cfd420ba72df..c3af67fce3f2 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -111,10 +111,7 @@ enum {
 	IFLA_NET_NS_PID,
 	IFLA_IFALIAS,
 	IFLA_NUM_VF,		/* Number of VFs if device is SR-IOV PF */
-	IFLA_VF_MAC,		/* Hardware queue specific attributes */
-	IFLA_VF_VLAN,
-	IFLA_VF_TX_RATE,	/* TX Bandwidth Allocation */
-	IFLA_VFINFO,
+	IFLA_VFINFO_LIST,
 	IFLA_STATS64,
 	__IFLA_MAX
 };
@@ -236,6 +233,24 @@ enum macvlan_mode {
 
 /* SR-IOV virtual function managment section */
 
+enum {
+	IFLA_VF_INFO_UNSPEC,
+	IFLA_VF_INFO,
+	__IFLA_VF_INFO_MAX,
+};
+
+#define IFLA_VF_INFO_MAX (__IFLA_VF_INFO_MAX - 1)
+
+enum {
+	IFLA_VF_UNSPEC,
+	IFLA_VF_MAC,		/* Hardware queue specific attributes */
+	IFLA_VF_VLAN,
+	IFLA_VF_TX_RATE,	/* TX Bandwidth Allocation */
+	__IFLA_VF_MAX,
+};
+
+#define IFLA_VF_MAX (__IFLA_VF_MAX - 1)
+
 struct ifla_vf_mac {
 	__u32 vf;
 	__u8 mac[32]; /* MAX_ADDR_LEN */
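
With this change the per-VF attributes move out of the flat IFLA_* namespace into a nest; the layout rtnetlink now produces and accepts (see net/core/rtnetlink.c below) is roughly:

	IFLA_VFINFO_LIST		(NLA_NESTED)
	    IFLA_VF_INFO		(NLA_NESTED, one per VF)
	        IFLA_VF_MAC		struct ifla_vf_mac
	        IFLA_VF_VLAN		struct ifla_vf_vlan
	        IFLA_VF_TX_RATE		struct ifla_vf_tx_rate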
diff --git a/include/net/tcp.h b/include/net/tcp.h
index fb5c66b2ab81..a1449144848a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1206,30 +1206,15 @@ extern int tcp_v4_md5_do_del(struct sock *sk,
 extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *);
 extern void tcp_free_md5sig_pool(void);
 
-extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu);
-extern void __tcp_put_md5sig_pool(void);
+extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
+extern void tcp_put_md5sig_pool(void);
+
 extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
 extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
 				 unsigned header_len);
 extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
 			    struct tcp_md5sig_key *key);
 
-static inline
-struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
-{
-	int cpu = get_cpu();
-	struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu);
-	if (!ret)
-		put_cpu();
-	return ret;
-}
-
-static inline void tcp_put_md5sig_pool(void)
-{
-	__tcp_put_md5sig_pool();
-	put_cpu();
-}
-
 /* write queue abstraction */
 static inline void tcp_write_queue_purge(struct sock *sk)
 {
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 23a71cb21273..66db1201da9b 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -644,12 +644,19 @@ static void copy_rtnl_link_stats64(void *v, const struct net_device_stats *b)
 	memcpy(v, &a, sizeof(a));
 }
 
+/* All VF info */
 static inline int rtnl_vfinfo_size(const struct net_device *dev)
 {
-	if (dev->dev.parent && dev_is_pci(dev->dev.parent))
-		return dev_num_vf(dev->dev.parent) *
-			sizeof(struct ifla_vf_info);
-	else
+	if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
+
+		int num_vfs = dev_num_vf(dev->dev.parent);
+		size_t size = nlmsg_total_size(sizeof(struct nlattr));
+		size += nlmsg_total_size(num_vfs * sizeof(struct nlattr));
+		size += num_vfs * (sizeof(struct ifla_vf_mac) +
+				   sizeof(struct ifla_vf_vlan) +
+				   sizeof(struct ifla_vf_tx_rate));
+		return size;
+	} else
 		return 0;
 }
 
@@ -672,7 +679,7 @@ static inline size_t if_nlmsg_size(const struct net_device *dev)
 	       + nla_total_size(1) /* IFLA_OPERSTATE */
 	       + nla_total_size(1) /* IFLA_LINKMODE */
 	       + nla_total_size(4) /* IFLA_NUM_VF */
-	       + nla_total_size(rtnl_vfinfo_size(dev)) /* IFLA_VFINFO */
+	       + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */
 	       + rtnl_link_get_size(dev); /* IFLA_LINKINFO */
 }
 
@@ -749,14 +756,37 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 
 	if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
 		int i;
-		struct ifla_vf_info ivi;
 
-		NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
-		for (i = 0; i < dev_num_vf(dev->dev.parent); i++) {
+		struct nlattr *vfinfo, *vf;
+		int num_vfs = dev_num_vf(dev->dev.parent);
+
+		NLA_PUT_U32(skb, IFLA_NUM_VF, num_vfs);
+		vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
+		if (!vfinfo)
+			goto nla_put_failure;
+		for (i = 0; i < num_vfs; i++) {
+			struct ifla_vf_info ivi;
+			struct ifla_vf_mac vf_mac;
+			struct ifla_vf_vlan vf_vlan;
+			struct ifla_vf_tx_rate vf_tx_rate;
 			if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
 				break;
-			NLA_PUT(skb, IFLA_VFINFO, sizeof(ivi), &ivi);
+			vf_mac.vf = vf_vlan.vf = vf_tx_rate.vf = ivi.vf;
+			memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
+			vf_vlan.vlan = ivi.vlan;
+			vf_vlan.qos = ivi.qos;
+			vf_tx_rate.rate = ivi.tx_rate;
+			vf = nla_nest_start(skb, IFLA_VF_INFO);
+			if (!vf) {
+				nla_nest_cancel(skb, vfinfo);
+				goto nla_put_failure;
+			}
+			NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac);
+			NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan);
+			NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), &vf_tx_rate);
+			nla_nest_end(skb, vf);
 		}
+		nla_nest_end(skb, vfinfo);
 	}
 	if (dev->rtnl_link_ops) {
 		if (rtnl_link_fill(skb, dev) < 0)
@@ -818,12 +848,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
 	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
 	[IFLA_IFALIAS]		= { .type = NLA_STRING, .len = IFALIASZ-1 },
-	[IFLA_VF_MAC]		= { .type = NLA_BINARY,
-				    .len = sizeof(struct ifla_vf_mac) },
-	[IFLA_VF_VLAN]		= { .type = NLA_BINARY,
-				    .len = sizeof(struct ifla_vf_vlan) },
-	[IFLA_VF_TX_RATE]	= { .type = NLA_BINARY,
-				    .len = sizeof(struct ifla_vf_tx_rate) },
+	[IFLA_VFINFO_LIST]	= {. type = NLA_NESTED },
 };
 EXPORT_SYMBOL(ifla_policy);
 
@@ -832,6 +857,19 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
 	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
 };
 
+static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
+	[IFLA_VF_INFO]		= { .type = NLA_NESTED },
+};
+
+static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
+	[IFLA_VF_MAC]		= { .type = NLA_BINARY,
+				    .len = sizeof(struct ifla_vf_mac) },
+	[IFLA_VF_VLAN]		= { .type = NLA_BINARY,
+				    .len = sizeof(struct ifla_vf_vlan) },
+	[IFLA_VF_TX_RATE]	= { .type = NLA_BINARY,
+				    .len = sizeof(struct ifla_vf_tx_rate) },
+};
+
 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
 {
 	struct net *net;
@@ -861,6 +899,52 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
 	return 0;
 }
 
+static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
+{
+	int rem, err = -EINVAL;
+	struct nlattr *vf;
+	const struct net_device_ops *ops = dev->netdev_ops;
+
+	nla_for_each_nested(vf, attr, rem) {
+		switch (nla_type(vf)) {
+		case IFLA_VF_MAC: {
+			struct ifla_vf_mac *ivm;
+			ivm = nla_data(vf);
+			err = -EOPNOTSUPP;
+			if (ops->ndo_set_vf_mac)
+				err = ops->ndo_set_vf_mac(dev, ivm->vf,
+							  ivm->mac);
+			break;
+		}
+		case IFLA_VF_VLAN: {
+			struct ifla_vf_vlan *ivv;
+			ivv = nla_data(vf);
+			err = -EOPNOTSUPP;
+			if (ops->ndo_set_vf_vlan)
+				err = ops->ndo_set_vf_vlan(dev, ivv->vf,
+							   ivv->vlan,
+							   ivv->qos);
+			break;
+		}
+		case IFLA_VF_TX_RATE: {
+			struct ifla_vf_tx_rate *ivt;
+			ivt = nla_data(vf);
+			err = -EOPNOTSUPP;
+			if (ops->ndo_set_vf_tx_rate)
+				err = ops->ndo_set_vf_tx_rate(dev, ivt->vf,
+							      ivt->rate);
+			break;
+		}
+		default:
+			err = -EINVAL;
+			break;
+		}
+		if (err)
+			break;
+	}
+	return err;
+}
+
 static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
 		      struct nlattr **tb, char *ifname, int modified)
 {
@@ -991,40 +1075,17 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
 		write_unlock_bh(&dev_base_lock);
 	}
 
-	if (tb[IFLA_VF_MAC]) {
-		struct ifla_vf_mac *ivm;
-		ivm = nla_data(tb[IFLA_VF_MAC]);
-		err = -EOPNOTSUPP;
-		if (ops->ndo_set_vf_mac)
-			err = ops->ndo_set_vf_mac(dev, ivm->vf, ivm->mac);
-		if (err < 0)
-			goto errout;
-		modified = 1;
-	}
-
-	if (tb[IFLA_VF_VLAN]) {
-		struct ifla_vf_vlan *ivv;
-		ivv = nla_data(tb[IFLA_VF_VLAN]);
-		err = -EOPNOTSUPP;
-		if (ops->ndo_set_vf_vlan)
-			err = ops->ndo_set_vf_vlan(dev, ivv->vf,
-						   ivv->vlan,
-						   ivv->qos);
-		if (err < 0)
-			goto errout;
-		modified = 1;
-	}
-	err = 0;
-
-	if (tb[IFLA_VF_TX_RATE]) {
-		struct ifla_vf_tx_rate *ivt;
-		ivt = nla_data(tb[IFLA_VF_TX_RATE]);
-		err = -EOPNOTSUPP;
-		if (ops->ndo_set_vf_tx_rate)
-			err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, ivt->rate);
-		if (err < 0)
-			goto errout;
-		modified = 1;
+	if (tb[IFLA_VFINFO_LIST]) {
+		struct nlattr *attr;
+		int rem;
+		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
+			if (nla_type(attr) != IFLA_VF_INFO)
+				goto errout;
+			err = do_setvfinfo(dev, attr);
+			if (err < 0)
+				goto errout;
+			modified = 1;
+		}
 	}
 	err = 0;
 
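
Note that in the hunks shown the new ifla_vf_policy table is only declared; do_setvfinfo() dispatches on nla_type() directly. For reference, a minimal sketch of validating one IFLA_VF_INFO nest against that policy (illustrative only; parse_one_vf() is not part of this patch):

	static int parse_one_vf(struct nlattr *attr)
	{
		struct nlattr *tb[IFLA_VF_MAX + 1];
		int err;

		/* check attribute types and lengths against ifla_vf_policy */
		err = nla_parse_nested(tb, IFLA_VF_MAX, attr, ifla_vf_policy);
		if (err < 0)
			return err;

		if (tb[IFLA_VF_MAC]) {
			struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
			/* ivm->vf selects the VF, ivm->mac carries the address */
		}
		return 0;
	}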
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 8ce29747ad9b..3284393d09b4 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2840,7 +2840,6 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
 			if (p->md5_desc.tfm)
 				crypto_free_hash(p->md5_desc.tfm);
 			kfree(p);
-			p = NULL;
 		}
 	}
 	free_percpu(pool);
@@ -2938,25 +2937,40 @@ retry:
 
 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
 
-struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
+
+/**
+ * tcp_get_md5sig_pool - get md5sig_pool for this user
+ *
+ * We use percpu structure, so if we succeed, we exit with preemption
+ * and BH disabled, to make sure another thread or softirq handling
+ * wont try to get same context.
+ */
+struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
 {
 	struct tcp_md5sig_pool * __percpu *p;
-	spin_lock_bh(&tcp_md5sig_pool_lock);
+
+	local_bh_disable();
+
+	spin_lock(&tcp_md5sig_pool_lock);
 	p = tcp_md5sig_pool;
 	if (p)
 		tcp_md5sig_users++;
-	spin_unlock_bh(&tcp_md5sig_pool_lock);
-	return (p ? *per_cpu_ptr(p, cpu) : NULL);
-}
+	spin_unlock(&tcp_md5sig_pool_lock);
+
+	if (p)
+		return *per_cpu_ptr(p, smp_processor_id());
 
-EXPORT_SYMBOL(__tcp_get_md5sig_pool);
+	local_bh_enable();
+	return NULL;
+}
+EXPORT_SYMBOL(tcp_get_md5sig_pool);
 
-void __tcp_put_md5sig_pool(void)
+void tcp_put_md5sig_pool(void)
 {
+	local_bh_enable();
 	tcp_free_md5sig_pool();
 }
-
-EXPORT_SYMBOL(__tcp_put_md5sig_pool);
+EXPORT_SYMBOL(tcp_put_md5sig_pool);
 
 int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
 			struct tcphdr *th)
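
The kerneldoc added above spells out the new calling convention; a rough caller-side sketch (illustrative, not taken from this patch):

	struct tcp_md5sig_pool *hp;

	hp = tcp_get_md5sig_pool();	/* on success, preemption and BH are disabled */
	if (!hp)
		return;			/* no pool allocated */
	/* ... tcp_md5_hash_header(hp, th); tcp_md5_hash_key(hp, key); ... */
	tcp_put_md5sig_pool();		/* re-enables BH and drops the pool user count */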
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index d67501f92ca3..132046cb82fc 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -148,6 +148,10 @@ void sctp_transport_free(struct sctp_transport *transport)
 	    del_timer(&transport->T3_rtx_timer))
 		sctp_transport_put(transport);
 
+	/* Delete the ICMP proto unreachable timer if it's active. */
+	if (timer_pending(&transport->proto_unreach_timer) &&
+	    del_timer(&transport->proto_unreach_timer))
+		sctp_association_put(transport->asoc);
 
 	sctp_transport_put(transport);
 }