author     Yi-Hung Wei <yihung.wei@gmail.com>       2018-05-24 20:56:43 -0400
committer  David S. Miller <davem@davemloft.net>    2018-05-25 16:45:19 -0400
commit     11efd5cb04a184eea4f57b68ea63dddd463158d1 (patch)
tree       d817b69c0a3bee567c87e78ad14b122b8e63fbe1 /net/openvswitch/conntrack.c
parent     5972be6b2495c6bffbf444497517fd1c070eef78 (diff)
openvswitch: Support conntrack zone limit
Currently, nf_conntrack_max limits the maximum number of conntrack entries in the conntrack table for every network namespace. VMs and containers that reside in the same namespace share one conntrack table, and the total number of conntrack entries for all of them is bounded by nf_conntrack_max. In this case, if one VM or container abuses conntrack entries, it blocks the others from committing valid entries into the conntrack table. Even if the VMs could be placed in separate network namespaces, the current nf_conntrack_max configuration is too rigid to give different VMs or containers different conntrack-entry limits.

To address this, this patch adds a fine-grained mechanism that can further limit the number of conntrack entries per zone. For example, different zones can be assigned to different VMs, each with its own conntrack limit. With this isolation, a misbehaving VM only consumes the conntrack entries in its own zone and does not affect well-behaved VMs. Users can also set different conntrack limits for different zones based on their preference.

The implementation uses Netfilter's nf_conncount backend to count the number of connections in a particular zone. If the number of connections is above the configured limit, ovs returns ENOMEM to userspace. If userspace does not configure a zone limit, the limit defaults to zero, i.e. no limitation, which is backward compatible with the behavior before this patch.

The following high-level APIs are provided to userspace:
- OVS_CT_LIMIT_CMD_SET:
  * set the default connection limit for all zones
  * set the connection limit for a particular zone
- OVS_CT_LIMIT_CMD_DEL:
  * remove the connection limit for a particular zone
- OVS_CT_LIMIT_CMD_GET:
  * get the default connection limit for all zones
  * get the connection limit for a particular zone

Signed-off-by: Yi-Hung Wei <yihung.wei@gmail.com>
Acked-by: Pravin B Shelar <pshelar@ovn.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
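[Editor's note: the following is a minimal illustrative sketch, not part of this patch or of the OVS userspace code. It shows how a generic netlink client could lay out the payload of the nested OVS_CT_LIMIT_ATTR_ZONE_LIMIT attribute for OVS_CT_LIMIT_CMD_SET, matching the parser in ovs_ct_limit_set_zone_limit() below, which walks the attribute as a back-to-back, NLA_ALIGNed array of struct ovs_zone_limit entries. It assumes the struct ovs_zone_limit and OVS_ZONE_LIMIT_DEFAULT_ZONE uapi definitions added elsewhere in this series; put_zone_limit() and build_zone_limit_payload() are hypothetical helpers, and the netlink socket setup, genl family lookup and enclosing nlmsghdr/genlmsghdr/ovs_header/nlattr are omitted.]

#include <string.h>
#include <linux/netlink.h>
#include <linux/openvswitch.h>

/* Append one zone/limit entry to the buffer that becomes the payload of a
 * nested OVS_CT_LIMIT_ATTR_ZONE_LIMIT attribute; returns the new length.
 * Passing OVS_ZONE_LIMIT_DEFAULT_ZONE (-1) as zone_id sets the default
 * limit for all zones. The .count field is only filled in CMD_GET replies.
 */
static size_t put_zone_limit(unsigned char *buf, size_t len,
			     int zone_id, unsigned int limit)
{
	struct ovs_zone_limit zl = {
		.zone_id = zone_id,
		.limit = limit,
	};

	memcpy(buf + len, &zl, sizeof(zl));
	return len + NLA_ALIGN(sizeof(zl));
}

/* Example: set the default limit to 1000000 and cap zone 1 at 1000 tracked
 * connections. The returned length is the size of the nested attribute
 * payload to send in an OVS_CT_LIMIT_CMD_SET request.
 */
static size_t build_zone_limit_payload(unsigned char *buf)
{
	size_t len = 0;

	len = put_zone_limit(buf, len, OVS_ZONE_LIMIT_DEFAULT_ZONE, 1000000);
	len = put_zone_limit(buf, len, 1, 1000);
	return len;
}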
Diffstat (limited to 'net/openvswitch/conntrack.c')
-rw-r--r--  net/openvswitch/conntrack.c  551
1 files changed, 550 insertions, 1 deletions
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 02fc343feb66..284aca2a252d 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -16,8 +16,11 @@
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <linux/sctp.h>
+#include <linux/static_key.h>
 #include <net/ip.h>
+#include <net/genetlink.h>
 #include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_count.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_labels.h>
 #include <net/netfilter/nf_conntrack_seqadj.h>
@@ -76,6 +79,31 @@ struct ovs_conntrack_info {
 #endif
 };
 
+#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
+#define OVS_CT_LIMIT_UNLIMITED	0
+#define OVS_CT_LIMIT_DEFAULT	OVS_CT_LIMIT_UNLIMITED
+#define CT_LIMIT_HASH_BUCKETS	512
+static DEFINE_STATIC_KEY_FALSE(ovs_ct_limit_enabled);
+
+struct ovs_ct_limit {
+	/* Elements in ovs_ct_limit_info->limits hash table */
+	struct hlist_node hlist_node;
+	struct rcu_head rcu;
+	u16 zone;
+	u32 limit;
+};
+
+struct ovs_ct_limit_info {
+	u32 default_limit;
+	struct hlist_head *limits;
+	struct nf_conncount_data *data;
+};
+
+static const struct nla_policy ct_limit_policy[OVS_CT_LIMIT_ATTR_MAX + 1] = {
+	[OVS_CT_LIMIT_ATTR_ZONE_LIMIT] = { .type = NLA_NESTED, },
+};
+#endif
+
 static bool labels_nonzero(const struct ovs_key_ct_labels *labels);
 
 static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info);
@@ -1036,6 +1064,89 @@ static bool labels_nonzero(const struct ovs_key_ct_labels *labels)
 	return false;
 }
 
+#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
+static struct hlist_head *ct_limit_hash_bucket(
+	const struct ovs_ct_limit_info *info, u16 zone)
+{
+	return &info->limits[zone & (CT_LIMIT_HASH_BUCKETS - 1)];
+}
+
+/* Call with ovs_mutex */
+static void ct_limit_set(const struct ovs_ct_limit_info *info,
+			 struct ovs_ct_limit *new_ct_limit)
+{
+	struct ovs_ct_limit *ct_limit;
+	struct hlist_head *head;
+
+	head = ct_limit_hash_bucket(info, new_ct_limit->zone);
+	hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
+		if (ct_limit->zone == new_ct_limit->zone) {
+			hlist_replace_rcu(&ct_limit->hlist_node,
+					  &new_ct_limit->hlist_node);
+			kfree_rcu(ct_limit, rcu);
+			return;
+		}
+	}
+
+	hlist_add_head_rcu(&new_ct_limit->hlist_node, head);
+}
+
+/* Call with ovs_mutex */
+static void ct_limit_del(const struct ovs_ct_limit_info *info, u16 zone)
+{
+	struct ovs_ct_limit *ct_limit;
+	struct hlist_head *head;
+	struct hlist_node *n;
+
+	head = ct_limit_hash_bucket(info, zone);
+	hlist_for_each_entry_safe(ct_limit, n, head, hlist_node) {
+		if (ct_limit->zone == zone) {
+			hlist_del_rcu(&ct_limit->hlist_node);
+			kfree_rcu(ct_limit, rcu);
+			return;
+		}
+	}
+}
+
+/* Call with RCU read lock */
+static u32 ct_limit_get(const struct ovs_ct_limit_info *info, u16 zone)
+{
+	struct ovs_ct_limit *ct_limit;
+	struct hlist_head *head;
+
+	head = ct_limit_hash_bucket(info, zone);
+	hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
+		if (ct_limit->zone == zone)
+			return ct_limit->limit;
+	}
+
+	return info->default_limit;
+}
+
+static int ovs_ct_check_limit(struct net *net,
+			      const struct ovs_conntrack_info *info,
+			      const struct nf_conntrack_tuple *tuple)
+{
+	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+	const struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
+	u32 per_zone_limit, connections;
+	u32 conncount_key;
+
+	conncount_key = info->zone.id;
+
+	per_zone_limit = ct_limit_get(ct_limit_info, info->zone.id);
+	if (per_zone_limit == OVS_CT_LIMIT_UNLIMITED)
+		return 0;
+
+	connections = nf_conncount_count(net, ct_limit_info->data,
+					 &conncount_key, tuple, &info->zone);
+	if (connections > per_zone_limit)
+		return -ENOMEM;
+
+	return 0;
+}
+#endif
+
 /* Lookup connection and confirm if unconfirmed. */
 static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
 			 const struct ovs_conntrack_info *info,
@@ -1054,6 +1165,21 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
 	if (!ct)
 		return 0;
 
+#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
+	if (static_branch_unlikely(&ovs_ct_limit_enabled)) {
+		if (!nf_ct_is_confirmed(ct)) {
+			err = ovs_ct_check_limit(net, info,
+				&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+			if (err) {
+				net_warn_ratelimited("openvswitch: zone: %u "
+					"exceeds conntrack limit\n",
+					info->zone.id);
+				return err;
+			}
+		}
+	}
+#endif
+
 	/* Set the conntrack event mask if given. NEW and DELETE events have
 	 * their own groups, but the NFNLGRP_CONNTRACK_UPDATE group listener
 	 * typically would receive many kinds of updates. Setting the event
@@ -1655,7 +1781,420 @@ static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info)
 	nf_ct_tmpl_free(ct_info->ct);
 }
 
-void ovs_ct_init(struct net *net)
+#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
+static int ovs_ct_limit_init(struct net *net, struct ovs_net *ovs_net)
+{
+	int i, err;
+
+	ovs_net->ct_limit_info = kmalloc(sizeof(*ovs_net->ct_limit_info),
+					 GFP_KERNEL);
+	if (!ovs_net->ct_limit_info)
+		return -ENOMEM;
+
+	ovs_net->ct_limit_info->default_limit = OVS_CT_LIMIT_DEFAULT;
+	ovs_net->ct_limit_info->limits =
+		kmalloc_array(CT_LIMIT_HASH_BUCKETS, sizeof(struct hlist_head),
+			      GFP_KERNEL);
+	if (!ovs_net->ct_limit_info->limits) {
+		kfree(ovs_net->ct_limit_info);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; i++)
+		INIT_HLIST_HEAD(&ovs_net->ct_limit_info->limits[i]);
+
+	ovs_net->ct_limit_info->data =
+		nf_conncount_init(net, NFPROTO_INET, sizeof(u32));
+
+	if (IS_ERR(ovs_net->ct_limit_info->data)) {
+		err = PTR_ERR(ovs_net->ct_limit_info->data);
+		kfree(ovs_net->ct_limit_info->limits);
+		kfree(ovs_net->ct_limit_info);
+		pr_err("openvswitch: failed to init nf_conncount %d\n", err);
+		return err;
+	}
+	return 0;
+}
+
+static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
+{
+	const struct ovs_ct_limit_info *info = ovs_net->ct_limit_info;
+	int i;
+
+	nf_conncount_destroy(net, NFPROTO_INET, info->data);
+	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
+		struct hlist_head *head = &info->limits[i];
+		struct ovs_ct_limit *ct_limit;
+
+		hlist_for_each_entry_rcu(ct_limit, head, hlist_node)
+			kfree_rcu(ct_limit, rcu);
+	}
+	kfree(ovs_net->ct_limit_info->limits);
+	kfree(ovs_net->ct_limit_info);
+}
+
+static struct sk_buff *
+ovs_ct_limit_cmd_reply_start(struct genl_info *info, u8 cmd,
+			     struct ovs_header **ovs_reply_header)
+{
+	struct ovs_header *ovs_header = info->userhdr;
+	struct sk_buff *skb;
+
+	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	*ovs_reply_header = genlmsg_put(skb, info->snd_portid,
+					info->snd_seq,
+					&dp_ct_limit_genl_family, 0, cmd);
+
+	if (!*ovs_reply_header) {
+		nlmsg_free(skb);
+		return ERR_PTR(-EMSGSIZE);
+	}
+	(*ovs_reply_header)->dp_ifindex = ovs_header->dp_ifindex;
+
+	return skb;
+}
+
+static bool check_zone_id(int zone_id, u16 *pzone)
+{
+	if (zone_id >= 0 && zone_id <= 65535) {
+		*pzone = (u16)zone_id;
+		return true;
+	}
+	return false;
+}
+
+static int ovs_ct_limit_set_zone_limit(struct nlattr *nla_zone_limit,
+				       struct ovs_ct_limit_info *info)
+{
+	struct ovs_zone_limit *zone_limit;
+	int rem;
+	u16 zone;
+
+	rem = NLA_ALIGN(nla_len(nla_zone_limit));
+	zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);
+
+	while (rem >= sizeof(*zone_limit)) {
+		if (unlikely(zone_limit->zone_id ==
+				OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
+			ovs_lock();
+			info->default_limit = zone_limit->limit;
+			ovs_unlock();
+		} else if (unlikely(!check_zone_id(
+				zone_limit->zone_id, &zone))) {
+			OVS_NLERR(true, "zone id is out of range");
+		} else {
+			struct ovs_ct_limit *ct_limit;
+
+			ct_limit = kmalloc(sizeof(*ct_limit), GFP_KERNEL);
+			if (!ct_limit)
+				return -ENOMEM;
+
+			ct_limit->zone = zone;
+			ct_limit->limit = zone_limit->limit;
+
+			ovs_lock();
+			ct_limit_set(info, ct_limit);
+			ovs_unlock();
+		}
+		rem -= NLA_ALIGN(sizeof(*zone_limit));
+		zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
+				NLA_ALIGN(sizeof(*zone_limit)));
+	}
+
+	if (rem)
+		OVS_NLERR(true, "set zone limit has %d unknown bytes", rem);
+
+	return 0;
+}
+
+static int ovs_ct_limit_del_zone_limit(struct nlattr *nla_zone_limit,
+				       struct ovs_ct_limit_info *info)
+{
+	struct ovs_zone_limit *zone_limit;
+	int rem;
+	u16 zone;
+
+	rem = NLA_ALIGN(nla_len(nla_zone_limit));
+	zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);
+
+	while (rem >= sizeof(*zone_limit)) {
+		if (unlikely(zone_limit->zone_id ==
+				OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
+			ovs_lock();
+			info->default_limit = OVS_CT_LIMIT_DEFAULT;
+			ovs_unlock();
+		} else if (unlikely(!check_zone_id(
+				zone_limit->zone_id, &zone))) {
+			OVS_NLERR(true, "zone id is out of range");
+		} else {
+			ovs_lock();
+			ct_limit_del(info, zone);
+			ovs_unlock();
+		}
+		rem -= NLA_ALIGN(sizeof(*zone_limit));
+		zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
+				NLA_ALIGN(sizeof(*zone_limit)));
+	}
+
+	if (rem)
+		OVS_NLERR(true, "del zone limit has %d unknown bytes", rem);
+
+	return 0;
+}
+
+static int ovs_ct_limit_get_default_limit(struct ovs_ct_limit_info *info,
+					  struct sk_buff *reply)
+{
+	struct ovs_zone_limit zone_limit;
+	int err;
+
+	zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE;
+	zone_limit.limit = info->default_limit;
+	err = nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int __ovs_ct_limit_get_zone_limit(struct net *net,
+					 struct nf_conncount_data *data,
+					 u16 zone_id, u32 limit,
+					 struct sk_buff *reply)
+{
+	struct nf_conntrack_zone ct_zone;
+	struct ovs_zone_limit zone_limit;
+	u32 conncount_key = zone_id;
+
+	zone_limit.zone_id = zone_id;
+	zone_limit.limit = limit;
+	nf_ct_zone_init(&ct_zone, zone_id, NF_CT_DEFAULT_ZONE_DIR, 0);
+
+	zone_limit.count = nf_conncount_count(net, data, &conncount_key, NULL,
+					      &ct_zone);
+	return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
+}
+
+static int ovs_ct_limit_get_zone_limit(struct net *net,
+				       struct nlattr *nla_zone_limit,
+				       struct ovs_ct_limit_info *info,
+				       struct sk_buff *reply)
+{
+	struct ovs_zone_limit *zone_limit;
+	int rem, err;
+	u32 limit;
+	u16 zone;
+
+	rem = NLA_ALIGN(nla_len(nla_zone_limit));
+	zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);
+
+	while (rem >= sizeof(*zone_limit)) {
+		if (unlikely(zone_limit->zone_id ==
+				OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
+			err = ovs_ct_limit_get_default_limit(info, reply);
+			if (err)
+				return err;
+		} else if (unlikely(!check_zone_id(zone_limit->zone_id,
+							&zone))) {
+			OVS_NLERR(true, "zone id is out of range");
+		} else {
+			rcu_read_lock();
+			limit = ct_limit_get(info, zone);
+			rcu_read_unlock();
+
+			err = __ovs_ct_limit_get_zone_limit(
+				net, info->data, zone, limit, reply);
+			if (err)
+				return err;
+		}
+		rem -= NLA_ALIGN(sizeof(*zone_limit));
+		zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
+				NLA_ALIGN(sizeof(*zone_limit)));
+	}
+
+	if (rem)
+		OVS_NLERR(true, "get zone limit has %d unknown bytes", rem);
+
+	return 0;
+}
+
+static int ovs_ct_limit_get_all_zone_limit(struct net *net,
+					   struct ovs_ct_limit_info *info,
+					   struct sk_buff *reply)
+{
+	struct ovs_ct_limit *ct_limit;
+	struct hlist_head *head;
+	int i, err = 0;
+
+	err = ovs_ct_limit_get_default_limit(info, reply);
+	if (err)
+		return err;
+
+	rcu_read_lock();
+	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
+		head = &info->limits[i];
+		hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
+			err = __ovs_ct_limit_get_zone_limit(net, info->data,
+				ct_limit->zone, ct_limit->limit, reply);
+			if (err)
+				goto exit_err;
+		}
+	}
+
+exit_err:
+	rcu_read_unlock();
+	return err;
+}
+
+static int ovs_ct_limit_cmd_set(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr **a = info->attrs;
+	struct sk_buff *reply;
+	struct ovs_header *ovs_reply_header;
+	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
+	struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
+	int err;
+
+	reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_SET,
+					     &ovs_reply_header);
+	if (IS_ERR(reply))
+		return PTR_ERR(reply);
+
+	if (!a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
+		err = -EINVAL;
+		goto exit_err;
+	}
+
+	err = ovs_ct_limit_set_zone_limit(a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT],
+					  ct_limit_info);
+	if (err)
+		goto exit_err;
+
+	static_branch_enable(&ovs_ct_limit_enabled);
+
+	genlmsg_end(reply, ovs_reply_header);
+	return genlmsg_reply(reply, info);
+
+exit_err:
+	nlmsg_free(reply);
+	return err;
+}
+
+static int ovs_ct_limit_cmd_del(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr **a = info->attrs;
+	struct sk_buff *reply;
+	struct ovs_header *ovs_reply_header;
+	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
+	struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
+	int err;
+
+	reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_DEL,
+					     &ovs_reply_header);
+	if (IS_ERR(reply))
+		return PTR_ERR(reply);
+
+	if (!a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
+		err = -EINVAL;
+		goto exit_err;
+	}
+
+	err = ovs_ct_limit_del_zone_limit(a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT],
+					  ct_limit_info);
+	if (err)
+		goto exit_err;
+
+	genlmsg_end(reply, ovs_reply_header);
+	return genlmsg_reply(reply, info);
+
+exit_err:
+	nlmsg_free(reply);
+	return err;
+}
+
+static int ovs_ct_limit_cmd_get(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr **a = info->attrs;
+	struct nlattr *nla_reply;
+	struct sk_buff *reply;
+	struct ovs_header *ovs_reply_header;
+	struct net *net = sock_net(skb->sk);
+	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+	struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
+	int err;
+
+	reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_GET,
+					     &ovs_reply_header);
+	if (IS_ERR(reply))
+		return PTR_ERR(reply);
+
+	nla_reply = nla_nest_start(reply, OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
+
+	if (a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
+		err = ovs_ct_limit_get_zone_limit(
+			net, a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT], ct_limit_info,
+			reply);
+		if (err)
+			goto exit_err;
+	} else {
+		err = ovs_ct_limit_get_all_zone_limit(net, ct_limit_info,
+						      reply);
+		if (err)
+			goto exit_err;
+	}
+
+	nla_nest_end(reply, nla_reply);
+	genlmsg_end(reply, ovs_reply_header);
+	return genlmsg_reply(reply, info);
+
+exit_err:
+	nlmsg_free(reply);
+	return err;
+}
+
+static struct genl_ops ct_limit_genl_ops[] = {
+	{ .cmd = OVS_CT_LIMIT_CMD_SET,
+		.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN
+					   * privilege. */
+		.policy = ct_limit_policy,
+		.doit = ovs_ct_limit_cmd_set,
+	},
+	{ .cmd = OVS_CT_LIMIT_CMD_DEL,
+		.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN
+					   * privilege. */
+		.policy = ct_limit_policy,
+		.doit = ovs_ct_limit_cmd_del,
+	},
+	{ .cmd = OVS_CT_LIMIT_CMD_GET,
+		.flags = 0, /* OK for unprivileged users. */
+		.policy = ct_limit_policy,
+		.doit = ovs_ct_limit_cmd_get,
+	},
+};
+
+static const struct genl_multicast_group ovs_ct_limit_multicast_group = {
+	.name = OVS_CT_LIMIT_MCGROUP,
+};
+
+struct genl_family dp_ct_limit_genl_family __ro_after_init = {
+	.hdrsize = sizeof(struct ovs_header),
+	.name = OVS_CT_LIMIT_FAMILY,
+	.version = OVS_CT_LIMIT_VERSION,
+	.maxattr = OVS_CT_LIMIT_ATTR_MAX,
+	.netnsok = true,
+	.parallel_ops = true,
+	.ops = ct_limit_genl_ops,
+	.n_ops = ARRAY_SIZE(ct_limit_genl_ops),
+	.mcgrps = &ovs_ct_limit_multicast_group,
+	.n_mcgrps = 1,
+	.module = THIS_MODULE,
+};
+#endif
+
+int ovs_ct_init(struct net *net)
 {
 	unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE;
 	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
@@ -1666,12 +2205,22 @@ void ovs_ct_init(struct net *net)
 	} else {
 		ovs_net->xt_label = true;
 	}
 
+#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
+	return ovs_ct_limit_init(net, ovs_net);
+#else
+	return 0;
+#endif
 }
 
 void ovs_ct_exit(struct net *net)
 {
 	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
 
+#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
+	ovs_ct_limit_exit(net, ovs_net);
+#endif
+
 	if (ovs_net->xt_label)
 		nf_connlabels_put(net);
 }