Diffstat (limited to 'net')
112 files changed, 1282 insertions, 714 deletions
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index af73bc3acb40..410dd5e76c41 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -101,11 +101,11 @@ static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s);
 #define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1)
 #define __get_rpn_parity(line) (((line) >> 3) & 0x7)
 
+static DECLARE_WAIT_QUEUE_HEAD(rfcomm_wq);
+
 static void rfcomm_schedule(void)
 {
-	if (!rfcomm_thread)
-		return;
-	wake_up_process(rfcomm_thread);
+	wake_up_all(&rfcomm_wq);
 }
 
 /* ---- RFCOMM FCS computation ---- */
@@ -2086,24 +2086,22 @@ static void rfcomm_kill_listener(void)
 
 static int rfcomm_run(void *unused)
 {
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 	BT_DBG("");
 
 	set_user_nice(current, -10);
 
 	rfcomm_add_listener(BDADDR_ANY);
 
-	while (1) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
-		if (kthread_should_stop())
-			break;
+	add_wait_queue(&rfcomm_wq, &wait);
+	while (!kthread_should_stop()) {
 
 		/* Process stuff */
 		rfcomm_process_sessions();
 
-		schedule();
+		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 	}
-	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&rfcomm_wq, &wait);
 
 	rfcomm_kill_listener();
 
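The rfcomm change above is the wait-queue idiom that several patches in this set converge on: a dedicated wait queue plus DEFINE_WAIT_FUNC(wait, woken_wake_function) and wait_woken() replaces the open-coded set_current_state()/schedule() loop, so a wakeup arriving between the kthread_should_stop() check and the sleep is not lost. The net/core/dev.c and rtnetlink.c hunks below use the same helpers. A minimal, self-contained sketch of the pattern (example_wq and example_thread are illustrative names, not from the patch):

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);

static int example_thread(void *unused)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&example_wq, &wait);
	while (!kthread_should_stop()) {
		/* drain whatever work producers queued for us */

		/* returns immediately if a wakeup already arrived */
		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&example_wq, &wait);

	return 0;
}

/* producers: queue work, then call wake_up_all(&example_wq); */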
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 648d79ccf462..c465876c7861 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -813,10 +813,9 @@ static void __br_multicast_send_query(struct net_bridge *br,
 		return;
 
 	if (port) {
-		__skb_push(skb, sizeof(struct ethhdr));
 		skb->dev = port->dev;
 		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
-			dev_queue_xmit);
+			br_dev_queue_push_xmit);
 	} else {
 		br_multicast_select_own_querier(br, ip, skb);
 		netif_rx(skb);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 2ff9706647f2..e5ec470b851f 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -280,6 +280,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
 	[IFLA_BRPORT_MODE] = { .type = NLA_U8 },
 	[IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
 	[IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
+	[IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
 	[IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
 	[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
 };
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 654c9018e3e7..48da2c54a69e 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -18,6 +18,7 @@
 #include <net/netfilter/ipv6/nf_reject.h>
 #include <linux/ip.h>
 #include <net/ip.h>
+#include <net/ip6_checksum.h>
 #include <linux/netfilter_bridge.h>
 #include "../br_private.h"
 
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index de6662b14e1f..7e38b729696a 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -149,6 +149,7 @@ static int process_one_ticket(struct ceph_auth_client *ac,
 	struct ceph_crypto_key old_key;
 	void *ticket_buf = NULL;
 	void *tp, *tpend;
+	void **ptp;
 	struct ceph_timespec new_validity;
 	struct ceph_crypto_key new_session_key;
 	struct ceph_buffer *new_ticket_blob;
@@ -208,25 +209,19 @@ static int process_one_ticket(struct ceph_auth_client *ac,
 			goto out;
 		}
 		tp = ticket_buf;
-		dlen = ceph_decode_32(&tp);
+		ptp = &tp;
+		tpend = *ptp + dlen;
 	} else {
 		/* unencrypted */
-		ceph_decode_32_safe(p, end, dlen, bad);
-		ticket_buf = kmalloc(dlen, GFP_NOFS);
-		if (!ticket_buf) {
-			ret = -ENOMEM;
-			goto out;
-		}
-		tp = ticket_buf;
-		ceph_decode_need(p, end, dlen, bad);
-		ceph_decode_copy(p, ticket_buf, dlen);
+		ptp = p;
+		tpend = end;
 	}
-	tpend = tp + dlen;
+	ceph_decode_32_safe(ptp, tpend, dlen, bad);
 	dout(" ticket blob is %d bytes\n", dlen);
-	ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
-	blob_struct_v = ceph_decode_8(&tp);
-	new_secret_id = ceph_decode_64(&tp);
-	ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
+	ceph_decode_need(ptp, tpend, 1 + sizeof(u64), bad);
+	blob_struct_v = ceph_decode_8(ptp);
+	new_secret_id = ceph_decode_64(ptp);
+	ret = ceph_decode_buffer(&new_ticket_blob, ptp, tpend);
 	if (ret)
 		goto out;
 
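The rewrite drops the separately kmalloc'ed bounce buffer for the unencrypted case: both paths now feed one cursor (ptp) bounded by tpend into the bounds-checked ceph_decode_*() helpers, so a short or corrupt ticket is rejected instead of being copied with an attacker-influenced length. The idiom, reduced to a generic bounds-checked reader (decode_u32() below is an illustrative stand-in, not the ceph macro):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustrative sketch of a ceph_decode_32_safe()-style helper. */
static int decode_u32(void **p, void *end, u32 *v)
{
	if (*p + sizeof(*v) > end)	/* check remaining space before every read */
		return -ERANGE;
	memcpy(v, *p, sizeof(*v));	/* unaligned-safe copy out of the buffer */
	*p += sizeof(*v);		/* advance the shared cursor */
	return 0;
}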
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 62fc5e7a9acf..790fe89d90c0 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -90,11 +90,82 @@ static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
 
 static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
 
+/*
+ * Should be used for buffers allocated with ceph_kvmalloc().
+ * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
+ * in-buffer (msg front).
+ *
+ * Dispose of @sgt with teardown_sgtable().
+ *
+ * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
+ * in cases where a single sg is sufficient.  No attempt to reduce the
+ * number of sgs by squeezing physically contiguous pages together is
+ * made though, for simplicity.
+ */
+static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
+			 const void *buf, unsigned int buf_len)
+{
+	struct scatterlist *sg;
+	const bool is_vmalloc = is_vmalloc_addr(buf);
+	unsigned int off = offset_in_page(buf);
+	unsigned int chunk_cnt = 1;
+	unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
+	int i;
+	int ret;
+
+	if (buf_len == 0) {
+		memset(sgt, 0, sizeof(*sgt));
+		return -EINVAL;
+	}
+
+	if (is_vmalloc) {
+		chunk_cnt = chunk_len >> PAGE_SHIFT;
+		chunk_len = PAGE_SIZE;
+	}
+
+	if (chunk_cnt > 1) {
+		ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
+		if (ret)
+			return ret;
+	} else {
+		WARN_ON(chunk_cnt != 1);
+		sg_init_table(prealloc_sg, 1);
+		sgt->sgl = prealloc_sg;
+		sgt->nents = sgt->orig_nents = 1;
+	}
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *page;
+		unsigned int len = min(chunk_len - off, buf_len);
+
+		if (is_vmalloc)
+			page = vmalloc_to_page(buf);
+		else
+			page = virt_to_page(buf);
+
+		sg_set_page(sg, page, len, off);
+
+		off = 0;
+		buf += len;
+		buf_len -= len;
+	}
+	WARN_ON(buf_len != 0);
+
+	return 0;
+}
+
+static void teardown_sgtable(struct sg_table *sgt)
+{
+	if (sgt->orig_nents > 1)
+		sg_free_table(sgt);
+}
+
 static int ceph_aes_encrypt(const void *key, int key_len,
 			    void *dst, size_t *dst_len,
 			    const void *src, size_t src_len)
 {
-	struct scatterlist sg_in[2], sg_out[1];
+	struct scatterlist sg_in[2], prealloc_sg;
+	struct sg_table sg_out;
 	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
 	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
 	int ret;
@@ -110,16 +181,18 @@ static int ceph_aes_encrypt(const void *key, int key_len,
 
 	*dst_len = src_len + zero_padding;
 
-	crypto_blkcipher_setkey((void *)tfm, key, key_len);
 	sg_init_table(sg_in, 2);
 	sg_set_buf(&sg_in[0], src, src_len);
 	sg_set_buf(&sg_in[1], pad, zero_padding);
-	sg_init_table(sg_out, 1);
-	sg_set_buf(sg_out, dst, *dst_len);
+	ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
+	if (ret)
+		goto out_tfm;
+
+	crypto_blkcipher_setkey((void *)tfm, key, key_len);
 	iv = crypto_blkcipher_crt(tfm)->iv;
 	ivsize = crypto_blkcipher_ivsize(tfm);
-
 	memcpy(iv, aes_iv, ivsize);
+
 	/*
 	print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
 			key, key_len, 1);
@@ -128,16 +201,22 @@ static int ceph_aes_encrypt(const void *key, int key_len,
 	print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
 			pad, zero_padding, 1);
 	*/
-	ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
+	ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
 				       src_len + zero_padding);
-	crypto_free_blkcipher(tfm);
-	if (ret < 0)
+	if (ret < 0) {
 		pr_err("ceph_aes_crypt failed %d\n", ret);
+		goto out_sg;
+	}
 	/*
 	print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
 			dst, *dst_len, 1);
 	*/
-	return 0;
+
+out_sg:
+	teardown_sgtable(&sg_out);
+out_tfm:
+	crypto_free_blkcipher(tfm);
+	return ret;
 }
 
 static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
@@ -145,7 +224,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
 			     const void *src1, size_t src1_len,
 			     const void *src2, size_t src2_len)
 {
-	struct scatterlist sg_in[3], sg_out[1];
+	struct scatterlist sg_in[3], prealloc_sg;
+	struct sg_table sg_out;
 	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
 	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
 	int ret;
@@ -161,17 +241,19 @@
 
 	*dst_len = src1_len + src2_len + zero_padding;
 
-	crypto_blkcipher_setkey((void *)tfm, key, key_len);
 	sg_init_table(sg_in, 3);
 	sg_set_buf(&sg_in[0], src1, src1_len);
 	sg_set_buf(&sg_in[1], src2, src2_len);
 	sg_set_buf(&sg_in[2], pad, zero_padding);
-	sg_init_table(sg_out, 1);
-	sg_set_buf(sg_out, dst, *dst_len);
+	ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
+	if (ret)
+		goto out_tfm;
+
+	crypto_blkcipher_setkey((void *)tfm, key, key_len);
 	iv = crypto_blkcipher_crt(tfm)->iv;
 	ivsize = crypto_blkcipher_ivsize(tfm);
-
 	memcpy(iv, aes_iv, ivsize);
+
 	/*
 	print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
 			key, key_len, 1);
@@ -182,23 +264,30 @@
 	print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
 			pad, zero_padding, 1);
 	*/
-	ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
+	ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
 				       src1_len + src2_len + zero_padding);
-	crypto_free_blkcipher(tfm);
-	if (ret < 0)
+	if (ret < 0) {
 		pr_err("ceph_aes_crypt2 failed %d\n", ret);
+		goto out_sg;
+	}
 	/*
 	print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
 			dst, *dst_len, 1);
 	*/
-	return 0;
+
+out_sg:
+	teardown_sgtable(&sg_out);
+out_tfm:
+	crypto_free_blkcipher(tfm);
+	return ret;
 }
 
 static int ceph_aes_decrypt(const void *key, int key_len,
 			    void *dst, size_t *dst_len,
 			    const void *src, size_t src_len)
 {
-	struct scatterlist sg_in[1], sg_out[2];
+	struct sg_table sg_in;
+	struct scatterlist sg_out[2], prealloc_sg;
 	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
 	struct blkcipher_desc desc = { .tfm = tfm };
 	char pad[16];
@@ -210,16 +299,16 @@ static int ceph_aes_decrypt(const void *key, int key_len,
 	if (IS_ERR(tfm))
 		return PTR_ERR(tfm);
 
-	crypto_blkcipher_setkey((void *)tfm, key, key_len);
-	sg_init_table(sg_in, 1);
 	sg_init_table(sg_out, 2);
-	sg_set_buf(sg_in, src, src_len);
 	sg_set_buf(&sg_out[0], dst, *dst_len);
 	sg_set_buf(&sg_out[1], pad, sizeof(pad));
+	ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
+	if (ret)
+		goto out_tfm;
 
+	crypto_blkcipher_setkey((void *)tfm, key, key_len);
 	iv = crypto_blkcipher_crt(tfm)->iv;
 	ivsize = crypto_blkcipher_ivsize(tfm);
-
 	memcpy(iv, aes_iv, ivsize);
 
 	/*
@@ -228,12 +317,10 @@
 	print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
 		       src, src_len, 1);
 	*/
-
-	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
-	crypto_free_blkcipher(tfm);
+	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
 	if (ret < 0) {
 		pr_err("ceph_aes_decrypt failed %d\n", ret);
-		return ret;
+		goto out_sg;
 	}
 
 	if (src_len <= *dst_len)
@@ -251,7 +338,12 @@
 	print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
 		       dst, *dst_len, 1);
 	*/
-	return 0;
+
+out_sg:
+	teardown_sgtable(&sg_in);
+out_tfm:
+	crypto_free_blkcipher(tfm);
+	return ret;
 }
 
 static int ceph_aes_decrypt2(const void *key, int key_len,
@@ -259,7 +351,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
 			     void *dst2, size_t *dst2_len,
 			     const void *src, size_t src_len)
 {
-	struct scatterlist sg_in[1], sg_out[3];
+	struct sg_table sg_in;
+	struct scatterlist sg_out[3], prealloc_sg;
 	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
 	struct blkcipher_desc desc = { .tfm = tfm };
 	char pad[16];
@@ -271,17 +364,17 @@
 	if (IS_ERR(tfm))
 		return PTR_ERR(tfm);
 
-	sg_init_table(sg_in, 1);
-	sg_set_buf(sg_in, src, src_len);
 	sg_init_table(sg_out, 3);
 	sg_set_buf(&sg_out[0], dst1, *dst1_len);
 	sg_set_buf(&sg_out[1], dst2, *dst2_len);
 	sg_set_buf(&sg_out[2], pad, sizeof(pad));
+	ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
+	if (ret)
+		goto out_tfm;
 
 	crypto_blkcipher_setkey((void *)tfm, key, key_len);
 	iv = crypto_blkcipher_crt(tfm)->iv;
 	ivsize = crypto_blkcipher_ivsize(tfm);
-
 	memcpy(iv, aes_iv, ivsize);
 
 	/*
@@ -290,12 +383,10 @@
 	print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
 		       src, src_len, 1);
 	*/
-
-	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
-	crypto_free_blkcipher(tfm);
+	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
 	if (ret < 0) {
 		pr_err("ceph_aes_decrypt failed %d\n", ret);
-		return ret;
+		goto out_sg;
 	}
 
 	if (src_len <= *dst1_len)
@@ -325,7 +416,11 @@
 		       dst2, *dst2_len, 1);
 	*/
 
-	return 0;
+out_sg:
+	teardown_sgtable(&sg_in);
+out_tfm:
+	crypto_free_blkcipher(tfm);
+	return ret;
 }
 
 
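The new setup_sgtable() exists because sg_set_buf()/virt_to_page() are only valid for physically contiguous (kmalloc'ed) memory; buffers coming from ceph_kvmalloc() may be vmalloc'ed, where each page has to be looked up with vmalloc_to_page() and given its own scatterlist entry. A hedged usage sketch of the helpers as the rewritten ceph_aes_*() functions use them (example_crypt() is illustrative and assumes it lives next to the static helpers; error paths trimmed):

#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Sketch: run a blkcipher over a buffer that may be kmalloc'ed or vmalloc'ed. */
static int example_crypt(struct crypto_blkcipher *tfm, void *buf, size_t len)
{
	struct blkcipher_desc desc = { .tfm = tfm };
	struct scatterlist prealloc_sg;	/* used when one sg entry is enough */
	struct sg_table sgt;
	int ret;

	ret = setup_sgtable(&sgt, &prealloc_sg, buf, len);
	if (ret)
		return ret;

	/* in-place encryption: same sg list as source and destination */
	ret = crypto_blkcipher_encrypt(&desc, sgt.sgl, sgt.sgl, len);

	teardown_sgtable(&sgt);	/* frees the table only if it was allocated */
	return ret;
}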
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 559c9f619c20..8d1653caffdb 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -484,7 +484,7 @@ static int ceph_tcp_connect(struct ceph_connection *con)
 			       IPPROTO_TCP, &sock);
 	if (ret)
 		return ret;
-	sock->sk->sk_allocation = GFP_NOFS;
+	sock->sk->sk_allocation = GFP_NOFS | __GFP_MEMALLOC;
 
 #ifdef CONFIG_LOCKDEP
 	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
@@ -509,6 +509,9 @@
 
 		return ret;
 	}
+
+	sk_set_memalloc(sock->sk);
+
 	con->sock = sock;
 	return 0;
 }
@@ -2769,8 +2772,11 @@ static void con_work(struct work_struct *work)
 {
 	struct ceph_connection *con = container_of(work, struct ceph_connection,
 						   work.work);
+	unsigned long pflags = current->flags;
 	bool fault;
 
+	current->flags |= PF_MEMALLOC;
+
 	mutex_lock(&con->mutex);
 	while (true) {
 		int ret;
@@ -2824,6 +2830,8 @@
 	con_fault_finish(con);
 
 	con->ops->put(con);
+
+	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 }
 
 /*
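sk_set_memalloc() plus __GFP_MEMALLOC lets the ceph socket dip into memory reserves, and con_work() marks itself PF_MEMALLOC so that allocations made while flushing data under memory pressure (for example when ceph backs swap or dirty page writeback) cannot deadlock waiting on reclaim that itself needs this socket. The save/restore idiom in isolation (example_work() is an illustrative worker, not part of the patch):

#include <linux/sched.h>
#include <linux/workqueue.h>

static void example_work(struct work_struct *work)
{
	unsigned long pflags = current->flags;

	current->flags |= PF_MEMALLOC;	/* allow dipping into reserves */

	/* ... socket I/O that must make forward progress under reclaim ... */

	/* restore only the PF_MEMALLOC bit to its previous state */
	tsk_restore_flags(current, pflags, PF_MEMALLOC);
}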
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index f3fc54eac09d..6f164289bde8 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1007,8 +1007,8 @@ static void put_osd(struct ceph_osd *osd)
 static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
 {
 	dout("__remove_osd %p\n", osd);
-	BUG_ON(!list_empty(&osd->o_requests));
-	BUG_ON(!list_empty(&osd->o_linger_requests));
+	WARN_ON(!list_empty(&osd->o_requests));
+	WARN_ON(!list_empty(&osd->o_linger_requests));
 
 	rb_erase(&osd->o_node, &osdc->osds);
 	list_del_init(&osd->o_osd_lru);
@@ -1254,6 +1254,8 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
 		if (list_empty(&req->r_osd_item))
 			req->r_osd = NULL;
 	}
+
+	list_del_init(&req->r_req_lru_item); /* can be on notarget */
 	ceph_osdc_put_request(req);
 }
 
@@ -1395,6 +1397,7 @@ static int __map_request(struct ceph_osd_client *osdc,
 	if (req->r_osd) {
 		__cancel_request(req);
 		list_del_init(&req->r_osd_item);
+		list_del_init(&req->r_linger_osd_item);
 		req->r_osd = NULL;
 	}
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 945bbd001359..3acff0974560 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -7200,11 +7200,10 @@ static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
 	 */
 	struct net *net;
 	bool unregistering;
-	DEFINE_WAIT(wait);
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
+	add_wait_queue(&netdev_unregistering_wq, &wait);
 	for (;;) {
-		prepare_to_wait(&netdev_unregistering_wq, &wait,
-				TASK_UNINTERRUPTIBLE);
 		unregistering = false;
 		rtnl_lock();
 		list_for_each_entry(net, net_list, exit_list) {
@@ -7216,9 +7215,10 @@
 		if (!unregistering)
 			break;
 		__rtnl_unlock();
-		schedule();
+
+		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 	}
-	finish_wait(&netdev_unregistering_wq, &wait);
+	remove_wait_queue(&netdev_unregistering_wq, &wait);
 }
 
 static void __net_exit default_device_exit_batch(struct list_head *net_list)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index a6882686ca3a..88e8de3b59b0 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -365,11 +365,10 @@ static void rtnl_lock_unregistering_all(void)
 {
 	struct net *net;
 	bool unregistering;
-	DEFINE_WAIT(wait);
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
+	add_wait_queue(&netdev_unregistering_wq, &wait);
 	for (;;) {
-		prepare_to_wait(&netdev_unregistering_wq, &wait,
-				TASK_UNINTERRUPTIBLE);
 		unregistering = false;
 		rtnl_lock();
 		for_each_net(net) {
@@ -381,9 +380,10 @@
 		if (!unregistering)
 			break;
 		__rtnl_unlock();
-		schedule();
+
+		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 	}
-	finish_wait(&netdev_unregistering_wq, &wait);
+	remove_wait_queue(&netdev_unregistering_wq, &wait);
 }
 
 /**
@@ -1498,6 +1498,7 @@ static int do_setlink(const struct sk_buff *skb,
 			goto errout;
 		}
 		if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
+			put_net(net);
 			err = -EPERM;
 			goto errout;
 		}
@@ -2685,13 +2686,20 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
 	int idx = 0;
 	u32 portid = NETLINK_CB(cb->skb).portid;
 	u32 seq = cb->nlh->nlmsg_seq;
-	struct nlattr *extfilt;
 	u32 filter_mask = 0;
 
-	extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
-				  IFLA_EXT_MASK);
-	if (extfilt)
-		filter_mask = nla_get_u32(extfilt);
+	if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
+		struct nlattr *extfilt;
+
+		extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
+					  IFLA_EXT_MASK);
+		if (extfilt) {
+			if (nla_len(extfilt) < sizeof(filter_mask))
+				return -EINVAL;
+
+			filter_mask = nla_get_u32(extfilt);
+		}
+	}
 
 	rcu_read_lock();
 	for_each_netdev_rcu(net, dev) {
@@ -2798,6 +2806,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (br_spec) {
 		nla_for_each_nested(attr, br_spec, rem) {
 			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
+				if (nla_len(attr) < sizeof(flags))
+					return -EINVAL;
+
 				have_flags = true;
 				flags = nla_get_u16(attr);
 				break;
@@ -2868,6 +2879,9 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (br_spec) {
 		nla_for_each_nested(attr, br_spec, rem) {
 			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
+				if (nla_len(attr) < sizeof(flags))
+					return -EINVAL;
+
 				have_flags = true;
 				flags = nla_get_u16(attr);
 				break;
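rtnl_bridge_setlink()/rtnl_bridge_dellink() read IFLA_BRIDGE_FLAGS straight out of the af_spec nest without a policy table, so the new nla_len() checks are what stands between a truncated attribute and an out-of-bounds read by nla_get_u16()/nla_get_u32(), which do not validate length themselves. The same protection can also come from parsing the nest against a policy; a hedged sketch of that alternative shape (the local policy table and variable names are illustrative):

	struct nlattr *tb[IFLA_BRIDGE_MAX + 1];
	static const struct nla_policy policy[IFLA_BRIDGE_MAX + 1] = {
		[IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
	};
	int err;

	/* nla_parse_nested() rejects attributes shorter than their declared type */
	err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, br_spec, policy);
	if (err)
		return err;

	if (tb[IFLA_BRIDGE_FLAGS])
		flags = nla_get_u16(tb[IFLA_BRIDGE_FLAGS]);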
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c16615bfb61e..32e31c299631 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -552,20 +552,13 @@ static void kfree_skbmem(struct sk_buff *skb)
 	case SKB_FCLONE_CLONE:
 		fclones = container_of(skb, struct sk_buff_fclones, skb2);
 
-		/* Warning : We must perform the atomic_dec_and_test() before
-		 * setting skb->fclone back to SKB_FCLONE_FREE, otherwise
-		 * skb_clone() could set clone_ref to 2 before our decrement.
-		 * Anyway, if we are going to free the structure, no need to
-		 * rewrite skb->fclone.
+		/* The clone portion is available for
+		 * fast-cloning again.
 		 */
-		if (atomic_dec_and_test(&fclones->fclone_ref)) {
+		skb->fclone = SKB_FCLONE_FREE;
+
+		if (atomic_dec_and_test(&fclones->fclone_ref))
 			kmem_cache_free(skbuff_fclone_cache, fclones);
-		} else {
-			/* The clone portion is available for
-			 * fast-cloning again.
-			 */
-			skb->fclone = SKB_FCLONE_FREE;
-		}
 		break;
 	}
 }
@@ -887,11 +880,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 	if (skb->fclone == SKB_FCLONE_ORIG &&
 	    n->fclone == SKB_FCLONE_FREE) {
 		n->fclone = SKB_FCLONE_CLONE;
-		/* As our fastclone was free, clone_ref must be 1 at this point.
-		 * We could use atomic_inc() here, but it is faster
-		 * to set the final value.
-		 */
-		atomic_set(&fclones->fclone_ref, 2);
+		atomic_inc(&fclones->fclone_ref);
 	} else {
 		if (skb_pfmemalloc(skb))
 			gfp_mask |= __GFP_MEMALLOC;
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index ca11d283bbeb..93ea80196f0e 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1080,13 +1080,13 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
 	if (!app)
 		return -EMSGSIZE;
 
-	spin_lock(&dcb_lock);
+	spin_lock_bh(&dcb_lock);
 	list_for_each_entry(itr, &dcb_app_list, list) {
 		if (itr->ifindex == netdev->ifindex) {
 			err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
 				      &itr->app);
 			if (err) {
-				spin_unlock(&dcb_lock);
+				spin_unlock_bh(&dcb_lock);
 				return -EMSGSIZE;
 			}
 		}
@@ -1097,7 +1097,7 @@
 	else
 		dcbx = -EOPNOTSUPP;
 
-	spin_unlock(&dcb_lock);
+	spin_unlock_bh(&dcb_lock);
 	nla_nest_end(skb, app);
 
 	/* get peer info if available */
@@ -1234,7 +1234,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
 	}
 
 	/* local app */
-	spin_lock(&dcb_lock);
+	spin_lock_bh(&dcb_lock);
 	app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
 	if (!app)
 		goto dcb_unlock;
@@ -1271,7 +1271,7 @@
 	else
 		dcbx = -EOPNOTSUPP;
 
-	spin_unlock(&dcb_lock);
+	spin_unlock_bh(&dcb_lock);
 
 	/* features flags */
 	if (ops->getfeatcfg) {
@@ -1326,7 +1326,7 @@
 	return 0;
 
 dcb_unlock:
-	spin_unlock(&dcb_lock);
+	spin_unlock_bh(&dcb_lock);
 nla_put_failure:
 	return err;
 }
@@ -1762,10 +1762,10 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
 	struct dcb_app_type *itr;
 	u8 prio = 0;
 
-	spin_lock(&dcb_lock);
+	spin_lock_bh(&dcb_lock);
 	if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
 		prio = itr->app.priority;
-	spin_unlock(&dcb_lock);
+	spin_unlock_bh(&dcb_lock);
 
 	return prio;
 }
@@ -1789,7 +1789,7 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
 	if (dev->dcbnl_ops->getdcbx)
 		event.dcbx = dev->dcbnl_ops->getdcbx(dev);
 
-	spin_lock(&dcb_lock);
+	spin_lock_bh(&dcb_lock);
 	/* Search for existing match and replace */
 	if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
 		if (new->priority)
@@ -1804,7 +1804,7 @@
 	if (new->priority)
 		err = dcb_app_add(new, dev->ifindex);
 out:
-	spin_unlock(&dcb_lock);
+	spin_unlock_bh(&dcb_lock);
 	if (!err)
 		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
 	return err;
@@ -1823,10 +1823,10 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
 	struct dcb_app_type *itr;
 	u8 prio = 0;
 
-	spin_lock(&dcb_lock);
+	spin_lock_bh(&dcb_lock);
 	if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
 		prio |= 1 << itr->app.priority;
-	spin_unlock(&dcb_lock);
+	spin_unlock_bh(&dcb_lock);
 
 	return prio;
 }
@@ -1850,7 +1850,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
 	if (dev->dcbnl_ops->getdcbx)
 		event.dcbx = dev->dcbnl_ops->getdcbx(dev);
 
-	spin_lock(&dcb_lock);
+	spin_lock_bh(&dcb_lock);
 	/* Search for existing match and abort if found */
 	if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
 		err = -EEXIST;
@@ -1859,7 +1859,7 @@
 
 	err = dcb_app_add(new, dev->ifindex);
 out:
-	spin_unlock(&dcb_lock);
+	spin_unlock_bh(&dcb_lock);
 	if (!err)
 		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
 	return err;
@@ -1882,7 +1882,7 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
 	if (dev->dcbnl_ops->getdcbx)
 		event.dcbx = dev->dcbnl_ops->getdcbx(dev);
 
-	spin_lock(&dcb_lock);
+	spin_lock_bh(&dcb_lock);
 	/* Search for existing match and remove it. */
 	if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) {
 		list_del(&itr->list);
@@ -1890,7 +1890,7 @@
 		err = 0;
 	}
 
-	spin_unlock(&dcb_lock);
+	spin_unlock_bh(&dcb_lock);
 	if (!err)
 		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
 	return err;
@@ -1902,12 +1902,12 @@ static void dcb_flushapp(void)
 	struct dcb_app_type *app;
 	struct dcb_app_type *tmp;
 
-	spin_lock(&dcb_lock);
+	spin_lock_bh(&dcb_lock);
 	list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
 		list_del(&app->list);
 		kfree(app);
 	}
-	spin_unlock(&dcb_lock);
+	spin_unlock_bh(&dcb_lock);
 }
 
 static int __init dcbnl_init(void)
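dcb_lock is taken with the _bh variants throughout because lookups such as dcb_getapp()/dcb_ieee_getapp_mask() can be reached from softirq context (the transmit path); if process context holds the same lock with a plain spin_lock() and a softirq on that CPU then tries to take it, the CPU deadlocks on itself. Minimal shape of the problem and the fix (example_lock and the two functions are illustrative):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* process context: must keep bottom halves disabled while holding the lock */
static void process_side(void)
{
	spin_lock_bh(&example_lock);
	/* ... modify the shared app list ... */
	spin_unlock_bh(&example_lock);
}

/* softirq context (e.g. called under the xmit path): plain spin_lock is fine */
static void softirq_side(void)
{
	spin_lock(&example_lock);
	/* ... read the shared app list ... */
	spin_unlock(&example_lock);
}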
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 6d1817449c36..ab03e00ffe8f 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -489,11 +489,14 @@ static void dsa_slave_phy_setup(struct dsa_slave_priv *p,
 	/* We could not connect to a designated PHY, so use the switch internal
 	 * MDIO bus instead
 	 */
-	if (!p->phy)
+	if (!p->phy) {
 		p->phy = ds->slave_mii_bus->phy_map[p->port];
-	else
+		phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
+				   p->phy_interface);
+	} else {
 		pr_info("attached PHY at address %d [%s]\n",
 			p->phy->addr, p->phy->drv->name);
+	}
 }
 
 int dsa_slave_suspend(struct net_device *slave_dev)
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 8b7fe5b03906..e67da4e6c324 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1386,6 +1386,17 @@ out:
 	return pp;
 }
 
+int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+{
+	if (sk->sk_family == AF_INET)
+		return ip_recv_error(sk, msg, len, addr_len);
+#if IS_ENABLED(CONFIG_IPV6)
+	if (sk->sk_family == AF_INET6)
+		return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
+#endif
+	return -EINVAL;
+}
+
 static int inet_gro_complete(struct sk_buff *skb, int nhoff)
 {
 	__be16 newlen = htons(skb->len - nhoff);
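inet_recv_error() gives dual-stack datagram sockets a single entry point for MSG_ERRQUEUE reads, dispatching to ip_recv_error() or, via pingv6_ops, to the IPv6 variant so the ipv6 module can stay loadable. A hedged sketch of how a recvmsg implementation would use it (example_recvmsg() and its reduced signature are illustrative only):

static int example_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			   int flags, int *addr_len)
{
	/* error-queue reads bypass the normal receive path entirely */
	if (flags & MSG_ERRQUEUE)
		return inet_recv_error(sk, msg, len, addr_len);

	/* ... normal datagram receive path ... */
	return 0;
}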
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index f2e15738534d..8f7bd56955b0 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -62,6 +62,10 @@ int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
 	else
 		res->tclassid = 0;
 #endif
+
+	if (err == -ESRCH)
+		err = -ENETUNREACH;
+
 	return err;
 }
 EXPORT_SYMBOL_GPL(__fib_lookup);
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 32e78924e246..606c520ffd5a 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -133,6 +133,8 @@ static int fou_gro_complete(struct sk_buff *skb, int nhoff)
 	int err = -ENOSYS;
 	const struct net_offload **offloads;
 
+	udp_tunnel_gro_complete(skb, nhoff);
+
 	rcu_read_lock();
 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
 	ops = rcu_dereference(offloads[proto]);
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index 065cd94c640c..dedb21e99914 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -144,6 +144,8 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
 	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
 	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
 
+	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
+
 	return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst,
 				   tos, ttl, df, src_port, dst_port, xnet);
 }
@@ -364,6 +366,7 @@ late_initcall(geneve_init_module);
 static void __exit geneve_cleanup_module(void)
 {
 	destroy_workqueue(geneve_wq);
+	unregister_pernet_subsys(&geneve_net_ops);
 }
 module_exit(geneve_cleanup_module);
 
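The cleanup fix pairs the register_pernet_subsys() done at init time with an unregister_pernet_subsys() at module exit; without it, unloading the module would leave a dangling pernet operations entry behind. The usual init/exit symmetry, reduced to a sketch (all example_* names are illustrative; geneve's real init also creates a workqueue):

#include <linux/module.h>
#include <net/net_namespace.h>

static __net_init int example_net_init(struct net *net) { return 0; }
static __net_exit void example_net_exit(struct net *net) { }

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};

static int __init example_init(void)
{
	return register_pernet_subsys(&example_net_ops);
}

static void __exit example_exit(void)
{
	/* must mirror everything example_init() registered */
	unregister_pernet_subsys(&example_net_ops);
}
module_init(example_init);
module_exit(example_exit);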
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index fb70e3ecc3e4..bb15d0e03d4f 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -318,9 +318,7 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
 	return scount;
 }
 
-#define igmp_skb_size(skb) (*(unsigned int *)((skb)->cb))
-
-static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
+static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
 {
 	struct sk_buff *skb;
 	struct rtable *rt;
@@ -330,6 +328,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 	struct flowi4 fl4;
 	int hlen = LL_RESERVED_SPACE(dev);
 	int tlen = dev->needed_tailroom;
+	unsigned int size = mtu;
 
 	while (1) {
 		skb = alloc_skb(size + hlen + tlen,
@@ -341,7 +340,6 @@
 			return NULL;
 		}
 		skb->priority = TC_PRIO_CONTROL;
-		igmp_skb_size(skb) = size;
 
 		rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
 					   0, 0,
@@ -354,6 +352,8 @@
 	skb_dst_set(skb, &rt->dst);
 	skb->dev = dev;
 
+	skb->reserved_tailroom = skb_end_offset(skb) -
+				 min(mtu, skb_end_offset(skb));
 	skb_reserve(skb, hlen);
 
 	skb_reset_network_header(skb);
@@ -423,8 +423,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
 	return skb;
 }
 
-#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? igmp_skb_size(skb) - (skb)->len : \
-	skb_tailroom(skb)) : 0)
+#define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0)
 
 static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
 	int type, int gdeleted, int sdeleted)
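Rather than stashing the intended packet size in skb->cb behind a custom igmp_skb_size() macro, the allocation now records the unusable tail area in skb->reserved_tailroom; skb_availroom() then reports how many bytes can still be appended without growing past the interface MTU. A sketch of the two halves (mtu, dev and the group-record size are stand-ins, not literal code from the file):

	/* at allocation: everything in the data area beyond
	 * min(mtu, skb_end_offset(skb)) is declared off limits
	 */
	skb->reserved_tailroom = skb_end_offset(skb) -
				 min(mtu, skb_end_offset(skb));

	/* when appending group records: */
	if (skb_availroom(skb) < sizeof(struct igmpv3_grec)) {
		/* no room left below the MTU - flush and start a new packet */
		skb = igmpv3_newpack(dev, mtu);
	}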
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index c373a9ad4555..9daf2177dc00 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -195,7 +195,7 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
 	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
 		if (!CMSG_OK(msg, cmsg))
 			return -EINVAL;
-#if defined(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_IPV6)
 		if (allow_ipv6 &&
 		    cmsg->cmsg_level == SOL_IPV6 &&
 		    cmsg->cmsg_type == IPV6_PKTINFO) {
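The old guard, #if defined(CONFIG_IPV6), is false when ipv6 is built as a module (the defined symbol is then CONFIG_IPV6_MODULE), so the IPV6_PKTINFO handling silently disappeared on CONFIG_IPV6=m kernels. IS_ENABLED() covers both cases; for reference:

/* kconfig.h semantics:
 *   CONFIG_FOO=y     -> defined(CONFIG_FOO),        IS_ENABLED(CONFIG_FOO) == 1
 *   CONFIG_FOO=m     -> defined(CONFIG_FOO_MODULE), IS_ENABLED(CONFIG_FOO) == 1
 *   CONFIG_FOO unset -> neither defined,            IS_ENABLED(CONFIG_FOO) == 0
 */
#if IS_ENABLED(CONFIG_IPV6)
	/* compiled when ipv6 is built in or modular */
#endif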
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 3e861011e4a3..1a7e979e80ba 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -528,6 +528,7 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
 	.validate = vti_tunnel_validate,
 	.newlink = vti_newlink,
 	.changelink = vti_changelink,
+	.dellink = ip_tunnel_dellink,
 	.get_size = vti_get_size,
 	.fill_info = vti_fill_info,
 };
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index a054fe083431..5c61328b7704 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -56,11 +56,11 @@ static bool ipv4_invert_tuple(struct nf_conntrack_tuple *tuple,
 	return true;
 }
 
-static int ipv4_print_tuple(struct seq_file *s,
+static void ipv4_print_tuple(struct seq_file *s,
 			    const struct nf_conntrack_tuple *tuple)
 {
-	return seq_printf(s, "src=%pI4 dst=%pI4 ",
+	seq_printf(s, "src=%pI4 dst=%pI4 ",
 		   &tuple->src.u3.ip, &tuple->dst.u3.ip);
 }
 
 static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c index 4c48e434bb1f..a460a87e14f8 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | |||
| @@ -94,7 +94,7 @@ static void ct_seq_stop(struct seq_file *s, void *v) | |||
| 94 | } | 94 | } |
| 95 | 95 | ||
| 96 | #ifdef CONFIG_NF_CONNTRACK_SECMARK | 96 | #ifdef CONFIG_NF_CONNTRACK_SECMARK |
| 97 | static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) | 97 | static void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) |
| 98 | { | 98 | { |
| 99 | int ret; | 99 | int ret; |
| 100 | u32 len; | 100 | u32 len; |
| @@ -102,17 +102,15 @@ static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) | |||
| 102 | 102 | ||
| 103 | ret = security_secid_to_secctx(ct->secmark, &secctx, &len); | 103 | ret = security_secid_to_secctx(ct->secmark, &secctx, &len); |
| 104 | if (ret) | 104 | if (ret) |
| 105 | return 0; | 105 | return; |
| 106 | 106 | ||
| 107 | ret = seq_printf(s, "secctx=%s ", secctx); | 107 | seq_printf(s, "secctx=%s ", secctx); |
| 108 | 108 | ||
| 109 | security_release_secctx(secctx, len); | 109 | security_release_secctx(secctx, len); |
| 110 | return ret; | ||
| 111 | } | 110 | } |
| 112 | #else | 111 | #else |
| 113 | static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) | 112 | static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) |
| 114 | { | 113 | { |
| 115 | return 0; | ||
| 116 | } | 114 | } |
| 117 | #endif | 115 | #endif |
| 118 | 116 | ||
| @@ -141,47 +139,52 @@ static int ct_seq_show(struct seq_file *s, void *v) | |||
| 141 | NF_CT_ASSERT(l4proto); | 139 | NF_CT_ASSERT(l4proto); |
| 142 | 140 | ||
| 143 | ret = -ENOSPC; | 141 | ret = -ENOSPC; |
| 144 | if (seq_printf(s, "%-8s %u %ld ", | 142 | seq_printf(s, "%-8s %u %ld ", |
| 145 | l4proto->name, nf_ct_protonum(ct), | 143 | l4proto->name, nf_ct_protonum(ct), |
| 146 | timer_pending(&ct->timeout) | 144 | timer_pending(&ct->timeout) |
| 147 | ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0) | 145 | ? (long)(ct->timeout.expires - jiffies)/HZ : 0); |
| 148 | goto release; | 146 | |
| 147 | if (l4proto->print_conntrack) | ||
| 148 | l4proto->print_conntrack(s, ct); | ||
| 149 | 149 | ||
| 150 | if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct)) | 150 | if (seq_has_overflowed(s)) |
| 151 | goto release; | 151 | goto release; |
| 152 | 152 | ||
| 153 | if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, | 153 | print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, |
| 154 | l3proto, l4proto)) | 154 | l3proto, l4proto); |
| 155 | |||
| 156 | if (seq_has_overflowed(s)) | ||
| 155 | goto release; | 157 | goto release; |
| 156 | 158 | ||
| 157 | if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL)) | 159 | if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL)) |
| 158 | goto release; | 160 | goto release; |
| 159 | 161 | ||
| 160 | if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status))) | 162 | if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status))) |
| 161 | if (seq_printf(s, "[UNREPLIED] ")) | 163 | seq_printf(s, "[UNREPLIED] "); |
| 162 | goto release; | ||
| 163 | 164 | ||
| 164 | if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, | 165 | print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, |
| 165 | l3proto, l4proto)) | 166 | l3proto, l4proto); |
| 167 | |||
| 168 | if (seq_has_overflowed(s)) | ||
| 166 | goto release; | 169 | goto release; |
| 167 | 170 | ||
| 168 | if (seq_print_acct(s, ct, IP_CT_DIR_REPLY)) | 171 | if (seq_print_acct(s, ct, IP_CT_DIR_REPLY)) |
| 169 | goto release; | 172 | goto release; |
| 170 | 173 | ||
| 171 | if (test_bit(IPS_ASSURED_BIT, &ct->status)) | 174 | if (test_bit(IPS_ASSURED_BIT, &ct->status)) |
| 172 | if (seq_printf(s, "[ASSURED] ")) | 175 | seq_printf(s, "[ASSURED] "); |
| 173 | goto release; | ||
| 174 | 176 | ||
| 175 | #ifdef CONFIG_NF_CONNTRACK_MARK | 177 | #ifdef CONFIG_NF_CONNTRACK_MARK |
| 176 | if (seq_printf(s, "mark=%u ", ct->mark)) | 178 | seq_printf(s, "mark=%u ", ct->mark); |
| 177 | goto release; | ||
| 178 | #endif | 179 | #endif |
| 179 | 180 | ||
| 180 | if (ct_show_secctx(s, ct)) | 181 | ct_show_secctx(s, ct); |
| 181 | goto release; | ||
| 182 | 182 | ||
| 183 | if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use))) | 183 | seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)); |
| 184 | |||
| 185 | if (seq_has_overflowed(s)) | ||
| 184 | goto release; | 186 | goto release; |
| 187 | |||
| 185 | ret = 0; | 188 | ret = 0; |
| 186 | release: | 189 | release: |
| 187 | nf_ct_put(ct); | 190 | nf_ct_put(ct); |
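On the caller side, the compat /proc code now batches several seq_printf() calls and asks the seq_file core once whether the buffer overflowed, instead of checking every return value. A condensed sketch of that structure, mirroring the patched ct_seq_show() above (example_ names and the printed values are illustrative):

#include <linux/errno.h>
#include <linux/seq_file.h>

static int example_seq_show(struct seq_file *s, void *v)
{
        int ret = -ENOSPC;

        /* Emit everything first ... */
        seq_printf(s, "proto=%u ", 6);
        seq_printf(s, "mark=%u ", 0);

        /* ... then check for overflow once, as the hunk above does. */
        if (seq_has_overflowed(s))
                goto out;

        ret = 0;
out:
        return ret;
}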
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c index b91b2641adda..80d5554b9a88 100644 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c | |||
| @@ -72,13 +72,13 @@ static bool icmp_invert_tuple(struct nf_conntrack_tuple *tuple, | |||
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | /* Print out the per-protocol part of the tuple. */ | 74 | /* Print out the per-protocol part of the tuple. */ |
| 75 | static int icmp_print_tuple(struct seq_file *s, | 75 | static void icmp_print_tuple(struct seq_file *s, |
| 76 | const struct nf_conntrack_tuple *tuple) | 76 | const struct nf_conntrack_tuple *tuple) |
| 77 | { | 77 | { |
| 78 | return seq_printf(s, "type=%u code=%u id=%u ", | 78 | seq_printf(s, "type=%u code=%u id=%u ", |
| 79 | tuple->dst.u.icmp.type, | 79 | tuple->dst.u.icmp.type, |
| 80 | tuple->dst.u.icmp.code, | 80 | tuple->dst.u.icmp.code, |
| 81 | ntohs(tuple->src.u.icmp.id)); | 81 | ntohs(tuple->src.u.icmp.id)); |
| 82 | } | 82 | } |
| 83 | 83 | ||
| 84 | static unsigned int *icmp_get_timeouts(struct net *net) | 84 | static unsigned int *icmp_get_timeouts(struct net *net) |
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c index c1023c445920..665de06561cd 100644 --- a/net/ipv4/netfilter/nft_masq_ipv4.c +++ b/net/ipv4/netfilter/nft_masq_ipv4.c | |||
| @@ -24,6 +24,7 @@ static void nft_masq_ipv4_eval(const struct nft_expr *expr, | |||
| 24 | struct nf_nat_range range; | 24 | struct nf_nat_range range; |
| 25 | unsigned int verdict; | 25 | unsigned int verdict; |
| 26 | 26 | ||
| 27 | memset(&range, 0, sizeof(range)); | ||
| 27 | range.flags = priv->flags; | 28 | range.flags = priv->flags; |
| 28 | 29 | ||
| 29 | verdict = nf_nat_masquerade_ipv4(pkt->skb, pkt->ops->hooknum, | 30 | verdict = nf_nat_masquerade_ipv4(pkt->skb, pkt->ops->hooknum, |
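The added memset() matters because nf_nat_range sits on the stack and the masquerade eval path only fills in a few fields; anything left untouched would be read as random stack contents by the NAT core. The same defensive pattern, sketched with a hypothetical structure so it stands alone:

#include <string.h>

struct range_example {
        unsigned int flags;
        unsigned int min_proto;
        unsigned int max_proto;
};

/* Zero the whole structure before setting the fields we actually
 * know; consumers of the remaining fields then see 0, not garbage.
 */
static void fill_range(struct range_example *out, unsigned int flags)
{
        struct range_example range;

        memset(&range, 0, sizeof(range));
        range.flags = flags;

        *out = range;
}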
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 57f7c9804139..5d740cccf69e 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c | |||
| @@ -217,6 +217,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) | |||
| 217 | &ipv6_hdr(skb)->daddr)) | 217 | &ipv6_hdr(skb)->daddr)) |
| 218 | continue; | 218 | continue; |
| 219 | #endif | 219 | #endif |
| 220 | } else { | ||
| 221 | continue; | ||
| 220 | } | 222 | } |
| 221 | 223 | ||
| 222 | if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) | 224 | if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) |
| @@ -853,16 +855,8 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
| 853 | if (flags & MSG_OOB) | 855 | if (flags & MSG_OOB) |
| 854 | goto out; | 856 | goto out; |
| 855 | 857 | ||
| 856 | if (flags & MSG_ERRQUEUE) { | 858 | if (flags & MSG_ERRQUEUE) |
| 857 | if (family == AF_INET) { | 859 | return inet_recv_error(sk, msg, len, addr_len); |
| 858 | return ip_recv_error(sk, msg, len, addr_len); | ||
| 859 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 860 | } else if (family == AF_INET6) { | ||
| 861 | return pingv6_ops.ipv6_recv_error(sk, msg, len, | ||
| 862 | addr_len); | ||
| 863 | #endif | ||
| 864 | } | ||
| 865 | } | ||
| 866 | 860 | ||
| 867 | skb = skb_recv_datagram(sk, flags, noblock, &err); | 861 | skb = skb_recv_datagram(sk, flags, noblock, &err); |
| 868 | if (!skb) | 862 | if (!skb) |
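ping_recvmsg() (and tcp_recvmsg() in the next hunk) now hand MSG_ERRQUEUE reads to inet_recv_error(), which picks the per-family error-queue helper instead of open-coding the #ifdef in every caller. Roughly, the dispatch looks like the sketch below; the _sketch suffix marks it as an illustrative reconstruction, the real helper lives in net/ipv4/af_inet.c:

#include <net/ip.h>
#include <net/ping.h>
#include <net/sock.h>

static int inet_recv_error_sketch(struct sock *sk, struct msghdr *msg,
                                  int len, int *addr_len)
{
        if (sk->sk_family == AF_INET)
                return ip_recv_error(sk, msg, len, addr_len);
#if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == AF_INET6)
                return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
#endif
        return -EINVAL;
}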
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 39ec0c379545..38c2bcb8dd5d 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
| @@ -1598,7 +1598,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
| 1598 | u32 urg_hole = 0; | 1598 | u32 urg_hole = 0; |
| 1599 | 1599 | ||
| 1600 | if (unlikely(flags & MSG_ERRQUEUE)) | 1600 | if (unlikely(flags & MSG_ERRQUEUE)) |
| 1601 | return ip_recv_error(sk, msg, len, addr_len); | 1601 | return inet_recv_error(sk, msg, len, addr_len); |
| 1602 | 1602 | ||
| 1603 | if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) && | 1603 | if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) && |
| 1604 | (sk->sk_state == TCP_ESTABLISHED)) | 1604 | (sk->sk_state == TCP_ESTABLISHED)) |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a12b455928e5..d107ee246a1d 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
| @@ -2315,6 +2315,35 @@ static inline bool tcp_packet_delayed(const struct tcp_sock *tp) | |||
| 2315 | 2315 | ||
| 2316 | /* Undo procedures. */ | 2316 | /* Undo procedures. */ |
| 2317 | 2317 | ||
| 2318 | /* We can clear retrans_stamp when there are no retransmissions in the | ||
| 2319 | * window. It would seem that it is trivially available for us in | ||
| 2320 | * tp->retrans_out, however, that kind of assumption doesn't consider | ||
| 2321 | * what will happen if errors occur when sending retransmission for the | ||
| 2322 | * second time. ...It could be that such a segment has only | ||
| 2323 | * TCPCB_EVER_RETRANS set at the present time. It seems that checking | ||
| 2324 | * the head skb is enough except for some reneging corner cases that | ||
| 2325 | * are not worth the effort. | ||
| 2326 | * | ||
| 2327 | * Main reason for all this complexity is the fact that connection dying | ||
| 2328 | * time now depends on the validity of the retrans_stamp, in particular, | ||
| 2329 | * that successive retransmissions of a segment must not advance | ||
| 2330 | * retrans_stamp under any conditions. | ||
| 2331 | */ | ||
| 2332 | static bool tcp_any_retrans_done(const struct sock *sk) | ||
| 2333 | { | ||
| 2334 | const struct tcp_sock *tp = tcp_sk(sk); | ||
| 2335 | struct sk_buff *skb; | ||
| 2336 | |||
| 2337 | if (tp->retrans_out) | ||
| 2338 | return true; | ||
| 2339 | |||
| 2340 | skb = tcp_write_queue_head(sk); | ||
| 2341 | if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) | ||
| 2342 | return true; | ||
| 2343 | |||
| 2344 | return false; | ||
| 2345 | } | ||
| 2346 | |||
| 2318 | #if FASTRETRANS_DEBUG > 1 | 2347 | #if FASTRETRANS_DEBUG > 1 |
| 2319 | static void DBGUNDO(struct sock *sk, const char *msg) | 2348 | static void DBGUNDO(struct sock *sk, const char *msg) |
| 2320 | { | 2349 | { |
| @@ -2410,6 +2439,8 @@ static bool tcp_try_undo_recovery(struct sock *sk) | |||
| 2410 | * is ACKed. For Reno it is MUST to prevent false | 2439 | * is ACKed. For Reno it is MUST to prevent false |
| 2411 | * fast retransmits (RFC2582). SACK TCP is safe. */ | 2440 | * fast retransmits (RFC2582). SACK TCP is safe. */ |
| 2412 | tcp_moderate_cwnd(tp); | 2441 | tcp_moderate_cwnd(tp); |
| 2442 | if (!tcp_any_retrans_done(sk)) | ||
| 2443 | tp->retrans_stamp = 0; | ||
| 2413 | return true; | 2444 | return true; |
| 2414 | } | 2445 | } |
| 2415 | tcp_set_ca_state(sk, TCP_CA_Open); | 2446 | tcp_set_ca_state(sk, TCP_CA_Open); |
| @@ -2430,35 +2461,6 @@ static bool tcp_try_undo_dsack(struct sock *sk) | |||
| 2430 | return false; | 2461 | return false; |
| 2431 | } | 2462 | } |
| 2432 | 2463 | ||
| 2433 | /* We can clear retrans_stamp when there are no retransmissions in the | ||
| 2434 | * window. It would seem that it is trivially available for us in | ||
| 2435 | * tp->retrans_out, however, that kind of assumptions doesn't consider | ||
| 2436 | * what will happen if errors occur when sending retransmission for the | ||
| 2437 | * second time. ...It could the that such segment has only | ||
| 2438 | * TCPCB_EVER_RETRANS set at the present time. It seems that checking | ||
| 2439 | * the head skb is enough except for some reneging corner cases that | ||
| 2440 | * are not worth the effort. | ||
| 2441 | * | ||
| 2442 | * Main reason for all this complexity is the fact that connection dying | ||
| 2443 | * time now depends on the validity of the retrans_stamp, in particular, | ||
| 2444 | * that successive retransmissions of a segment must not advance | ||
| 2445 | * retrans_stamp under any conditions. | ||
| 2446 | */ | ||
| 2447 | static bool tcp_any_retrans_done(const struct sock *sk) | ||
| 2448 | { | ||
| 2449 | const struct tcp_sock *tp = tcp_sk(sk); | ||
| 2450 | struct sk_buff *skb; | ||
| 2451 | |||
| 2452 | if (tp->retrans_out) | ||
| 2453 | return true; | ||
| 2454 | |||
| 2455 | skb = tcp_write_queue_head(sk); | ||
| 2456 | if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) | ||
| 2457 | return true; | ||
| 2458 | |||
| 2459 | return false; | ||
| 2460 | } | ||
| 2461 | |||
| 2462 | /* Undo during loss recovery after partial ACK or using F-RTO. */ | 2464 | /* Undo during loss recovery after partial ACK or using F-RTO. */ |
| 2463 | static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) | 2465 | static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) |
| 2464 | { | 2466 | { |
| @@ -5229,7 +5231,7 @@ slow_path: | |||
| 5229 | if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) | 5231 | if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) |
| 5230 | goto csum_error; | 5232 | goto csum_error; |
| 5231 | 5233 | ||
| 5232 | if (!th->ack && !th->rst) | 5234 | if (!th->ack && !th->rst && !th->syn) |
| 5233 | goto discard; | 5235 | goto discard; |
| 5234 | 5236 | ||
| 5235 | /* | 5237 | /* |
| @@ -5648,7 +5650,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
| 5648 | goto discard; | 5650 | goto discard; |
| 5649 | } | 5651 | } |
| 5650 | 5652 | ||
| 5651 | if (!th->ack && !th->rst) | 5653 | if (!th->ack && !th->rst && !th->syn) |
| 5652 | goto discard; | 5654 | goto discard; |
| 5653 | 5655 | ||
| 5654 | if (!tcp_validate_incoming(sk, skb, th, 0)) | 5656 | if (!tcp_validate_incoming(sk, skb, th, 0)) |
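Two separate things happen in the tcp_input.c hunks: tcp_any_retrans_done() is moved (unchanged) above tcp_try_undo_recovery() so the latter can clear retrans_stamp once nothing retransmitted is left in flight, and the header check in both receive paths is tightened so a segment carrying none of ACK, RST or SYN is dropped early. The tightened predicate amounts to the small check sketched here (struct and function names are illustrative):

#include <stdbool.h>

struct tcp_flags_example {
        unsigned int ack:1, rst:1, syn:1;
};

/* A segment with none of ACK, RST or SYN set has no valid meaning
 * for the receiver, so it is discarded up front.
 */
static bool segment_worth_processing(const struct tcp_flags_example *th)
{
        return th->ack || th->rst || th->syn;
}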
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 9c7d7621466b..147be2024290 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -598,7 +598,10 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) | |||
| 598 | if (th->rst) | 598 | if (th->rst) |
| 599 | return; | 599 | return; |
| 600 | 600 | ||
| 601 | if (skb_rtable(skb)->rt_type != RTN_LOCAL) | 601 | /* If sk not NULL, it means we did a successful lookup and incoming |
| 602 | * route had to be correct. prequeue might have dropped our dst. | ||
| 603 | */ | ||
| 604 | if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL) | ||
| 602 | return; | 605 | return; |
| 603 | 606 | ||
| 604 | /* Swap the send and the receive. */ | 607 | /* Swap the send and the receive. */ |
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c index 1d191357bf88..272327134a1b 100644 --- a/net/ipv4/tcp_memcontrol.c +++ b/net/ipv4/tcp_memcontrol.c | |||
| @@ -9,13 +9,13 @@ | |||
| 9 | int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss) | 9 | int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss) |
| 10 | { | 10 | { |
| 11 | /* | 11 | /* |
| 12 | * The root cgroup does not use res_counters, but rather, | 12 | * The root cgroup does not use page_counters, but rather, |
| 13 | * rely on the data already collected by the network | 13 | * rely on the data already collected by the network |
| 14 | * subsystem | 14 | * subsystem |
| 15 | */ | 15 | */ |
| 16 | struct res_counter *res_parent = NULL; | ||
| 17 | struct cg_proto *cg_proto, *parent_cg; | ||
| 18 | struct mem_cgroup *parent = parent_mem_cgroup(memcg); | 16 | struct mem_cgroup *parent = parent_mem_cgroup(memcg); |
| 17 | struct page_counter *counter_parent = NULL; | ||
| 18 | struct cg_proto *cg_proto, *parent_cg; | ||
| 19 | 19 | ||
| 20 | cg_proto = tcp_prot.proto_cgroup(memcg); | 20 | cg_proto = tcp_prot.proto_cgroup(memcg); |
| 21 | if (!cg_proto) | 21 | if (!cg_proto) |
| @@ -29,9 +29,9 @@ int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss) | |||
| 29 | 29 | ||
| 30 | parent_cg = tcp_prot.proto_cgroup(parent); | 30 | parent_cg = tcp_prot.proto_cgroup(parent); |
| 31 | if (parent_cg) | 31 | if (parent_cg) |
| 32 | res_parent = &parent_cg->memory_allocated; | 32 | counter_parent = &parent_cg->memory_allocated; |
| 33 | 33 | ||
| 34 | res_counter_init(&cg_proto->memory_allocated, res_parent); | 34 | page_counter_init(&cg_proto->memory_allocated, counter_parent); |
| 35 | percpu_counter_init(&cg_proto->sockets_allocated, 0, GFP_KERNEL); | 35 | percpu_counter_init(&cg_proto->sockets_allocated, 0, GFP_KERNEL); |
| 36 | 36 | ||
| 37 | return 0; | 37 | return 0; |
| @@ -50,7 +50,7 @@ void tcp_destroy_cgroup(struct mem_cgroup *memcg) | |||
| 50 | } | 50 | } |
| 51 | EXPORT_SYMBOL(tcp_destroy_cgroup); | 51 | EXPORT_SYMBOL(tcp_destroy_cgroup); |
| 52 | 52 | ||
| 53 | static int tcp_update_limit(struct mem_cgroup *memcg, u64 val) | 53 | static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages) |
| 54 | { | 54 | { |
| 55 | struct cg_proto *cg_proto; | 55 | struct cg_proto *cg_proto; |
| 56 | int i; | 56 | int i; |
| @@ -60,20 +60,17 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val) | |||
| 60 | if (!cg_proto) | 60 | if (!cg_proto) |
| 61 | return -EINVAL; | 61 | return -EINVAL; |
| 62 | 62 | ||
| 63 | if (val > RES_COUNTER_MAX) | 63 | ret = page_counter_limit(&cg_proto->memory_allocated, nr_pages); |
| 64 | val = RES_COUNTER_MAX; | ||
| 65 | |||
| 66 | ret = res_counter_set_limit(&cg_proto->memory_allocated, val); | ||
| 67 | if (ret) | 64 | if (ret) |
| 68 | return ret; | 65 | return ret; |
| 69 | 66 | ||
| 70 | for (i = 0; i < 3; i++) | 67 | for (i = 0; i < 3; i++) |
| 71 | cg_proto->sysctl_mem[i] = min_t(long, val >> PAGE_SHIFT, | 68 | cg_proto->sysctl_mem[i] = min_t(long, nr_pages, |
| 72 | sysctl_tcp_mem[i]); | 69 | sysctl_tcp_mem[i]); |
| 73 | 70 | ||
| 74 | if (val == RES_COUNTER_MAX) | 71 | if (nr_pages == PAGE_COUNTER_MAX) |
| 75 | clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags); | 72 | clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags); |
| 76 | else if (val != RES_COUNTER_MAX) { | 73 | else { |
| 77 | /* | 74 | /* |
| 78 | * The active bit needs to be written after the static_key | 75 | * The active bit needs to be written after the static_key |
| 79 | * update. This is what guarantees that the socket activation | 76 | * update. This is what guarantees that the socket activation |
| @@ -102,11 +99,20 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val) | |||
| 102 | return 0; | 99 | return 0; |
| 103 | } | 100 | } |
| 104 | 101 | ||
| 102 | enum { | ||
| 103 | RES_USAGE, | ||
| 104 | RES_LIMIT, | ||
| 105 | RES_MAX_USAGE, | ||
| 106 | RES_FAILCNT, | ||
| 107 | }; | ||
| 108 | |||
| 109 | static DEFINE_MUTEX(tcp_limit_mutex); | ||
| 110 | |||
| 105 | static ssize_t tcp_cgroup_write(struct kernfs_open_file *of, | 111 | static ssize_t tcp_cgroup_write(struct kernfs_open_file *of, |
| 106 | char *buf, size_t nbytes, loff_t off) | 112 | char *buf, size_t nbytes, loff_t off) |
| 107 | { | 113 | { |
| 108 | struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); | 114 | struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); |
| 109 | unsigned long long val; | 115 | unsigned long nr_pages; |
| 110 | int ret = 0; | 116 | int ret = 0; |
| 111 | 117 | ||
| 112 | buf = strstrip(buf); | 118 | buf = strstrip(buf); |
| @@ -114,10 +120,12 @@ static ssize_t tcp_cgroup_write(struct kernfs_open_file *of, | |||
| 114 | switch (of_cft(of)->private) { | 120 | switch (of_cft(of)->private) { |
| 115 | case RES_LIMIT: | 121 | case RES_LIMIT: |
| 116 | /* see memcontrol.c */ | 122 | /* see memcontrol.c */ |
| 117 | ret = res_counter_memparse_write_strategy(buf, &val); | 123 | ret = page_counter_memparse(buf, &nr_pages); |
| 118 | if (ret) | 124 | if (ret) |
| 119 | break; | 125 | break; |
| 120 | ret = tcp_update_limit(memcg, val); | 126 | mutex_lock(&tcp_limit_mutex); |
| 127 | ret = tcp_update_limit(memcg, nr_pages); | ||
| 128 | mutex_unlock(&tcp_limit_mutex); | ||
| 121 | break; | 129 | break; |
| 122 | default: | 130 | default: |
| 123 | ret = -EINVAL; | 131 | ret = -EINVAL; |
| @@ -126,43 +134,36 @@ static ssize_t tcp_cgroup_write(struct kernfs_open_file *of, | |||
| 126 | return ret ?: nbytes; | 134 | return ret ?: nbytes; |
| 127 | } | 135 | } |
| 128 | 136 | ||
| 129 | static u64 tcp_read_stat(struct mem_cgroup *memcg, int type, u64 default_val) | ||
| 130 | { | ||
| 131 | struct cg_proto *cg_proto; | ||
| 132 | |||
| 133 | cg_proto = tcp_prot.proto_cgroup(memcg); | ||
| 134 | if (!cg_proto) | ||
| 135 | return default_val; | ||
| 136 | |||
| 137 | return res_counter_read_u64(&cg_proto->memory_allocated, type); | ||
| 138 | } | ||
| 139 | |||
| 140 | static u64 tcp_read_usage(struct mem_cgroup *memcg) | ||
| 141 | { | ||
| 142 | struct cg_proto *cg_proto; | ||
| 143 | |||
| 144 | cg_proto = tcp_prot.proto_cgroup(memcg); | ||
| 145 | if (!cg_proto) | ||
| 146 | return atomic_long_read(&tcp_memory_allocated) << PAGE_SHIFT; | ||
| 147 | |||
| 148 | return res_counter_read_u64(&cg_proto->memory_allocated, RES_USAGE); | ||
| 149 | } | ||
| 150 | |||
| 151 | static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft) | 137 | static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft) |
| 152 | { | 138 | { |
| 153 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); | 139 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); |
| 140 | struct cg_proto *cg_proto = tcp_prot.proto_cgroup(memcg); | ||
| 154 | u64 val; | 141 | u64 val; |
| 155 | 142 | ||
| 156 | switch (cft->private) { | 143 | switch (cft->private) { |
| 157 | case RES_LIMIT: | 144 | case RES_LIMIT: |
| 158 | val = tcp_read_stat(memcg, RES_LIMIT, RES_COUNTER_MAX); | 145 | if (!cg_proto) |
| 146 | return PAGE_COUNTER_MAX; | ||
| 147 | val = cg_proto->memory_allocated.limit; | ||
| 148 | val *= PAGE_SIZE; | ||
| 159 | break; | 149 | break; |
| 160 | case RES_USAGE: | 150 | case RES_USAGE: |
| 161 | val = tcp_read_usage(memcg); | 151 | if (!cg_proto) |
| 152 | val = atomic_long_read(&tcp_memory_allocated); | ||
| 153 | else | ||
| 154 | val = page_counter_read(&cg_proto->memory_allocated); | ||
| 155 | val *= PAGE_SIZE; | ||
| 162 | break; | 156 | break; |
| 163 | case RES_FAILCNT: | 157 | case RES_FAILCNT: |
| 158 | if (!cg_proto) | ||
| 159 | return 0; | ||
| 160 | val = cg_proto->memory_allocated.failcnt; | ||
| 161 | break; | ||
| 164 | case RES_MAX_USAGE: | 162 | case RES_MAX_USAGE: |
| 165 | val = tcp_read_stat(memcg, cft->private, 0); | 163 | if (!cg_proto) |
| 164 | return 0; | ||
| 165 | val = cg_proto->memory_allocated.watermark; | ||
| 166 | val *= PAGE_SIZE; | ||
| 166 | break; | 167 | break; |
| 167 | default: | 168 | default: |
| 168 | BUG(); | 169 | BUG(); |
| @@ -183,10 +184,10 @@ static ssize_t tcp_cgroup_reset(struct kernfs_open_file *of, | |||
| 183 | 184 | ||
| 184 | switch (of_cft(of)->private) { | 185 | switch (of_cft(of)->private) { |
| 185 | case RES_MAX_USAGE: | 186 | case RES_MAX_USAGE: |
| 186 | res_counter_reset_max(&cg_proto->memory_allocated); | 187 | page_counter_reset_watermark(&cg_proto->memory_allocated); |
| 187 | break; | 188 | break; |
| 188 | case RES_FAILCNT: | 189 | case RES_FAILCNT: |
| 189 | res_counter_reset_failcnt(&cg_proto->memory_allocated); | 190 | cg_proto->memory_allocated.failcnt = 0; |
| 190 | break; | 191 | break; |
| 191 | } | 192 | } |
| 192 | 193 | ||
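The tcp_memcontrol.c hunks follow the memcg switch from res_counter (u64 byte counters behind accessor calls) to page_counter (page-granular, with limit, watermark and failcnt read directly off the structure). A hedged sketch of the page_counter calls the new code relies on; the setup function and the limit value are invented for illustration:

#include <linux/page_counter.h>
#include <linux/printk.h>

static int example_counter_setup(struct page_counter *counter,
                                 struct page_counter *parent,
                                 unsigned long limit_pages)
{
        int ret;

        /* Hierarchical init: charges propagate to the parent, if any. */
        page_counter_init(counter, parent);

        ret = page_counter_limit(counter, limit_pages);
        if (ret)
                return ret;

        pr_info("usage=%lu watermark=%lu failcnt=%lu (pages)\n",
                page_counter_read(counter),
                counter->watermark,
                counter->failcnt);
        return 0;
}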
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 12c3c8ef3849..0e32d2e1bdbf 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
| @@ -502,11 +502,11 @@ static int ip6gre_rcv(struct sk_buff *skb) | |||
| 502 | 502 | ||
| 503 | skb->protocol = gre_proto; | 503 | skb->protocol = gre_proto; |
| 504 | /* WCCP version 1 and 2 protocol decoding. | 504 | /* WCCP version 1 and 2 protocol decoding. |
| 505 | * - Change protocol to IP | 505 | * - Change protocol to IPv6 |
| 506 | * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header | 506 | * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header |
| 507 | */ | 507 | */ |
| 508 | if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) { | 508 | if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) { |
| 509 | skb->protocol = htons(ETH_P_IP); | 509 | skb->protocol = htons(ETH_P_IPV6); |
| 510 | if ((*(h + offset) & 0xF0) != 0x40) | 510 | if ((*(h + offset) & 0xF0) != 0x40) |
| 511 | offset += 4; | 511 | offset += 4; |
| 512 | } | 512 | } |
| @@ -961,8 +961,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) | |||
| 961 | else | 961 | else |
| 962 | dev->flags &= ~IFF_POINTOPOINT; | 962 | dev->flags &= ~IFF_POINTOPOINT; |
| 963 | 963 | ||
| 964 | dev->iflink = p->link; | ||
| 965 | |||
| 966 | /* Precalculate GRE options length */ | 964 | /* Precalculate GRE options length */ |
| 967 | if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) { | 965 | if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) { |
| 968 | if (t->parms.o_flags&GRE_CSUM) | 966 | if (t->parms.o_flags&GRE_CSUM) |
| @@ -1272,6 +1270,7 @@ static int ip6gre_tunnel_init(struct net_device *dev) | |||
| 1272 | u64_stats_init(&ip6gre_tunnel_stats->syncp); | 1270 | u64_stats_init(&ip6gre_tunnel_stats->syncp); |
| 1273 | } | 1271 | } |
| 1274 | 1272 | ||
| 1273 | dev->iflink = tunnel->parms.link; | ||
| 1275 | 1274 | ||
| 1276 | return 0; | 1275 | return 0; |
| 1277 | } | 1276 | } |
| @@ -1481,6 +1480,8 @@ static int ip6gre_tap_init(struct net_device *dev) | |||
| 1481 | if (!dev->tstats) | 1480 | if (!dev->tstats) |
| 1482 | return -ENOMEM; | 1481 | return -ENOMEM; |
| 1483 | 1482 | ||
| 1483 | dev->iflink = tunnel->parms.link; | ||
| 1484 | |||
| 1484 | return 0; | 1485 | return 0; |
| 1485 | } | 1486 | } |
| 1486 | 1487 | ||
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index a071563a7e6e..01e12d0d8fcc 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c | |||
| @@ -69,7 +69,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | |||
| 69 | int nhoff; | 69 | int nhoff; |
| 70 | 70 | ||
| 71 | if (unlikely(skb_shinfo(skb)->gso_type & | 71 | if (unlikely(skb_shinfo(skb)->gso_type & |
| 72 | ~(SKB_GSO_UDP | | 72 | ~(SKB_GSO_TCPV4 | |
| 73 | SKB_GSO_UDP | | ||
| 73 | SKB_GSO_DODGY | | 74 | SKB_GSO_DODGY | |
| 74 | SKB_GSO_TCP_ECN | | 75 | SKB_GSO_TCP_ECN | |
| 75 | SKB_GSO_GRE | | 76 | SKB_GSO_GRE | |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 9409887fb664..9cb94cfa0ae7 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
| @@ -272,9 +272,6 @@ static int ip6_tnl_create2(struct net_device *dev) | |||
| 272 | int err; | 272 | int err; |
| 273 | 273 | ||
| 274 | t = netdev_priv(dev); | 274 | t = netdev_priv(dev); |
| 275 | err = ip6_tnl_dev_init(dev); | ||
| 276 | if (err < 0) | ||
| 277 | goto out; | ||
| 278 | 275 | ||
| 279 | err = register_netdevice(dev); | 276 | err = register_netdevice(dev); |
| 280 | if (err < 0) | 277 | if (err < 0) |
| @@ -1462,6 +1459,7 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) | |||
| 1462 | 1459 | ||
| 1463 | 1460 | ||
| 1464 | static const struct net_device_ops ip6_tnl_netdev_ops = { | 1461 | static const struct net_device_ops ip6_tnl_netdev_ops = { |
| 1462 | .ndo_init = ip6_tnl_dev_init, | ||
| 1465 | .ndo_uninit = ip6_tnl_dev_uninit, | 1463 | .ndo_uninit = ip6_tnl_dev_uninit, |
| 1466 | .ndo_start_xmit = ip6_tnl_xmit, | 1464 | .ndo_start_xmit = ip6_tnl_xmit, |
| 1467 | .ndo_do_ioctl = ip6_tnl_ioctl, | 1465 | .ndo_do_ioctl = ip6_tnl_ioctl, |
| @@ -1546,16 +1544,10 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev) | |||
| 1546 | struct ip6_tnl *t = netdev_priv(dev); | 1544 | struct ip6_tnl *t = netdev_priv(dev); |
| 1547 | struct net *net = dev_net(dev); | 1545 | struct net *net = dev_net(dev); |
| 1548 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); | 1546 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); |
| 1549 | int err = ip6_tnl_dev_init_gen(dev); | ||
| 1550 | |||
| 1551 | if (err) | ||
| 1552 | return err; | ||
| 1553 | 1547 | ||
| 1554 | t->parms.proto = IPPROTO_IPV6; | 1548 | t->parms.proto = IPPROTO_IPV6; |
| 1555 | dev_hold(dev); | 1549 | dev_hold(dev); |
| 1556 | 1550 | ||
| 1557 | ip6_tnl_link_config(t); | ||
| 1558 | |||
| 1559 | rcu_assign_pointer(ip6n->tnls_wc[0], t); | 1551 | rcu_assign_pointer(ip6n->tnls_wc[0], t); |
| 1560 | return 0; | 1552 | return 0; |
| 1561 | } | 1553 | } |
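The ip6_tunnel change (and the matching vti6 and sit hunks further down) moves per-device initialisation out of the create path and into .ndo_init, so register_netdevice() runs it at the right point and unwinds it via .ndo_uninit if a later registration step fails. The shape of that pattern, with illustrative example_ names:

#include <linux/netdevice.h>

static int example_dev_init(struct net_device *dev)
{
        /* per-device allocation and configuration goes here */
        return 0;
}

static void example_dev_uninit(struct net_device *dev)
{
        /* undo whatever example_dev_init() set up */
}

static const struct net_device_ops example_netdev_ops = {
        .ndo_init   = example_dev_init,
        .ndo_uninit = example_dev_uninit,
};

/* register_netdevice() calls ->ndo_init itself and, should a later
 * registration step fail, calls ->ndo_uninit, so the create path no
 * longer needs a manual init call and error unwind of its own.
 */
static int example_create(struct net_device *dev)
{
        dev->netdev_ops = &example_netdev_ops;
        return register_netdevice(dev);
}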
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c index b04ed72c4542..8db6c98fe218 100644 --- a/net/ipv6/ip6_udp_tunnel.c +++ b/net/ipv6/ip6_udp_tunnel.c | |||
| @@ -79,15 +79,13 @@ int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst, | |||
| 79 | uh->source = src_port; | 79 | uh->source = src_port; |
| 80 | 80 | ||
| 81 | uh->len = htons(skb->len); | 81 | uh->len = htons(skb->len); |
| 82 | uh->check = 0; | ||
| 83 | 82 | ||
| 84 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 83 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
| 85 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | 84 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
| 86 | | IPSKB_REROUTED); | 85 | | IPSKB_REROUTED); |
| 87 | skb_dst_set(skb, dst); | 86 | skb_dst_set(skb, dst); |
| 88 | 87 | ||
| 89 | udp6_set_csum(udp_get_no_check6_tx(sk), skb, &inet6_sk(sk)->saddr, | 88 | udp6_set_csum(udp_get_no_check6_tx(sk), skb, saddr, daddr, skb->len); |
| 90 | &sk->sk_v6_daddr, skb->len); | ||
| 91 | 89 | ||
| 92 | __skb_push(skb, sizeof(*ip6h)); | 90 | __skb_push(skb, sizeof(*ip6h)); |
| 93 | skb_reset_network_header(skb); | 91 | skb_reset_network_header(skb); |
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index d440bb585524..bcda14de7f84 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c | |||
| @@ -172,10 +172,6 @@ static int vti6_tnl_create2(struct net_device *dev) | |||
| 172 | struct vti6_net *ip6n = net_generic(net, vti6_net_id); | 172 | struct vti6_net *ip6n = net_generic(net, vti6_net_id); |
| 173 | int err; | 173 | int err; |
| 174 | 174 | ||
| 175 | err = vti6_dev_init(dev); | ||
| 176 | if (err < 0) | ||
| 177 | goto out; | ||
| 178 | |||
| 179 | err = register_netdevice(dev); | 175 | err = register_netdevice(dev); |
| 180 | if (err < 0) | 176 | if (err < 0) |
| 181 | goto out; | 177 | goto out; |
| @@ -783,6 +779,7 @@ static int vti6_change_mtu(struct net_device *dev, int new_mtu) | |||
| 783 | } | 779 | } |
| 784 | 780 | ||
| 785 | static const struct net_device_ops vti6_netdev_ops = { | 781 | static const struct net_device_ops vti6_netdev_ops = { |
| 782 | .ndo_init = vti6_dev_init, | ||
| 786 | .ndo_uninit = vti6_dev_uninit, | 783 | .ndo_uninit = vti6_dev_uninit, |
| 787 | .ndo_start_xmit = vti6_tnl_xmit, | 784 | .ndo_start_xmit = vti6_tnl_xmit, |
| 788 | .ndo_do_ioctl = vti6_ioctl, | 785 | .ndo_do_ioctl = vti6_ioctl, |
| @@ -852,16 +849,10 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev) | |||
| 852 | struct ip6_tnl *t = netdev_priv(dev); | 849 | struct ip6_tnl *t = netdev_priv(dev); |
| 853 | struct net *net = dev_net(dev); | 850 | struct net *net = dev_net(dev); |
| 854 | struct vti6_net *ip6n = net_generic(net, vti6_net_id); | 851 | struct vti6_net *ip6n = net_generic(net, vti6_net_id); |
| 855 | int err = vti6_dev_init_gen(dev); | ||
| 856 | |||
| 857 | if (err) | ||
| 858 | return err; | ||
| 859 | 852 | ||
| 860 | t->parms.proto = IPPROTO_IPV6; | 853 | t->parms.proto = IPPROTO_IPV6; |
| 861 | dev_hold(dev); | 854 | dev_hold(dev); |
| 862 | 855 | ||
| 863 | vti6_link_config(t); | ||
| 864 | |||
| 865 | rcu_assign_pointer(ip6n->tnls_wc[0], t); | 856 | rcu_assign_pointer(ip6n->tnls_wc[0], t); |
| 866 | return 0; | 857 | return 0; |
| 867 | } | 858 | } |
| @@ -914,6 +905,15 @@ static int vti6_newlink(struct net *src_net, struct net_device *dev, | |||
| 914 | return vti6_tnl_create2(dev); | 905 | return vti6_tnl_create2(dev); |
| 915 | } | 906 | } |
| 916 | 907 | ||
| 908 | static void vti6_dellink(struct net_device *dev, struct list_head *head) | ||
| 909 | { | ||
| 910 | struct net *net = dev_net(dev); | ||
| 911 | struct vti6_net *ip6n = net_generic(net, vti6_net_id); | ||
| 912 | |||
| 913 | if (dev != ip6n->fb_tnl_dev) | ||
| 914 | unregister_netdevice_queue(dev, head); | ||
| 915 | } | ||
| 916 | |||
| 917 | static int vti6_changelink(struct net_device *dev, struct nlattr *tb[], | 917 | static int vti6_changelink(struct net_device *dev, struct nlattr *tb[], |
| 918 | struct nlattr *data[]) | 918 | struct nlattr *data[]) |
| 919 | { | 919 | { |
| @@ -989,6 +989,7 @@ static struct rtnl_link_ops vti6_link_ops __read_mostly = { | |||
| 989 | .setup = vti6_dev_setup, | 989 | .setup = vti6_dev_setup, |
| 990 | .validate = vti6_validate, | 990 | .validate = vti6_validate, |
| 991 | .newlink = vti6_newlink, | 991 | .newlink = vti6_newlink, |
| 992 | .dellink = vti6_dellink, | ||
| 992 | .changelink = vti6_changelink, | 993 | .changelink = vti6_changelink, |
| 993 | .get_size = vti6_get_size, | 994 | .get_size = vti6_get_size, |
| 994 | .fill_info = vti6_fill_info, | 995 | .fill_info = vti6_fill_info, |
| @@ -1029,6 +1030,7 @@ static int __net_init vti6_init_net(struct net *net) | |||
| 1029 | if (!ip6n->fb_tnl_dev) | 1030 | if (!ip6n->fb_tnl_dev) |
| 1030 | goto err_alloc_dev; | 1031 | goto err_alloc_dev; |
| 1031 | dev_net_set(ip6n->fb_tnl_dev, net); | 1032 | dev_net_set(ip6n->fb_tnl_dev, net); |
| 1033 | ip6n->fb_tnl_dev->rtnl_link_ops = &vti6_link_ops; | ||
| 1032 | 1034 | ||
| 1033 | err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev); | 1035 | err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev); |
| 1034 | if (err < 0) | 1036 | if (err < 0) |
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 0171f08325c3..1a01d79b8698 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
| @@ -1439,6 +1439,10 @@ reg_pernet_fail: | |||
| 1439 | 1439 | ||
| 1440 | void ip6_mr_cleanup(void) | 1440 | void ip6_mr_cleanup(void) |
| 1441 | { | 1441 | { |
| 1442 | rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE); | ||
| 1443 | #ifdef CONFIG_IPV6_PIMSM_V2 | ||
| 1444 | inet6_del_protocol(&pim6_protocol, IPPROTO_PIM); | ||
| 1445 | #endif | ||
| 1442 | unregister_netdevice_notifier(&ip6_mr_notifier); | 1446 | unregister_netdevice_notifier(&ip6_mr_notifier); |
| 1443 | unregister_pernet_subsys(&ip6mr_net_ops); | 1447 | unregister_pernet_subsys(&ip6mr_net_ops); |
| 1444 | kmem_cache_destroy(mrt_cachep); | 1448 | kmem_cache_destroy(mrt_cachep); |
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 9648de2b6745..ed2c4e400b46 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
| @@ -1550,7 +1550,7 @@ static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb, | |||
| 1550 | hdr->daddr = *daddr; | 1550 | hdr->daddr = *daddr; |
| 1551 | } | 1551 | } |
| 1552 | 1552 | ||
| 1553 | static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size) | 1553 | static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu) |
| 1554 | { | 1554 | { |
| 1555 | struct net_device *dev = idev->dev; | 1555 | struct net_device *dev = idev->dev; |
| 1556 | struct net *net = dev_net(dev); | 1556 | struct net *net = dev_net(dev); |
| @@ -1561,13 +1561,13 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size) | |||
| 1561 | const struct in6_addr *saddr; | 1561 | const struct in6_addr *saddr; |
| 1562 | int hlen = LL_RESERVED_SPACE(dev); | 1562 | int hlen = LL_RESERVED_SPACE(dev); |
| 1563 | int tlen = dev->needed_tailroom; | 1563 | int tlen = dev->needed_tailroom; |
| 1564 | unsigned int size = mtu + hlen + tlen; | ||
| 1564 | int err; | 1565 | int err; |
| 1565 | u8 ra[8] = { IPPROTO_ICMPV6, 0, | 1566 | u8 ra[8] = { IPPROTO_ICMPV6, 0, |
| 1566 | IPV6_TLV_ROUTERALERT, 2, 0, 0, | 1567 | IPV6_TLV_ROUTERALERT, 2, 0, 0, |
| 1567 | IPV6_TLV_PADN, 0 }; | 1568 | IPV6_TLV_PADN, 0 }; |
| 1568 | 1569 | ||
| 1569 | /* we assume size > sizeof(ra) here */ | 1570 | /* we assume size > sizeof(ra) here */ |
| 1570 | size += hlen + tlen; | ||
| 1571 | /* limit our allocations to order-0 page */ | 1571 | /* limit our allocations to order-0 page */ |
| 1572 | size = min_t(int, size, SKB_MAX_ORDER(0, 0)); | 1572 | size = min_t(int, size, SKB_MAX_ORDER(0, 0)); |
| 1573 | skb = sock_alloc_send_skb(sk, size, 1, &err); | 1573 | skb = sock_alloc_send_skb(sk, size, 1, &err); |
| @@ -1576,6 +1576,8 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size) | |||
| 1576 | return NULL; | 1576 | return NULL; |
| 1577 | 1577 | ||
| 1578 | skb->priority = TC_PRIO_CONTROL; | 1578 | skb->priority = TC_PRIO_CONTROL; |
| 1579 | skb->reserved_tailroom = skb_end_offset(skb) - | ||
| 1580 | min(mtu, skb_end_offset(skb)); | ||
| 1579 | skb_reserve(skb, hlen); | 1581 | skb_reserve(skb, hlen); |
| 1580 | 1582 | ||
| 1581 | if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) { | 1583 | if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) { |
| @@ -1690,8 +1692,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, | |||
| 1690 | return skb; | 1692 | return skb; |
| 1691 | } | 1693 | } |
| 1692 | 1694 | ||
| 1693 | #define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \ | 1695 | #define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0) |
| 1694 | skb_tailroom(skb)) : 0) | ||
| 1695 | 1696 | ||
| 1696 | static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, | 1697 | static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, |
| 1697 | int type, int gdeleted, int sdeleted, int crsend) | 1698 | int type, int gdeleted, int sdeleted, int crsend) |
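mld_newpack() used to treat the requested size as the whole truth; now it asks for mtu plus headroom and tailroom, clamps the allocation to one page, and reserves whatever the allocator handed out beyond the MTU so AVAILABLE() can simply call skb_availroom(). The reservation step is the interesting bit, sketched below under the assumption that skb_availroom() reports linear tailroom minus reserved_tailroom (as it does in this era's skbuff.h):

#include <linux/skbuff.h>

/* Cap the usable tailroom at the MTU: whatever the allocator gave
 * us beyond it is marked reserved, so skb_availroom() never lets a
 * record push the packet past the link MTU.
 */
static void cap_tailroom_to_mtu(struct sk_buff *skb, unsigned int mtu)
{
        skb->reserved_tailroom = skb_end_offset(skb) -
                                 min(mtu, skb_end_offset(skb));
}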
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index 4cbc6b290dd5..b68d0e59c1f8 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | |||
| @@ -60,11 +60,11 @@ static bool ipv6_invert_tuple(struct nf_conntrack_tuple *tuple, | |||
| 60 | return true; | 60 | return true; |
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | static int ipv6_print_tuple(struct seq_file *s, | 63 | static void ipv6_print_tuple(struct seq_file *s, |
| 64 | const struct nf_conntrack_tuple *tuple) | 64 | const struct nf_conntrack_tuple *tuple) |
| 65 | { | 65 | { |
| 66 | return seq_printf(s, "src=%pI6 dst=%pI6 ", | 66 | seq_printf(s, "src=%pI6 dst=%pI6 ", |
| 67 | tuple->src.u3.ip6, tuple->dst.u3.ip6); | 67 | tuple->src.u3.ip6, tuple->dst.u3.ip6); |
| 68 | } | 68 | } |
| 69 | 69 | ||
| 70 | static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, | 70 | static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, |
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index b3807c5cb888..90388d606483 100644 --- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | |||
| @@ -84,13 +84,13 @@ static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple, | |||
| 84 | } | 84 | } |
| 85 | 85 | ||
| 86 | /* Print out the per-protocol part of the tuple. */ | 86 | /* Print out the per-protocol part of the tuple. */ |
| 87 | static int icmpv6_print_tuple(struct seq_file *s, | 87 | static void icmpv6_print_tuple(struct seq_file *s, |
| 88 | const struct nf_conntrack_tuple *tuple) | 88 | const struct nf_conntrack_tuple *tuple) |
| 89 | { | 89 | { |
| 90 | return seq_printf(s, "type=%u code=%u id=%u ", | 90 | seq_printf(s, "type=%u code=%u id=%u ", |
| 91 | tuple->dst.u.icmp.type, | 91 | tuple->dst.u.icmp.type, |
| 92 | tuple->dst.u.icmp.code, | 92 | tuple->dst.u.icmp.code, |
| 93 | ntohs(tuple->src.u.icmp.id)); | 93 | ntohs(tuple->src.u.icmp.id)); |
| 94 | } | 94 | } |
| 95 | 95 | ||
| 96 | static unsigned int *icmpv6_get_timeouts(struct net *net) | 96 | static unsigned int *icmpv6_get_timeouts(struct net *net) |
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c index 8a7ac685076d..529c119cbb14 100644 --- a/net/ipv6/netfilter/nft_masq_ipv6.c +++ b/net/ipv6/netfilter/nft_masq_ipv6.c | |||
| @@ -25,6 +25,7 @@ static void nft_masq_ipv6_eval(const struct nft_expr *expr, | |||
| 25 | struct nf_nat_range range; | 25 | struct nf_nat_range range; |
| 26 | unsigned int verdict; | 26 | unsigned int verdict; |
| 27 | 27 | ||
| 28 | memset(&range, 0, sizeof(range)); | ||
| 28 | range.flags = priv->flags; | 29 | range.flags = priv->flags; |
| 29 | 30 | ||
| 30 | verdict = nf_nat_masquerade_ipv6(pkt->skb, &range, pkt->out); | 31 | verdict = nf_nat_masquerade_ipv6(pkt->skb, &range, pkt->out); |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 58e5b4710127..a24557a1c1d8 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
| @@ -195,10 +195,8 @@ static int ipip6_tunnel_create(struct net_device *dev) | |||
| 195 | struct sit_net *sitn = net_generic(net, sit_net_id); | 195 | struct sit_net *sitn = net_generic(net, sit_net_id); |
| 196 | int err; | 196 | int err; |
| 197 | 197 | ||
| 198 | err = ipip6_tunnel_init(dev); | 198 | memcpy(dev->dev_addr, &t->parms.iph.saddr, 4); |
| 199 | if (err < 0) | 199 | memcpy(dev->broadcast, &t->parms.iph.daddr, 4); |
| 200 | goto out; | ||
| 201 | ipip6_tunnel_clone_6rd(dev, sitn); | ||
| 202 | 200 | ||
| 203 | if ((__force u16)t->parms.i_flags & SIT_ISATAP) | 201 | if ((__force u16)t->parms.i_flags & SIT_ISATAP) |
| 204 | dev->priv_flags |= IFF_ISATAP; | 202 | dev->priv_flags |= IFF_ISATAP; |
| @@ -207,7 +205,8 @@ static int ipip6_tunnel_create(struct net_device *dev) | |||
| 207 | if (err < 0) | 205 | if (err < 0) |
| 208 | goto out; | 206 | goto out; |
| 209 | 207 | ||
| 210 | strcpy(t->parms.name, dev->name); | 208 | ipip6_tunnel_clone_6rd(dev, sitn); |
| 209 | |||
| 211 | dev->rtnl_link_ops = &sit_link_ops; | 210 | dev->rtnl_link_ops = &sit_link_ops; |
| 212 | 211 | ||
| 213 | dev_hold(dev); | 212 | dev_hold(dev); |
| @@ -1330,6 +1329,7 @@ static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) | |||
| 1330 | } | 1329 | } |
| 1331 | 1330 | ||
| 1332 | static const struct net_device_ops ipip6_netdev_ops = { | 1331 | static const struct net_device_ops ipip6_netdev_ops = { |
| 1332 | .ndo_init = ipip6_tunnel_init, | ||
| 1333 | .ndo_uninit = ipip6_tunnel_uninit, | 1333 | .ndo_uninit = ipip6_tunnel_uninit, |
| 1334 | .ndo_start_xmit = sit_tunnel_xmit, | 1334 | .ndo_start_xmit = sit_tunnel_xmit, |
| 1335 | .ndo_do_ioctl = ipip6_tunnel_ioctl, | 1335 | .ndo_do_ioctl = ipip6_tunnel_ioctl, |
| @@ -1378,9 +1378,7 @@ static int ipip6_tunnel_init(struct net_device *dev) | |||
| 1378 | 1378 | ||
| 1379 | tunnel->dev = dev; | 1379 | tunnel->dev = dev; |
| 1380 | tunnel->net = dev_net(dev); | 1380 | tunnel->net = dev_net(dev); |
| 1381 | 1381 | strcpy(tunnel->parms.name, dev->name); | |
| 1382 | memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); | ||
| 1383 | memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); | ||
| 1384 | 1382 | ||
| 1385 | ipip6_tunnel_bind_dev(dev); | 1383 | ipip6_tunnel_bind_dev(dev); |
| 1386 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); | 1384 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); |
| @@ -1405,7 +1403,6 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev) | |||
| 1405 | 1403 | ||
| 1406 | tunnel->dev = dev; | 1404 | tunnel->dev = dev; |
| 1407 | tunnel->net = dev_net(dev); | 1405 | tunnel->net = dev_net(dev); |
| 1408 | strcpy(tunnel->parms.name, dev->name); | ||
| 1409 | 1406 | ||
| 1410 | iph->version = 4; | 1407 | iph->version = 4; |
| 1411 | iph->protocol = IPPROTO_IPV6; | 1408 | iph->protocol = IPPROTO_IPV6; |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index ace29b60813c..dc495ae2ead0 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
| @@ -903,7 +903,10 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) | |||
| 903 | if (th->rst) | 903 | if (th->rst) |
| 904 | return; | 904 | return; |
| 905 | 905 | ||
| 906 | if (!ipv6_unicast_destination(skb)) | 906 | /* If sk not NULL, it means we did a successful lookup and incoming |
| 907 | * route had to be correct. prequeue might have dropped our dst. | ||
| 908 | */ | ||
| 909 | if (!sk && !ipv6_unicast_destination(skb)) | ||
| 907 | return; | 910 | return; |
| 908 | 911 | ||
| 909 | #ifdef CONFIG_TCP_MD5SIG | 912 | #ifdef CONFIG_TCP_MD5SIG |
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 91729b807c7d..1b095ca37aa4 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c | |||
| @@ -1764,6 +1764,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 1764 | struct ipxhdr *ipx = NULL; | 1764 | struct ipxhdr *ipx = NULL; |
| 1765 | struct sk_buff *skb; | 1765 | struct sk_buff *skb; |
| 1766 | int copied, rc; | 1766 | int copied, rc; |
| 1767 | bool locked = true; | ||
| 1767 | 1768 | ||
| 1768 | lock_sock(sk); | 1769 | lock_sock(sk); |
| 1769 | /* put the autobinding in */ | 1770 | /* put the autobinding in */ |
| @@ -1790,6 +1791,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 1790 | if (sock_flag(sk, SOCK_ZAPPED)) | 1791 | if (sock_flag(sk, SOCK_ZAPPED)) |
| 1791 | goto out; | 1792 | goto out; |
| 1792 | 1793 | ||
| 1794 | release_sock(sk); | ||
| 1795 | locked = false; | ||
| 1793 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, | 1796 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, |
| 1794 | flags & MSG_DONTWAIT, &rc); | 1797 | flags & MSG_DONTWAIT, &rc); |
| 1795 | if (!skb) { | 1798 | if (!skb) { |
| @@ -1826,7 +1829,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 1826 | out_free: | 1829 | out_free: |
| 1827 | skb_free_datagram(sk, skb); | 1830 | skb_free_datagram(sk, skb); |
| 1828 | out: | 1831 | out: |
| 1829 | release_sock(sk); | 1832 | if (locked) |
| 1833 | release_sock(sk); | ||
| 1830 | return rc; | 1834 | return rc; |
| 1831 | } | 1835 | } |
| 1832 | 1836 | ||
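ipx_recvmsg() used to call skb_recv_datagram() while still holding the socket lock, so a blocking read could stall every other operation on the socket; the patch drops the lock first and uses a locked flag so the shared exit path releases it only when it is still held. A trimmed sketch of that shape (the checks and error codes are illustrative):

#include <net/sock.h>

static int example_recvmsg(struct sock *sk, int flags)
{
        struct sk_buff *skb;
        bool locked = true;
        int rc = 0;

        lock_sock(sk);
        if (sock_flag(sk, SOCK_ZAPPED)) {
                rc = -ENOTCONN;
                goto out;               /* still holding the lock */
        }

        /* Drop the lock before a receive that may sleep for data. */
        release_sock(sk);
        locked = false;

        skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
                                flags & MSG_DONTWAIT, &rc);
        if (!skb)
                goto out;

        /* ... copy the datagram out ... */
        skb_free_datagram(sk, skb);
out:
        if (locked)
                release_sock(sk);
        return rc;
}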
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c index ec24378caaaf..09d9caaec591 100644 --- a/net/mac80211/aes_ccm.c +++ b/net/mac80211/aes_ccm.c | |||
| @@ -53,6 +53,9 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, | |||
| 53 | __aligned(__alignof__(struct aead_request)); | 53 | __aligned(__alignof__(struct aead_request)); |
| 54 | struct aead_request *aead_req = (void *) aead_req_data; | 54 | struct aead_request *aead_req = (void *) aead_req_data; |
| 55 | 55 | ||
| 56 | if (data_len == 0) | ||
| 57 | return -EINVAL; | ||
| 58 | |||
| 56 | memset(aead_req, 0, sizeof(aead_req_data)); | 59 | memset(aead_req, 0, sizeof(aead_req_data)); |
| 57 | 60 | ||
| 58 | sg_init_one(&pt, data, data_len); | 61 | sg_init_one(&pt, data, data_len); |
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 56b53571c807..509bc157ce55 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
| @@ -805,7 +805,7 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
| 805 | 805 | ||
| 806 | memset(¶ms, 0, sizeof(params)); | 806 | memset(¶ms, 0, sizeof(params)); |
| 807 | memset(&csa_ie, 0, sizeof(csa_ie)); | 807 | memset(&csa_ie, 0, sizeof(csa_ie)); |
| 808 | err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, | 808 | err = ieee80211_parse_ch_switch_ie(sdata, elems, |
| 809 | ifibss->chandef.chan->band, | 809 | ifibss->chandef.chan->band, |
| 810 | sta_flags, ifibss->bssid, &csa_ie); | 810 | sta_flags, ifibss->bssid, &csa_ie); |
| 811 | /* can't switch to destination channel, fail */ | 811 | /* can't switch to destination channel, fail */ |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index c2aaec4dfcf0..8c68da30595d 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
| @@ -1642,7 +1642,6 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, | |||
| 1642 | * ieee80211_parse_ch_switch_ie - parses channel switch IEs | 1642 | * ieee80211_parse_ch_switch_ie - parses channel switch IEs |
| 1643 | * @sdata: the sdata of the interface which has received the frame | 1643 | * @sdata: the sdata of the interface which has received the frame |
| 1644 | * @elems: parsed 802.11 elements received with the frame | 1644 | * @elems: parsed 802.11 elements received with the frame |
| 1645 | * @beacon: indicates if the frame was a beacon or probe response | ||
| 1646 | * @current_band: indicates the current band | 1645 | * @current_band: indicates the current band |
| 1647 | * @sta_flags: contains information about own capabilities and restrictions | 1646 | * @sta_flags: contains information about own capabilities and restrictions |
| 1648 | * to decide which channel switch announcements can be accepted. Only the | 1647 | * to decide which channel switch announcements can be accepted. Only the |
| @@ -1656,7 +1655,7 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, | |||
| 1656 | * Return: 0 on success, <0 on error and >0 if there is nothing to parse. | 1655 | * Return: 0 on success, <0 on error and >0 if there is nothing to parse. |
| 1657 | */ | 1656 | */ |
| 1658 | int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, | 1657 | int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, |
| 1659 | struct ieee802_11_elems *elems, bool beacon, | 1658 | struct ieee802_11_elems *elems, |
| 1660 | enum ieee80211_band current_band, | 1659 | enum ieee80211_band current_band, |
| 1661 | u32 sta_flags, u8 *bssid, | 1660 | u32 sta_flags, u8 *bssid, |
| 1662 | struct ieee80211_csa_ie *csa_ie); | 1661 | struct ieee80211_csa_ie *csa_ie); |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index af237223a8cd..653f5eb07a27 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
| @@ -766,10 +766,12 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
| 766 | int i, flushed; | 766 | int i, flushed; |
| 767 | struct ps_data *ps; | 767 | struct ps_data *ps; |
| 768 | struct cfg80211_chan_def chandef; | 768 | struct cfg80211_chan_def chandef; |
| 769 | bool cancel_scan; | ||
| 769 | 770 | ||
| 770 | clear_bit(SDATA_STATE_RUNNING, &sdata->state); | 771 | clear_bit(SDATA_STATE_RUNNING, &sdata->state); |
| 771 | 772 | ||
| 772 | if (rcu_access_pointer(local->scan_sdata) == sdata) | 773 | cancel_scan = rcu_access_pointer(local->scan_sdata) == sdata; |
| 774 | if (cancel_scan) | ||
| 773 | ieee80211_scan_cancel(local); | 775 | ieee80211_scan_cancel(local); |
| 774 | 776 | ||
| 775 | /* | 777 | /* |
| @@ -898,6 +900,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
| 898 | list_del(&sdata->u.vlan.list); | 900 | list_del(&sdata->u.vlan.list); |
| 899 | mutex_unlock(&local->mtx); | 901 | mutex_unlock(&local->mtx); |
| 900 | RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL); | 902 | RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL); |
| 903 | /* see comment in the default case below */ | ||
| 904 | ieee80211_free_keys(sdata, true); | ||
| 901 | /* no need to tell driver */ | 905 | /* no need to tell driver */ |
| 902 | break; | 906 | break; |
| 903 | case NL80211_IFTYPE_MONITOR: | 907 | case NL80211_IFTYPE_MONITOR: |
| @@ -923,17 +927,16 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
| 923 | /* | 927 | /* |
| 924 | * When we get here, the interface is marked down. | 928 | * When we get here, the interface is marked down. |
| 925 | * Free the remaining keys, if there are any | 929 | * Free the remaining keys, if there are any |
| 926 | * (shouldn't be, except maybe in WDS mode?) | 930 | * (which can happen in AP mode if userspace sets |
| 931 | * keys before the interface is operating, and maybe | ||
| 932 | * also in WDS mode) | ||
| 927 | * | 933 | * |
| 928 | * Force the key freeing to always synchronize_net() | 934 | * Force the key freeing to always synchronize_net() |
| 929 | * to wait for the RX path in case it is using this | 935 | * to wait for the RX path in case it is using this |
| 930 | * interface enqueuing frames * at this very time on | 936 | * interface enqueuing frames at this very time on |
| 931 | * another CPU. | 937 | * another CPU. |
| 932 | */ | 938 | */ |
| 933 | ieee80211_free_keys(sdata, true); | 939 | ieee80211_free_keys(sdata, true); |
| 934 | |||
| 935 | /* fall through */ | ||
| 936 | case NL80211_IFTYPE_AP: | ||
| 937 | skb_queue_purge(&sdata->skb_queue); | 940 | skb_queue_purge(&sdata->skb_queue); |
| 938 | } | 941 | } |
| 939 | 942 | ||
| @@ -991,6 +994,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
| 991 | 994 | ||
| 992 | ieee80211_recalc_ps(local, -1); | 995 | ieee80211_recalc_ps(local, -1); |
| 993 | 996 | ||
| 997 | if (cancel_scan) | ||
| 998 | flush_delayed_work(&local->scan_work); | ||
| 999 | |||
| 994 | if (local->open_count == 0) { | 1000 | if (local->open_count == 0) { |
| 995 | ieee80211_stop_device(local); | 1001 | ieee80211_stop_device(local); |
| 996 | 1002 | ||
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index e9f99c1e3fad..0c8b2a77d312 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
| @@ -874,7 +874,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata, | |||
| 874 | 874 | ||
| 875 | memset(¶ms, 0, sizeof(params)); | 875 | memset(¶ms, 0, sizeof(params)); |
| 876 | memset(&csa_ie, 0, sizeof(csa_ie)); | 876 | memset(&csa_ie, 0, sizeof(csa_ie)); |
| 877 | err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, band, | 877 | err = ieee80211_parse_ch_switch_ie(sdata, elems, band, |
| 878 | sta_flags, sdata->vif.addr, | 878 | sta_flags, sdata->vif.addr, |
| 879 | &csa_ie); | 879 | &csa_ie); |
| 880 | if (err < 0) | 880 | if (err < 0) |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 2de88704278b..93af0f1c9d99 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
| @@ -1072,7 +1072,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
| 1072 | 1072 | ||
| 1073 | current_band = cbss->channel->band; | 1073 | current_band = cbss->channel->band; |
| 1074 | memset(&csa_ie, 0, sizeof(csa_ie)); | 1074 | memset(&csa_ie, 0, sizeof(csa_ie)); |
| 1075 | res = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, current_band, | 1075 | res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band, |
| 1076 | ifmgd->flags, | 1076 | ifmgd->flags, |
| 1077 | ifmgd->associated->bssid, &csa_ie); | 1077 | ifmgd->associated->bssid, &csa_ie); |
| 1078 | if (res < 0) | 1078 | if (res < 0) |
| @@ -1168,7 +1168,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
| 1168 | ieee80211_queue_work(&local->hw, &ifmgd->chswitch_work); | 1168 | ieee80211_queue_work(&local->hw, &ifmgd->chswitch_work); |
| 1169 | else | 1169 | else |
| 1170 | mod_timer(&ifmgd->chswitch_timer, | 1170 | mod_timer(&ifmgd->chswitch_timer, |
| 1171 | TU_TO_EXP_TIME(csa_ie.count * cbss->beacon_interval)); | 1171 | TU_TO_EXP_TIME((csa_ie.count - 1) * |
| 1172 | cbss->beacon_interval)); | ||
| 1172 | } | 1173 | } |
| 1173 | 1174 | ||
| 1174 | static bool | 1175 | static bool |
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index df90ce2db00c..408fd8ab4eef 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c | |||
| @@ -252,19 +252,16 @@ minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u8 index, | |||
| 252 | cur_thr = mi->groups[cur_group].rates[cur_idx].cur_tp; | 252 | cur_thr = mi->groups[cur_group].rates[cur_idx].cur_tp; |
| 253 | cur_prob = mi->groups[cur_group].rates[cur_idx].probability; | 253 | cur_prob = mi->groups[cur_group].rates[cur_idx].probability; |
| 254 | 254 | ||
| 255 | tmp_group = tp_list[j - 1] / MCS_GROUP_RATES; | 255 | do { |
| 256 | tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES; | ||
| 257 | tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp; | ||
| 258 | tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability; | ||
| 259 | |||
| 260 | while (j > 0 && (cur_thr > tmp_thr || | ||
| 261 | (cur_thr == tmp_thr && cur_prob > tmp_prob))) { | ||
| 262 | j--; | ||
| 263 | tmp_group = tp_list[j - 1] / MCS_GROUP_RATES; | 256 | tmp_group = tp_list[j - 1] / MCS_GROUP_RATES; |
| 264 | tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES; | 257 | tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES; |
| 265 | tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp; | 258 | tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp; |
| 266 | tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability; | 259 | tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability; |
| 267 | } | 260 | if (cur_thr < tmp_thr || |
| 261 | (cur_thr == tmp_thr && cur_prob <= tmp_prob)) | ||
| 262 | break; | ||
| 263 | j--; | ||
| 264 | } while (j > 0); | ||
| 268 | 265 | ||
| 269 | if (j < MAX_THR_RATES - 1) { | 266 | if (j < MAX_THR_RATES - 1) { |
| 270 | memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) * | 267 | memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) * |
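The minstrel_ht hunk folds two copies of the tp_list[j - 1] lookup into one do/while that walks j toward the head of the throughput-sorted list while the candidate keeps winning (higher throughput, or equal throughput with higher success probability). Stripped of the driver types, the insertion step looks like this generic sketch, which assumes j > 0 on entry just as the caller guarantees:

/* Find the slot for a candidate in a list kept sorted by throughput
 * (descending), with probability as the tie-breaker.  Assumes j > 0
 * on entry.
 */
static unsigned int find_slot(const unsigned int thr[], const unsigned int prob[],
                              unsigned int j, unsigned int cur_thr,
                              unsigned int cur_prob)
{
        do {
                unsigned int tmp_thr  = thr[j - 1];
                unsigned int tmp_prob = prob[j - 1];

                if (cur_thr < tmp_thr ||
                    (cur_thr == tmp_thr && cur_prob <= tmp_prob))
                        break;
                j--;
        } while (j > 0);

        return j;
}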
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index b04ca4049c95..a37f9af634cb 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
| @@ -1678,11 +1678,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
| 1678 | sc = le16_to_cpu(hdr->seq_ctrl); | 1678 | sc = le16_to_cpu(hdr->seq_ctrl); |
| 1679 | frag = sc & IEEE80211_SCTL_FRAG; | 1679 | frag = sc & IEEE80211_SCTL_FRAG; |
| 1680 | 1680 | ||
| 1681 | if (likely((!ieee80211_has_morefrags(fc) && frag == 0) || | 1681 | if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) |
| 1682 | is_multicast_ether_addr(hdr->addr1))) { | 1682 | goto out; |
| 1683 | /* not fragmented */ | 1683 | |
| 1684 | if (is_multicast_ether_addr(hdr->addr1)) { | ||
| 1685 | rx->local->dot11MulticastReceivedFrameCount++; | ||
| 1684 | goto out; | 1686 | goto out; |
| 1685 | } | 1687 | } |
| 1688 | |||
| 1686 | I802_DEBUG_INC(rx->local->rx_handlers_fragments); | 1689 | I802_DEBUG_INC(rx->local->rx_handlers_fragments); |
| 1687 | 1690 | ||
| 1688 | if (skb_linearize(rx->skb)) | 1691 | if (skb_linearize(rx->skb)) |
| @@ -1775,10 +1778,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
| 1775 | out: | 1778 | out: |
| 1776 | if (rx->sta) | 1779 | if (rx->sta) |
| 1777 | rx->sta->rx_packets++; | 1780 | rx->sta->rx_packets++; |
| 1778 | if (is_multicast_ether_addr(hdr->addr1)) | 1781 | ieee80211_led_rx(rx->local); |
| 1779 | rx->local->dot11MulticastReceivedFrameCount++; | ||
| 1780 | else | ||
| 1781 | ieee80211_led_rx(rx->local); | ||
| 1782 | return RX_CONTINUE; | 1782 | return RX_CONTINUE; |
| 1783 | } | 1783 | } |
| 1784 | 1784 | ||
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c index 6ab009070084..efeba56c913b 100644 --- a/net/mac80211/spectmgmt.c +++ b/net/mac80211/spectmgmt.c | |||
| @@ -22,7 +22,7 @@ | |||
| 22 | #include "wme.h" | 22 | #include "wme.h" |
| 23 | 23 | ||
| 24 | int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, | 24 | int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, |
| 25 | struct ieee802_11_elems *elems, bool beacon, | 25 | struct ieee802_11_elems *elems, |
| 26 | enum ieee80211_band current_band, | 26 | enum ieee80211_band current_band, |
| 27 | u32 sta_flags, u8 *bssid, | 27 | u32 sta_flags, u8 *bssid, |
| 28 | struct ieee80211_csa_ie *csa_ie) | 28 | struct ieee80211_csa_ie *csa_ie) |
| @@ -91,19 +91,13 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, | |||
| 91 | return -EINVAL; | 91 | return -EINVAL; |
| 92 | } | 92 | } |
| 93 | 93 | ||
| 94 | if (!beacon && sec_chan_offs) { | 94 | if (sec_chan_offs) { |
| 95 | secondary_channel_offset = sec_chan_offs->sec_chan_offs; | 95 | secondary_channel_offset = sec_chan_offs->sec_chan_offs; |
| 96 | } else if (beacon && ht_oper) { | ||
| 97 | secondary_channel_offset = | ||
| 98 | ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET; | ||
| 99 | } else if (!(sta_flags & IEEE80211_STA_DISABLE_HT)) { | 96 | } else if (!(sta_flags & IEEE80211_STA_DISABLE_HT)) { |
| 100 | /* If it's not a beacon, HT is enabled and the IE not present, | 97 | /* If the secondary channel offset IE is not present, |
| 101 | * it's 20 MHz, 802.11-2012 8.5.2.6: | 98 | * we can't know what's the post-CSA offset, so the |
| 102 | * This element [the Secondary Channel Offset Element] is | 99 | * best we can do is use 20MHz. |
| 103 | * present when switching to a 40 MHz channel. It may be | 100 | */ |
| 104 | * present when switching to a 20 MHz channel (in which | ||
| 105 | * case the secondary channel offset is set to SCN). | ||
| 106 | */ | ||
| 107 | secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; | 101 | secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; |
| 108 | } | 102 | } |
| 109 | 103 | ||
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 86f9d76b1464..d259da3ce67a 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c | |||
| @@ -1863,6 +1863,12 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len) | |||
| 1863 | if (*op < IP_SET_OP_VERSION) { | 1863 | if (*op < IP_SET_OP_VERSION) { |
| 1864 | /* Check the version at the beginning of operations */ | 1864 | /* Check the version at the beginning of operations */ |
| 1865 | struct ip_set_req_version *req_version = data; | 1865 | struct ip_set_req_version *req_version = data; |
| 1866 | |||
| 1867 | if (*len < sizeof(struct ip_set_req_version)) { | ||
| 1868 | ret = -EINVAL; | ||
| 1869 | goto done; | ||
| 1870 | } | ||
| 1871 | |||
| 1866 | if (req_version->version != IPSET_PROTOCOL) { | 1872 | if (req_version->version != IPSET_PROTOCOL) { |
| 1867 | ret = -EPROTO; | 1873 | ret = -EPROTO; |
| 1868 | goto done; | 1874 | goto done; |
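Editor's note: the ip_set_core hunk adds the missing length check before the version request is interpreted, which is the standard pattern for any getsockopt-style handler that receives a caller-sized buffer. A user-space sketch of the same idea; struct req_version and the error policy here are illustrative, not ipset's ABI.

    #include <errno.h>
    #include <stddef.h>
    #include <string.h>

    struct req_version {                    /* stand-in for ip_set_req_version */
            unsigned int op;
            unsigned int version;
    };

    /* Reject the request unless the caller handed us at least a full header. */
    static int check_version_req(const void *data, size_t len, unsigned int expected)
    {
            struct req_version req;

            if (len < sizeof(req))
                    return -EINVAL;          /* too short to contain the header */

            memcpy(&req, data, sizeof(req)); /* never read past 'len' bytes */
            if (req.version != expected)
                    return -EPROTO;
            return 0;
    }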
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 437a3663ad03..bd90bf8107da 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c | |||
| @@ -846,6 +846,8 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af, | |||
| 846 | new_skb = skb_realloc_headroom(skb, max_headroom); | 846 | new_skb = skb_realloc_headroom(skb, max_headroom); |
| 847 | if (!new_skb) | 847 | if (!new_skb) |
| 848 | goto error; | 848 | goto error; |
| 849 | if (skb->sk) | ||
| 850 | skb_set_owner_w(new_skb, skb->sk); | ||
| 849 | consume_skb(skb); | 851 | consume_skb(skb); |
| 850 | skb = new_skb; | 852 | skb = new_skb; |
| 851 | } | 853 | } |
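Editor's note: skb_realloc_headroom() returns a copy that is not charged to the sending socket, so the ipvs hunk re-attaches the owner with skb_set_owner_w() before the original buffer is consumed; otherwise the socket's write-memory accounting is silently dropped on the tunnel path. A kernel-context sketch of the pattern (not runnable outside a kernel build):

    #include <linux/skbuff.h>               /* kernel build only */

    /* Grow headroom without losing socket write accounting. */
    static struct sk_buff *grow_headroom(struct sk_buff *skb, unsigned int headroom)
    {
            struct sk_buff *nskb = skb_realloc_headroom(skb, headroom);

            if (!nskb)
                    return NULL;            /* caller still owns 'skb' */
            if (skb->sk)                    /* carry the owner over */
                    skb_set_owner_w(nskb, skb->sk);
            consume_skb(skb);               /* drop the old copy */
            return nskb;
    }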
diff --git a/net/netfilter/nf_conntrack_l3proto_generic.c b/net/netfilter/nf_conntrack_l3proto_generic.c index e7eb807fe07d..cf9ace70bece 100644 --- a/net/netfilter/nf_conntrack_l3proto_generic.c +++ b/net/netfilter/nf_conntrack_l3proto_generic.c | |||
| @@ -49,10 +49,9 @@ static bool generic_invert_tuple(struct nf_conntrack_tuple *tuple, | |||
| 49 | return true; | 49 | return true; |
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | static int generic_print_tuple(struct seq_file *s, | 52 | static void generic_print_tuple(struct seq_file *s, |
| 53 | const struct nf_conntrack_tuple *tuple) | 53 | const struct nf_conntrack_tuple *tuple) |
| 54 | { | 54 | { |
| 55 | return 0; | ||
| 56 | } | 55 | } |
| 57 | 56 | ||
| 58 | static int generic_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, | 57 | static int generic_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, |
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index cb372f96f10d..6dd995c7c72b 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c | |||
| @@ -618,17 +618,17 @@ out_invalid: | |||
| 618 | return -NF_ACCEPT; | 618 | return -NF_ACCEPT; |
| 619 | } | 619 | } |
| 620 | 620 | ||
| 621 | static int dccp_print_tuple(struct seq_file *s, | 621 | static void dccp_print_tuple(struct seq_file *s, |
| 622 | const struct nf_conntrack_tuple *tuple) | 622 | const struct nf_conntrack_tuple *tuple) |
| 623 | { | 623 | { |
| 624 | return seq_printf(s, "sport=%hu dport=%hu ", | 624 | seq_printf(s, "sport=%hu dport=%hu ", |
| 625 | ntohs(tuple->src.u.dccp.port), | 625 | ntohs(tuple->src.u.dccp.port), |
| 626 | ntohs(tuple->dst.u.dccp.port)); | 626 | ntohs(tuple->dst.u.dccp.port)); |
| 627 | } | 627 | } |
| 628 | 628 | ||
| 629 | static int dccp_print_conntrack(struct seq_file *s, struct nf_conn *ct) | 629 | static void dccp_print_conntrack(struct seq_file *s, struct nf_conn *ct) |
| 630 | { | 630 | { |
| 631 | return seq_printf(s, "%s ", dccp_state_names[ct->proto.dccp.state]); | 631 | seq_printf(s, "%s ", dccp_state_names[ct->proto.dccp.state]); |
| 632 | } | 632 | } |
| 633 | 633 | ||
| 634 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK) | 634 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK) |
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c index 957c1db66652..60865f110309 100644 --- a/net/netfilter/nf_conntrack_proto_generic.c +++ b/net/netfilter/nf_conntrack_proto_generic.c | |||
| @@ -63,10 +63,9 @@ static bool generic_invert_tuple(struct nf_conntrack_tuple *tuple, | |||
| 63 | } | 63 | } |
| 64 | 64 | ||
| 65 | /* Print out the per-protocol part of the tuple. */ | 65 | /* Print out the per-protocol part of the tuple. */ |
| 66 | static int generic_print_tuple(struct seq_file *s, | 66 | static void generic_print_tuple(struct seq_file *s, |
| 67 | const struct nf_conntrack_tuple *tuple) | 67 | const struct nf_conntrack_tuple *tuple) |
| 68 | { | 68 | { |
| 69 | return 0; | ||
| 70 | } | 69 | } |
| 71 | 70 | ||
| 72 | static unsigned int *generic_get_timeouts(struct net *net) | 71 | static unsigned int *generic_get_timeouts(struct net *net) |
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index d5665739e3b1..7648674f29c3 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c | |||
| @@ -226,20 +226,20 @@ static bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, | |||
| 226 | } | 226 | } |
| 227 | 227 | ||
| 228 | /* print gre part of tuple */ | 228 | /* print gre part of tuple */ |
| 229 | static int gre_print_tuple(struct seq_file *s, | 229 | static void gre_print_tuple(struct seq_file *s, |
| 230 | const struct nf_conntrack_tuple *tuple) | 230 | const struct nf_conntrack_tuple *tuple) |
| 231 | { | 231 | { |
| 232 | return seq_printf(s, "srckey=0x%x dstkey=0x%x ", | 232 | seq_printf(s, "srckey=0x%x dstkey=0x%x ", |
| 233 | ntohs(tuple->src.u.gre.key), | 233 | ntohs(tuple->src.u.gre.key), |
| 234 | ntohs(tuple->dst.u.gre.key)); | 234 | ntohs(tuple->dst.u.gre.key)); |
| 235 | } | 235 | } |
| 236 | 236 | ||
| 237 | /* print private data for conntrack */ | 237 | /* print private data for conntrack */ |
| 238 | static int gre_print_conntrack(struct seq_file *s, struct nf_conn *ct) | 238 | static void gre_print_conntrack(struct seq_file *s, struct nf_conn *ct) |
| 239 | { | 239 | { |
| 240 | return seq_printf(s, "timeout=%u, stream_timeout=%u ", | 240 | seq_printf(s, "timeout=%u, stream_timeout=%u ", |
| 241 | (ct->proto.gre.timeout / HZ), | 241 | (ct->proto.gre.timeout / HZ), |
| 242 | (ct->proto.gre.stream_timeout / HZ)); | 242 | (ct->proto.gre.stream_timeout / HZ)); |
| 243 | } | 243 | } |
| 244 | 244 | ||
| 245 | static unsigned int *gre_get_timeouts(struct net *net) | 245 | static unsigned int *gre_get_timeouts(struct net *net) |
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 1314d33f6bcf..b45da90fad32 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c | |||
| @@ -166,16 +166,16 @@ static bool sctp_invert_tuple(struct nf_conntrack_tuple *tuple, | |||
| 166 | } | 166 | } |
| 167 | 167 | ||
| 168 | /* Print out the per-protocol part of the tuple. */ | 168 | /* Print out the per-protocol part of the tuple. */ |
| 169 | static int sctp_print_tuple(struct seq_file *s, | 169 | static void sctp_print_tuple(struct seq_file *s, |
| 170 | const struct nf_conntrack_tuple *tuple) | 170 | const struct nf_conntrack_tuple *tuple) |
| 171 | { | 171 | { |
| 172 | return seq_printf(s, "sport=%hu dport=%hu ", | 172 | seq_printf(s, "sport=%hu dport=%hu ", |
| 173 | ntohs(tuple->src.u.sctp.port), | 173 | ntohs(tuple->src.u.sctp.port), |
| 174 | ntohs(tuple->dst.u.sctp.port)); | 174 | ntohs(tuple->dst.u.sctp.port)); |
| 175 | } | 175 | } |
| 176 | 176 | ||
| 177 | /* Print out the private part of the conntrack. */ | 177 | /* Print out the private part of the conntrack. */ |
| 178 | static int sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct) | 178 | static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct) |
| 179 | { | 179 | { |
| 180 | enum sctp_conntrack state; | 180 | enum sctp_conntrack state; |
| 181 | 181 | ||
| @@ -183,7 +183,7 @@ static int sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct) | |||
| 183 | state = ct->proto.sctp.state; | 183 | state = ct->proto.sctp.state; |
| 184 | spin_unlock_bh(&ct->lock); | 184 | spin_unlock_bh(&ct->lock); |
| 185 | 185 | ||
| 186 | return seq_printf(s, "%s ", sctp_conntrack_names[state]); | 186 | seq_printf(s, "%s ", sctp_conntrack_names[state]); |
| 187 | } | 187 | } |
| 188 | 188 | ||
| 189 | #define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count) \ | 189 | #define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count) \ |
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index d87b6423ffb2..5caa0c41bf26 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c | |||
| @@ -302,16 +302,16 @@ static bool tcp_invert_tuple(struct nf_conntrack_tuple *tuple, | |||
| 302 | } | 302 | } |
| 303 | 303 | ||
| 304 | /* Print out the per-protocol part of the tuple. */ | 304 | /* Print out the per-protocol part of the tuple. */ |
| 305 | static int tcp_print_tuple(struct seq_file *s, | 305 | static void tcp_print_tuple(struct seq_file *s, |
| 306 | const struct nf_conntrack_tuple *tuple) | 306 | const struct nf_conntrack_tuple *tuple) |
| 307 | { | 307 | { |
| 308 | return seq_printf(s, "sport=%hu dport=%hu ", | 308 | seq_printf(s, "sport=%hu dport=%hu ", |
| 309 | ntohs(tuple->src.u.tcp.port), | 309 | ntohs(tuple->src.u.tcp.port), |
| 310 | ntohs(tuple->dst.u.tcp.port)); | 310 | ntohs(tuple->dst.u.tcp.port)); |
| 311 | } | 311 | } |
| 312 | 312 | ||
| 313 | /* Print out the private part of the conntrack. */ | 313 | /* Print out the private part of the conntrack. */ |
| 314 | static int tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct) | 314 | static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct) |
| 315 | { | 315 | { |
| 316 | enum tcp_conntrack state; | 316 | enum tcp_conntrack state; |
| 317 | 317 | ||
| @@ -319,7 +319,7 @@ static int tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct) | |||
| 319 | state = ct->proto.tcp.state; | 319 | state = ct->proto.tcp.state; |
| 320 | spin_unlock_bh(&ct->lock); | 320 | spin_unlock_bh(&ct->lock); |
| 321 | 321 | ||
| 322 | return seq_printf(s, "%s ", tcp_conntrack_names[state]); | 322 | seq_printf(s, "%s ", tcp_conntrack_names[state]); |
| 323 | } | 323 | } |
| 324 | 324 | ||
| 325 | static unsigned int get_conntrack_index(const struct tcphdr *tcph) | 325 | static unsigned int get_conntrack_index(const struct tcphdr *tcph) |
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index 9d7721cbce4b..6957281ffee5 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c | |||
| @@ -63,12 +63,12 @@ static bool udp_invert_tuple(struct nf_conntrack_tuple *tuple, | |||
| 63 | } | 63 | } |
| 64 | 64 | ||
| 65 | /* Print out the per-protocol part of the tuple. */ | 65 | /* Print out the per-protocol part of the tuple. */ |
| 66 | static int udp_print_tuple(struct seq_file *s, | 66 | static void udp_print_tuple(struct seq_file *s, |
| 67 | const struct nf_conntrack_tuple *tuple) | 67 | const struct nf_conntrack_tuple *tuple) |
| 68 | { | 68 | { |
| 69 | return seq_printf(s, "sport=%hu dport=%hu ", | 69 | seq_printf(s, "sport=%hu dport=%hu ", |
| 70 | ntohs(tuple->src.u.udp.port), | 70 | ntohs(tuple->src.u.udp.port), |
| 71 | ntohs(tuple->dst.u.udp.port)); | 71 | ntohs(tuple->dst.u.udp.port)); |
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | static unsigned int *udp_get_timeouts(struct net *net) | 74 | static unsigned int *udp_get_timeouts(struct net *net) |
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c index 2750e6c69f82..c5903d1649f9 100644 --- a/net/netfilter/nf_conntrack_proto_udplite.c +++ b/net/netfilter/nf_conntrack_proto_udplite.c | |||
| @@ -71,12 +71,12 @@ static bool udplite_invert_tuple(struct nf_conntrack_tuple *tuple, | |||
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | /* Print out the per-protocol part of the tuple. */ | 73 | /* Print out the per-protocol part of the tuple. */ |
| 74 | static int udplite_print_tuple(struct seq_file *s, | 74 | static void udplite_print_tuple(struct seq_file *s, |
| 75 | const struct nf_conntrack_tuple *tuple) | 75 | const struct nf_conntrack_tuple *tuple) |
| 76 | { | 76 | { |
| 77 | return seq_printf(s, "sport=%hu dport=%hu ", | 77 | seq_printf(s, "sport=%hu dport=%hu ", |
| 78 | ntohs(tuple->src.u.udp.port), | 78 | ntohs(tuple->src.u.udp.port), |
| 79 | ntohs(tuple->dst.u.udp.port)); | 79 | ntohs(tuple->dst.u.udp.port)); |
| 80 | } | 80 | } |
| 81 | 81 | ||
| 82 | static unsigned int *udplite_get_timeouts(struct net *net) | 82 | static unsigned int *udplite_get_timeouts(struct net *net) |
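Editor's note: the run of conntrack print-helper hunks above (l3proto_generic, dccp, generic, gre, sctp, tcp, udp, udplite) all apply one API change: seq_printf() no longer reports overflow through its return value, so the per-protocol printers become void and any caller that still wants an error derives it from seq_has_overflowed(). A kernel-context sketch of the resulting shape; show_one() and its -ENOSPC convention are illustrative.

    /* Emit unconditionally, decide on overflow once. */
    static void proto_print_tuple(struct seq_file *s,
                                  const struct nf_conntrack_tuple *tuple)
    {
            seq_printf(s, "sport=%hu dport=%hu ",
                       ntohs(tuple->src.u.tcp.port),
                       ntohs(tuple->dst.u.tcp.port));
    }

    static int show_one(struct seq_file *s, const struct nf_conntrack_tuple *tuple)
    {
            proto_print_tuple(s, tuple);
            return seq_has_overflowed(s) ? -ENOSPC : 0;
    }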
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index cf65a1e040dd..fc823fa5dcf5 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c | |||
| @@ -36,12 +36,13 @@ | |||
| 36 | MODULE_LICENSE("GPL"); | 36 | MODULE_LICENSE("GPL"); |
| 37 | 37 | ||
| 38 | #ifdef CONFIG_NF_CONNTRACK_PROCFS | 38 | #ifdef CONFIG_NF_CONNTRACK_PROCFS |
| 39 | int | 39 | void |
| 40 | print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, | 40 | print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, |
| 41 | const struct nf_conntrack_l3proto *l3proto, | 41 | const struct nf_conntrack_l3proto *l3proto, |
| 42 | const struct nf_conntrack_l4proto *l4proto) | 42 | const struct nf_conntrack_l4proto *l4proto) |
| 43 | { | 43 | { |
| 44 | return l3proto->print_tuple(s, tuple) || l4proto->print_tuple(s, tuple); | 44 | l3proto->print_tuple(s, tuple); |
| 45 | l4proto->print_tuple(s, tuple); | ||
| 45 | } | 46 | } |
| 46 | EXPORT_SYMBOL_GPL(print_tuple); | 47 | EXPORT_SYMBOL_GPL(print_tuple); |
| 47 | 48 | ||
| @@ -119,7 +120,7 @@ static void ct_seq_stop(struct seq_file *s, void *v) | |||
| 119 | } | 120 | } |
| 120 | 121 | ||
| 121 | #ifdef CONFIG_NF_CONNTRACK_SECMARK | 122 | #ifdef CONFIG_NF_CONNTRACK_SECMARK |
| 122 | static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) | 123 | static void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) |
| 123 | { | 124 | { |
| 124 | int ret; | 125 | int ret; |
| 125 | u32 len; | 126 | u32 len; |
| @@ -127,22 +128,20 @@ static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) | |||
| 127 | 128 | ||
| 128 | ret = security_secid_to_secctx(ct->secmark, &secctx, &len); | 129 | ret = security_secid_to_secctx(ct->secmark, &secctx, &len); |
| 129 | if (ret) | 130 | if (ret) |
| 130 | return 0; | 131 | return; |
| 131 | 132 | ||
| 132 | ret = seq_printf(s, "secctx=%s ", secctx); | 133 | seq_printf(s, "secctx=%s ", secctx); |
| 133 | 134 | ||
| 134 | security_release_secctx(secctx, len); | 135 | security_release_secctx(secctx, len); |
| 135 | return ret; | ||
| 136 | } | 136 | } |
| 137 | #else | 137 | #else |
| 138 | static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) | 138 | static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) |
| 139 | { | 139 | { |
| 140 | return 0; | ||
| 141 | } | 140 | } |
| 142 | #endif | 141 | #endif |
| 143 | 142 | ||
| 144 | #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP | 143 | #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP |
| 145 | static int ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct) | 144 | static void ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct) |
| 146 | { | 145 | { |
| 147 | struct ct_iter_state *st = s->private; | 146 | struct ct_iter_state *st = s->private; |
| 148 | struct nf_conn_tstamp *tstamp; | 147 | struct nf_conn_tstamp *tstamp; |
| @@ -156,16 +155,15 @@ static int ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct) | |||
| 156 | else | 155 | else |
| 157 | delta_time = 0; | 156 | delta_time = 0; |
| 158 | 157 | ||
| 159 | return seq_printf(s, "delta-time=%llu ", | 158 | seq_printf(s, "delta-time=%llu ", |
| 160 | (unsigned long long)delta_time); | 159 | (unsigned long long)delta_time); |
| 161 | } | 160 | } |
| 162 | return 0; | 161 | return; |
| 163 | } | 162 | } |
| 164 | #else | 163 | #else |
| 165 | static inline int | 164 | static inline void |
| 166 | ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct) | 165 | ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct) |
| 167 | { | 166 | { |
| 168 | return 0; | ||
| 169 | } | 167 | } |
| 170 | #endif | 168 | #endif |
| 171 | 169 | ||
| @@ -192,55 +190,54 @@ static int ct_seq_show(struct seq_file *s, void *v) | |||
| 192 | NF_CT_ASSERT(l4proto); | 190 | NF_CT_ASSERT(l4proto); |
| 193 | 191 | ||
| 194 | ret = -ENOSPC; | 192 | ret = -ENOSPC; |
| 195 | if (seq_printf(s, "%-8s %u %-8s %u %ld ", | 193 | seq_printf(s, "%-8s %u %-8s %u %ld ", |
| 196 | l3proto->name, nf_ct_l3num(ct), | 194 | l3proto->name, nf_ct_l3num(ct), |
| 197 | l4proto->name, nf_ct_protonum(ct), | 195 | l4proto->name, nf_ct_protonum(ct), |
| 198 | timer_pending(&ct->timeout) | 196 | timer_pending(&ct->timeout) |
| 199 | ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0) | 197 | ? (long)(ct->timeout.expires - jiffies)/HZ : 0); |
| 200 | goto release; | ||
| 201 | 198 | ||
| 202 | if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct)) | 199 | if (l4proto->print_conntrack) |
| 203 | goto release; | 200 | l4proto->print_conntrack(s, ct); |
| 201 | |||
| 202 | print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, | ||
| 203 | l3proto, l4proto); | ||
| 204 | 204 | ||
| 205 | if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, | 205 | if (seq_has_overflowed(s)) |
| 206 | l3proto, l4proto)) | ||
| 207 | goto release; | 206 | goto release; |
| 208 | 207 | ||
| 209 | if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL)) | 208 | if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL)) |
| 210 | goto release; | 209 | goto release; |
| 211 | 210 | ||
| 212 | if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status))) | 211 | if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status))) |
| 213 | if (seq_printf(s, "[UNREPLIED] ")) | 212 | seq_printf(s, "[UNREPLIED] "); |
| 214 | goto release; | ||
| 215 | 213 | ||
| 216 | if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, | 214 | print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, |
| 217 | l3proto, l4proto)) | 215 | l3proto, l4proto); |
| 218 | goto release; | ||
| 219 | 216 | ||
| 220 | if (seq_print_acct(s, ct, IP_CT_DIR_REPLY)) | 217 | if (seq_print_acct(s, ct, IP_CT_DIR_REPLY)) |
| 221 | goto release; | 218 | goto release; |
| 222 | 219 | ||
| 223 | if (test_bit(IPS_ASSURED_BIT, &ct->status)) | 220 | if (test_bit(IPS_ASSURED_BIT, &ct->status)) |
| 224 | if (seq_printf(s, "[ASSURED] ")) | 221 | seq_printf(s, "[ASSURED] "); |
| 225 | goto release; | ||
| 226 | 222 | ||
| 227 | #if defined(CONFIG_NF_CONNTRACK_MARK) | 223 | if (seq_has_overflowed(s)) |
| 228 | if (seq_printf(s, "mark=%u ", ct->mark)) | ||
| 229 | goto release; | 224 | goto release; |
| 225 | |||
| 226 | #if defined(CONFIG_NF_CONNTRACK_MARK) | ||
| 227 | seq_printf(s, "mark=%u ", ct->mark); | ||
| 230 | #endif | 228 | #endif |
| 231 | 229 | ||
| 232 | if (ct_show_secctx(s, ct)) | 230 | ct_show_secctx(s, ct); |
| 233 | goto release; | ||
| 234 | 231 | ||
| 235 | #ifdef CONFIG_NF_CONNTRACK_ZONES | 232 | #ifdef CONFIG_NF_CONNTRACK_ZONES |
| 236 | if (seq_printf(s, "zone=%u ", nf_ct_zone(ct))) | 233 | seq_printf(s, "zone=%u ", nf_ct_zone(ct)); |
| 237 | goto release; | ||
| 238 | #endif | 234 | #endif |
| 239 | 235 | ||
| 240 | if (ct_show_delta_time(s, ct)) | 236 | ct_show_delta_time(s, ct); |
| 241 | goto release; | 237 | |
| 238 | seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)); | ||
| 242 | 239 | ||
| 243 | if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use))) | 240 | if (seq_has_overflowed(s)) |
| 244 | goto release; | 241 | goto release; |
| 245 | 242 | ||
| 246 | ret = 0; | 243 | ret = 0; |
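Editor's note: ct_seq_show drops the per-seq_printf "goto release" dance above and instead prints in batches, testing seq_has_overflowed() at a few checkpoints; once the buffer is full there is no point in formatting further, and the seq_file core re-runs the entry with a larger buffer anyway. A stripped-down sketch of that checkpoint style; struct foo and its fields are hypothetical.

    /* Kernel-context sketch of a ->show() callback using overflow checkpoints. */
    static int foo_seq_show(struct seq_file *s, void *v)
    {
            const struct foo *f = v;                /* hypothetical record */

            seq_printf(s, "%-8s %u ", f->name, f->id);
            if (f->assured)
                    seq_printf(s, "[ASSURED] ");
            seq_printf(s, "use=%u\n", f->refcnt);

            if (seq_has_overflowed(s))              /* single check, not per call */
                    return -ENOSPC;
            return 0;
    }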
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index d7197649dba6..6e3b9117db1f 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c | |||
| @@ -294,19 +294,19 @@ static int seq_show(struct seq_file *s, void *v) | |||
| 294 | { | 294 | { |
| 295 | loff_t *pos = v; | 295 | loff_t *pos = v; |
| 296 | const struct nf_logger *logger; | 296 | const struct nf_logger *logger; |
| 297 | int i, ret; | 297 | int i; |
| 298 | struct net *net = seq_file_net(s); | 298 | struct net *net = seq_file_net(s); |
| 299 | 299 | ||
| 300 | logger = rcu_dereference_protected(net->nf.nf_loggers[*pos], | 300 | logger = rcu_dereference_protected(net->nf.nf_loggers[*pos], |
| 301 | lockdep_is_held(&nf_log_mutex)); | 301 | lockdep_is_held(&nf_log_mutex)); |
| 302 | 302 | ||
| 303 | if (!logger) | 303 | if (!logger) |
| 304 | ret = seq_printf(s, "%2lld NONE (", *pos); | 304 | seq_printf(s, "%2lld NONE (", *pos); |
| 305 | else | 305 | else |
| 306 | ret = seq_printf(s, "%2lld %s (", *pos, logger->name); | 306 | seq_printf(s, "%2lld %s (", *pos, logger->name); |
| 307 | 307 | ||
| 308 | if (ret < 0) | 308 | if (seq_has_overflowed(s)) |
| 309 | return ret; | 309 | return -ENOSPC; |
| 310 | 310 | ||
| 311 | for (i = 0; i < NF_LOG_TYPE_MAX; i++) { | 311 | for (i = 0; i < NF_LOG_TYPE_MAX; i++) { |
| 312 | if (loggers[*pos][i] == NULL) | 312 | if (loggers[*pos][i] == NULL) |
| @@ -314,17 +314,19 @@ static int seq_show(struct seq_file *s, void *v) | |||
| 314 | 314 | ||
| 315 | logger = rcu_dereference_protected(loggers[*pos][i], | 315 | logger = rcu_dereference_protected(loggers[*pos][i], |
| 316 | lockdep_is_held(&nf_log_mutex)); | 316 | lockdep_is_held(&nf_log_mutex)); |
| 317 | ret = seq_printf(s, "%s", logger->name); | 317 | seq_printf(s, "%s", logger->name); |
| 318 | if (ret < 0) | 318 | if (i == 0 && loggers[*pos][i + 1] != NULL) |
| 319 | return ret; | 319 | seq_printf(s, ","); |
| 320 | if (i == 0 && loggers[*pos][i + 1] != NULL) { | 320 | |
| 321 | ret = seq_printf(s, ","); | 321 | if (seq_has_overflowed(s)) |
| 322 | if (ret < 0) | 322 | return -ENOSPC; |
| 323 | return ret; | ||
| 324 | } | ||
| 325 | } | 323 | } |
| 326 | 324 | ||
| 327 | return seq_printf(s, ")\n"); | 325 | seq_printf(s, ")\n"); |
| 326 | |||
| 327 | if (seq_has_overflowed(s)) | ||
| 328 | return -ENOSPC; | ||
| 329 | return 0; | ||
| 328 | } | 330 | } |
| 329 | 331 | ||
| 330 | static const struct seq_operations nflog_seq_ops = { | 332 | static const struct seq_operations nflog_seq_ops = { |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 11ab4b078f3b..66e8425dbfe7 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
| @@ -3484,13 +3484,8 @@ static void nft_chain_commit_update(struct nft_trans *trans) | |||
| 3484 | } | 3484 | } |
| 3485 | } | 3485 | } |
| 3486 | 3486 | ||
| 3487 | /* Schedule objects for release via rcu to make sure no packets are accessing | 3487 | static void nf_tables_commit_release(struct nft_trans *trans) |
| 3488 | * removed rules. | ||
| 3489 | */ | ||
| 3490 | static void nf_tables_commit_release_rcu(struct rcu_head *rt) | ||
| 3491 | { | 3488 | { |
| 3492 | struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head); | ||
| 3493 | |||
| 3494 | switch (trans->msg_type) { | 3489 | switch (trans->msg_type) { |
| 3495 | case NFT_MSG_DELTABLE: | 3490 | case NFT_MSG_DELTABLE: |
| 3496 | nf_tables_table_destroy(&trans->ctx); | 3491 | nf_tables_table_destroy(&trans->ctx); |
| @@ -3612,10 +3607,11 @@ static int nf_tables_commit(struct sk_buff *skb) | |||
| 3612 | } | 3607 | } |
| 3613 | } | 3608 | } |
| 3614 | 3609 | ||
| 3610 | synchronize_rcu(); | ||
| 3611 | |||
| 3615 | list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { | 3612 | list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { |
| 3616 | list_del(&trans->list); | 3613 | list_del(&trans->list); |
| 3617 | trans->ctx.nla = NULL; | 3614 | nf_tables_commit_release(trans); |
| 3618 | call_rcu(&trans->rcu_head, nf_tables_commit_release_rcu); | ||
| 3619 | } | 3615 | } |
| 3620 | 3616 | ||
| 3621 | nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); | 3617 | nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); |
| @@ -3623,13 +3619,8 @@ static int nf_tables_commit(struct sk_buff *skb) | |||
| 3623 | return 0; | 3619 | return 0; |
| 3624 | } | 3620 | } |
| 3625 | 3621 | ||
| 3626 | /* Schedule objects for release via rcu to make sure no packets are accessing | 3622 | static void nf_tables_abort_release(struct nft_trans *trans) |
| 3627 | * aborted rules. | ||
| 3628 | */ | ||
| 3629 | static void nf_tables_abort_release_rcu(struct rcu_head *rt) | ||
| 3630 | { | 3623 | { |
| 3631 | struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head); | ||
| 3632 | |||
| 3633 | switch (trans->msg_type) { | 3624 | switch (trans->msg_type) { |
| 3634 | case NFT_MSG_NEWTABLE: | 3625 | case NFT_MSG_NEWTABLE: |
| 3635 | nf_tables_table_destroy(&trans->ctx); | 3626 | nf_tables_table_destroy(&trans->ctx); |
| @@ -3725,11 +3716,12 @@ static int nf_tables_abort(struct sk_buff *skb) | |||
| 3725 | } | 3716 | } |
| 3726 | } | 3717 | } |
| 3727 | 3718 | ||
| 3719 | synchronize_rcu(); | ||
| 3720 | |||
| 3728 | list_for_each_entry_safe_reverse(trans, next, | 3721 | list_for_each_entry_safe_reverse(trans, next, |
| 3729 | &net->nft.commit_list, list) { | 3722 | &net->nft.commit_list, list) { |
| 3730 | list_del(&trans->list); | 3723 | list_del(&trans->list); |
| 3731 | trans->ctx.nla = NULL; | 3724 | nf_tables_abort_release(trans); |
| 3732 | call_rcu(&trans->rcu_head, nf_tables_abort_release_rcu); | ||
| 3733 | } | 3725 | } |
| 3734 | 3726 | ||
| 3735 | return 0; | 3727 | return 0; |
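Editor's note: both the commit and abort paths above replace one call_rcu() per transaction with a single synchronize_rcu() before the release loop; after that grace period no packet-path reader can still see the unlinked objects, so they can be torn down synchronously and the rcu_head plus the ctx.nla clearing go away. The pattern reduced to its core, as a kernel-context sketch; commit_list stands in for net->nft.commit_list and trans_release() for the real destructors.

    /* Wait once for all current RCU readers, then free everything that was
     * unlinked before the grace period -- no per-object rcu_head needed. */
    synchronize_rcu();

    list_for_each_entry_safe(trans, next, &commit_list, list) {
            list_del(&trans->list);
            trans_release(trans);           /* plain, synchronous teardown */
    }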
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 6c5a915cfa75..13c2e17bbe27 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c | |||
| @@ -47,6 +47,8 @@ static const int nfnl_group2type[NFNLGRP_MAX+1] = { | |||
| 47 | [NFNLGRP_CONNTRACK_EXP_NEW] = NFNL_SUBSYS_CTNETLINK_EXP, | 47 | [NFNLGRP_CONNTRACK_EXP_NEW] = NFNL_SUBSYS_CTNETLINK_EXP, |
| 48 | [NFNLGRP_CONNTRACK_EXP_UPDATE] = NFNL_SUBSYS_CTNETLINK_EXP, | 48 | [NFNLGRP_CONNTRACK_EXP_UPDATE] = NFNL_SUBSYS_CTNETLINK_EXP, |
| 49 | [NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP, | 49 | [NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP, |
| 50 | [NFNLGRP_NFTABLES] = NFNL_SUBSYS_NFTABLES, | ||
| 51 | [NFNLGRP_ACCT_QUOTA] = NFNL_SUBSYS_ACCT, | ||
| 50 | }; | 52 | }; |
| 51 | 53 | ||
| 52 | void nfnl_lock(__u8 subsys_id) | 54 | void nfnl_lock(__u8 subsys_id) |
| @@ -464,7 +466,12 @@ static void nfnetlink_rcv(struct sk_buff *skb) | |||
| 464 | static int nfnetlink_bind(int group) | 466 | static int nfnetlink_bind(int group) |
| 465 | { | 467 | { |
| 466 | const struct nfnetlink_subsystem *ss; | 468 | const struct nfnetlink_subsystem *ss; |
| 467 | int type = nfnl_group2type[group]; | 469 | int type; |
| 470 | |||
| 471 | if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX) | ||
| 472 | return -EINVAL; | ||
| 473 | |||
| 474 | type = nfnl_group2type[group]; | ||
| 468 | 475 | ||
| 469 | rcu_read_lock(); | 476 | rcu_read_lock(); |
| 470 | ss = nfnetlink_get_subsys(type); | 477 | ss = nfnetlink_get_subsys(type); |
| @@ -514,6 +521,9 @@ static int __init nfnetlink_init(void) | |||
| 514 | { | 521 | { |
| 515 | int i; | 522 | int i; |
| 516 | 523 | ||
| 524 | for (i = NFNLGRP_NONE + 1; i <= NFNLGRP_MAX; i++) | ||
| 525 | BUG_ON(nfnl_group2type[i] == NFNL_SUBSYS_NONE); | ||
| 526 | |||
| 517 | for (i=0; i<NFNL_SUBSYS_COUNT; i++) | 527 | for (i=0; i<NFNL_SUBSYS_COUNT; i++) |
| 518 | mutex_init(&table[i].mutex); | 528 | mutex_init(&table[i].mutex); |
| 519 | 529 | ||
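Editor's note: nfnetlink_bind() receives the group index from the socket layer, so it now range-checks it before indexing nfnl_group2type[], and init asserts at boot that every slot of the table is populated, which is what the two added NFNLGRP entries guarantee. The lookup side as a standalone sketch; group_to_type(), group2type[] and max_group are illustrative.

    /* Reject out-of-range ids instead of reading past the table. */
    static int group_to_type(int group, const int *group2type, int max_group)
    {
            if (group <= 0 || group > max_group)
                    return -1;              /* caller maps this to -EINVAL */
            return group2type[group];
    }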
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 7c60ccd61a3e..0db8515e76da 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c | |||
| @@ -1242,12 +1242,13 @@ static int seq_show(struct seq_file *s, void *v) | |||
| 1242 | { | 1242 | { |
| 1243 | const struct nfqnl_instance *inst = v; | 1243 | const struct nfqnl_instance *inst = v; |
| 1244 | 1244 | ||
| 1245 | return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n", | 1245 | seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n", |
| 1246 | inst->queue_num, | 1246 | inst->queue_num, |
| 1247 | inst->peer_portid, inst->queue_total, | 1247 | inst->peer_portid, inst->queue_total, |
| 1248 | inst->copy_mode, inst->copy_range, | 1248 | inst->copy_mode, inst->copy_range, |
| 1249 | inst->queue_dropped, inst->queue_user_dropped, | 1249 | inst->queue_dropped, inst->queue_user_dropped, |
| 1250 | inst->id_sequence, 1); | 1250 | inst->id_sequence, 1); |
| 1251 | return seq_has_overflowed(s); | ||
| 1251 | } | 1252 | } |
| 1252 | 1253 | ||
| 1253 | static const struct seq_operations nfqnl_seq_ops = { | 1254 | static const struct seq_operations nfqnl_seq_ops = { |
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 9d6d6f60a80f..265e190f2218 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c | |||
| @@ -21,45 +21,17 @@ | |||
| 21 | #include <linux/netfilter_ipv6/ip6_tables.h> | 21 | #include <linux/netfilter_ipv6/ip6_tables.h> |
| 22 | #include <net/netfilter/nf_tables.h> | 22 | #include <net/netfilter/nf_tables.h> |
| 23 | 23 | ||
| 24 | static const struct { | ||
| 25 | const char *name; | ||
| 26 | u8 type; | ||
| 27 | } table_to_chaintype[] = { | ||
| 28 | { "filter", NFT_CHAIN_T_DEFAULT }, | ||
| 29 | { "raw", NFT_CHAIN_T_DEFAULT }, | ||
| 30 | { "security", NFT_CHAIN_T_DEFAULT }, | ||
| 31 | { "mangle", NFT_CHAIN_T_ROUTE }, | ||
| 32 | { "nat", NFT_CHAIN_T_NAT }, | ||
| 33 | { }, | ||
| 34 | }; | ||
| 35 | |||
| 36 | static int nft_compat_table_to_chaintype(const char *table) | ||
| 37 | { | ||
| 38 | int i; | ||
| 39 | |||
| 40 | for (i = 0; table_to_chaintype[i].name != NULL; i++) { | ||
| 41 | if (strcmp(table_to_chaintype[i].name, table) == 0) | ||
| 42 | return table_to_chaintype[i].type; | ||
| 43 | } | ||
| 44 | |||
| 45 | return -1; | ||
| 46 | } | ||
| 47 | |||
| 48 | static int nft_compat_chain_validate_dependency(const char *tablename, | 24 | static int nft_compat_chain_validate_dependency(const char *tablename, |
| 49 | const struct nft_chain *chain) | 25 | const struct nft_chain *chain) |
| 50 | { | 26 | { |
| 51 | enum nft_chain_type type; | ||
| 52 | const struct nft_base_chain *basechain; | 27 | const struct nft_base_chain *basechain; |
| 53 | 28 | ||
| 54 | if (!tablename || !(chain->flags & NFT_BASE_CHAIN)) | 29 | if (!tablename || !(chain->flags & NFT_BASE_CHAIN)) |
| 55 | return 0; | 30 | return 0; |
| 56 | 31 | ||
| 57 | type = nft_compat_table_to_chaintype(tablename); | ||
| 58 | if (type < 0) | ||
| 59 | return -EINVAL; | ||
| 60 | |||
| 61 | basechain = nft_base_chain(chain); | 32 | basechain = nft_base_chain(chain); |
| 62 | if (basechain->type->type != type) | 33 | if (strcmp(tablename, "nat") == 0 && |
| 34 | basechain->type->type != NFT_CHAIN_T_NAT) | ||
| 63 | return -EINVAL; | 35 | return -EINVAL; |
| 64 | 36 | ||
| 65 | return 0; | 37 | return 0; |
| @@ -117,7 +89,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, | |||
| 117 | struct xt_target *target, void *info, | 89 | struct xt_target *target, void *info, |
| 118 | union nft_entry *entry, u8 proto, bool inv) | 90 | union nft_entry *entry, u8 proto, bool inv) |
| 119 | { | 91 | { |
| 120 | par->net = &init_net; | 92 | par->net = ctx->net; |
| 121 | par->table = ctx->table->name; | 93 | par->table = ctx->table->name; |
| 122 | switch (ctx->afi->family) { | 94 | switch (ctx->afi->family) { |
| 123 | case AF_INET: | 95 | case AF_INET: |
| @@ -324,7 +296,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, | |||
| 324 | struct xt_match *match, void *info, | 296 | struct xt_match *match, void *info, |
| 325 | union nft_entry *entry, u8 proto, bool inv) | 297 | union nft_entry *entry, u8 proto, bool inv) |
| 326 | { | 298 | { |
| 327 | par->net = &init_net; | 299 | par->net = ctx->net; |
| 328 | par->table = ctx->table->name; | 300 | par->table = ctx->table->name; |
| 329 | switch (ctx->afi->family) { | 301 | switch (ctx->afi->family) { |
| 330 | case AF_INET: | 302 | case AF_INET: |
| @@ -374,7 +346,7 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
| 374 | union nft_entry e = {}; | 346 | union nft_entry e = {}; |
| 375 | int ret; | 347 | int ret; |
| 376 | 348 | ||
| 377 | ret = nft_compat_chain_validate_dependency(match->name, ctx->chain); | 349 | ret = nft_compat_chain_validate_dependency(match->table, ctx->chain); |
| 378 | if (ret < 0) | 350 | if (ret < 0) |
| 379 | goto err; | 351 | goto err; |
| 380 | 352 | ||
| @@ -448,7 +420,7 @@ static int nft_match_validate(const struct nft_ctx *ctx, | |||
| 448 | if (!(hook_mask & match->hooks)) | 420 | if (!(hook_mask & match->hooks)) |
| 449 | return -EINVAL; | 421 | return -EINVAL; |
| 450 | 422 | ||
| 451 | ret = nft_compat_chain_validate_dependency(match->name, | 423 | ret = nft_compat_chain_validate_dependency(match->table, |
| 452 | ctx->chain); | 424 | ctx->chain); |
| 453 | if (ret < 0) | 425 | if (ret < 0) |
| 454 | return ret; | 426 | return ret; |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 133eb4772f12..51a459c3c649 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
| @@ -947,9 +947,10 @@ static int xt_table_seq_show(struct seq_file *seq, void *v) | |||
| 947 | { | 947 | { |
| 948 | struct xt_table *table = list_entry(v, struct xt_table, list); | 948 | struct xt_table *table = list_entry(v, struct xt_table, list); |
| 949 | 949 | ||
| 950 | if (strlen(table->name)) | 950 | if (strlen(table->name)) { |
| 951 | return seq_printf(seq, "%s\n", table->name); | 951 | seq_printf(seq, "%s\n", table->name); |
| 952 | else | 952 | return seq_has_overflowed(seq); |
| 953 | } else | ||
| 953 | return 0; | 954 | return 0; |
| 954 | } | 955 | } |
| 955 | 956 | ||
| @@ -1086,8 +1087,10 @@ static int xt_match_seq_show(struct seq_file *seq, void *v) | |||
| 1086 | if (trav->curr == trav->head) | 1087 | if (trav->curr == trav->head) |
| 1087 | return 0; | 1088 | return 0; |
| 1088 | match = list_entry(trav->curr, struct xt_match, list); | 1089 | match = list_entry(trav->curr, struct xt_match, list); |
| 1089 | return (*match->name == '\0') ? 0 : | 1090 | if (*match->name == '\0') |
| 1090 | seq_printf(seq, "%s\n", match->name); | 1091 | return 0; |
| 1092 | seq_printf(seq, "%s\n", match->name); | ||
| 1093 | return seq_has_overflowed(seq); | ||
| 1091 | } | 1094 | } |
| 1092 | return 0; | 1095 | return 0; |
| 1093 | } | 1096 | } |
| @@ -1139,8 +1142,10 @@ static int xt_target_seq_show(struct seq_file *seq, void *v) | |||
| 1139 | if (trav->curr == trav->head) | 1142 | if (trav->curr == trav->head) |
| 1140 | return 0; | 1143 | return 0; |
| 1141 | target = list_entry(trav->curr, struct xt_target, list); | 1144 | target = list_entry(trav->curr, struct xt_target, list); |
| 1142 | return (*target->name == '\0') ? 0 : | 1145 | if (*target->name == '\0') |
| 1143 | seq_printf(seq, "%s\n", target->name); | 1146 | return 0; |
| 1147 | seq_printf(seq, "%s\n", target->name); | ||
| 1148 | return seq_has_overflowed(seq); | ||
| 1144 | } | 1149 | } |
| 1145 | return 0; | 1150 | return 0; |
| 1146 | } | 1151 | } |
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 05fbc2a0be46..178696852bde 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c | |||
| @@ -789,7 +789,6 @@ static void dl_seq_stop(struct seq_file *s, void *v) | |||
| 789 | static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, | 789 | static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, |
| 790 | struct seq_file *s) | 790 | struct seq_file *s) |
| 791 | { | 791 | { |
| 792 | int res; | ||
| 793 | const struct xt_hashlimit_htable *ht = s->private; | 792 | const struct xt_hashlimit_htable *ht = s->private; |
| 794 | 793 | ||
| 795 | spin_lock(&ent->lock); | 794 | spin_lock(&ent->lock); |
| @@ -798,33 +797,32 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, | |||
| 798 | 797 | ||
| 799 | switch (family) { | 798 | switch (family) { |
| 800 | case NFPROTO_IPV4: | 799 | case NFPROTO_IPV4: |
| 801 | res = seq_printf(s, "%ld %pI4:%u->%pI4:%u %u %u %u\n", | 800 | seq_printf(s, "%ld %pI4:%u->%pI4:%u %u %u %u\n", |
| 802 | (long)(ent->expires - jiffies)/HZ, | 801 | (long)(ent->expires - jiffies)/HZ, |
| 803 | &ent->dst.ip.src, | 802 | &ent->dst.ip.src, |
| 804 | ntohs(ent->dst.src_port), | 803 | ntohs(ent->dst.src_port), |
| 805 | &ent->dst.ip.dst, | 804 | &ent->dst.ip.dst, |
| 806 | ntohs(ent->dst.dst_port), | 805 | ntohs(ent->dst.dst_port), |
| 807 | ent->rateinfo.credit, ent->rateinfo.credit_cap, | 806 | ent->rateinfo.credit, ent->rateinfo.credit_cap, |
| 808 | ent->rateinfo.cost); | 807 | ent->rateinfo.cost); |
| 809 | break; | 808 | break; |
| 810 | #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) | 809 | #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) |
| 811 | case NFPROTO_IPV6: | 810 | case NFPROTO_IPV6: |
| 812 | res = seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n", | 811 | seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n", |
| 813 | (long)(ent->expires - jiffies)/HZ, | 812 | (long)(ent->expires - jiffies)/HZ, |
| 814 | &ent->dst.ip6.src, | 813 | &ent->dst.ip6.src, |
| 815 | ntohs(ent->dst.src_port), | 814 | ntohs(ent->dst.src_port), |
| 816 | &ent->dst.ip6.dst, | 815 | &ent->dst.ip6.dst, |
| 817 | ntohs(ent->dst.dst_port), | 816 | ntohs(ent->dst.dst_port), |
| 818 | ent->rateinfo.credit, ent->rateinfo.credit_cap, | 817 | ent->rateinfo.credit, ent->rateinfo.credit_cap, |
| 819 | ent->rateinfo.cost); | 818 | ent->rateinfo.cost); |
| 820 | break; | 819 | break; |
| 821 | #endif | 820 | #endif |
| 822 | default: | 821 | default: |
| 823 | BUG(); | 822 | BUG(); |
| 824 | res = 0; | ||
| 825 | } | 823 | } |
| 826 | spin_unlock(&ent->lock); | 824 | spin_unlock(&ent->lock); |
| 827 | return res; | 825 | return seq_has_overflowed(s); |
| 828 | } | 826 | } |
| 829 | 827 | ||
| 830 | static int dl_seq_show(struct seq_file *s, void *v) | 828 | static int dl_seq_show(struct seq_file *s, void *v) |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index f1de72de273e..0007b8180397 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
| @@ -1440,7 +1440,7 @@ static void netlink_unbind(int group, long unsigned int groups, | |||
| 1440 | return; | 1440 | return; |
| 1441 | 1441 | ||
| 1442 | for (undo = 0; undo < group; undo++) | 1442 | for (undo = 0; undo < group; undo++) |
| 1443 | if (test_bit(group, &groups)) | 1443 | if (test_bit(undo, &groups)) |
| 1444 | nlk->netlink_unbind(undo); | 1444 | nlk->netlink_unbind(undo); |
| 1445 | } | 1445 | } |
| 1446 | 1446 | ||
| @@ -1492,7 +1492,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, | |||
| 1492 | netlink_insert(sk, net, nladdr->nl_pid) : | 1492 | netlink_insert(sk, net, nladdr->nl_pid) : |
| 1493 | netlink_autobind(sock); | 1493 | netlink_autobind(sock); |
| 1494 | if (err) { | 1494 | if (err) { |
| 1495 | netlink_unbind(nlk->ngroups - 1, groups, nlk); | 1495 | netlink_unbind(nlk->ngroups, groups, nlk); |
| 1496 | return err; | 1496 | return err; |
| 1497 | } | 1497 | } |
| 1498 | } | 1498 | } |
| @@ -2509,6 +2509,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module, | |||
| 2509 | nl_table[unit].module = module; | 2509 | nl_table[unit].module = module; |
| 2510 | if (cfg) { | 2510 | if (cfg) { |
| 2511 | nl_table[unit].bind = cfg->bind; | 2511 | nl_table[unit].bind = cfg->bind; |
| 2512 | nl_table[unit].unbind = cfg->unbind; | ||
| 2512 | nl_table[unit].flags = cfg->flags; | 2513 | nl_table[unit].flags = cfg->flags; |
| 2513 | if (cfg->compare) | 2514 | if (cfg->compare) |
| 2514 | nl_table[unit].compare = cfg->compare; | 2515 | nl_table[unit].compare = cfg->compare; |
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 006886dbee36..8c4229b11c34 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c | |||
| @@ -246,11 +246,11 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto, | |||
| 246 | { | 246 | { |
| 247 | int transport_len = skb->len - skb_transport_offset(skb); | 247 | int transport_len = skb->len - skb_transport_offset(skb); |
| 248 | 248 | ||
| 249 | if (l4_proto == IPPROTO_TCP) { | 249 | if (l4_proto == NEXTHDR_TCP) { |
| 250 | if (likely(transport_len >= sizeof(struct tcphdr))) | 250 | if (likely(transport_len >= sizeof(struct tcphdr))) |
| 251 | inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb, | 251 | inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb, |
| 252 | addr, new_addr, 1); | 252 | addr, new_addr, 1); |
| 253 | } else if (l4_proto == IPPROTO_UDP) { | 253 | } else if (l4_proto == NEXTHDR_UDP) { |
| 254 | if (likely(transport_len >= sizeof(struct udphdr))) { | 254 | if (likely(transport_len >= sizeof(struct udphdr))) { |
| 255 | struct udphdr *uh = udp_hdr(skb); | 255 | struct udphdr *uh = udp_hdr(skb); |
| 256 | 256 | ||
| @@ -261,6 +261,10 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto, | |||
| 261 | uh->check = CSUM_MANGLED_0; | 261 | uh->check = CSUM_MANGLED_0; |
| 262 | } | 262 | } |
| 263 | } | 263 | } |
| 264 | } else if (l4_proto == NEXTHDR_ICMP) { | ||
| 265 | if (likely(transport_len >= sizeof(struct icmp6hdr))) | ||
| 266 | inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum, | ||
| 267 | skb, addr, new_addr, 1); | ||
| 264 | } | 268 | } |
| 265 | } | 269 | } |
| 266 | 270 | ||
| @@ -722,8 +726,6 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, | |||
| 722 | 726 | ||
| 723 | case OVS_ACTION_ATTR_SAMPLE: | 727 | case OVS_ACTION_ATTR_SAMPLE: |
| 724 | err = sample(dp, skb, key, a); | 728 | err = sample(dp, skb, key, a); |
| 725 | if (unlikely(err)) /* skb already freed. */ | ||
| 726 | return err; | ||
| 727 | break; | 729 | break; |
| 728 | } | 730 | } |
| 729 | 731 | ||
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index e6d7255183eb..f9e556b56086 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
| @@ -1265,7 +1265,7 @@ static size_t ovs_dp_cmd_msg_size(void) | |||
| 1265 | return msgsize; | 1265 | return msgsize; |
| 1266 | } | 1266 | } |
| 1267 | 1267 | ||
| 1268 | /* Called with ovs_mutex or RCU read lock. */ | 1268 | /* Called with ovs_mutex. */ |
| 1269 | static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, | 1269 | static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, |
| 1270 | u32 portid, u32 seq, u32 flags, u8 cmd) | 1270 | u32 portid, u32 seq, u32 flags, u8 cmd) |
| 1271 | { | 1271 | { |
| @@ -1555,7 +1555,7 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info) | |||
| 1555 | if (!reply) | 1555 | if (!reply) |
| 1556 | return -ENOMEM; | 1556 | return -ENOMEM; |
| 1557 | 1557 | ||
| 1558 | rcu_read_lock(); | 1558 | ovs_lock(); |
| 1559 | dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); | 1559 | dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); |
| 1560 | if (IS_ERR(dp)) { | 1560 | if (IS_ERR(dp)) { |
| 1561 | err = PTR_ERR(dp); | 1561 | err = PTR_ERR(dp); |
| @@ -1564,12 +1564,12 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info) | |||
| 1564 | err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, | 1564 | err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, |
| 1565 | info->snd_seq, 0, OVS_DP_CMD_NEW); | 1565 | info->snd_seq, 0, OVS_DP_CMD_NEW); |
| 1566 | BUG_ON(err < 0); | 1566 | BUG_ON(err < 0); |
| 1567 | rcu_read_unlock(); | 1567 | ovs_unlock(); |
| 1568 | 1568 | ||
| 1569 | return genlmsg_reply(reply, info); | 1569 | return genlmsg_reply(reply, info); |
| 1570 | 1570 | ||
| 1571 | err_unlock_free: | 1571 | err_unlock_free: |
| 1572 | rcu_read_unlock(); | 1572 | ovs_unlock(); |
| 1573 | kfree_skb(reply); | 1573 | kfree_skb(reply); |
| 1574 | return err; | 1574 | return err; |
| 1575 | } | 1575 | } |
| @@ -1581,8 +1581,8 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 1581 | int skip = cb->args[0]; | 1581 | int skip = cb->args[0]; |
| 1582 | int i = 0; | 1582 | int i = 0; |
| 1583 | 1583 | ||
| 1584 | rcu_read_lock(); | 1584 | ovs_lock(); |
| 1585 | list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) { | 1585 | list_for_each_entry(dp, &ovs_net->dps, list_node) { |
| 1586 | if (i >= skip && | 1586 | if (i >= skip && |
| 1587 | ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid, | 1587 | ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid, |
| 1588 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | 1588 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
| @@ -1590,7 +1590,7 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 1590 | break; | 1590 | break; |
| 1591 | i++; | 1591 | i++; |
| 1592 | } | 1592 | } |
| 1593 | rcu_read_unlock(); | 1593 | ovs_unlock(); |
| 1594 | 1594 | ||
| 1595 | cb->args[0] = i; | 1595 | cb->args[0] = i; |
| 1596 | 1596 | ||
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 939bcb32100f..089b195c064a 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c | |||
| @@ -145,7 +145,7 @@ static bool match_validate(const struct sw_flow_match *match, | |||
| 145 | if (match->key->eth.type == htons(ETH_P_ARP) | 145 | if (match->key->eth.type == htons(ETH_P_ARP) |
| 146 | || match->key->eth.type == htons(ETH_P_RARP)) { | 146 | || match->key->eth.type == htons(ETH_P_RARP)) { |
| 147 | key_expected |= 1 << OVS_KEY_ATTR_ARP; | 147 | key_expected |= 1 << OVS_KEY_ATTR_ARP; |
| 148 | if (match->mask && (match->mask->key.eth.type == htons(0xffff))) | 148 | if (match->mask && (match->mask->key.tp.src == htons(0xff))) |
| 149 | mask_allowed |= 1 << OVS_KEY_ATTR_ARP; | 149 | mask_allowed |= 1 << OVS_KEY_ATTR_ARP; |
| 150 | } | 150 | } |
| 151 | 151 | ||
| @@ -689,6 +689,13 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, | |||
| 689 | ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX); | 689 | ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX); |
| 690 | return -EINVAL; | 690 | return -EINVAL; |
| 691 | } | 691 | } |
| 692 | |||
| 693 | if (!is_mask && ipv6_key->ipv6_label & htonl(0xFFF00000)) { | ||
| 694 | OVS_NLERR("IPv6 flow label %x is out of range (max=%x).\n", | ||
| 695 | ntohl(ipv6_key->ipv6_label), (1 << 20) - 1); | ||
| 696 | return -EINVAL; | ||
| 697 | } | ||
| 698 | |||
| 692 | SW_FLOW_KEY_PUT(match, ipv6.label, | 699 | SW_FLOW_KEY_PUT(match, ipv6.label, |
| 693 | ipv6_key->ipv6_label, is_mask); | 700 | ipv6_key->ipv6_label, is_mask); |
| 694 | SW_FLOW_KEY_PUT(match, ip.proto, | 701 | SW_FLOW_KEY_PUT(match, ip.proto, |
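Editor's note: the flow_netlink hunk rejects IPv6 keys whose flow label does not fit in 20 bits; the label travels in network byte order inside the attribute, so the upper-12-bit mask is applied through htonl() to keep the test endian-safe, and the check is skipped for masks (is_mask), where all-ones wildcards are legitimate. A user-space sketch of the same test:

    #include <stdint.h>
    #include <arpa/inet.h>          /* htonl() for the illustration */

    /* Nonzero if 'label_be' (network byte order) does not fit in the
     * 20-bit IPv6 flow label field. */
    static int flow_label_out_of_range(uint32_t label_be)
    {
            return (label_be & htonl(0xFFF00000)) != 0;
    }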
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 87d20f48ff06..586229a14ad3 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -378,7 +378,7 @@ static void unregister_prot_hook(struct sock *sk, bool sync) | |||
| 378 | __unregister_prot_hook(sk, sync); | 378 | __unregister_prot_hook(sk, sync); |
| 379 | } | 379 | } |
| 380 | 380 | ||
| 381 | static inline __pure struct page *pgv_to_page(void *addr) | 381 | static inline struct page * __pure pgv_to_page(void *addr) |
| 382 | { | 382 | { |
| 383 | if (is_vmalloc_addr(addr)) | 383 | if (is_vmalloc_addr(addr)) |
| 384 | return vmalloc_to_page(addr); | 384 | return vmalloc_to_page(addr); |
| @@ -2444,13 +2444,15 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
| 2444 | goto out_unlock; | 2444 | goto out_unlock; |
| 2445 | 2445 | ||
| 2446 | if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && | 2446 | if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && |
| 2447 | (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 > | 2447 | (__virtio16_to_cpu(false, vnet_hdr.csum_start) + |
| 2448 | vnet_hdr.hdr_len)) | 2448 | __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2 > |
| 2449 | vnet_hdr.hdr_len = vnet_hdr.csum_start + | 2449 | __virtio16_to_cpu(false, vnet_hdr.hdr_len))) |
| 2450 | vnet_hdr.csum_offset + 2; | 2450 | vnet_hdr.hdr_len = __cpu_to_virtio16(false, |
| 2451 | __virtio16_to_cpu(false, vnet_hdr.csum_start) + | ||
| 2452 | __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2); | ||
| 2451 | 2453 | ||
| 2452 | err = -EINVAL; | 2454 | err = -EINVAL; |
| 2453 | if (vnet_hdr.hdr_len > len) | 2455 | if (__virtio16_to_cpu(false, vnet_hdr.hdr_len) > len) |
| 2454 | goto out_unlock; | 2456 | goto out_unlock; |
| 2455 | 2457 | ||
| 2456 | if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { | 2458 | if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { |
| @@ -2492,7 +2494,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
| 2492 | err = -ENOBUFS; | 2494 | err = -ENOBUFS; |
| 2493 | hlen = LL_RESERVED_SPACE(dev); | 2495 | hlen = LL_RESERVED_SPACE(dev); |
| 2494 | tlen = dev->needed_tailroom; | 2496 | tlen = dev->needed_tailroom; |
| 2495 | skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len, | 2497 | skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, |
| 2498 | __virtio16_to_cpu(false, vnet_hdr.hdr_len), | ||
| 2496 | msg->msg_flags & MSG_DONTWAIT, &err); | 2499 | msg->msg_flags & MSG_DONTWAIT, &err); |
| 2497 | if (skb == NULL) | 2500 | if (skb == NULL) |
| 2498 | goto out_unlock; | 2501 | goto out_unlock; |
| @@ -2534,14 +2537,16 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
| 2534 | 2537 | ||
| 2535 | if (po->has_vnet_hdr) { | 2538 | if (po->has_vnet_hdr) { |
| 2536 | if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { | 2539 | if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { |
| 2537 | if (!skb_partial_csum_set(skb, vnet_hdr.csum_start, | 2540 | u16 s = __virtio16_to_cpu(false, vnet_hdr.csum_start); |
| 2538 | vnet_hdr.csum_offset)) { | 2541 | u16 o = __virtio16_to_cpu(false, vnet_hdr.csum_offset); |
| 2542 | if (!skb_partial_csum_set(skb, s, o)) { | ||
| 2539 | err = -EINVAL; | 2543 | err = -EINVAL; |
| 2540 | goto out_free; | 2544 | goto out_free; |
| 2541 | } | 2545 | } |
| 2542 | } | 2546 | } |
| 2543 | 2547 | ||
| 2544 | skb_shinfo(skb)->gso_size = vnet_hdr.gso_size; | 2548 | skb_shinfo(skb)->gso_size = |
| 2549 | __virtio16_to_cpu(false, vnet_hdr.gso_size); | ||
| 2545 | skb_shinfo(skb)->gso_type = gso_type; | 2550 | skb_shinfo(skb)->gso_type = gso_type; |
| 2546 | 2551 | ||
| 2547 | /* Header must be checked, and gso_segs computed. */ | 2552 | /* Header must be checked, and gso_segs computed. */ |
| @@ -2912,8 +2917,10 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 2912 | struct skb_shared_info *sinfo = skb_shinfo(skb); | 2917 | struct skb_shared_info *sinfo = skb_shinfo(skb); |
| 2913 | 2918 | ||
| 2914 | /* This is a hint as to how much should be linear. */ | 2919 | /* This is a hint as to how much should be linear. */ |
| 2915 | vnet_hdr.hdr_len = skb_headlen(skb); | 2920 | vnet_hdr.hdr_len = |
| 2916 | vnet_hdr.gso_size = sinfo->gso_size; | 2921 | __cpu_to_virtio16(false, skb_headlen(skb)); |
| 2922 | vnet_hdr.gso_size = | ||
| 2923 | __cpu_to_virtio16(false, sinfo->gso_size); | ||
| 2917 | if (sinfo->gso_type & SKB_GSO_TCPV4) | 2924 | if (sinfo->gso_type & SKB_GSO_TCPV4) |
| 2918 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; | 2925 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; |
| 2919 | else if (sinfo->gso_type & SKB_GSO_TCPV6) | 2926 | else if (sinfo->gso_type & SKB_GSO_TCPV6) |
| @@ -2931,8 +2938,10 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 2931 | 2938 | ||
| 2932 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 2939 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
| 2933 | vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; | 2940 | vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; |
| 2934 | vnet_hdr.csum_start = skb_checksum_start_offset(skb); | 2941 | vnet_hdr.csum_start = __cpu_to_virtio16(false, |
| 2935 | vnet_hdr.csum_offset = skb->csum_offset; | 2942 | skb_checksum_start_offset(skb)); |
| 2943 | vnet_hdr.csum_offset = __cpu_to_virtio16(false, | ||
| 2944 | skb->csum_offset); | ||
| 2936 | } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { | 2945 | } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { |
| 2937 | vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID; | 2946 | vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID; |
| 2938 | } /* else everything is zero */ | 2947 | } /* else everything is zero */ |
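Editor's note: the af_packet changes retype the virtio_net header fields as __virtio16, so every access goes through __virtio16_to_cpu()/__cpu_to_virtio16(); passing false keeps the legacy guest-native interpretation (effectively no conversion on this path) rather than the virtio 1.0 little-endian layout, while making the byte order explicit for sparse and for any future modern-virtio user. The converted hdr_len sanity check, as a kernel-context sketch:

    /* Compare in CPU byte order, store back in virtio byte order. */
    u16 start = __virtio16_to_cpu(false, vnet_hdr.csum_start);
    u16 off   = __virtio16_to_cpu(false, vnet_hdr.csum_offset);
    u16 hlen  = __virtio16_to_cpu(false, vnet_hdr.hdr_len);

    if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && start + off + 2 > hlen)
            vnet_hdr.hdr_len = __cpu_to_virtio16(false, start + off + 2);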
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c index 0f62326c0f5e..2a4717967502 100644 --- a/net/rfkill/rfkill-gpio.c +++ b/net/rfkill/rfkill-gpio.c | |||
| @@ -63,6 +63,15 @@ static const struct rfkill_ops rfkill_gpio_ops = { | |||
| 63 | .set_block = rfkill_gpio_set_power, | 63 | .set_block = rfkill_gpio_set_power, |
| 64 | }; | 64 | }; |
| 65 | 65 | ||
| 66 | static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; | ||
| 67 | static const struct acpi_gpio_params shutdown_gpios = { 1, 0, false }; | ||
| 68 | |||
| 69 | static const struct acpi_gpio_mapping acpi_rfkill_default_gpios[] = { | ||
| 70 | { "reset-gpios", &reset_gpios, 1 }, | ||
| 71 | { "shutdown-gpios", &shutdown_gpios, 1 }, | ||
| 72 | { }, | ||
| 73 | }; | ||
| 74 | |||
| 66 | static int rfkill_gpio_acpi_probe(struct device *dev, | 75 | static int rfkill_gpio_acpi_probe(struct device *dev, |
| 67 | struct rfkill_gpio_data *rfkill) | 76 | struct rfkill_gpio_data *rfkill) |
| 68 | { | 77 | { |
| @@ -75,7 +84,8 @@ static int rfkill_gpio_acpi_probe(struct device *dev, | |||
| 75 | rfkill->name = dev_name(dev); | 84 | rfkill->name = dev_name(dev); |
| 76 | rfkill->type = (unsigned)id->driver_data; | 85 | rfkill->type = (unsigned)id->driver_data; |
| 77 | 86 | ||
| 78 | return 0; | 87 | return acpi_dev_add_driver_gpios(ACPI_COMPANION(dev), |
| 88 | acpi_rfkill_default_gpios); | ||
| 79 | } | 89 | } |
| 80 | 90 | ||
| 81 | static int rfkill_gpio_probe(struct platform_device *pdev) | 91 | static int rfkill_gpio_probe(struct platform_device *pdev) |
| @@ -102,7 +112,7 @@ static int rfkill_gpio_probe(struct platform_device *pdev) | |||
| 102 | 112 | ||
| 103 | rfkill->clk = devm_clk_get(&pdev->dev, NULL); | 113 | rfkill->clk = devm_clk_get(&pdev->dev, NULL); |
| 104 | 114 | ||
| 105 | gpio = devm_gpiod_get_index(&pdev->dev, "reset", 0); | 115 | gpio = devm_gpiod_get(&pdev->dev, "reset"); |
| 106 | if (!IS_ERR(gpio)) { | 116 | if (!IS_ERR(gpio)) { |
| 107 | ret = gpiod_direction_output(gpio, 0); | 117 | ret = gpiod_direction_output(gpio, 0); |
| 108 | if (ret) | 118 | if (ret) |
| @@ -110,7 +120,7 @@ static int rfkill_gpio_probe(struct platform_device *pdev) | |||
| 110 | rfkill->reset_gpio = gpio; | 120 | rfkill->reset_gpio = gpio; |
| 111 | } | 121 | } |
| 112 | 122 | ||
| 113 | gpio = devm_gpiod_get_index(&pdev->dev, "shutdown", 1); | 123 | gpio = devm_gpiod_get(&pdev->dev, "shutdown"); |
| 114 | if (!IS_ERR(gpio)) { | 124 | if (!IS_ERR(gpio)) { |
| 115 | ret = gpiod_direction_output(gpio, 0); | 125 | ret = gpiod_direction_output(gpio, 0); |
| 116 | if (ret) | 126 | if (ret) |
| @@ -150,6 +160,8 @@ static int rfkill_gpio_remove(struct platform_device *pdev) | |||
| 150 | rfkill_unregister(rfkill->rfkill_dev); | 160 | rfkill_unregister(rfkill->rfkill_dev); |
| 151 | rfkill_destroy(rfkill->rfkill_dev); | 161 | rfkill_destroy(rfkill->rfkill_dev); |
| 152 | 162 | ||
| 163 | acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev)); | ||
| 164 | |||
| 153 | return 0; | 165 | return 0; |
| 154 | } | 166 | } |
| 155 | 167 | ||
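Editor's note: rather than pulling GPIOs out of the ACPI _CRS by bare index, the rfkill-gpio hunks register an acpi_gpio_mapping table so devm_gpiod_get() can resolve the "reset" and "shutdown" connection IDs to the first and second GpioIo resources, and the mapping is dropped again in remove(). The three moving parts condensed into a kernel-context sketch; my_acpi_gpios is an illustrative name.

    static const struct acpi_gpio_params reset_gpio    = { 0, 0, false };
    static const struct acpi_gpio_params shutdown_gpio = { 1, 0, false };

    static const struct acpi_gpio_mapping my_acpi_gpios[] = {
            { "reset-gpios",    &reset_gpio,    1 },
            { "shutdown-gpios", &shutdown_gpio, 1 },
            { }
    };

    /* probe():  acpi_dev_add_driver_gpios(ACPI_COMPANION(dev), my_acpi_gpios);
     *           gpio = devm_gpiod_get(dev, "reset");
     * remove(): acpi_dev_remove_driver_gpios(ACPI_COMPANION(dev));           */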
diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 0e8529113dc5..fb7976aee61c 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c | |||
| @@ -862,8 +862,6 @@ int sctp_auth_set_key(struct sctp_endpoint *ep, | |||
| 862 | list_add(&cur_key->key_list, sh_keys); | 862 | list_add(&cur_key->key_list, sh_keys); |
| 863 | 863 | ||
| 864 | cur_key->key = key; | 864 | cur_key->key = key; |
| 865 | sctp_auth_key_hold(key); | ||
| 866 | |||
| 867 | return 0; | 865 | return 0; |
| 868 | nomem: | 866 | nomem: |
| 869 | if (!replace) | 867 | if (!replace) |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index ab734be8cb20..9f32741abb1c 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
| @@ -2609,6 +2609,9 @@ do_addr_param: | |||
| 2609 | addr_param = param.v + sizeof(sctp_addip_param_t); | 2609 | addr_param = param.v + sizeof(sctp_addip_param_t); |
| 2610 | 2610 | ||
| 2611 | af = sctp_get_af_specific(param_type2af(param.p->type)); | 2611 | af = sctp_get_af_specific(param_type2af(param.p->type)); |
| 2612 | if (af == NULL) | ||
| 2613 | break; | ||
| 2614 | |||
| 2612 | af->from_addr_param(&addr, addr_param, | 2615 | af->from_addr_param(&addr, addr_param, |
| 2613 | htons(asoc->peer.port), 0); | 2616 | htons(asoc->peer.port), 0); |
| 2614 | 2617 | ||
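The sm_make_chunk.c hunk above is a defensive NULL check: sctp_get_af_specific() returns NULL when param_type2af() maps an unrecognized address parameter type to no address family, and the very next statement calls through the returned ops table, so a crafted parameter of an unexpected type would otherwise hit a NULL dereference. The shape of the fix as a generic sketch (the struct and function names here are illustrative, not SCTP's):

    struct addr_ops {
            void (*from_wire)(void *dst, const void *src);
    };

    /* Dispatch only when a handler is registered for this parameter type. */
    static void handle_addr_param(const struct addr_ops *ops,
                                  void *dst, const void *src)
    {
            if (!ops)       /* unknown address family: skip the parameter */
                    return;
            ops->from_wire(dst, src);
    }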
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig index 0754d0f466d2..fb78117b896c 100644 --- a/net/sunrpc/Kconfig +++ b/net/sunrpc/Kconfig | |||
| @@ -35,6 +35,7 @@ config RPCSEC_GSS_KRB5 | |||
| 35 | config SUNRPC_DEBUG | 35 | config SUNRPC_DEBUG |
| 36 | bool "RPC: Enable dprintk debugging" | 36 | bool "RPC: Enable dprintk debugging" |
| 37 | depends on SUNRPC && SYSCTL | 37 | depends on SUNRPC && SYSCTL |
| 38 | select DEBUG_FS | ||
| 38 | help | 39 | help |
| 39 | This option enables a sysctl-based debugging interface | 40 | This option enables a sysctl-based debugging interface |
| 40 | that is used by the 'rpcdebug' utility to turn on or off | 41 | that is used by the 'rpcdebug' utility to turn on or off |
diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile index e5a7a1cac8f3..15e6f6c23c5d 100644 --- a/net/sunrpc/Makefile +++ b/net/sunrpc/Makefile | |||
| @@ -14,6 +14,7 @@ sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \ | |||
| 14 | addr.o rpcb_clnt.o timer.o xdr.o \ | 14 | addr.o rpcb_clnt.o timer.o xdr.o \ |
| 15 | sunrpc_syms.o cache.o rpc_pipe.o \ | 15 | sunrpc_syms.o cache.o rpc_pipe.o \ |
| 16 | svc_xprt.o | 16 | svc_xprt.o |
| 17 | sunrpc-$(CONFIG_SUNRPC_DEBUG) += debugfs.o | ||
| 17 | sunrpc-$(CONFIG_SUNRPC_BACKCHANNEL) += backchannel_rqst.o bc_svc.o | 18 | sunrpc-$(CONFIG_SUNRPC_BACKCHANNEL) += backchannel_rqst.o bc_svc.o |
| 18 | sunrpc-$(CONFIG_PROC_FS) += stats.o | 19 | sunrpc-$(CONFIG_PROC_FS) += stats.o |
| 19 | sunrpc-$(CONFIG_SYSCTL) += sysctl.o | 20 | sunrpc-$(CONFIG_SYSCTL) += sysctl.o |
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 383eb919ac0b..47f38be4155f 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c | |||
| @@ -16,7 +16,7 @@ | |||
| 16 | #include <linux/sunrpc/gss_api.h> | 16 | #include <linux/sunrpc/gss_api.h> |
| 17 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
| 18 | 18 | ||
| 19 | #ifdef RPC_DEBUG | 19 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 20 | # define RPCDBG_FACILITY RPCDBG_AUTH | 20 | # define RPCDBG_FACILITY RPCDBG_AUTH |
| 21 | #endif | 21 | #endif |
| 22 | 22 | ||
| @@ -646,7 +646,7 @@ rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred, | |||
| 646 | cred->cr_auth = auth; | 646 | cred->cr_auth = auth; |
| 647 | cred->cr_ops = ops; | 647 | cred->cr_ops = ops; |
| 648 | cred->cr_expire = jiffies; | 648 | cred->cr_expire = jiffies; |
| 649 | #ifdef RPC_DEBUG | 649 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 650 | cred->cr_magic = RPCAUTH_CRED_MAGIC; | 650 | cred->cr_magic = RPCAUTH_CRED_MAGIC; |
| 651 | #endif | 651 | #endif |
| 652 | cred->cr_uid = acred->uid; | 652 | cred->cr_uid = acred->uid; |
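This hunk and the long run of sunrpc hunks that follow make one mechanical substitution: the private RPC_DEBUG guard becomes IS_ENABLED(CONFIG_SUNRPC_DEBUG). IS_ENABLED() expands to 1 when the option is y or m and to 0 otherwise, and unlike a bare #ifdef it can also sit in an ordinary C conditional, so the disabled branch is still parsed and type-checked before the compiler drops it. A small illustration; CONFIG_SUNRPC_DEBUG is the real Kconfig symbol, the function is made up.

    #include <linux/kconfig.h>      /* IS_ENABLED() */
    #include <linux/printk.h>

    static void foo_debug_dump(int state)
    {
    /* Preprocessor form: the block disappears entirely when disabled. */
    #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
            pr_debug("state=%d\n", state);
    #endif

            /* C form: dead code is eliminated, but still compile-checked. */
            if (IS_ENABLED(CONFIG_SUNRPC_DEBUG))
                    pr_debug("state=%d (checked even when off)\n", state);
    }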
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c index 6f6b829c9e8e..41248b1820c7 100644 --- a/net/sunrpc/auth_generic.c +++ b/net/sunrpc/auth_generic.c | |||
| @@ -14,7 +14,7 @@ | |||
| 14 | #include <linux/sunrpc/debug.h> | 14 | #include <linux/sunrpc/debug.h> |
| 15 | #include <linux/sunrpc/sched.h> | 15 | #include <linux/sunrpc/sched.h> |
| 16 | 16 | ||
| 17 | #ifdef RPC_DEBUG | 17 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 18 | # define RPCDBG_FACILITY RPCDBG_AUTH | 18 | # define RPCDBG_FACILITY RPCDBG_AUTH |
| 19 | #endif | 19 | #endif |
| 20 | 20 | ||
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index afb292cd797d..dace13d7638e 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
| @@ -66,7 +66,7 @@ static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED; | |||
| 66 | #define GSS_KEY_EXPIRE_TIMEO 240 | 66 | #define GSS_KEY_EXPIRE_TIMEO 240 |
| 67 | static unsigned int gss_key_expire_timeo = GSS_KEY_EXPIRE_TIMEO; | 67 | static unsigned int gss_key_expire_timeo = GSS_KEY_EXPIRE_TIMEO; |
| 68 | 68 | ||
| 69 | #ifdef RPC_DEBUG | 69 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 70 | # define RPCDBG_FACILITY RPCDBG_AUTH | 70 | # define RPCDBG_FACILITY RPCDBG_AUTH |
| 71 | #endif | 71 | #endif |
| 72 | 72 | ||
| @@ -1353,6 +1353,7 @@ gss_stringify_acceptor(struct rpc_cred *cred) | |||
| 1353 | char *string = NULL; | 1353 | char *string = NULL; |
| 1354 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); | 1354 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); |
| 1355 | struct gss_cl_ctx *ctx; | 1355 | struct gss_cl_ctx *ctx; |
| 1356 | unsigned int len; | ||
| 1356 | struct xdr_netobj *acceptor; | 1357 | struct xdr_netobj *acceptor; |
| 1357 | 1358 | ||
| 1358 | rcu_read_lock(); | 1359 | rcu_read_lock(); |
| @@ -1360,15 +1361,39 @@ gss_stringify_acceptor(struct rpc_cred *cred) | |||
| 1360 | if (!ctx) | 1361 | if (!ctx) |
| 1361 | goto out; | 1362 | goto out; |
| 1362 | 1363 | ||
| 1363 | acceptor = &ctx->gc_acceptor; | 1364 | len = ctx->gc_acceptor.len; |
| 1365 | rcu_read_unlock(); | ||
| 1364 | 1366 | ||
| 1365 | /* no point if there's no string */ | 1367 | /* no point if there's no string */ |
| 1366 | if (!acceptor->len) | 1368 | if (!len) |
| 1367 | goto out; | 1369 | return NULL; |
| 1368 | 1370 | realloc: | |
| 1369 | string = kmalloc(acceptor->len + 1, GFP_KERNEL); | 1371 | string = kmalloc(len + 1, GFP_KERNEL); |
| 1370 | if (!string) | 1372 | if (!string) |
| 1373 | return NULL; | ||
| 1374 | |||
| 1375 | rcu_read_lock(); | ||
| 1376 | ctx = rcu_dereference(gss_cred->gc_ctx); | ||
| 1377 | |||
| 1378 | /* did the ctx disappear or was it replaced by one with no acceptor? */ | ||
| 1379 | if (!ctx || !ctx->gc_acceptor.len) { | ||
| 1380 | kfree(string); | ||
| 1381 | string = NULL; | ||
| 1371 | goto out; | 1382 | goto out; |
| 1383 | } | ||
| 1384 | |||
| 1385 | acceptor = &ctx->gc_acceptor; | ||
| 1386 | |||
| 1387 | /* | ||
| 1388 | * Did we find a new acceptor that's longer than the original? Allocate | ||
| 1389 | * a longer buffer and try again. | ||
| 1390 | */ | ||
| 1391 | if (len < acceptor->len) { | ||
| 1392 | len = acceptor->len; | ||
| 1393 | rcu_read_unlock(); | ||
| 1394 | kfree(string); | ||
| 1395 | goto realloc; | ||
| 1396 | } | ||
| 1372 | 1397 | ||
| 1373 | memcpy(string, acceptor->data, acceptor->len); | 1398 | memcpy(string, acceptor->data, acceptor->len); |
| 1374 | string[acceptor->len] = '\0'; | 1399 | string[acceptor->len] = '\0'; |
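The gss_stringify_acceptor() rewrite above exists because kmalloc(GFP_KERNEL) can sleep and therefore must not be called inside rcu_read_lock(). The new code samples the acceptor length, drops the read lock, allocates, then re-enters the critical section and revalidates: if the context went away it gives up, and if the acceptor grew in the meantime it frees the buffer and retries with the larger length. A condensed sketch of that drop-lock, allocate, revalidate loop, using generic names rather than the sunrpc ones:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct blob {
            unsigned int len;
            char data[];
    };

    /* Copy the RCU-protected blob into a freshly allocated, NUL-terminated buffer. */
    static char *copy_blob(struct blob __rcu **slot)
    {
            struct blob *b;
            unsigned int len;
            char *buf;

            rcu_read_lock();
            b = rcu_dereference(*slot);
            len = b ? b->len : 0;
            rcu_read_unlock();
            if (!len)
                    return NULL;
    retry:
            buf = kmalloc(len + 1, GFP_KERNEL);     /* may sleep: no RCU lock held */
            if (!buf)
                    return NULL;

            rcu_read_lock();
            b = rcu_dereference(*slot);
            if (!b || !b->len) {                    /* object vanished while we slept */
                    kfree(buf);
                    buf = NULL;
            } else if (b->len > len) {              /* grew: need a bigger buffer */
                    len = b->len;
                    rcu_read_unlock();
                    kfree(buf);
                    goto retry;
            } else {
                    memcpy(buf, b->data, b->len);
                    buf[b->len] = '\0';
            }
            rcu_read_unlock();
            return buf;
    }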
diff --git a/net/sunrpc/auth_gss/gss_generic_token.c b/net/sunrpc/auth_gss/gss_generic_token.c index c586e92bcf76..254defe446a7 100644 --- a/net/sunrpc/auth_gss/gss_generic_token.c +++ b/net/sunrpc/auth_gss/gss_generic_token.c | |||
| @@ -38,7 +38,7 @@ | |||
| 38 | #include <linux/sunrpc/gss_asn1.h> | 38 | #include <linux/sunrpc/gss_asn1.h> |
| 39 | 39 | ||
| 40 | 40 | ||
| 41 | #ifdef RPC_DEBUG | 41 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 42 | # define RPCDBG_FACILITY RPCDBG_AUTH | 42 | # define RPCDBG_FACILITY RPCDBG_AUTH |
| 43 | #endif | 43 | #endif |
| 44 | 44 | ||
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index f5ed9f6ece06..b5408e8a37f2 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c | |||
| @@ -45,7 +45,7 @@ | |||
| 45 | #include <linux/sunrpc/gss_krb5.h> | 45 | #include <linux/sunrpc/gss_krb5.h> |
| 46 | #include <linux/sunrpc/xdr.h> | 46 | #include <linux/sunrpc/xdr.h> |
| 47 | 47 | ||
| 48 | #ifdef RPC_DEBUG | 48 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 49 | # define RPCDBG_FACILITY RPCDBG_AUTH | 49 | # define RPCDBG_FACILITY RPCDBG_AUTH |
| 50 | #endif | 50 | #endif |
| 51 | 51 | ||
diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c index 24589bd2a4b6..234fa8d0fd9b 100644 --- a/net/sunrpc/auth_gss/gss_krb5_keys.c +++ b/net/sunrpc/auth_gss/gss_krb5_keys.c | |||
| @@ -61,7 +61,7 @@ | |||
| 61 | #include <linux/sunrpc/xdr.h> | 61 | #include <linux/sunrpc/xdr.h> |
| 62 | #include <linux/lcm.h> | 62 | #include <linux/lcm.h> |
| 63 | 63 | ||
| 64 | #ifdef RPC_DEBUG | 64 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 65 | # define RPCDBG_FACILITY RPCDBG_AUTH | 65 | # define RPCDBG_FACILITY RPCDBG_AUTH |
| 66 | #endif | 66 | #endif |
| 67 | 67 | ||
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 0d3c158ef8fa..28db442a0034 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c | |||
| @@ -45,7 +45,7 @@ | |||
| 45 | #include <linux/crypto.h> | 45 | #include <linux/crypto.h> |
| 46 | #include <linux/sunrpc/gss_krb5_enctypes.h> | 46 | #include <linux/sunrpc/gss_krb5_enctypes.h> |
| 47 | 47 | ||
| 48 | #ifdef RPC_DEBUG | 48 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 49 | # define RPCDBG_FACILITY RPCDBG_AUTH | 49 | # define RPCDBG_FACILITY RPCDBG_AUTH |
| 50 | #endif | 50 | #endif |
| 51 | 51 | ||
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index 42768e5c3994..1d74d653e6c0 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c | |||
| @@ -64,7 +64,7 @@ | |||
| 64 | #include <linux/random.h> | 64 | #include <linux/random.h> |
| 65 | #include <linux/crypto.h> | 65 | #include <linux/crypto.h> |
| 66 | 66 | ||
| 67 | #ifdef RPC_DEBUG | 67 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 68 | # define RPCDBG_FACILITY RPCDBG_AUTH | 68 | # define RPCDBG_FACILITY RPCDBG_AUTH |
| 69 | #endif | 69 | #endif |
| 70 | 70 | ||
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c index 62ac90c62cb1..20d55c793eb6 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c +++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c | |||
| @@ -35,7 +35,7 @@ | |||
| 35 | #include <linux/sunrpc/gss_krb5.h> | 35 | #include <linux/sunrpc/gss_krb5.h> |
| 36 | #include <linux/crypto.h> | 36 | #include <linux/crypto.h> |
| 37 | 37 | ||
| 38 | #ifdef RPC_DEBUG | 38 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 39 | # define RPCDBG_FACILITY RPCDBG_AUTH | 39 | # define RPCDBG_FACILITY RPCDBG_AUTH |
| 40 | #endif | 40 | #endif |
| 41 | 41 | ||
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index 6c981ddc19f8..dcf9515d9aef 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c | |||
| @@ -62,7 +62,7 @@ | |||
| 62 | #include <linux/sunrpc/gss_krb5.h> | 62 | #include <linux/sunrpc/gss_krb5.h> |
| 63 | #include <linux/crypto.h> | 63 | #include <linux/crypto.h> |
| 64 | 64 | ||
| 65 | #ifdef RPC_DEBUG | 65 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 66 | # define RPCDBG_FACILITY RPCDBG_AUTH | 66 | # define RPCDBG_FACILITY RPCDBG_AUTH |
| 67 | #endif | 67 | #endif |
| 68 | 68 | ||
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index 4b614c604fe0..ca7e92a32f84 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c | |||
| @@ -35,7 +35,7 @@ | |||
| 35 | #include <linux/pagemap.h> | 35 | #include <linux/pagemap.h> |
| 36 | #include <linux/crypto.h> | 36 | #include <linux/crypto.h> |
| 37 | 37 | ||
| 38 | #ifdef RPC_DEBUG | 38 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 39 | # define RPCDBG_FACILITY RPCDBG_AUTH | 39 | # define RPCDBG_FACILITY RPCDBG_AUTH |
| 40 | #endif | 40 | #endif |
| 41 | 41 | ||
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c index 92d5ab99fbf3..7063d856a598 100644 --- a/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/net/sunrpc/auth_gss/gss_mech_switch.c | |||
| @@ -46,7 +46,7 @@ | |||
| 46 | #include <linux/sunrpc/gss_api.h> | 46 | #include <linux/sunrpc/gss_api.h> |
| 47 | #include <linux/sunrpc/clnt.h> | 47 | #include <linux/sunrpc/clnt.h> |
| 48 | 48 | ||
| 49 | #ifdef RPC_DEBUG | 49 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 50 | # define RPCDBG_FACILITY RPCDBG_AUTH | 50 | # define RPCDBG_FACILITY RPCDBG_AUTH |
| 51 | #endif | 51 | #endif |
| 52 | 52 | ||
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.h b/net/sunrpc/auth_gss/gss_rpc_xdr.h index 685a688f3d8a..9d88c6239f01 100644 --- a/net/sunrpc/auth_gss/gss_rpc_xdr.h +++ b/net/sunrpc/auth_gss/gss_rpc_xdr.h | |||
| @@ -25,7 +25,7 @@ | |||
| 25 | #include <linux/sunrpc/clnt.h> | 25 | #include <linux/sunrpc/clnt.h> |
| 26 | #include <linux/sunrpc/xprtsock.h> | 26 | #include <linux/sunrpc/xprtsock.h> |
| 27 | 27 | ||
| 28 | #ifdef RPC_DEBUG | 28 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 29 | # define RPCDBG_FACILITY RPCDBG_AUTH | 29 | # define RPCDBG_FACILITY RPCDBG_AUTH |
| 30 | #endif | 30 | #endif |
| 31 | 31 | ||
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index c548ab213f76..de856ddf5fed 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c | |||
| @@ -51,7 +51,7 @@ | |||
| 51 | #include "gss_rpc_upcall.h" | 51 | #include "gss_rpc_upcall.h" |
| 52 | 52 | ||
| 53 | 53 | ||
| 54 | #ifdef RPC_DEBUG | 54 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 55 | # define RPCDBG_FACILITY RPCDBG_AUTH | 55 | # define RPCDBG_FACILITY RPCDBG_AUTH |
| 56 | #endif | 56 | #endif |
| 57 | 57 | ||
diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c index 712c123e04e9..c2a2b584a056 100644 --- a/net/sunrpc/auth_null.c +++ b/net/sunrpc/auth_null.c | |||
| @@ -10,7 +10,7 @@ | |||
| 10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
| 11 | #include <linux/sunrpc/clnt.h> | 11 | #include <linux/sunrpc/clnt.h> |
| 12 | 12 | ||
| 13 | #ifdef RPC_DEBUG | 13 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 14 | # define RPCDBG_FACILITY RPCDBG_AUTH | 14 | # define RPCDBG_FACILITY RPCDBG_AUTH |
| 15 | #endif | 15 | #endif |
| 16 | 16 | ||
| @@ -138,7 +138,7 @@ struct rpc_cred null_cred = { | |||
| 138 | .cr_ops = &null_credops, | 138 | .cr_ops = &null_credops, |
| 139 | .cr_count = ATOMIC_INIT(1), | 139 | .cr_count = ATOMIC_INIT(1), |
| 140 | .cr_flags = 1UL << RPCAUTH_CRED_UPTODATE, | 140 | .cr_flags = 1UL << RPCAUTH_CRED_UPTODATE, |
| 141 | #ifdef RPC_DEBUG | 141 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 142 | .cr_magic = RPCAUTH_CRED_MAGIC, | 142 | .cr_magic = RPCAUTH_CRED_MAGIC, |
| 143 | #endif | 143 | #endif |
| 144 | }; | 144 | }; |
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c index d5d692366294..4feda2d0a833 100644 --- a/net/sunrpc/auth_unix.c +++ b/net/sunrpc/auth_unix.c | |||
| @@ -25,7 +25,7 @@ struct unx_cred { | |||
| 25 | 25 | ||
| 26 | #define UNX_WRITESLACK (21 + (UNX_MAXNODENAME >> 2)) | 26 | #define UNX_WRITESLACK (21 + (UNX_MAXNODENAME >> 2)) |
| 27 | 27 | ||
| 28 | #ifdef RPC_DEBUG | 28 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 29 | # define RPCDBG_FACILITY RPCDBG_AUTH | 29 | # define RPCDBG_FACILITY RPCDBG_AUTH |
| 30 | #endif | 30 | #endif |
| 31 | 31 | ||
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c index 9761a0da964d..651f49ab601f 100644 --- a/net/sunrpc/backchannel_rqst.c +++ b/net/sunrpc/backchannel_rqst.c | |||
| @@ -27,7 +27,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| 27 | #include <linux/export.h> | 27 | #include <linux/export.h> |
| 28 | #include <linux/sunrpc/bc_xprt.h> | 28 | #include <linux/sunrpc/bc_xprt.h> |
| 29 | 29 | ||
| 30 | #ifdef RPC_DEBUG | 30 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 31 | #define RPCDBG_FACILITY RPCDBG_TRANS | 31 | #define RPCDBG_FACILITY RPCDBG_TRANS |
| 32 | #endif | 32 | #endif |
| 33 | 33 | ||
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 9acd6ce88db7..05da12a33945 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
| @@ -42,7 +42,7 @@ | |||
| 42 | #include "sunrpc.h" | 42 | #include "sunrpc.h" |
| 43 | #include "netns.h" | 43 | #include "netns.h" |
| 44 | 44 | ||
| 45 | #ifdef RPC_DEBUG | 45 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 46 | # define RPCDBG_FACILITY RPCDBG_CALL | 46 | # define RPCDBG_FACILITY RPCDBG_CALL |
| 47 | #endif | 47 | #endif |
| 48 | 48 | ||
| @@ -305,6 +305,10 @@ static int rpc_client_register(struct rpc_clnt *clnt, | |||
| 305 | struct super_block *pipefs_sb; | 305 | struct super_block *pipefs_sb; |
| 306 | int err; | 306 | int err; |
| 307 | 307 | ||
| 308 | err = rpc_clnt_debugfs_register(clnt); | ||
| 309 | if (err) | ||
| 310 | return err; | ||
| 311 | |||
| 308 | pipefs_sb = rpc_get_sb_net(net); | 312 | pipefs_sb = rpc_get_sb_net(net); |
| 309 | if (pipefs_sb) { | 313 | if (pipefs_sb) { |
| 310 | err = rpc_setup_pipedir(pipefs_sb, clnt); | 314 | err = rpc_setup_pipedir(pipefs_sb, clnt); |
| @@ -331,6 +335,7 @@ err_auth: | |||
| 331 | out: | 335 | out: |
| 332 | if (pipefs_sb) | 336 | if (pipefs_sb) |
| 333 | rpc_put_sb_net(net); | 337 | rpc_put_sb_net(net); |
| 338 | rpc_clnt_debugfs_unregister(clnt); | ||
| 334 | return err; | 339 | return err; |
| 335 | } | 340 | } |
| 336 | 341 | ||
| @@ -670,6 +675,7 @@ int rpc_switch_client_transport(struct rpc_clnt *clnt, | |||
| 670 | 675 | ||
| 671 | rpc_unregister_client(clnt); | 676 | rpc_unregister_client(clnt); |
| 672 | __rpc_clnt_remove_pipedir(clnt); | 677 | __rpc_clnt_remove_pipedir(clnt); |
| 678 | rpc_clnt_debugfs_unregister(clnt); | ||
| 673 | 679 | ||
| 674 | /* | 680 | /* |
| 675 | * A new transport was created. "clnt" therefore | 681 | * A new transport was created. "clnt" therefore |
| @@ -771,6 +777,7 @@ rpc_free_client(struct rpc_clnt *clnt) | |||
| 771 | rcu_dereference(clnt->cl_xprt)->servername); | 777 | rcu_dereference(clnt->cl_xprt)->servername); |
| 772 | if (clnt->cl_parent != clnt) | 778 | if (clnt->cl_parent != clnt) |
| 773 | parent = clnt->cl_parent; | 779 | parent = clnt->cl_parent; |
| 780 | rpc_clnt_debugfs_unregister(clnt); | ||
| 774 | rpc_clnt_remove_pipedir(clnt); | 781 | rpc_clnt_remove_pipedir(clnt); |
| 775 | rpc_unregister_client(clnt); | 782 | rpc_unregister_client(clnt); |
| 776 | rpc_free_iostats(clnt->cl_metrics); | 783 | rpc_free_iostats(clnt->cl_metrics); |
| @@ -1396,8 +1403,9 @@ rpc_restart_call(struct rpc_task *task) | |||
| 1396 | } | 1403 | } |
| 1397 | EXPORT_SYMBOL_GPL(rpc_restart_call); | 1404 | EXPORT_SYMBOL_GPL(rpc_restart_call); |
| 1398 | 1405 | ||
| 1399 | #ifdef RPC_DEBUG | 1406 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 1400 | static const char *rpc_proc_name(const struct rpc_task *task) | 1407 | const char |
| 1408 | *rpc_proc_name(const struct rpc_task *task) | ||
| 1401 | { | 1409 | { |
| 1402 | const struct rpc_procinfo *proc = task->tk_msg.rpc_proc; | 1410 | const struct rpc_procinfo *proc = task->tk_msg.rpc_proc; |
| 1403 | 1411 | ||
| @@ -2421,7 +2429,7 @@ struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int | |||
| 2421 | } | 2429 | } |
| 2422 | EXPORT_SYMBOL_GPL(rpc_call_null); | 2430 | EXPORT_SYMBOL_GPL(rpc_call_null); |
| 2423 | 2431 | ||
| 2424 | #ifdef RPC_DEBUG | 2432 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 2425 | static void rpc_show_header(void) | 2433 | static void rpc_show_header(void) |
| 2426 | { | 2434 | { |
| 2427 | printk(KERN_INFO "-pid- flgs status -client- --rqstp- " | 2435 | printk(KERN_INFO "-pid- flgs status -client- --rqstp- " |
diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c new file mode 100644 index 000000000000..e811f390f9f6 --- /dev/null +++ b/net/sunrpc/debugfs.c | |||
| @@ -0,0 +1,292 @@ | |||
| 1 | /** | ||
| 2 | * debugfs interface for sunrpc | ||
| 3 | * | ||
| 4 | * (c) 2014 Jeff Layton <jlayton@primarydata.com> | ||
| 5 | */ | ||
| 6 | |||
| 7 | #include <linux/debugfs.h> | ||
| 8 | #include <linux/sunrpc/sched.h> | ||
| 9 | #include <linux/sunrpc/clnt.h> | ||
| 10 | #include "netns.h" | ||
| 11 | |||
| 12 | static struct dentry *topdir; | ||
| 13 | static struct dentry *rpc_clnt_dir; | ||
| 14 | static struct dentry *rpc_xprt_dir; | ||
| 15 | |||
| 16 | struct rpc_clnt_iter { | ||
| 17 | struct rpc_clnt *clnt; | ||
| 18 | loff_t pos; | ||
| 19 | }; | ||
| 20 | |||
| 21 | static int | ||
| 22 | tasks_show(struct seq_file *f, void *v) | ||
| 23 | { | ||
| 24 | u32 xid = 0; | ||
| 25 | struct rpc_task *task = v; | ||
| 26 | struct rpc_clnt *clnt = task->tk_client; | ||
| 27 | const char *rpc_waitq = "none"; | ||
| 28 | |||
| 29 | if (RPC_IS_QUEUED(task)) | ||
| 30 | rpc_waitq = rpc_qname(task->tk_waitqueue); | ||
| 31 | |||
| 32 | if (task->tk_rqstp) | ||
| 33 | xid = be32_to_cpu(task->tk_rqstp->rq_xid); | ||
| 34 | |||
| 35 | seq_printf(f, "%5u %04x %6d 0x%x 0x%x %8ld %ps %sv%u %s a:%ps q:%s\n", | ||
| 36 | task->tk_pid, task->tk_flags, task->tk_status, | ||
| 37 | clnt->cl_clid, xid, task->tk_timeout, task->tk_ops, | ||
| 38 | clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task), | ||
| 39 | task->tk_action, rpc_waitq); | ||
| 40 | return 0; | ||
| 41 | } | ||
| 42 | |||
| 43 | static void * | ||
| 44 | tasks_start(struct seq_file *f, loff_t *ppos) | ||
| 45 | __acquires(&clnt->cl_lock) | ||
| 46 | { | ||
| 47 | struct rpc_clnt_iter *iter = f->private; | ||
| 48 | loff_t pos = *ppos; | ||
| 49 | struct rpc_clnt *clnt = iter->clnt; | ||
| 50 | struct rpc_task *task; | ||
| 51 | |||
| 52 | iter->pos = pos + 1; | ||
| 53 | spin_lock(&clnt->cl_lock); | ||
| 54 | list_for_each_entry(task, &clnt->cl_tasks, tk_task) | ||
| 55 | if (pos-- == 0) | ||
| 56 | return task; | ||
| 57 | return NULL; | ||
| 58 | } | ||
| 59 | |||
| 60 | static void * | ||
| 61 | tasks_next(struct seq_file *f, void *v, loff_t *pos) | ||
| 62 | { | ||
| 63 | struct rpc_clnt_iter *iter = f->private; | ||
| 64 | struct rpc_clnt *clnt = iter->clnt; | ||
| 65 | struct rpc_task *task = v; | ||
| 66 | struct list_head *next = task->tk_task.next; | ||
| 67 | |||
| 68 | ++iter->pos; | ||
| 69 | ++*pos; | ||
| 70 | |||
| 71 | /* If there's another task on list, return it */ | ||
| 72 | if (next == &clnt->cl_tasks) | ||
| 73 | return NULL; | ||
| 74 | return list_entry(next, struct rpc_task, tk_task); | ||
| 75 | } | ||
| 76 | |||
| 77 | static void | ||
| 78 | tasks_stop(struct seq_file *f, void *v) | ||
| 79 | __releases(&clnt->cl_lock) | ||
| 80 | { | ||
| 81 | struct rpc_clnt_iter *iter = f->private; | ||
| 82 | struct rpc_clnt *clnt = iter->clnt; | ||
| 83 | |||
| 84 | spin_unlock(&clnt->cl_lock); | ||
| 85 | } | ||
| 86 | |||
| 87 | static const struct seq_operations tasks_seq_operations = { | ||
| 88 | .start = tasks_start, | ||
| 89 | .next = tasks_next, | ||
| 90 | .stop = tasks_stop, | ||
| 91 | .show = tasks_show, | ||
| 92 | }; | ||
| 93 | |||
| 94 | static int tasks_open(struct inode *inode, struct file *filp) | ||
| 95 | { | ||
| 96 | int ret = seq_open_private(filp, &tasks_seq_operations, | ||
| 97 | sizeof(struct rpc_clnt_iter)); | ||
| 98 | |||
| 99 | if (!ret) { | ||
| 100 | struct seq_file *seq = filp->private_data; | ||
| 101 | struct rpc_clnt_iter *iter = seq->private; | ||
| 102 | |||
| 103 | iter->clnt = inode->i_private; | ||
| 104 | |||
| 105 | if (!atomic_inc_not_zero(&iter->clnt->cl_count)) { | ||
| 106 | seq_release_private(inode, filp); | ||
| 107 | ret = -EINVAL; | ||
| 108 | } | ||
| 109 | } | ||
| 110 | |||
| 111 | return ret; | ||
| 112 | } | ||
| 113 | |||
| 114 | static int | ||
| 115 | tasks_release(struct inode *inode, struct file *filp) | ||
| 116 | { | ||
| 117 | struct seq_file *seq = filp->private_data; | ||
| 118 | struct rpc_clnt_iter *iter = seq->private; | ||
| 119 | |||
| 120 | rpc_release_client(iter->clnt); | ||
| 121 | return seq_release_private(inode, filp); | ||
| 122 | } | ||
| 123 | |||
| 124 | static const struct file_operations tasks_fops = { | ||
| 125 | .owner = THIS_MODULE, | ||
| 126 | .open = tasks_open, | ||
| 127 | .read = seq_read, | ||
| 128 | .llseek = seq_lseek, | ||
| 129 | .release = tasks_release, | ||
| 130 | }; | ||
| 131 | |||
| 132 | int | ||
| 133 | rpc_clnt_debugfs_register(struct rpc_clnt *clnt) | ||
| 134 | { | ||
| 135 | int len, err; | ||
| 136 | char name[24]; /* enough for "../../rpc_xprt/" + 8 hex digits + NULL */ | ||
| 137 | |||
| 138 | /* Already registered? */ | ||
| 139 | if (clnt->cl_debugfs) | ||
| 140 | return 0; | ||
| 141 | |||
| 142 | len = snprintf(name, sizeof(name), "%x", clnt->cl_clid); | ||
| 143 | if (len >= sizeof(name)) | ||
| 144 | return -EINVAL; | ||
| 145 | |||
| 146 | /* make the per-client dir */ | ||
| 147 | clnt->cl_debugfs = debugfs_create_dir(name, rpc_clnt_dir); | ||
| 148 | if (!clnt->cl_debugfs) | ||
| 149 | return -ENOMEM; | ||
| 150 | |||
| 151 | /* make tasks file */ | ||
| 152 | err = -ENOMEM; | ||
| 153 | if (!debugfs_create_file("tasks", S_IFREG | S_IRUSR, clnt->cl_debugfs, | ||
| 154 | clnt, &tasks_fops)) | ||
| 155 | goto out_err; | ||
| 156 | |||
| 157 | err = -EINVAL; | ||
| 158 | rcu_read_lock(); | ||
| 159 | len = snprintf(name, sizeof(name), "../../rpc_xprt/%s", | ||
| 160 | rcu_dereference(clnt->cl_xprt)->debugfs->d_name.name); | ||
| 161 | rcu_read_unlock(); | ||
| 162 | if (len >= sizeof(name)) | ||
| 163 | goto out_err; | ||
| 164 | |||
| 165 | err = -ENOMEM; | ||
| 166 | if (!debugfs_create_symlink("xprt", clnt->cl_debugfs, name)) | ||
| 167 | goto out_err; | ||
| 168 | |||
| 169 | return 0; | ||
| 170 | out_err: | ||
| 171 | debugfs_remove_recursive(clnt->cl_debugfs); | ||
| 172 | clnt->cl_debugfs = NULL; | ||
| 173 | return err; | ||
| 174 | } | ||
| 175 | |||
| 176 | void | ||
| 177 | rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt) | ||
| 178 | { | ||
| 179 | debugfs_remove_recursive(clnt->cl_debugfs); | ||
| 180 | clnt->cl_debugfs = NULL; | ||
| 181 | } | ||
| 182 | |||
| 183 | static int | ||
| 184 | xprt_info_show(struct seq_file *f, void *v) | ||
| 185 | { | ||
| 186 | struct rpc_xprt *xprt = f->private; | ||
| 187 | |||
| 188 | seq_printf(f, "netid: %s\n", xprt->address_strings[RPC_DISPLAY_NETID]); | ||
| 189 | seq_printf(f, "addr: %s\n", xprt->address_strings[RPC_DISPLAY_ADDR]); | ||
| 190 | seq_printf(f, "port: %s\n", xprt->address_strings[RPC_DISPLAY_PORT]); | ||
| 191 | seq_printf(f, "state: 0x%lx\n", xprt->state); | ||
| 192 | return 0; | ||
| 193 | } | ||
| 194 | |||
| 195 | static int | ||
| 196 | xprt_info_open(struct inode *inode, struct file *filp) | ||
| 197 | { | ||
| 198 | int ret; | ||
| 199 | struct rpc_xprt *xprt = inode->i_private; | ||
| 200 | |||
| 201 | ret = single_open(filp, xprt_info_show, xprt); | ||
| 202 | |||
| 203 | if (!ret) { | ||
| 204 | if (!xprt_get(xprt)) { | ||
| 205 | single_release(inode, filp); | ||
| 206 | ret = -EINVAL; | ||
| 207 | } | ||
| 208 | } | ||
| 209 | return ret; | ||
| 210 | } | ||
| 211 | |||
| 212 | static int | ||
| 213 | xprt_info_release(struct inode *inode, struct file *filp) | ||
| 214 | { | ||
| 215 | struct rpc_xprt *xprt = inode->i_private; | ||
| 216 | |||
| 217 | xprt_put(xprt); | ||
| 218 | return single_release(inode, filp); | ||
| 219 | } | ||
| 220 | |||
| 221 | static const struct file_operations xprt_info_fops = { | ||
| 222 | .owner = THIS_MODULE, | ||
| 223 | .open = xprt_info_open, | ||
| 224 | .read = seq_read, | ||
| 225 | .llseek = seq_lseek, | ||
| 226 | .release = xprt_info_release, | ||
| 227 | }; | ||
| 228 | |||
| 229 | int | ||
| 230 | rpc_xprt_debugfs_register(struct rpc_xprt *xprt) | ||
| 231 | { | ||
| 232 | int len, id; | ||
| 233 | static atomic_t cur_id; | ||
| 234 | char name[9]; /* 8 hex digits + NULL term */ | ||
| 235 | |||
| 236 | id = (unsigned int)atomic_inc_return(&cur_id); | ||
| 237 | |||
| 238 | len = snprintf(name, sizeof(name), "%x", id); | ||
| 239 | if (len >= sizeof(name)) | ||
| 240 | return -EINVAL; | ||
| 241 | |||
| 242 | /* make the per-client dir */ | ||
| 243 | xprt->debugfs = debugfs_create_dir(name, rpc_xprt_dir); | ||
| 244 | if (!xprt->debugfs) | ||
| 245 | return -ENOMEM; | ||
| 246 | |||
| 247 | /* make tasks file */ | ||
| 248 | if (!debugfs_create_file("info", S_IFREG | S_IRUSR, xprt->debugfs, | ||
| 249 | xprt, &xprt_info_fops)) { | ||
| 250 | debugfs_remove_recursive(xprt->debugfs); | ||
| 251 | xprt->debugfs = NULL; | ||
| 252 | return -ENOMEM; | ||
| 253 | } | ||
| 254 | |||
| 255 | return 0; | ||
| 256 | } | ||
| 257 | |||
| 258 | void | ||
| 259 | rpc_xprt_debugfs_unregister(struct rpc_xprt *xprt) | ||
| 260 | { | ||
| 261 | debugfs_remove_recursive(xprt->debugfs); | ||
| 262 | xprt->debugfs = NULL; | ||
| 263 | } | ||
| 264 | |||
| 265 | void __exit | ||
| 266 | sunrpc_debugfs_exit(void) | ||
| 267 | { | ||
| 268 | debugfs_remove_recursive(topdir); | ||
| 269 | } | ||
| 270 | |||
| 271 | int __init | ||
| 272 | sunrpc_debugfs_init(void) | ||
| 273 | { | ||
| 274 | topdir = debugfs_create_dir("sunrpc", NULL); | ||
| 275 | if (!topdir) | ||
| 276 | goto out; | ||
| 277 | |||
| 278 | rpc_clnt_dir = debugfs_create_dir("rpc_clnt", topdir); | ||
| 279 | if (!rpc_clnt_dir) | ||
| 280 | goto out_remove; | ||
| 281 | |||
| 282 | rpc_xprt_dir = debugfs_create_dir("rpc_xprt", topdir); | ||
| 283 | if (!rpc_xprt_dir) | ||
| 284 | goto out_remove; | ||
| 285 | |||
| 286 | return 0; | ||
| 287 | out_remove: | ||
| 288 | debugfs_remove_recursive(topdir); | ||
| 289 | topdir = NULL; | ||
| 290 | out: | ||
| 291 | return -ENOMEM; | ||
| 292 | } | ||
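With debugfs mounted in its usual place, the new net/sunrpc/debugfs.c creates /sys/kernel/debug/sunrpc/rpc_clnt/<clid>/tasks plus an xprt symlink per client, and /sys/kernel/debug/sunrpc/rpc_xprt/<id>/info per transport (the paths assume the conventional mount point). The open handlers also pin the backing object, tasks_open() via atomic_inc_not_zero(&clnt->cl_count) and xprt_info_open() via xprt_get(), so the object cannot be freed while a reader holds the file open. A generic sketch of that pin-at-open idiom, outside of sunrpc:

    #include <linux/atomic.h>
    #include <linux/errno.h>
    #include <linux/fs.h>
    #include <linux/seq_file.h>
    #include <linux/slab.h>

    struct obj {
            atomic_t refcount;
            int value;
    };

    static void obj_put(struct obj *o)
    {
            if (atomic_dec_and_test(&o->refcount))
                    kfree(o);
    }

    static int obj_show(struct seq_file *f, void *v)
    {
            struct obj *o = f->private;

            seq_printf(f, "value: %d\n", o->value);
            return 0;
    }

    static int obj_open(struct inode *inode, struct file *filp)
    {
            struct obj *o = inode->i_private;
            int ret = single_open(filp, obj_show, o);

            /* Pin the object for the lifetime of the open file. */
            if (!ret && !atomic_inc_not_zero(&o->refcount)) {
                    single_release(inode, filp);
                    ret = -ENOENT;
            }
            return ret;
    }

    static int obj_release(struct inode *inode, struct file *filp)
    {
            obj_put(inode->i_private);
            return single_release(inode, filp);
    }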
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 1891a1022c17..05202012bcfc 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c | |||
| @@ -32,7 +32,7 @@ | |||
| 32 | 32 | ||
| 33 | #include "netns.h" | 33 | #include "netns.h" |
| 34 | 34 | ||
| 35 | #ifdef RPC_DEBUG | 35 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 36 | # define RPCDBG_FACILITY RPCDBG_BIND | 36 | # define RPCDBG_FACILITY RPCDBG_BIND |
| 37 | #endif | 37 | #endif |
| 38 | 38 | ||
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index fe3441abdbe5..d20f2329eea3 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
| @@ -24,7 +24,7 @@ | |||
| 24 | 24 | ||
| 25 | #include "sunrpc.h" | 25 | #include "sunrpc.h" |
| 26 | 26 | ||
| 27 | #ifdef RPC_DEBUG | 27 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 28 | #define RPCDBG_FACILITY RPCDBG_SCHED | 28 | #define RPCDBG_FACILITY RPCDBG_SCHED |
| 29 | #endif | 29 | #endif |
| 30 | 30 | ||
| @@ -258,7 +258,7 @@ static int rpc_wait_bit_killable(struct wait_bit_key *key) | |||
| 258 | return 0; | 258 | return 0; |
| 259 | } | 259 | } |
| 260 | 260 | ||
| 261 | #if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS) | 261 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) |
| 262 | static void rpc_task_set_debuginfo(struct rpc_task *task) | 262 | static void rpc_task_set_debuginfo(struct rpc_task *task) |
| 263 | { | 263 | { |
| 264 | static atomic_t rpc_pid; | 264 | static atomic_t rpc_pid; |
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c index 54530490944e..9711a155bc50 100644 --- a/net/sunrpc/stats.c +++ b/net/sunrpc/stats.c | |||
| @@ -116,7 +116,15 @@ EXPORT_SYMBOL_GPL(svc_seq_show); | |||
| 116 | */ | 116 | */ |
| 117 | struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) | 117 | struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) |
| 118 | { | 118 | { |
| 119 | return kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL); | 119 | struct rpc_iostats *stats; |
| 120 | int i; | ||
| 121 | |||
| 122 | stats = kcalloc(clnt->cl_maxproc, sizeof(*stats), GFP_KERNEL); | ||
| 123 | if (stats) { | ||
| 124 | for (i = 0; i < clnt->cl_maxproc; i++) | ||
| 125 | spin_lock_init(&stats[i].om_lock); | ||
| 126 | } | ||
| 127 | return stats; | ||
| 120 | } | 128 | } |
| 121 | EXPORT_SYMBOL_GPL(rpc_alloc_iostats); | 129 | EXPORT_SYMBOL_GPL(rpc_alloc_iostats); |
| 122 | 130 | ||
| @@ -135,20 +143,21 @@ EXPORT_SYMBOL_GPL(rpc_free_iostats); | |||
| 135 | * rpc_count_iostats - tally up per-task stats | 143 | * rpc_count_iostats - tally up per-task stats |
| 136 | * @task: completed rpc_task | 144 | * @task: completed rpc_task |
| 137 | * @stats: array of stat structures | 145 | * @stats: array of stat structures |
| 138 | * | ||
| 139 | * Relies on the caller for serialization. | ||
| 140 | */ | 146 | */ |
| 141 | void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats) | 147 | void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats) |
| 142 | { | 148 | { |
| 143 | struct rpc_rqst *req = task->tk_rqstp; | 149 | struct rpc_rqst *req = task->tk_rqstp; |
| 144 | struct rpc_iostats *op_metrics; | 150 | struct rpc_iostats *op_metrics; |
| 145 | ktime_t delta; | 151 | ktime_t delta, now; |
| 146 | 152 | ||
| 147 | if (!stats || !req) | 153 | if (!stats || !req) |
| 148 | return; | 154 | return; |
| 149 | 155 | ||
| 156 | now = ktime_get(); | ||
| 150 | op_metrics = &stats[task->tk_msg.rpc_proc->p_statidx]; | 157 | op_metrics = &stats[task->tk_msg.rpc_proc->p_statidx]; |
| 151 | 158 | ||
| 159 | spin_lock(&op_metrics->om_lock); | ||
| 160 | |||
| 152 | op_metrics->om_ops++; | 161 | op_metrics->om_ops++; |
| 153 | op_metrics->om_ntrans += req->rq_ntrans; | 162 | op_metrics->om_ntrans += req->rq_ntrans; |
| 154 | op_metrics->om_timeouts += task->tk_timeouts; | 163 | op_metrics->om_timeouts += task->tk_timeouts; |
| @@ -161,8 +170,10 @@ void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats) | |||
| 161 | 170 | ||
| 162 | op_metrics->om_rtt = ktime_add(op_metrics->om_rtt, req->rq_rtt); | 171 | op_metrics->om_rtt = ktime_add(op_metrics->om_rtt, req->rq_rtt); |
| 163 | 172 | ||
| 164 | delta = ktime_sub(ktime_get(), task->tk_start); | 173 | delta = ktime_sub(now, task->tk_start); |
| 165 | op_metrics->om_execute = ktime_add(op_metrics->om_execute, delta); | 174 | op_metrics->om_execute = ktime_add(op_metrics->om_execute, delta); |
| 175 | |||
| 176 | spin_unlock(&op_metrics->om_lock); | ||
| 166 | } | 177 | } |
| 167 | EXPORT_SYMBOL_GPL(rpc_count_iostats); | 178 | EXPORT_SYMBOL_GPL(rpc_count_iostats); |
| 168 | 179 | ||
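rpc_count_iostats() previously relied on the caller for serialization (hence the removed comment). The hunks above make each per-procedure rpc_iostats slot self-locking: rpc_alloc_iostats() initializes an om_lock per entry and the update path takes it around the counter and ktime accumulation, sampling the completion time once before entering the lock. The pattern reduced to a generic per-slot metrics array; the struct and field names below are illustrative.

    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct op_metrics {
            spinlock_t lock;                /* protects the counters below */
            unsigned long ops;
            u64 bytes;
    };

    static struct op_metrics *alloc_metrics(unsigned int nr)
    {
            struct op_metrics *m = kcalloc(nr, sizeof(*m), GFP_KERNEL);
            unsigned int i;

            if (m)
                    for (i = 0; i < nr; i++)
                            spin_lock_init(&m[i].lock);
            return m;
    }

    static void count_op(struct op_metrics *m, unsigned int idx, u64 bytes)
    {
            struct op_metrics *e = &m[idx];

            spin_lock(&e->lock);
            e->ops++;
            e->bytes += bytes;
            spin_unlock(&e->lock);
    }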
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index cd30120de9e4..e37fbed87956 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c | |||
| @@ -97,13 +97,20 @@ init_sunrpc(void) | |||
| 97 | err = register_rpc_pipefs(); | 97 | err = register_rpc_pipefs(); |
| 98 | if (err) | 98 | if (err) |
| 99 | goto out4; | 99 | goto out4; |
| 100 | #ifdef RPC_DEBUG | 100 | |
| 101 | err = sunrpc_debugfs_init(); | ||
| 102 | if (err) | ||
| 103 | goto out5; | ||
| 104 | |||
| 105 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | ||
| 101 | rpc_register_sysctl(); | 106 | rpc_register_sysctl(); |
| 102 | #endif | 107 | #endif |
| 103 | svc_init_xprt_sock(); /* svc sock transport */ | 108 | svc_init_xprt_sock(); /* svc sock transport */ |
| 104 | init_socket_xprt(); /* clnt sock transport */ | 109 | init_socket_xprt(); /* clnt sock transport */ |
| 105 | return 0; | 110 | return 0; |
| 106 | 111 | ||
| 112 | out5: | ||
| 113 | unregister_rpc_pipefs(); | ||
| 107 | out4: | 114 | out4: |
| 108 | unregister_pernet_subsys(&sunrpc_net_ops); | 115 | unregister_pernet_subsys(&sunrpc_net_ops); |
| 109 | out3: | 116 | out3: |
| @@ -120,10 +127,11 @@ cleanup_sunrpc(void) | |||
| 120 | rpcauth_remove_module(); | 127 | rpcauth_remove_module(); |
| 121 | cleanup_socket_xprt(); | 128 | cleanup_socket_xprt(); |
| 122 | svc_cleanup_xprt_sock(); | 129 | svc_cleanup_xprt_sock(); |
| 130 | sunrpc_debugfs_exit(); | ||
| 123 | unregister_rpc_pipefs(); | 131 | unregister_rpc_pipefs(); |
| 124 | rpc_destroy_mempool(); | 132 | rpc_destroy_mempool(); |
| 125 | unregister_pernet_subsys(&sunrpc_net_ops); | 133 | unregister_pernet_subsys(&sunrpc_net_ops); |
| 126 | #ifdef RPC_DEBUG | 134 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 127 | rpc_unregister_sysctl(); | 135 | rpc_unregister_sysctl(); |
| 128 | #endif | 136 | #endif |
| 129 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ | 137 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ |
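init_sunrpc() gains a step that can fail, so the unwind chain grows a matching label: a sunrpc_debugfs_init() failure jumps to out5, which unregisters rpc_pipefs and then falls through to the earlier labels, and cleanup_sunrpc() calls sunrpc_debugfs_exit() in the mirror-image position. The goto-unwind idiom in miniature; every name below is made up and the stubs merely stand in for real setup steps.

    #include <linux/init.h>
    #include <linux/module.h>

    static int step_a(void) { return 0; }   /* e.g. register a filesystem */
    static int step_b(void) { return 0; }   /* e.g. create debugfs entries */
    static void undo_a(void) { }
    static void undo_b(void) { }

    static int __init foo_init(void)
    {
            int err;

            err = step_a();
            if (err)
                    return err;

            err = step_b();
            if (err)
                    goto out_a;

            return 0;

    out_a:
            undo_a();               /* undo only what already succeeded */
            return err;
    }

    static void __exit foo_exit(void)
    {
            /* Tear down in the reverse order of foo_init(). */
            undo_b();
            undo_a();
    }

    module_init(foo_init);
    module_exit(foo_exit);
    MODULE_LICENSE("GPL");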
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index ca8a7958f4e6..2783fd80c229 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
| @@ -28,6 +28,8 @@ | |||
| 28 | #include <linux/sunrpc/clnt.h> | 28 | #include <linux/sunrpc/clnt.h> |
| 29 | #include <linux/sunrpc/bc_xprt.h> | 29 | #include <linux/sunrpc/bc_xprt.h> |
| 30 | 30 | ||
| 31 | #include <trace/events/sunrpc.h> | ||
| 32 | |||
| 31 | #define RPCDBG_FACILITY RPCDBG_SVCDSP | 33 | #define RPCDBG_FACILITY RPCDBG_SVCDSP |
| 32 | 34 | ||
| 33 | static void svc_unregister(const struct svc_serv *serv, struct net *net); | 35 | static void svc_unregister(const struct svc_serv *serv, struct net *net); |
| @@ -1040,7 +1042,7 @@ static void svc_unregister(const struct svc_serv *serv, struct net *net) | |||
| 1040 | /* | 1042 | /* |
| 1041 | * dprintk the given error with the address of the client that caused it. | 1043 | * dprintk the given error with the address of the client that caused it. |
| 1042 | */ | 1044 | */ |
| 1043 | #ifdef RPC_DEBUG | 1045 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 1044 | static __printf(2, 3) | 1046 | static __printf(2, 3) |
| 1045 | void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) | 1047 | void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) |
| 1046 | { | 1048 | { |
| @@ -1314,24 +1316,25 @@ svc_process(struct svc_rqst *rqstp) | |||
| 1314 | rqstp->rq_res.tail[0].iov_base = NULL; | 1316 | rqstp->rq_res.tail[0].iov_base = NULL; |
| 1315 | rqstp->rq_res.tail[0].iov_len = 0; | 1317 | rqstp->rq_res.tail[0].iov_len = 0; |
| 1316 | 1318 | ||
| 1317 | rqstp->rq_xid = svc_getu32(argv); | ||
| 1318 | |||
| 1319 | dir = svc_getnl(argv); | 1319 | dir = svc_getnl(argv); |
| 1320 | if (dir != 0) { | 1320 | if (dir != 0) { |
| 1321 | /* direction != CALL */ | 1321 | /* direction != CALL */ |
| 1322 | svc_printk(rqstp, "bad direction %d, dropping request\n", dir); | 1322 | svc_printk(rqstp, "bad direction %d, dropping request\n", dir); |
| 1323 | serv->sv_stats->rpcbadfmt++; | 1323 | serv->sv_stats->rpcbadfmt++; |
| 1324 | svc_drop(rqstp); | 1324 | goto out_drop; |
| 1325 | return 0; | ||
| 1326 | } | 1325 | } |
| 1327 | 1326 | ||
| 1328 | /* Returns 1 for send, 0 for drop */ | 1327 | /* Returns 1 for send, 0 for drop */ |
| 1329 | if (svc_process_common(rqstp, argv, resv)) | 1328 | if (likely(svc_process_common(rqstp, argv, resv))) { |
| 1330 | return svc_send(rqstp); | 1329 | int ret = svc_send(rqstp); |
| 1331 | else { | 1330 | |
| 1332 | svc_drop(rqstp); | 1331 | trace_svc_process(rqstp, ret); |
| 1333 | return 0; | 1332 | return ret; |
| 1334 | } | 1333 | } |
| 1334 | out_drop: | ||
| 1335 | trace_svc_process(rqstp, 0); | ||
| 1336 | svc_drop(rqstp); | ||
| 1337 | return 0; | ||
| 1335 | } | 1338 | } |
| 1336 | 1339 | ||
| 1337 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) | 1340 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) |
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index c179ca2a5aa4..bbb3b044b877 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/sunrpc/svcsock.h> | 15 | #include <linux/sunrpc/svcsock.h> |
| 16 | #include <linux/sunrpc/xprt.h> | 16 | #include <linux/sunrpc/xprt.h> |
| 17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
| 18 | #include <trace/events/sunrpc.h> | ||
| 18 | 19 | ||
| 19 | #define RPCDBG_FACILITY RPCDBG_SVCXPRT | 20 | #define RPCDBG_FACILITY RPCDBG_SVCXPRT |
| 20 | 21 | ||
| @@ -773,35 +774,43 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
| 773 | 774 | ||
| 774 | err = svc_alloc_arg(rqstp); | 775 | err = svc_alloc_arg(rqstp); |
| 775 | if (err) | 776 | if (err) |
| 776 | return err; | 777 | goto out; |
| 777 | 778 | ||
| 778 | try_to_freeze(); | 779 | try_to_freeze(); |
| 779 | cond_resched(); | 780 | cond_resched(); |
| 781 | err = -EINTR; | ||
| 780 | if (signalled() || kthread_should_stop()) | 782 | if (signalled() || kthread_should_stop()) |
| 781 | return -EINTR; | 783 | goto out; |
| 782 | 784 | ||
| 783 | xprt = svc_get_next_xprt(rqstp, timeout); | 785 | xprt = svc_get_next_xprt(rqstp, timeout); |
| 784 | if (IS_ERR(xprt)) | 786 | if (IS_ERR(xprt)) { |
| 785 | return PTR_ERR(xprt); | 787 | err = PTR_ERR(xprt); |
| 788 | goto out; | ||
| 789 | } | ||
| 786 | 790 | ||
| 787 | len = svc_handle_xprt(rqstp, xprt); | 791 | len = svc_handle_xprt(rqstp, xprt); |
| 788 | 792 | ||
| 789 | /* No data, incomplete (TCP) read, or accept() */ | 793 | /* No data, incomplete (TCP) read, or accept() */ |
| 794 | err = -EAGAIN; | ||
| 790 | if (len <= 0) | 795 | if (len <= 0) |
| 791 | goto out; | 796 | goto out_release; |
| 792 | 797 | ||
| 793 | clear_bit(XPT_OLD, &xprt->xpt_flags); | 798 | clear_bit(XPT_OLD, &xprt->xpt_flags); |
| 794 | 799 | ||
| 795 | rqstp->rq_secure = xprt->xpt_ops->xpo_secure_port(rqstp); | 800 | rqstp->rq_secure = xprt->xpt_ops->xpo_secure_port(rqstp); |
| 796 | rqstp->rq_chandle.defer = svc_defer; | 801 | rqstp->rq_chandle.defer = svc_defer; |
| 802 | rqstp->rq_xid = svc_getu32(&rqstp->rq_arg.head[0]); | ||
| 797 | 803 | ||
| 798 | if (serv->sv_stats) | 804 | if (serv->sv_stats) |
| 799 | serv->sv_stats->netcnt++; | 805 | serv->sv_stats->netcnt++; |
| 806 | trace_svc_recv(rqstp, len); | ||
| 800 | return len; | 807 | return len; |
| 801 | out: | 808 | out_release: |
| 802 | rqstp->rq_res.len = 0; | 809 | rqstp->rq_res.len = 0; |
| 803 | svc_xprt_release(rqstp); | 810 | svc_xprt_release(rqstp); |
| 804 | return -EAGAIN; | 811 | out: |
| 812 | trace_svc_recv(rqstp, err); | ||
| 813 | return err; | ||
| 805 | } | 814 | } |
| 806 | EXPORT_SYMBOL_GPL(svc_recv); | 815 | EXPORT_SYMBOL_GPL(svc_recv); |
| 807 | 816 | ||
| @@ -821,12 +830,12 @@ EXPORT_SYMBOL_GPL(svc_drop); | |||
| 821 | int svc_send(struct svc_rqst *rqstp) | 830 | int svc_send(struct svc_rqst *rqstp) |
| 822 | { | 831 | { |
| 823 | struct svc_xprt *xprt; | 832 | struct svc_xprt *xprt; |
| 824 | int len; | 833 | int len = -EFAULT; |
| 825 | struct xdr_buf *xb; | 834 | struct xdr_buf *xb; |
| 826 | 835 | ||
| 827 | xprt = rqstp->rq_xprt; | 836 | xprt = rqstp->rq_xprt; |
| 828 | if (!xprt) | 837 | if (!xprt) |
| 829 | return -EFAULT; | 838 | goto out; |
| 830 | 839 | ||
| 831 | /* release the receive skb before sending the reply */ | 840 | /* release the receive skb before sending the reply */ |
| 832 | rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp); | 841 | rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp); |
| @@ -849,7 +858,9 @@ int svc_send(struct svc_rqst *rqstp) | |||
| 849 | svc_xprt_release(rqstp); | 858 | svc_xprt_release(rqstp); |
| 850 | 859 | ||
| 851 | if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN) | 860 | if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN) |
| 852 | return 0; | 861 | len = 0; |
| 862 | out: | ||
| 863 | trace_svc_send(rqstp, len); | ||
| 853 | return len; | 864 | return len; |
| 854 | } | 865 | } |
| 855 | 866 | ||
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 3f959c681885..f9c052d508f0 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
| @@ -1019,17 +1019,12 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp) | |||
| 1019 | xid = *p++; | 1019 | xid = *p++; |
| 1020 | calldir = *p; | 1020 | calldir = *p; |
| 1021 | 1021 | ||
| 1022 | if (bc_xprt) | 1022 | if (!bc_xprt) |
| 1023 | req = xprt_lookup_rqst(bc_xprt, xid); | ||
| 1024 | |||
| 1025 | if (!req) { | ||
| 1026 | printk(KERN_NOTICE | ||
| 1027 | "%s: Got unrecognized reply: " | ||
| 1028 | "calldir 0x%x xpt_bc_xprt %p xid %08x\n", | ||
| 1029 | __func__, ntohl(calldir), | ||
| 1030 | bc_xprt, ntohl(xid)); | ||
| 1031 | return -EAGAIN; | 1023 | return -EAGAIN; |
| 1032 | } | 1024 | spin_lock_bh(&bc_xprt->transport_lock); |
| 1025 | req = xprt_lookup_rqst(bc_xprt, xid); | ||
| 1026 | if (!req) | ||
| 1027 | goto unlock_notfound; | ||
| 1033 | 1028 | ||
| 1034 | memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf)); | 1029 | memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf)); |
| 1035 | /* | 1030 | /* |
| @@ -1040,11 +1035,21 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp) | |||
| 1040 | dst = &req->rq_private_buf.head[0]; | 1035 | dst = &req->rq_private_buf.head[0]; |
| 1041 | src = &rqstp->rq_arg.head[0]; | 1036 | src = &rqstp->rq_arg.head[0]; |
| 1042 | if (dst->iov_len < src->iov_len) | 1037 | if (dst->iov_len < src->iov_len) |
| 1043 | return -EAGAIN; /* whatever; just giving up. */ | 1038 | goto unlock_eagain; /* whatever; just giving up. */ |
| 1044 | memcpy(dst->iov_base, src->iov_base, src->iov_len); | 1039 | memcpy(dst->iov_base, src->iov_base, src->iov_len); |
| 1045 | xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len); | 1040 | xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len); |
| 1046 | rqstp->rq_arg.len = 0; | 1041 | rqstp->rq_arg.len = 0; |
| 1042 | spin_unlock_bh(&bc_xprt->transport_lock); | ||
| 1047 | return 0; | 1043 | return 0; |
| 1044 | unlock_notfound: | ||
| 1045 | printk(KERN_NOTICE | ||
| 1046 | "%s: Got unrecognized reply: " | ||
| 1047 | "calldir 0x%x xpt_bc_xprt %p xid %08x\n", | ||
| 1048 | __func__, ntohl(calldir), | ||
| 1049 | bc_xprt, ntohl(xid)); | ||
| 1050 | unlock_eagain: | ||
| 1051 | spin_unlock_bh(&bc_xprt->transport_lock); | ||
| 1052 | return -EAGAIN; | ||
| 1048 | } | 1053 | } |
| 1049 | 1054 | ||
| 1050 | static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len) | 1055 | static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len) |
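receive_cb_reply() now holds bc_xprt->transport_lock, with bottom halves disabled, across the xprt_lookup_rqst()/xprt_complete_rqst() pair instead of looking the request up unlocked; the recv list those helpers walk and update is protected by the transport lock, and without it the backchannel request could be completed or reused concurrently while the reply data is being copied. The locked lookup-then-complete shape in generic form (the types below are illustrative):

    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct pending {
            u32 id;
            struct list_head node;
    };

    struct channel {
            spinlock_t lock;                /* protects @pending */
            struct list_head pending;
    };

    /* Find the request matching @id and detach it, all under the lock. */
    static int complete_reply(struct channel *ch, u32 id)
    {
            struct pending *p, *found = NULL;

            spin_lock_bh(&ch->lock);
            list_for_each_entry(p, &ch->pending, node) {
                    if (p->id == id) {
                            found = p;
                            break;
                    }
            }
            if (!found) {
                    spin_unlock_bh(&ch->lock);
                    return -EAGAIN;         /* unrecognized reply: drop it */
            }
            list_del(&found->node);         /* hand off while still protected */
            spin_unlock_bh(&ch->lock);

            /* ... deliver the reply to the waiter that owns "found" ... */
            return 0;
    }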
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c index c99c58e2ee66..887f0183b4c6 100644 --- a/net/sunrpc/sysctl.c +++ b/net/sunrpc/sysctl.c | |||
| @@ -37,7 +37,7 @@ EXPORT_SYMBOL_GPL(nfsd_debug); | |||
| 37 | unsigned int nlm_debug; | 37 | unsigned int nlm_debug; |
| 38 | EXPORT_SYMBOL_GPL(nlm_debug); | 38 | EXPORT_SYMBOL_GPL(nlm_debug); |
| 39 | 39 | ||
| 40 | #ifdef RPC_DEBUG | 40 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 41 | 41 | ||
| 42 | static struct ctl_table_header *sunrpc_table_header; | 42 | static struct ctl_table_header *sunrpc_table_header; |
| 43 | static struct ctl_table sunrpc_table[]; | 43 | static struct ctl_table sunrpc_table[]; |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 56e4e150e80e..ebbefad21a37 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
| @@ -49,13 +49,15 @@ | |||
| 49 | #include <linux/sunrpc/metrics.h> | 49 | #include <linux/sunrpc/metrics.h> |
| 50 | #include <linux/sunrpc/bc_xprt.h> | 50 | #include <linux/sunrpc/bc_xprt.h> |
| 51 | 51 | ||
| 52 | #include <trace/events/sunrpc.h> | ||
| 53 | |||
| 52 | #include "sunrpc.h" | 54 | #include "sunrpc.h" |
| 53 | 55 | ||
| 54 | /* | 56 | /* |
| 55 | * Local variables | 57 | * Local variables |
| 56 | */ | 58 | */ |
| 57 | 59 | ||
| 58 | #ifdef RPC_DEBUG | 60 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 59 | # define RPCDBG_FACILITY RPCDBG_XPRT | 61 | # define RPCDBG_FACILITY RPCDBG_XPRT |
| 60 | #endif | 62 | #endif |
| 61 | 63 | ||
| @@ -772,11 +774,14 @@ struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid) | |||
| 772 | struct rpc_rqst *entry; | 774 | struct rpc_rqst *entry; |
| 773 | 775 | ||
| 774 | list_for_each_entry(entry, &xprt->recv, rq_list) | 776 | list_for_each_entry(entry, &xprt->recv, rq_list) |
| 775 | if (entry->rq_xid == xid) | 777 | if (entry->rq_xid == xid) { |
| 778 | trace_xprt_lookup_rqst(xprt, xid, 0); | ||
| 776 | return entry; | 779 | return entry; |
| 780 | } | ||
| 777 | 781 | ||
| 778 | dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n", | 782 | dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n", |
| 779 | ntohl(xid)); | 783 | ntohl(xid)); |
| 784 | trace_xprt_lookup_rqst(xprt, xid, -ENOENT); | ||
| 780 | xprt->stat.bad_xids++; | 785 | xprt->stat.bad_xids++; |
| 781 | return NULL; | 786 | return NULL; |
| 782 | } | 787 | } |
| @@ -810,6 +815,7 @@ void xprt_complete_rqst(struct rpc_task *task, int copied) | |||
| 810 | 815 | ||
| 811 | dprintk("RPC: %5u xid %08x complete (%d bytes received)\n", | 816 | dprintk("RPC: %5u xid %08x complete (%d bytes received)\n", |
| 812 | task->tk_pid, ntohl(req->rq_xid), copied); | 817 | task->tk_pid, ntohl(req->rq_xid), copied); |
| 818 | trace_xprt_complete_rqst(xprt, req->rq_xid, copied); | ||
| 813 | 819 | ||
| 814 | xprt->stat.recvs++; | 820 | xprt->stat.recvs++; |
| 815 | req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime); | 821 | req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime); |
| @@ -926,6 +932,7 @@ void xprt_transmit(struct rpc_task *task) | |||
| 926 | 932 | ||
| 927 | req->rq_xtime = ktime_get(); | 933 | req->rq_xtime = ktime_get(); |
| 928 | status = xprt->ops->send_request(task); | 934 | status = xprt->ops->send_request(task); |
| 935 | trace_xprt_transmit(xprt, req->rq_xid, status); | ||
| 929 | if (status != 0) { | 936 | if (status != 0) { |
| 930 | task->tk_status = status; | 937 | task->tk_status = status; |
| 931 | return; | 938 | return; |
| @@ -1296,6 +1303,7 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net) | |||
| 1296 | */ | 1303 | */ |
| 1297 | struct rpc_xprt *xprt_create_transport(struct xprt_create *args) | 1304 | struct rpc_xprt *xprt_create_transport(struct xprt_create *args) |
| 1298 | { | 1305 | { |
| 1306 | int err; | ||
| 1299 | struct rpc_xprt *xprt; | 1307 | struct rpc_xprt *xprt; |
| 1300 | struct xprt_class *t; | 1308 | struct xprt_class *t; |
| 1301 | 1309 | ||
| @@ -1336,6 +1344,12 @@ found: | |||
| 1336 | return ERR_PTR(-ENOMEM); | 1344 | return ERR_PTR(-ENOMEM); |
| 1337 | } | 1345 | } |
| 1338 | 1346 | ||
| 1347 | err = rpc_xprt_debugfs_register(xprt); | ||
| 1348 | if (err) { | ||
| 1349 | xprt_destroy(xprt); | ||
| 1350 | return ERR_PTR(err); | ||
| 1351 | } | ||
| 1352 | |||
| 1339 | dprintk("RPC: created transport %p with %u slots\n", xprt, | 1353 | dprintk("RPC: created transport %p with %u slots\n", xprt, |
| 1340 | xprt->max_reqs); | 1354 | xprt->max_reqs); |
| 1341 | out: | 1355 | out: |
| @@ -1352,6 +1366,7 @@ static void xprt_destroy(struct rpc_xprt *xprt) | |||
| 1352 | dprintk("RPC: destroying transport %p\n", xprt); | 1366 | dprintk("RPC: destroying transport %p\n", xprt); |
| 1353 | del_timer_sync(&xprt->timer); | 1367 | del_timer_sync(&xprt->timer); |
| 1354 | 1368 | ||
| 1369 | rpc_xprt_debugfs_unregister(xprt); | ||
| 1355 | rpc_destroy_wait_queue(&xprt->binding); | 1370 | rpc_destroy_wait_queue(&xprt->binding); |
| 1356 | rpc_destroy_wait_queue(&xprt->pending); | 1371 | rpc_destroy_wait_queue(&xprt->pending); |
| 1357 | rpc_destroy_wait_queue(&xprt->sending); | 1372 | rpc_destroy_wait_queue(&xprt->sending); |
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 6166c985fe24..df01d124936c 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c | |||
| @@ -49,11 +49,11 @@ | |||
| 49 | 49 | ||
| 50 | #include <linux/highmem.h> | 50 | #include <linux/highmem.h> |
| 51 | 51 | ||
| 52 | #ifdef RPC_DEBUG | 52 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 53 | # define RPCDBG_FACILITY RPCDBG_TRANS | 53 | # define RPCDBG_FACILITY RPCDBG_TRANS |
| 54 | #endif | 54 | #endif |
| 55 | 55 | ||
| 56 | #ifdef RPC_DEBUG | 56 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 57 | static const char transfertypes[][12] = { | 57 | static const char transfertypes[][12] = { |
| 58 | "pure inline", /* no chunks */ | 58 | "pure inline", /* no chunks */ |
| 59 | " read chunk", /* some argument via rdma read */ | 59 | " read chunk", /* some argument via rdma read */ |
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 6a4615dd0261..bbd6155d3e34 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c | |||
| @@ -55,7 +55,7 @@ | |||
| 55 | 55 | ||
| 56 | #include "xprt_rdma.h" | 56 | #include "xprt_rdma.h" |
| 57 | 57 | ||
| 58 | #ifdef RPC_DEBUG | 58 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 59 | # define RPCDBG_FACILITY RPCDBG_TRANS | 59 | # define RPCDBG_FACILITY RPCDBG_TRANS |
| 60 | #endif | 60 | #endif |
| 61 | 61 | ||
| @@ -73,9 +73,9 @@ static unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE; | |||
| 73 | static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE; | 73 | static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE; |
| 74 | static unsigned int xprt_rdma_inline_write_padding; | 74 | static unsigned int xprt_rdma_inline_write_padding; |
| 75 | static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR; | 75 | static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR; |
| 76 | int xprt_rdma_pad_optimize = 0; | 76 | int xprt_rdma_pad_optimize = 1; |
| 77 | 77 | ||
| 78 | #ifdef RPC_DEBUG | 78 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 79 | 79 | ||
| 80 | static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE; | 80 | static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE; |
| 81 | static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE; | 81 | static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE; |
| @@ -599,7 +599,7 @@ xprt_rdma_send_request(struct rpc_task *task) | |||
| 599 | 599 | ||
| 600 | if (req->rl_niovs == 0) | 600 | if (req->rl_niovs == 0) |
| 601 | rc = rpcrdma_marshal_req(rqst); | 601 | rc = rpcrdma_marshal_req(rqst); |
| 602 | else if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR) | 602 | else if (r_xprt->rx_ia.ri_memreg_strategy != RPCRDMA_ALLPHYSICAL) |
| 603 | rc = rpcrdma_marshal_chunks(rqst, 0); | 603 | rc = rpcrdma_marshal_chunks(rqst, 0); |
| 604 | if (rc < 0) | 604 | if (rc < 0) |
| 605 | goto failed_marshal; | 605 | goto failed_marshal; |
| @@ -705,7 +705,7 @@ static void __exit xprt_rdma_cleanup(void) | |||
| 705 | int rc; | 705 | int rc; |
| 706 | 706 | ||
| 707 | dprintk("RPCRDMA Module Removed, deregister RPC RDMA transport\n"); | 707 | dprintk("RPCRDMA Module Removed, deregister RPC RDMA transport\n"); |
| 708 | #ifdef RPC_DEBUG | 708 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 709 | if (sunrpc_table_header) { | 709 | if (sunrpc_table_header) { |
| 710 | unregister_sysctl_table(sunrpc_table_header); | 710 | unregister_sysctl_table(sunrpc_table_header); |
| 711 | sunrpc_table_header = NULL; | 711 | sunrpc_table_header = NULL; |
| @@ -736,7 +736,7 @@ static int __init xprt_rdma_init(void) | |||
| 736 | dprintk("\tPadding %d\n\tMemreg %d\n", | 736 | dprintk("\tPadding %d\n\tMemreg %d\n", |
| 737 | xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy); | 737 | xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy); |
| 738 | 738 | ||
| 739 | #ifdef RPC_DEBUG | 739 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 740 | if (!sunrpc_table_header) | 740 | if (!sunrpc_table_header) |
| 741 | sunrpc_table_header = register_sysctl_table(sunrpc_table); | 741 | sunrpc_table_header = register_sysctl_table(sunrpc_table); |
| 742 | #endif | 742 | #endif |
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 61c41298b4ea..c98e40643910 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
| @@ -57,11 +57,12 @@ | |||
| 57 | * Globals/Macros | 57 | * Globals/Macros |
| 58 | */ | 58 | */ |
| 59 | 59 | ||
| 60 | #ifdef RPC_DEBUG | 60 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 61 | # define RPCDBG_FACILITY RPCDBG_TRANS | 61 | # define RPCDBG_FACILITY RPCDBG_TRANS |
| 62 | #endif | 62 | #endif |
| 63 | 63 | ||
| 64 | static void rpcrdma_reset_frmrs(struct rpcrdma_ia *); | 64 | static void rpcrdma_reset_frmrs(struct rpcrdma_ia *); |
| 65 | static void rpcrdma_reset_fmrs(struct rpcrdma_ia *); | ||
| 65 | 66 | ||
| 66 | /* | 67 | /* |
| 67 | * internal functions | 68 | * internal functions |
| @@ -105,13 +106,51 @@ rpcrdma_run_tasklet(unsigned long data) | |||
| 105 | 106 | ||
| 106 | static DECLARE_TASKLET(rpcrdma_tasklet_g, rpcrdma_run_tasklet, 0UL); | 107 | static DECLARE_TASKLET(rpcrdma_tasklet_g, rpcrdma_run_tasklet, 0UL); |
| 107 | 108 | ||
| 109 | static const char * const async_event[] = { | ||
| 110 | "CQ error", | ||
| 111 | "QP fatal error", | ||
| 112 | "QP request error", | ||
| 113 | "QP access error", | ||
| 114 | "communication established", | ||
| 115 | "send queue drained", | ||
| 116 | "path migration successful", | ||
| 117 | "path mig error", | ||
| 118 | "device fatal error", | ||
| 119 | "port active", | ||
| 120 | "port error", | ||
| 121 | "LID change", | ||
| 122 | "P_key change", | ||
| 123 | "SM change", | ||
| 124 | "SRQ error", | ||
| 125 | "SRQ limit reached", | ||
| 126 | "last WQE reached", | ||
| 127 | "client reregister", | ||
| 128 | "GID change", | ||
| 129 | }; | ||
| 130 | |||
| 131 | #define ASYNC_MSG(status) \ | ||
| 132 | ((status) < ARRAY_SIZE(async_event) ? \ | ||
| 133 | async_event[(status)] : "unknown async error") | ||
| 134 | |||
| 135 | static void | ||
| 136 | rpcrdma_schedule_tasklet(struct list_head *sched_list) | ||
| 137 | { | ||
| 138 | unsigned long flags; | ||
| 139 | |||
| 140 | spin_lock_irqsave(&rpcrdma_tk_lock_g, flags); | ||
| 141 | list_splice_tail(sched_list, &rpcrdma_tasklets_g); | ||
| 142 | spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags); | ||
| 143 | tasklet_schedule(&rpcrdma_tasklet_g); | ||
| 144 | } | ||
| 145 | |||
| 108 | static void | 146 | static void |
| 109 | rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context) | 147 | rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context) |
| 110 | { | 148 | { |
| 111 | struct rpcrdma_ep *ep = context; | 149 | struct rpcrdma_ep *ep = context; |
| 112 | 150 | ||
| 113 | dprintk("RPC: %s: QP error %X on device %s ep %p\n", | 151 | pr_err("RPC: %s: %s on device %s ep %p\n", |
| 114 | __func__, event->event, event->device->name, context); | 152 | __func__, ASYNC_MSG(event->event), |
| 153 | event->device->name, context); | ||
| 115 | if (ep->rep_connected == 1) { | 154 | if (ep->rep_connected == 1) { |
| 116 | ep->rep_connected = -EIO; | 155 | ep->rep_connected = -EIO; |
| 117 | ep->rep_func(ep); | 156 | ep->rep_func(ep); |
| @@ -124,8 +163,9 @@ rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context) | |||
| 124 | { | 163 | { |
| 125 | struct rpcrdma_ep *ep = context; | 164 | struct rpcrdma_ep *ep = context; |
| 126 | 165 | ||
| 127 | dprintk("RPC: %s: CQ error %X on device %s ep %p\n", | 166 | pr_err("RPC: %s: %s on device %s ep %p\n", |
| 128 | __func__, event->event, event->device->name, context); | 167 | __func__, ASYNC_MSG(event->event), |
| 168 | event->device->name, context); | ||
| 129 | if (ep->rep_connected == 1) { | 169 | if (ep->rep_connected == 1) { |
| 130 | ep->rep_connected = -EIO; | 170 | ep->rep_connected = -EIO; |
| 131 | ep->rep_func(ep); | 171 | ep->rep_func(ep); |
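ASYNC_MSG() above is the usual bounds-checked string-table idiom: the event code is compared against ARRAY_SIZE() before it is used as an index, so an unexpected or future ib_event_type value degrades to "unknown async error" instead of reading past the table. A self-contained C sketch of the idiom (table contents and names here are illustrative):

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static const char * const async_msg[] = {
	"CQ error",
	"QP fatal error",
	"QP request error",
};

/* Bounded lookup: anything beyond the table maps to a fallback string. */
static const char *async_msg_str(unsigned int status)
{
	return status < ARRAY_SIZE(async_msg) ?
		async_msg[status] : "unknown async error";
}

int main(void)
{
	printf("%s\n", async_msg_str(1));	/* "QP fatal error" */
	printf("%s\n", async_msg_str(42));	/* "unknown async error" */
	return 0;
}
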
| @@ -243,7 +283,6 @@ rpcrdma_recvcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep) | |||
| 243 | struct list_head sched_list; | 283 | struct list_head sched_list; |
| 244 | struct ib_wc *wcs; | 284 | struct ib_wc *wcs; |
| 245 | int budget, count, rc; | 285 | int budget, count, rc; |
| 246 | unsigned long flags; | ||
| 247 | 286 | ||
| 248 | INIT_LIST_HEAD(&sched_list); | 287 | INIT_LIST_HEAD(&sched_list); |
| 249 | budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE; | 288 | budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE; |
| @@ -261,10 +300,7 @@ rpcrdma_recvcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep) | |||
| 261 | rc = 0; | 300 | rc = 0; |
| 262 | 301 | ||
| 263 | out_schedule: | 302 | out_schedule: |
| 264 | spin_lock_irqsave(&rpcrdma_tk_lock_g, flags); | 303 | rpcrdma_schedule_tasklet(&sched_list); |
| 265 | list_splice_tail(&sched_list, &rpcrdma_tasklets_g); | ||
| 266 | spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags); | ||
| 267 | tasklet_schedule(&rpcrdma_tasklet_g); | ||
| 268 | return rc; | 304 | return rc; |
| 269 | } | 305 | } |
| 270 | 306 | ||
| @@ -309,11 +345,18 @@ rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context) | |||
| 309 | static void | 345 | static void |
| 310 | rpcrdma_flush_cqs(struct rpcrdma_ep *ep) | 346 | rpcrdma_flush_cqs(struct rpcrdma_ep *ep) |
| 311 | { | 347 | { |
| 312 | rpcrdma_recvcq_upcall(ep->rep_attr.recv_cq, ep); | 348 | struct ib_wc wc; |
| 313 | rpcrdma_sendcq_upcall(ep->rep_attr.send_cq, ep); | 349 | LIST_HEAD(sched_list); |
| 350 | |||
| 351 | while (ib_poll_cq(ep->rep_attr.recv_cq, 1, &wc) > 0) | ||
| 352 | rpcrdma_recvcq_process_wc(&wc, &sched_list); | ||
| 353 | if (!list_empty(&sched_list)) | ||
| 354 | rpcrdma_schedule_tasklet(&sched_list); | ||
| 355 | while (ib_poll_cq(ep->rep_attr.send_cq, 1, &wc) > 0) | ||
| 356 | rpcrdma_sendcq_process_wc(&wc); | ||
| 314 | } | 357 | } |
| 315 | 358 | ||
| 316 | #ifdef RPC_DEBUG | 359 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 317 | static const char * const conn[] = { | 360 | static const char * const conn[] = { |
| 318 | "address resolved", | 361 | "address resolved", |
| 319 | "address error", | 362 | "address error", |
| @@ -344,7 +387,7 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event) | |||
| 344 | struct rpcrdma_xprt *xprt = id->context; | 387 | struct rpcrdma_xprt *xprt = id->context; |
| 345 | struct rpcrdma_ia *ia = &xprt->rx_ia; | 388 | struct rpcrdma_ia *ia = &xprt->rx_ia; |
| 346 | struct rpcrdma_ep *ep = &xprt->rx_ep; | 389 | struct rpcrdma_ep *ep = &xprt->rx_ep; |
| 347 | #ifdef RPC_DEBUG | 390 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 348 | struct sockaddr_in *addr = (struct sockaddr_in *) &ep->rep_remote_addr; | 391 | struct sockaddr_in *addr = (struct sockaddr_in *) &ep->rep_remote_addr; |
| 349 | #endif | 392 | #endif |
| 350 | struct ib_qp_attr attr; | 393 | struct ib_qp_attr attr; |
| @@ -408,7 +451,7 @@ connected: | |||
| 408 | break; | 451 | break; |
| 409 | } | 452 | } |
| 410 | 453 | ||
| 411 | #ifdef RPC_DEBUG | 454 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 412 | if (connstate == 1) { | 455 | if (connstate == 1) { |
| 413 | int ird = attr.max_dest_rd_atomic; | 456 | int ird = attr.max_dest_rd_atomic; |
| 414 | int tird = ep->rep_remote_cma.responder_resources; | 457 | int tird = ep->rep_remote_cma.responder_resources; |
| @@ -733,7 +776,9 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, | |||
| 733 | 776 | ||
| 734 | /* set trigger for requesting send completion */ | 777 | /* set trigger for requesting send completion */ |
| 735 | ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1; | 778 | ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1; |
| 736 | if (ep->rep_cqinit <= 2) | 779 | if (ep->rep_cqinit > RPCRDMA_MAX_UNSIGNALED_SENDS) |
| 780 | ep->rep_cqinit = RPCRDMA_MAX_UNSIGNALED_SENDS; | ||
| 781 | else if (ep->rep_cqinit <= 2) | ||
| 737 | ep->rep_cqinit = 0; | 782 | ep->rep_cqinit = 0; |
| 738 | INIT_CQCOUNT(ep); | 783 | INIT_CQCOUNT(ep); |
| 739 | ep->rep_ia = ia; | 784 | ep->rep_ia = ia; |
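The reworked initialization above caps how many SENDs may complete silently before a signaled completion is forced. A small illustrative helper (not part of the patch) that mirrors the clamp, with a few worked values:

#define RPCRDMA_MAX_UNSIGNALED_SENDS	(32)

/* Mirrors the clamp above: start from half the send queue, cap it at
 * RPCRDMA_MAX_UNSIGNALED_SENDS, and signal every send for tiny queues.
 */
static int example_cqinit(int max_send_wr)
{
	int cqinit = max_send_wr / 2 - 1;

	if (cqinit > RPCRDMA_MAX_UNSIGNALED_SENDS)
		cqinit = RPCRDMA_MAX_UNSIGNALED_SENDS;
	else if (cqinit <= 2)
		cqinit = 0;
	return cqinit;	/* 128 -> 32, 32 -> 15, 8 -> 3, 6 -> 0 */
}
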
| @@ -866,8 +911,19 @@ retry: | |||
| 866 | rpcrdma_ep_disconnect(ep, ia); | 911 | rpcrdma_ep_disconnect(ep, ia); |
| 867 | rpcrdma_flush_cqs(ep); | 912 | rpcrdma_flush_cqs(ep); |
| 868 | 913 | ||
| 869 | if (ia->ri_memreg_strategy == RPCRDMA_FRMR) | 914 | switch (ia->ri_memreg_strategy) { |
| 915 | case RPCRDMA_FRMR: | ||
| 870 | rpcrdma_reset_frmrs(ia); | 916 | rpcrdma_reset_frmrs(ia); |
| 917 | break; | ||
| 918 | case RPCRDMA_MTHCAFMR: | ||
| 919 | rpcrdma_reset_fmrs(ia); | ||
| 920 | break; | ||
| 921 | case RPCRDMA_ALLPHYSICAL: | ||
| 922 | break; | ||
| 923 | default: | ||
| 924 | rc = -EIO; | ||
| 925 | goto out; | ||
| 926 | } | ||
| 871 | 927 | ||
| 872 | xprt = container_of(ia, struct rpcrdma_xprt, rx_ia); | 928 | xprt = container_of(ia, struct rpcrdma_xprt, rx_ia); |
| 873 | id = rpcrdma_create_id(xprt, ia, | 929 | id = rpcrdma_create_id(xprt, ia, |
| @@ -1287,6 +1343,34 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf) | |||
| 1287 | kfree(buf->rb_pool); | 1343 | kfree(buf->rb_pool); |
| 1288 | } | 1344 | } |
| 1289 | 1345 | ||
| 1346 | /* After a disconnect, unmap all FMRs. | ||
| 1347 | * | ||
| 1348 | * This is invoked only in the transport connect worker in order | ||
| 1349 | * to serialize with rpcrdma_register_fmr_external(). | ||
| 1350 | */ | ||
| 1351 | static void | ||
| 1352 | rpcrdma_reset_fmrs(struct rpcrdma_ia *ia) | ||
| 1353 | { | ||
| 1354 | struct rpcrdma_xprt *r_xprt = | ||
| 1355 | container_of(ia, struct rpcrdma_xprt, rx_ia); | ||
| 1356 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; | ||
| 1357 | struct list_head *pos; | ||
| 1358 | struct rpcrdma_mw *r; | ||
| 1359 | LIST_HEAD(l); | ||
| 1360 | int rc; | ||
| 1361 | |||
| 1362 | list_for_each(pos, &buf->rb_all) { | ||
| 1363 | r = list_entry(pos, struct rpcrdma_mw, mw_all); | ||
| 1364 | |||
| 1365 | INIT_LIST_HEAD(&l); | ||
| 1366 | list_add(&r->r.fmr->list, &l); | ||
| 1367 | rc = ib_unmap_fmr(&l); | ||
| 1368 | if (rc) | ||
| 1369 | dprintk("RPC: %s: ib_unmap_fmr failed %i\n", | ||
| 1370 | __func__, rc); | ||
| 1371 | } | ||
| 1372 | } | ||
| 1373 | |||
| 1290 | /* After a disconnect, a flushed FAST_REG_MR can leave an FRMR in | 1374 | /* After a disconnect, a flushed FAST_REG_MR can leave an FRMR in |
| 1291 | * an unusable state. Find FRMRs in this state and dereg / reg | 1375 | * an unusable state. Find FRMRs in this state and dereg / reg |
| 1292 | * each. FRMRs that are VALID and attached to an rpcrdma_req are | 1376 | * each. FRMRs that are VALID and attached to an rpcrdma_req are |
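rpcrdma_reset_fmrs() above unmaps each FMR through a one-entry list, which keeps error reporting per-MR. Because ib_unmap_fmr() accepts an arbitrary list, a batched variant is also possible; the sketch below assumes the same rpcrdma_mw/rb_all layout as the code above and is an alternative, not what the patch does:

/* Alternative sketch: collect every FMR on the buffer's rb_all list and
 * unmap them with a single ib_unmap_fmr() call.  One failure then covers
 * the whole batch, which is the trade-off against the per-MR loop above.
 */
static void example_unmap_all_fmrs(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;
	LIST_HEAD(unmap_list);
	int rc;

	list_for_each_entry(r, &buf->rb_all, mw_all)
		list_add_tail(&r->r.fmr->list, &unmap_list);

	rc = ib_unmap_fmr(&unmap_list);
	if (rc)
		pr_err("RPC: %s: ib_unmap_fmr failed %i\n", __func__, rc);
}
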
| @@ -1918,10 +2002,10 @@ rpcrdma_register_external(struct rpcrdma_mr_seg *seg, | |||
| 1918 | break; | 2002 | break; |
| 1919 | 2003 | ||
| 1920 | default: | 2004 | default: |
| 1921 | return -1; | 2005 | return -EIO; |
| 1922 | } | 2006 | } |
| 1923 | if (rc) | 2007 | if (rc) |
| 1924 | return -1; | 2008 | return rc; |
| 1925 | 2009 | ||
| 1926 | return nsegs; | 2010 | return nsegs; |
| 1927 | } | 2011 | } |
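With rpcrdma_register_external() now returning a negative errno instead of a bare -1, a caller can pass the code straight up rather than inventing one. A caller-side sketch; only the first parameter is shown in the hunk header above, so the remaining argument names and surrounding variables here are illustrative:

	/* rc is the positive segment count on success, -errno on failure */
	rc = rpcrdma_register_external(seg, nsegs, writing, r_xprt);
	if (rc < 0)
		return rc;	/* e.g. -EIO from the default: arm above */
	nsegs = rc;
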
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index ac7fc9a31342..b799041b75bf 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h | |||
| @@ -97,6 +97,12 @@ struct rpcrdma_ep { | |||
| 97 | struct ib_wc rep_recv_wcs[RPCRDMA_POLLSIZE]; | 97 | struct ib_wc rep_recv_wcs[RPCRDMA_POLLSIZE]; |
| 98 | }; | 98 | }; |
| 99 | 99 | ||
| 100 | /* | ||
| 101 | * Force a signaled SEND Work Request every so often, | ||
| 102 | * in case the provider needs to do some housekeeping. | ||
| 103 | */ | ||
| 104 | #define RPCRDMA_MAX_UNSIGNALED_SENDS (32) | ||
| 105 | |||
| 100 | #define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit) | 106 | #define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit) |
| 101 | #define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount) | 107 | #define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount) |
| 102 | 108 | ||
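RPCRDMA_MAX_UNSIGNALED_SENDS bounds the budget that INIT_CQCOUNT()/DECR_CQCOUNT() manage. A hedged sketch of how a send path typically consumes such a budget -- most work requests go out unsignaled, and a signaled completion is requested only once the counter runs out (illustrative, not a quote of the verbs.c send path):

static void example_mark_send(struct rpcrdma_ep *ep, struct ib_send_wr *wr)
{
	if (DECR_CQCOUNT(ep) > 0) {
		/* budget left: let the provider complete this send silently */
		wr->send_flags = 0;
	} else {
		/* budget exhausted: force a completion and reset the budget */
		INIT_CQCOUNT(ep);
		wr->send_flags = IB_SEND_SIGNALED;
	}
}
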
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 3b305ab17afe..87ce7e8bb8dc 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
| @@ -75,7 +75,7 @@ static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO; | |||
| 75 | * someone else's file names! | 75 | * someone else's file names! |
| 76 | */ | 76 | */ |
| 77 | 77 | ||
| 78 | #ifdef RPC_DEBUG | 78 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 79 | 79 | ||
| 80 | static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE; | 80 | static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE; |
| 81 | static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE; | 81 | static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE; |
| @@ -186,7 +186,7 @@ static struct ctl_table sunrpc_table[] = { | |||
| 186 | */ | 186 | */ |
| 187 | #define XS_IDLE_DISC_TO (5U * 60 * HZ) | 187 | #define XS_IDLE_DISC_TO (5U * 60 * HZ) |
| 188 | 188 | ||
| 189 | #ifdef RPC_DEBUG | 189 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 190 | # undef RPC_DEBUG_DATA | 190 | # undef RPC_DEBUG_DATA |
| 191 | # define RPCDBG_FACILITY RPCDBG_TRANS | 191 | # define RPCDBG_FACILITY RPCDBG_TRANS |
| 192 | #endif | 192 | #endif |
| @@ -216,65 +216,6 @@ static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count) | |||
| 216 | } | 216 | } |
| 217 | #endif | 217 | #endif |
| 218 | 218 | ||
| 219 | struct sock_xprt { | ||
| 220 | struct rpc_xprt xprt; | ||
| 221 | |||
| 222 | /* | ||
| 223 | * Network layer | ||
| 224 | */ | ||
| 225 | struct socket * sock; | ||
| 226 | struct sock * inet; | ||
| 227 | |||
| 228 | /* | ||
| 229 | * State of TCP reply receive | ||
| 230 | */ | ||
| 231 | __be32 tcp_fraghdr, | ||
| 232 | tcp_xid, | ||
| 233 | tcp_calldir; | ||
| 234 | |||
| 235 | u32 tcp_offset, | ||
| 236 | tcp_reclen; | ||
| 237 | |||
| 238 | unsigned long tcp_copied, | ||
| 239 | tcp_flags; | ||
| 240 | |||
| 241 | /* | ||
| 242 | * Connection of transports | ||
| 243 | */ | ||
| 244 | struct delayed_work connect_worker; | ||
| 245 | struct sockaddr_storage srcaddr; | ||
| 246 | unsigned short srcport; | ||
| 247 | |||
| 248 | /* | ||
| 249 | * UDP socket buffer size parameters | ||
| 250 | */ | ||
| 251 | size_t rcvsize, | ||
| 252 | sndsize; | ||
| 253 | |||
| 254 | /* | ||
| 255 | * Saved socket callback addresses | ||
| 256 | */ | ||
| 257 | void (*old_data_ready)(struct sock *); | ||
| 258 | void (*old_state_change)(struct sock *); | ||
| 259 | void (*old_write_space)(struct sock *); | ||
| 260 | void (*old_error_report)(struct sock *); | ||
| 261 | }; | ||
| 262 | |||
| 263 | /* | ||
| 264 | * TCP receive state flags | ||
| 265 | */ | ||
| 266 | #define TCP_RCV_LAST_FRAG (1UL << 0) | ||
| 267 | #define TCP_RCV_COPY_FRAGHDR (1UL << 1) | ||
| 268 | #define TCP_RCV_COPY_XID (1UL << 2) | ||
| 269 | #define TCP_RCV_COPY_DATA (1UL << 3) | ||
| 270 | #define TCP_RCV_READ_CALLDIR (1UL << 4) | ||
| 271 | #define TCP_RCV_COPY_CALLDIR (1UL << 5) | ||
| 272 | |||
| 273 | /* | ||
| 274 | * TCP RPC flags | ||
| 275 | */ | ||
| 276 | #define TCP_RPC_REPLY (1UL << 6) | ||
| 277 | |||
| 278 | static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) | 219 | static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) |
| 279 | { | 220 | { |
| 280 | return (struct rpc_xprt *) sk->sk_user_data; | 221 | return (struct rpc_xprt *) sk->sk_user_data; |
| @@ -1415,6 +1356,7 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns | |||
| 1415 | 1356 | ||
| 1416 | dprintk("RPC: xs_tcp_data_recv started\n"); | 1357 | dprintk("RPC: xs_tcp_data_recv started\n"); |
| 1417 | do { | 1358 | do { |
| 1359 | trace_xs_tcp_data_recv(transport); | ||
| 1418 | /* Read in a new fragment marker if necessary */ | 1360 | /* Read in a new fragment marker if necessary */ |
| 1419 | /* Can we ever really expect to get completely empty fragments? */ | 1361 | /* Can we ever really expect to get completely empty fragments? */ |
| 1420 | if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) { | 1362 | if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) { |
| @@ -1439,6 +1381,7 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns | |||
| 1439 | /* Skip over any trailing bytes on short reads */ | 1381 | /* Skip over any trailing bytes on short reads */ |
| 1440 | xs_tcp_read_discard(transport, &desc); | 1382 | xs_tcp_read_discard(transport, &desc); |
| 1441 | } while (desc.count); | 1383 | } while (desc.count); |
| 1384 | trace_xs_tcp_data_recv(transport); | ||
| 1442 | dprintk("RPC: xs_tcp_data_recv done\n"); | 1385 | dprintk("RPC: xs_tcp_data_recv done\n"); |
| 1443 | return len - desc.count; | 1386 | return len - desc.count; |
| 1444 | } | 1387 | } |
| @@ -1454,12 +1397,15 @@ static void xs_tcp_data_ready(struct sock *sk) | |||
| 1454 | struct rpc_xprt *xprt; | 1397 | struct rpc_xprt *xprt; |
| 1455 | read_descriptor_t rd_desc; | 1398 | read_descriptor_t rd_desc; |
| 1456 | int read; | 1399 | int read; |
| 1400 | unsigned long total = 0; | ||
| 1457 | 1401 | ||
| 1458 | dprintk("RPC: xs_tcp_data_ready...\n"); | 1402 | dprintk("RPC: xs_tcp_data_ready...\n"); |
| 1459 | 1403 | ||
| 1460 | read_lock_bh(&sk->sk_callback_lock); | 1404 | read_lock_bh(&sk->sk_callback_lock); |
| 1461 | if (!(xprt = xprt_from_sock(sk))) | 1405 | if (!(xprt = xprt_from_sock(sk))) { |
| 1406 | read = 0; | ||
| 1462 | goto out; | 1407 | goto out; |
| 1408 | } | ||
| 1463 | /* Any data means we had a useful conversation, so | 1409 | /* Any data means we had a useful conversation, so |
| 1464 | * the we don't need to delay the next reconnect | 1410 | * the we don't need to delay the next reconnect |
| 1465 | */ | 1411 | */ |
| @@ -1471,8 +1417,11 @@ static void xs_tcp_data_ready(struct sock *sk) | |||
| 1471 | do { | 1417 | do { |
| 1472 | rd_desc.count = 65536; | 1418 | rd_desc.count = 65536; |
| 1473 | read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv); | 1419 | read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv); |
| 1420 | if (read > 0) | ||
| 1421 | total += read; | ||
| 1474 | } while (read > 0); | 1422 | } while (read > 0); |
| 1475 | out: | 1423 | out: |
| 1424 | trace_xs_tcp_data_ready(xprt, read, total); | ||
| 1476 | read_unlock_bh(&sk->sk_callback_lock); | 1425 | read_unlock_bh(&sk->sk_callback_lock); |
| 1477 | } | 1426 | } |
| 1478 | 1427 | ||
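trace_xs_tcp_data_recv() and trace_xs_tcp_data_ready() are tracepoints defined outside this diff. For readers unfamiliar with the mechanism, a generic TRACE_EVENT() sketch with the same shape as the data_ready hook; the event name, field names, and layout are illustrative, not the actual definition, and the TRACE_SYSTEM/include boilerplate that real trace headers require is omitted:

/* Records the bytes returned by the last tcp_read_sock() call and the
 * running total handled during one data_ready wakeup.
 */
TRACE_EVENT(example_tcp_data_ready,
	TP_PROTO(const struct rpc_xprt *xprt, int read, unsigned long total),

	TP_ARGS(xprt, read, total),

	TP_STRUCT__entry(
		__field(const void *, xprt)
		__field(int, read)
		__field(unsigned long, total)
	),

	TP_fast_assign(
		__entry->xprt = xprt;
		__entry->read = read;
		__entry->total = total;
	),

	TP_printk("xprt=%p read=%d total=%lu",
		  __entry->xprt, __entry->read, __entry->total)
);
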
| @@ -3042,7 +2991,7 @@ static struct xprt_class xs_bc_tcp_transport = { | |||
| 3042 | */ | 2991 | */ |
| 3043 | int init_socket_xprt(void) | 2992 | int init_socket_xprt(void) |
| 3044 | { | 2993 | { |
| 3045 | #ifdef RPC_DEBUG | 2994 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 3046 | if (!sunrpc_table_header) | 2995 | if (!sunrpc_table_header) |
| 3047 | sunrpc_table_header = register_sysctl_table(sunrpc_table); | 2996 | sunrpc_table_header = register_sysctl_table(sunrpc_table); |
| 3048 | #endif | 2997 | #endif |
| @@ -3061,7 +3010,7 @@ int init_socket_xprt(void) | |||
| 3061 | */ | 3010 | */ |
| 3062 | void cleanup_socket_xprt(void) | 3011 | void cleanup_socket_xprt(void) |
| 3063 | { | 3012 | { |
| 3064 | #ifdef RPC_DEBUG | 3013 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 3065 | if (sunrpc_table_header) { | 3014 | if (sunrpc_table_header) { |
| 3066 | unregister_sysctl_table(sunrpc_table_header); | 3015 | unregister_sysctl_table(sunrpc_table_header); |
| 3067 | sunrpc_table_header = NULL; | 3016 | sunrpc_table_header = NULL; |
