diff options
Diffstat (limited to 'net')
378 files changed, 8016 insertions, 4533 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index c584a0af77d3..50f58f5f1c34 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c | |||
@@ -12,7 +12,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, | |||
12 | return NET_RX_DROP; | 12 | return NET_RX_DROP; |
13 | 13 | ||
14 | if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master))) | 14 | if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master))) |
15 | goto drop; | 15 | skb->deliver_no_wcard = 1; |
16 | 16 | ||
17 | skb->skb_iif = skb->dev->ifindex; | 17 | skb->skb_iif = skb->dev->ifindex; |
18 | __vlan_hwaccel_put_tag(skb, vlan_tci); | 18 | __vlan_hwaccel_put_tag(skb, vlan_tci); |
@@ -61,7 +61,7 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb) | |||
61 | dev->dev_addr)) | 61 | dev->dev_addr)) |
62 | skb->pkt_type = PACKET_HOST; | 62 | skb->pkt_type = PACKET_HOST; |
63 | break; | 63 | break; |
64 | }; | 64 | } |
65 | return 0; | 65 | return 0; |
66 | } | 66 | } |
67 | 67 | ||
@@ -84,7 +84,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp, | |||
84 | struct sk_buff *p; | 84 | struct sk_buff *p; |
85 | 85 | ||
86 | if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master))) | 86 | if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master))) |
87 | goto drop; | 87 | skb->deliver_no_wcard = 1; |
88 | 88 | ||
89 | skb->skb_iif = skb->dev->ifindex; | 89 | skb->skb_iif = skb->dev->ifindex; |
90 | __vlan_hwaccel_put_tag(skb, vlan_tci); | 90 | __vlan_hwaccel_put_tag(skb, vlan_tci); |
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index b5249c5fd4d3..529842677817 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
@@ -327,7 +327,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, | |||
327 | len = skb->len; | 327 | len = skb->len; |
328 | ret = dev_queue_xmit(skb); | 328 | ret = dev_queue_xmit(skb); |
329 | 329 | ||
330 | if (likely(ret == NET_XMIT_SUCCESS)) { | 330 | if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { |
331 | txq->tx_packets++; | 331 | txq->tx_packets++; |
332 | txq->tx_bytes += len; | 332 | txq->tx_bytes += len; |
333 | } else | 333 | } else |
@@ -353,7 +353,7 @@ static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, | |||
353 | len = skb->len; | 353 | len = skb->len; |
354 | ret = dev_queue_xmit(skb); | 354 | ret = dev_queue_xmit(skb); |
355 | 355 | ||
356 | if (likely(ret == NET_XMIT_SUCCESS)) { | 356 | if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { |
357 | txq->tx_packets++; | 357 | txq->tx_packets++; |
358 | txq->tx_bytes += len; | 358 | txq->tx_bytes += len; |
359 | } else | 359 | } else |
@@ -708,7 +708,8 @@ static int vlan_dev_init(struct net_device *dev) | |||
708 | netif_carrier_off(dev); | 708 | netif_carrier_off(dev); |
709 | 709 | ||
710 | /* IFF_BROADCAST|IFF_MULTICAST; ??? */ | 710 | /* IFF_BROADCAST|IFF_MULTICAST; ??? */ |
711 | dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI); | 711 | dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | |
712 | IFF_MASTER | IFF_SLAVE); | ||
712 | dev->iflink = real_dev->ifindex; | 713 | dev->iflink = real_dev->ifindex; |
713 | dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) | | 714 | dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) | |
714 | (1<<__LINK_STATE_DORMANT))) | | 715 | (1<<__LINK_STATE_DORMANT))) | |
diff --git a/net/9p/client.c b/net/9p/client.c index 0aa79faa9850..37c8da07a80b 100644 --- a/net/9p/client.c +++ b/net/9p/client.c | |||
@@ -1321,7 +1321,8 @@ static int p9_client_statsize(struct p9_wstat *wst, int proto_version) | |||
1321 | if (wst->muid) | 1321 | if (wst->muid) |
1322 | ret += strlen(wst->muid); | 1322 | ret += strlen(wst->muid); |
1323 | 1323 | ||
1324 | if (proto_version == p9_proto_2000u) { | 1324 | if ((proto_version == p9_proto_2000u) || |
1325 | (proto_version == p9_proto_2000L)) { | ||
1325 | ret += 2+4+4+4; /* extension[s] n_uid[4] n_gid[4] n_muid[4] */ | 1326 | ret += 2+4+4+4; /* extension[s] n_uid[4] n_gid[4] n_muid[4] */ |
1326 | if (wst->extension) | 1327 | if (wst->extension) |
1327 | ret += strlen(wst->extension); | 1328 | ret += strlen(wst->extension); |
@@ -1364,3 +1365,70 @@ error: | |||
1364 | return err; | 1365 | return err; |
1365 | } | 1366 | } |
1366 | EXPORT_SYMBOL(p9_client_wstat); | 1367 | EXPORT_SYMBOL(p9_client_wstat); |
1368 | |||
1369 | int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb) | ||
1370 | { | ||
1371 | int err; | ||
1372 | struct p9_req_t *req; | ||
1373 | struct p9_client *clnt; | ||
1374 | |||
1375 | err = 0; | ||
1376 | clnt = fid->clnt; | ||
1377 | |||
1378 | P9_DPRINTK(P9_DEBUG_9P, ">>> TSTATFS fid %d\n", fid->fid); | ||
1379 | |||
1380 | req = p9_client_rpc(clnt, P9_TSTATFS, "d", fid->fid); | ||
1381 | if (IS_ERR(req)) { | ||
1382 | err = PTR_ERR(req); | ||
1383 | goto error; | ||
1384 | } | ||
1385 | |||
1386 | err = p9pdu_readf(req->rc, clnt->proto_version, "ddqqqqqqd", &sb->type, | ||
1387 | &sb->bsize, &sb->blocks, &sb->bfree, &sb->bavail, | ||
1388 | &sb->files, &sb->ffree, &sb->fsid, &sb->namelen); | ||
1389 | if (err) { | ||
1390 | p9pdu_dump(1, req->rc); | ||
1391 | p9_free_req(clnt, req); | ||
1392 | goto error; | ||
1393 | } | ||
1394 | |||
1395 | P9_DPRINTK(P9_DEBUG_9P, "<<< RSTATFS fid %d type 0x%lx bsize %ld " | ||
1396 | "blocks %llu bfree %llu bavail %llu files %llu ffree %llu " | ||
1397 | "fsid %llu namelen %ld\n", | ||
1398 | fid->fid, (long unsigned int)sb->type, (long int)sb->bsize, | ||
1399 | sb->blocks, sb->bfree, sb->bavail, sb->files, sb->ffree, | ||
1400 | sb->fsid, (long int)sb->namelen); | ||
1401 | |||
1402 | p9_free_req(clnt, req); | ||
1403 | error: | ||
1404 | return err; | ||
1405 | } | ||
1406 | EXPORT_SYMBOL(p9_client_statfs); | ||
1407 | |||
1408 | int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid, char *name) | ||
1409 | { | ||
1410 | int err; | ||
1411 | struct p9_req_t *req; | ||
1412 | struct p9_client *clnt; | ||
1413 | |||
1414 | err = 0; | ||
1415 | clnt = fid->clnt; | ||
1416 | |||
1417 | P9_DPRINTK(P9_DEBUG_9P, ">>> TRENAME fid %d newdirfid %d name %s\n", | ||
1418 | fid->fid, newdirfid->fid, name); | ||
1419 | |||
1420 | req = p9_client_rpc(clnt, P9_TRENAME, "dds", fid->fid, | ||
1421 | newdirfid->fid, name); | ||
1422 | if (IS_ERR(req)) { | ||
1423 | err = PTR_ERR(req); | ||
1424 | goto error; | ||
1425 | } | ||
1426 | |||
1427 | P9_DPRINTK(P9_DEBUG_9P, "<<< RRENAME fid %d\n", fid->fid); | ||
1428 | |||
1429 | p9_free_req(clnt, req); | ||
1430 | error: | ||
1431 | return err; | ||
1432 | } | ||
1433 | EXPORT_SYMBOL(p9_client_rename); | ||
1434 | |||
diff --git a/net/9p/protocol.c b/net/9p/protocol.c index e7541d5b0118..149f82160130 100644 --- a/net/9p/protocol.c +++ b/net/9p/protocol.c | |||
@@ -341,7 +341,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt, | |||
341 | } | 341 | } |
342 | break; | 342 | break; |
343 | case '?': | 343 | case '?': |
344 | if (proto_version != p9_proto_2000u) | 344 | if ((proto_version != p9_proto_2000u) && |
345 | (proto_version != p9_proto_2000L)) | ||
345 | return 0; | 346 | return 0; |
346 | break; | 347 | break; |
347 | default: | 348 | default: |
@@ -393,7 +394,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt, | |||
393 | const char *sptr = va_arg(ap, const char *); | 394 | const char *sptr = va_arg(ap, const char *); |
394 | int16_t len = 0; | 395 | int16_t len = 0; |
395 | if (sptr) | 396 | if (sptr) |
396 | len = MIN(strlen(sptr), USHORT_MAX); | 397 | len = MIN(strlen(sptr), USHRT_MAX); |
397 | 398 | ||
398 | errcode = p9pdu_writef(pdu, proto_version, | 399 | errcode = p9pdu_writef(pdu, proto_version, |
399 | "w", len); | 400 | "w", len); |
@@ -488,7 +489,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt, | |||
488 | } | 489 | } |
489 | break; | 490 | break; |
490 | case '?': | 491 | case '?': |
491 | if (proto_version != p9_proto_2000u) | 492 | if ((proto_version != p9_proto_2000u) && |
493 | (proto_version != p9_proto_2000L)) | ||
492 | return 0; | 494 | return 0; |
493 | break; | 495 | break; |
494 | default: | 496 | default: |
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c index 041101ab4aa5..0ea20c30466c 100644 --- a/net/9p/trans_rdma.c +++ b/net/9p/trans_rdma.c | |||
@@ -308,7 +308,6 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma, | |||
308 | req, err, status); | 308 | req, err, status); |
309 | rdma->state = P9_RDMA_FLUSHING; | 309 | rdma->state = P9_RDMA_FLUSHING; |
310 | client->status = Disconnected; | 310 | client->status = Disconnected; |
311 | return; | ||
312 | } | 311 | } |
313 | 312 | ||
314 | static void | 313 | static void |
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 7eb78ecc1618..dcfbe99ff81c 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c | |||
@@ -137,7 +137,7 @@ static void req_done(struct virtqueue *vq) | |||
137 | 137 | ||
138 | P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n"); | 138 | P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n"); |
139 | 139 | ||
140 | while ((rc = chan->vq->vq_ops->get_buf(chan->vq, &len)) != NULL) { | 140 | while ((rc = virtqueue_get_buf(chan->vq, &len)) != NULL) { |
141 | P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc); | 141 | P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc); |
142 | P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag); | 142 | P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag); |
143 | req = p9_tag_lookup(chan->client, rc->tag); | 143 | req = p9_tag_lookup(chan->client, rc->tag); |
@@ -209,13 +209,13 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req) | |||
209 | 209 | ||
210 | req->status = REQ_STATUS_SENT; | 210 | req->status = REQ_STATUS_SENT; |
211 | 211 | ||
212 | if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) { | 212 | if (virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) { |
213 | P9_DPRINTK(P9_DEBUG_TRANS, | 213 | P9_DPRINTK(P9_DEBUG_TRANS, |
214 | "9p debug: virtio rpc add_buf returned failure"); | 214 | "9p debug: virtio rpc add_buf returned failure"); |
215 | return -EIO; | 215 | return -EIO; |
216 | } | 216 | } |
217 | 217 | ||
218 | chan->vq->vq_ops->kick(chan->vq); | 218 | virtqueue_kick(chan->vq); |
219 | 219 | ||
220 | P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n"); | 220 | P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n"); |
221 | return 0; | 221 | return 0; |
diff --git a/net/atm/br2684.c b/net/atm/br2684.c index d6c7ceaf13e9..6719af6a59fa 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c | |||
@@ -446,7 +446,6 @@ error: | |||
446 | net_dev->stats.rx_errors++; | 446 | net_dev->stats.rx_errors++; |
447 | free_skb: | 447 | free_skb: |
448 | dev_kfree_skb(skb); | 448 | dev_kfree_skb(skb); |
449 | return; | ||
450 | } | 449 | } |
451 | 450 | ||
452 | /* | 451 | /* |
diff --git a/net/atm/lec.c b/net/atm/lec.c index feeaf5718472..d98bde1a0ac8 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c | |||
@@ -161,8 +161,6 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev) | |||
161 | skb_queue_tail(&sk->sk_receive_queue, skb2); | 161 | skb_queue_tail(&sk->sk_receive_queue, skb2); |
162 | sk->sk_data_ready(sk, skb2->len); | 162 | sk->sk_data_ready(sk, skb2->len); |
163 | } | 163 | } |
164 | |||
165 | return; | ||
166 | } | 164 | } |
167 | #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ | 165 | #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ |
168 | 166 | ||
@@ -640,7 +638,6 @@ static void lec_set_multicast_list(struct net_device *dev) | |||
640 | * by default, all multicast frames arrive over the bus. | 638 | * by default, all multicast frames arrive over the bus. |
641 | * eventually support selective multicast service | 639 | * eventually support selective multicast service |
642 | */ | 640 | */ |
643 | return; | ||
644 | } | 641 | } |
645 | 642 | ||
646 | static const struct net_device_ops lec_netdev_ops = { | 643 | static const struct net_device_ops lec_netdev_ops = { |
@@ -1199,8 +1196,6 @@ static void __exit lane_module_cleanup(void) | |||
1199 | dev_lec[i] = NULL; | 1196 | dev_lec[i] = NULL; |
1200 | } | 1197 | } |
1201 | } | 1198 | } |
1202 | |||
1203 | return; | ||
1204 | } | 1199 | } |
1205 | 1200 | ||
1206 | module_init(lane_module_init); | 1201 | module_init(lane_module_init); |
@@ -1334,7 +1329,6 @@ static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr, | |||
1334 | priv->lane2_ops->associate_indicator(dev, mac_addr, | 1329 | priv->lane2_ops->associate_indicator(dev, mac_addr, |
1335 | tlvs, sizeoftlvs); | 1330 | tlvs, sizeoftlvs); |
1336 | } | 1331 | } |
1337 | return; | ||
1338 | } | 1332 | } |
1339 | 1333 | ||
1340 | /* | 1334 | /* |
diff --git a/net/atm/mpc.c b/net/atm/mpc.c index 436f2e177657..622b471e14e0 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c | |||
@@ -455,7 +455,6 @@ static void lane2_assoc_ind(struct net_device *dev, const u8 *mac_addr, | |||
455 | if (end_of_tlvs - tlvs != 0) | 455 | if (end_of_tlvs - tlvs != 0) |
456 | pr_info("(%s) ignoring %Zd bytes of trailing TLV garbage\n", | 456 | pr_info("(%s) ignoring %Zd bytes of trailing TLV garbage\n", |
457 | dev->name, end_of_tlvs - tlvs); | 457 | dev->name, end_of_tlvs - tlvs); |
458 | return; | ||
459 | } | 458 | } |
460 | 459 | ||
461 | /* | 460 | /* |
@@ -684,8 +683,6 @@ static void mpc_vcc_close(struct atm_vcc *vcc, struct net_device *dev) | |||
684 | 683 | ||
685 | if (in_entry == NULL && eg_entry == NULL) | 684 | if (in_entry == NULL && eg_entry == NULL) |
686 | dprintk("(%s) unused vcc closed\n", dev->name); | 685 | dprintk("(%s) unused vcc closed\n", dev->name); |
687 | |||
688 | return; | ||
689 | } | 686 | } |
690 | 687 | ||
691 | static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb) | 688 | static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb) |
@@ -783,8 +780,6 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb) | |||
783 | 780 | ||
784 | memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); | 781 | memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); |
785 | netif_rx(new_skb); | 782 | netif_rx(new_skb); |
786 | |||
787 | return; | ||
788 | } | 783 | } |
789 | 784 | ||
790 | static struct atmdev_ops mpc_ops = { /* only send is required */ | 785 | static struct atmdev_ops mpc_ops = { /* only send is required */ |
@@ -873,8 +868,6 @@ static void send_set_mps_ctrl_addr(const char *addr, struct mpoa_client *mpc) | |||
873 | mesg.type = SET_MPS_CTRL_ADDR; | 868 | mesg.type = SET_MPS_CTRL_ADDR; |
874 | memcpy(mesg.MPS_ctrl, addr, ATM_ESA_LEN); | 869 | memcpy(mesg.MPS_ctrl, addr, ATM_ESA_LEN); |
875 | msg_to_mpoad(&mesg, mpc); | 870 | msg_to_mpoad(&mesg, mpc); |
876 | |||
877 | return; | ||
878 | } | 871 | } |
879 | 872 | ||
880 | static void mpoad_close(struct atm_vcc *vcc) | 873 | static void mpoad_close(struct atm_vcc *vcc) |
@@ -911,8 +904,6 @@ static void mpoad_close(struct atm_vcc *vcc) | |||
911 | pr_info("(%s) going down\n", | 904 | pr_info("(%s) going down\n", |
912 | (mpc->dev) ? mpc->dev->name : "<unknown>"); | 905 | (mpc->dev) ? mpc->dev->name : "<unknown>"); |
913 | module_put(THIS_MODULE); | 906 | module_put(THIS_MODULE); |
914 | |||
915 | return; | ||
916 | } | 907 | } |
917 | 908 | ||
918 | /* | 909 | /* |
@@ -1122,7 +1113,6 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc) | |||
1122 | pr_info("(%s) entry already in resolving state\n", | 1113 | pr_info("(%s) entry already in resolving state\n", |
1123 | (mpc->dev) ? mpc->dev->name : "<unknown>"); | 1114 | (mpc->dev) ? mpc->dev->name : "<unknown>"); |
1124 | mpc->in_ops->put(entry); | 1115 | mpc->in_ops->put(entry); |
1125 | return; | ||
1126 | } | 1116 | } |
1127 | 1117 | ||
1128 | /* | 1118 | /* |
@@ -1166,7 +1156,6 @@ static void check_qos_and_open_shortcut(struct k_message *msg, | |||
1166 | } else | 1156 | } else |
1167 | memset(&msg->qos, 0, sizeof(struct atm_qos)); | 1157 | memset(&msg->qos, 0, sizeof(struct atm_qos)); |
1168 | msg_to_mpoad(msg, client); | 1158 | msg_to_mpoad(msg, client); |
1169 | return; | ||
1170 | } | 1159 | } |
1171 | 1160 | ||
1172 | static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc) | 1161 | static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc) |
@@ -1240,8 +1229,6 @@ static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc) | |||
1240 | mpc->in_ops->put(entry); | 1229 | mpc->in_ops->put(entry); |
1241 | entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask); | 1230 | entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask); |
1242 | } while (entry != NULL); | 1231 | } while (entry != NULL); |
1243 | |||
1244 | return; | ||
1245 | } | 1232 | } |
1246 | 1233 | ||
1247 | static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc) | 1234 | static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc) |
@@ -1260,8 +1247,6 @@ static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc) | |||
1260 | write_unlock_irq(&mpc->egress_lock); | 1247 | write_unlock_irq(&mpc->egress_lock); |
1261 | 1248 | ||
1262 | mpc->eg_ops->put(entry); | 1249 | mpc->eg_ops->put(entry); |
1263 | |||
1264 | return; | ||
1265 | } | 1250 | } |
1266 | 1251 | ||
1267 | static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry) | 1252 | static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry) |
@@ -1295,8 +1280,6 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry) | |||
1295 | skb_queue_tail(&sk->sk_receive_queue, skb); | 1280 | skb_queue_tail(&sk->sk_receive_queue, skb); |
1296 | sk->sk_data_ready(sk, skb->len); | 1281 | sk->sk_data_ready(sk, skb->len); |
1297 | dprintk("exiting\n"); | 1282 | dprintk("exiting\n"); |
1298 | |||
1299 | return; | ||
1300 | } | 1283 | } |
1301 | 1284 | ||
1302 | /* | 1285 | /* |
@@ -1325,8 +1308,6 @@ static void mps_death(struct k_message *msg, struct mpoa_client *mpc) | |||
1325 | 1308 | ||
1326 | mpc->in_ops->destroy_cache(mpc); | 1309 | mpc->in_ops->destroy_cache(mpc); |
1327 | mpc->eg_ops->destroy_cache(mpc); | 1310 | mpc->eg_ops->destroy_cache(mpc); |
1328 | |||
1329 | return; | ||
1330 | } | 1311 | } |
1331 | 1312 | ||
1332 | static void MPOA_cache_impos_rcvd(struct k_message *msg, | 1313 | static void MPOA_cache_impos_rcvd(struct k_message *msg, |
@@ -1353,8 +1334,6 @@ static void MPOA_cache_impos_rcvd(struct k_message *msg, | |||
1353 | write_unlock_irq(&mpc->egress_lock); | 1334 | write_unlock_irq(&mpc->egress_lock); |
1354 | 1335 | ||
1355 | mpc->eg_ops->put(entry); | 1336 | mpc->eg_ops->put(entry); |
1356 | |||
1357 | return; | ||
1358 | } | 1337 | } |
1359 | 1338 | ||
1360 | static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, | 1339 | static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, |
@@ -1392,8 +1371,6 @@ static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, | |||
1392 | pr_info("(%s) targetless LE_ARP request failed\n", | 1371 | pr_info("(%s) targetless LE_ARP request failed\n", |
1393 | mpc->dev->name); | 1372 | mpc->dev->name); |
1394 | } | 1373 | } |
1395 | |||
1396 | return; | ||
1397 | } | 1374 | } |
1398 | 1375 | ||
1399 | static void set_mps_mac_addr_rcvd(struct k_message *msg, | 1376 | static void set_mps_mac_addr_rcvd(struct k_message *msg, |
@@ -1409,8 +1386,6 @@ static void set_mps_mac_addr_rcvd(struct k_message *msg, | |||
1409 | return; | 1386 | return; |
1410 | } | 1387 | } |
1411 | client->number_of_mps_macs = 1; | 1388 | client->number_of_mps_macs = 1; |
1412 | |||
1413 | return; | ||
1414 | } | 1389 | } |
1415 | 1390 | ||
1416 | /* | 1391 | /* |
@@ -1436,7 +1411,6 @@ static void clean_up(struct k_message *msg, struct mpoa_client *mpc, int action) | |||
1436 | 1411 | ||
1437 | msg->type = action; | 1412 | msg->type = action; |
1438 | msg_to_mpoad(msg, mpc); | 1413 | msg_to_mpoad(msg, mpc); |
1439 | return; | ||
1440 | } | 1414 | } |
1441 | 1415 | ||
1442 | static void mpc_timer_refresh(void) | 1416 | static void mpc_timer_refresh(void) |
@@ -1445,8 +1419,6 @@ static void mpc_timer_refresh(void) | |||
1445 | mpc_timer.data = mpc_timer.expires; | 1419 | mpc_timer.data = mpc_timer.expires; |
1446 | mpc_timer.function = mpc_cache_check; | 1420 | mpc_timer.function = mpc_cache_check; |
1447 | add_timer(&mpc_timer); | 1421 | add_timer(&mpc_timer); |
1448 | |||
1449 | return; | ||
1450 | } | 1422 | } |
1451 | 1423 | ||
1452 | static void mpc_cache_check(unsigned long checking_time) | 1424 | static void mpc_cache_check(unsigned long checking_time) |
@@ -1471,8 +1443,6 @@ static void mpc_cache_check(unsigned long checking_time) | |||
1471 | mpc = mpc->next; | 1443 | mpc = mpc->next; |
1472 | } | 1444 | } |
1473 | mpc_timer_refresh(); | 1445 | mpc_timer_refresh(); |
1474 | |||
1475 | return; | ||
1476 | } | 1446 | } |
1477 | 1447 | ||
1478 | static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd, | 1448 | static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd, |
@@ -1561,8 +1531,6 @@ static void __exit atm_mpoa_cleanup(void) | |||
1561 | kfree(qos); | 1531 | kfree(qos); |
1562 | qos = nextqos; | 1532 | qos = nextqos; |
1563 | } | 1533 | } |
1564 | |||
1565 | return; | ||
1566 | } | 1534 | } |
1567 | 1535 | ||
1568 | module_init(atm_mpoa_init); | 1536 | module_init(atm_mpoa_init); |
diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c index e773d8336918..d1b2d9a03144 100644 --- a/net/atm/mpoa_caches.c +++ b/net/atm/mpoa_caches.c | |||
@@ -182,8 +182,6 @@ static void in_cache_put(in_cache_entry *entry) | |||
182 | memset(entry, 0, sizeof(in_cache_entry)); | 182 | memset(entry, 0, sizeof(in_cache_entry)); |
183 | kfree(entry); | 183 | kfree(entry); |
184 | } | 184 | } |
185 | |||
186 | return; | ||
187 | } | 185 | } |
188 | 186 | ||
189 | /* | 187 | /* |
@@ -221,8 +219,6 @@ static void in_cache_remove_entry(in_cache_entry *entry, | |||
221 | } | 219 | } |
222 | vcc_release_async(vcc, -EPIPE); | 220 | vcc_release_async(vcc, -EPIPE); |
223 | } | 221 | } |
224 | |||
225 | return; | ||
226 | } | 222 | } |
227 | 223 | ||
228 | /* Call this every MPC-p2 seconds... Not exactly correct solution, | 224 | /* Call this every MPC-p2 seconds... Not exactly correct solution, |
@@ -248,8 +244,6 @@ static void clear_count_and_expired(struct mpoa_client *client) | |||
248 | entry = next_entry; | 244 | entry = next_entry; |
249 | } | 245 | } |
250 | write_unlock_bh(&client->ingress_lock); | 246 | write_unlock_bh(&client->ingress_lock); |
251 | |||
252 | return; | ||
253 | } | 247 | } |
254 | 248 | ||
255 | /* Call this every MPC-p4 seconds. */ | 249 | /* Call this every MPC-p4 seconds. */ |
@@ -334,8 +328,6 @@ static void in_destroy_cache(struct mpoa_client *mpc) | |||
334 | while (mpc->in_cache != NULL) | 328 | while (mpc->in_cache != NULL) |
335 | mpc->in_ops->remove_entry(mpc->in_cache, mpc); | 329 | mpc->in_ops->remove_entry(mpc->in_cache, mpc); |
336 | write_unlock_irq(&mpc->ingress_lock); | 330 | write_unlock_irq(&mpc->ingress_lock); |
337 | |||
338 | return; | ||
339 | } | 331 | } |
340 | 332 | ||
341 | static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id, | 333 | static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id, |
@@ -427,8 +419,6 @@ static void eg_cache_put(eg_cache_entry *entry) | |||
427 | memset(entry, 0, sizeof(eg_cache_entry)); | 419 | memset(entry, 0, sizeof(eg_cache_entry)); |
428 | kfree(entry); | 420 | kfree(entry); |
429 | } | 421 | } |
430 | |||
431 | return; | ||
432 | } | 422 | } |
433 | 423 | ||
434 | /* | 424 | /* |
@@ -463,8 +453,6 @@ static void eg_cache_remove_entry(eg_cache_entry *entry, | |||
463 | } | 453 | } |
464 | vcc_release_async(vcc, -EPIPE); | 454 | vcc_release_async(vcc, -EPIPE); |
465 | } | 455 | } |
466 | |||
467 | return; | ||
468 | } | 456 | } |
469 | 457 | ||
470 | static eg_cache_entry *eg_cache_add_entry(struct k_message *msg, | 458 | static eg_cache_entry *eg_cache_add_entry(struct k_message *msg, |
@@ -509,8 +497,6 @@ static void update_eg_cache_entry(eg_cache_entry *entry, uint16_t holding_time) | |||
509 | do_gettimeofday(&(entry->tv)); | 497 | do_gettimeofday(&(entry->tv)); |
510 | entry->entry_state = EGRESS_RESOLVED; | 498 | entry->entry_state = EGRESS_RESOLVED; |
511 | entry->ctrl_info.holding_time = holding_time; | 499 | entry->ctrl_info.holding_time = holding_time; |
512 | |||
513 | return; | ||
514 | } | 500 | } |
515 | 501 | ||
516 | static void clear_expired(struct mpoa_client *client) | 502 | static void clear_expired(struct mpoa_client *client) |
@@ -537,8 +523,6 @@ static void clear_expired(struct mpoa_client *client) | |||
537 | entry = next_entry; | 523 | entry = next_entry; |
538 | } | 524 | } |
539 | write_unlock_irq(&client->egress_lock); | 525 | write_unlock_irq(&client->egress_lock); |
540 | |||
541 | return; | ||
542 | } | 526 | } |
543 | 527 | ||
544 | static void eg_destroy_cache(struct mpoa_client *mpc) | 528 | static void eg_destroy_cache(struct mpoa_client *mpc) |
@@ -547,8 +531,6 @@ static void eg_destroy_cache(struct mpoa_client *mpc) | |||
547 | while (mpc->eg_cache != NULL) | 531 | while (mpc->eg_cache != NULL) |
548 | mpc->eg_ops->remove_entry(mpc->eg_cache, mpc); | 532 | mpc->eg_ops->remove_entry(mpc->eg_cache, mpc); |
549 | write_unlock_irq(&mpc->egress_lock); | 533 | write_unlock_irq(&mpc->egress_lock); |
550 | |||
551 | return; | ||
552 | } | 534 | } |
553 | 535 | ||
554 | 536 | ||
@@ -584,6 +566,4 @@ void atm_mpoa_init_cache(struct mpoa_client *mpc) | |||
584 | { | 566 | { |
585 | mpc->in_ops = &ingress_ops; | 567 | mpc->in_ops = &ingress_ops; |
586 | mpc->eg_ops = &egress_ops; | 568 | mpc->eg_ops = &egress_ops; |
587 | |||
588 | return; | ||
589 | } | 569 | } |
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 5e83f8e0877a..2f768de87011 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c | |||
@@ -1316,8 +1316,6 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags) | |||
1316 | } | 1316 | } |
1317 | 1317 | ||
1318 | tasklet_schedule(&hdev->tx_task); | 1318 | tasklet_schedule(&hdev->tx_task); |
1319 | |||
1320 | return; | ||
1321 | } | 1319 | } |
1322 | EXPORT_SYMBOL(hci_send_acl); | 1320 | EXPORT_SYMBOL(hci_send_acl); |
1323 | 1321 | ||
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 673a36886716..1b682a5aa061 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c | |||
@@ -1322,8 +1322,6 @@ static void l2cap_drop_acked_frames(struct sock *sk) | |||
1322 | 1322 | ||
1323 | if (!l2cap_pi(sk)->unacked_frames) | 1323 | if (!l2cap_pi(sk)->unacked_frames) |
1324 | del_timer(&l2cap_pi(sk)->retrans_timer); | 1324 | del_timer(&l2cap_pi(sk)->retrans_timer); |
1325 | |||
1326 | return; | ||
1327 | } | 1325 | } |
1328 | 1326 | ||
1329 | static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb) | 1327 | static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb) |
@@ -4667,7 +4665,6 @@ void l2cap_load(void) | |||
4667 | /* Dummy function to trigger automatic L2CAP module loading by | 4665 | /* Dummy function to trigger automatic L2CAP module loading by |
4668 | * other modules that use L2CAP sockets but don't use any other | 4666 | * other modules that use L2CAP sockets but don't use any other |
4669 | * symbols from it. */ | 4667 | * symbols from it. */ |
4670 | return; | ||
4671 | } | 4668 | } |
4672 | EXPORT_SYMBOL(l2cap_load); | 4669 | EXPORT_SYMBOL(l2cap_load); |
4673 | 4670 | ||
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index cab71ea2796d..309b6c261b25 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c | |||
@@ -1014,8 +1014,6 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old) | |||
1014 | rfcomm_send_rpn(dev->dlc->session, 1, dev->dlc->dlci, baud, | 1014 | rfcomm_send_rpn(dev->dlc->session, 1, dev->dlc->dlci, baud, |
1015 | data_bits, stop_bits, parity, | 1015 | data_bits, stop_bits, parity, |
1016 | RFCOMM_RPN_FLOW_NONE, x_on, x_off, changes); | 1016 | RFCOMM_RPN_FLOW_NONE, x_on, x_off, changes); |
1017 | |||
1018 | return; | ||
1019 | } | 1017 | } |
1020 | 1018 | ||
1021 | static void rfcomm_tty_throttle(struct tty_struct *tty) | 1019 | static void rfcomm_tty_throttle(struct tty_struct *tty) |
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 4767928a93d3..d0927d1fdada 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
@@ -273,7 +273,6 @@ static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb) | |||
273 | 273 | ||
274 | drop: | 274 | drop: |
275 | kfree_skb(skb); | 275 | kfree_skb(skb); |
276 | return; | ||
277 | } | 276 | } |
278 | 277 | ||
279 | /* -------- Socket interface ---------- */ | 278 | /* -------- Socket interface ---------- */ |
diff --git a/net/bridge/br.c b/net/bridge/br.c index e1241c76239a..76357b547752 100644 --- a/net/bridge/br.c +++ b/net/bridge/br.c | |||
@@ -38,7 +38,7 @@ static int __init br_init(void) | |||
38 | 38 | ||
39 | err = stp_proto_register(&br_stp_proto); | 39 | err = stp_proto_register(&br_stp_proto); |
40 | if (err < 0) { | 40 | if (err < 0) { |
41 | printk(KERN_ERR "bridge: can't register sap for STP\n"); | 41 | pr_err("bridge: can't register sap for STP\n"); |
42 | return err; | 42 | return err; |
43 | } | 43 | } |
44 | 44 | ||
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 074c59690fc5..eedf2c94820e 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/etherdevice.h> | 17 | #include <linux/etherdevice.h> |
18 | #include <linux/ethtool.h> | 18 | #include <linux/ethtool.h> |
19 | #include <linux/list.h> | 19 | #include <linux/list.h> |
20 | #include <linux/netfilter_bridge.h> | ||
20 | 21 | ||
21 | #include <asm/uaccess.h> | 22 | #include <asm/uaccess.h> |
22 | #include "br_private.h" | 23 | #include "br_private.h" |
@@ -30,6 +31,13 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
30 | struct net_bridge_mdb_entry *mdst; | 31 | struct net_bridge_mdb_entry *mdst; |
31 | struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats); | 32 | struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats); |
32 | 33 | ||
34 | #ifdef CONFIG_BRIDGE_NETFILTER | ||
35 | if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) { | ||
36 | br_nf_pre_routing_finish_bridge_slow(skb); | ||
37 | return NETDEV_TX_OK; | ||
38 | } | ||
39 | #endif | ||
40 | |||
33 | brstats->tx_packets++; | 41 | brstats->tx_packets++; |
34 | brstats->tx_bytes += skb->len; | 42 | brstats->tx_bytes += skb->len; |
35 | 43 | ||
@@ -191,7 +199,7 @@ static int br_set_tx_csum(struct net_device *dev, u32 data) | |||
191 | } | 199 | } |
192 | 200 | ||
193 | #ifdef CONFIG_NET_POLL_CONTROLLER | 201 | #ifdef CONFIG_NET_POLL_CONTROLLER |
194 | bool br_devices_support_netpoll(struct net_bridge *br) | 202 | static bool br_devices_support_netpoll(struct net_bridge *br) |
195 | { | 203 | { |
196 | struct net_bridge_port *p; | 204 | struct net_bridge_port *p; |
197 | bool ret = true; | 205 | bool ret = true; |
@@ -217,9 +225,9 @@ static void br_poll_controller(struct net_device *br_dev) | |||
217 | netpoll_poll_dev(np->real_dev); | 225 | netpoll_poll_dev(np->real_dev); |
218 | } | 226 | } |
219 | 227 | ||
220 | void br_netpoll_cleanup(struct net_device *br_dev) | 228 | void br_netpoll_cleanup(struct net_device *dev) |
221 | { | 229 | { |
222 | struct net_bridge *br = netdev_priv(br_dev); | 230 | struct net_bridge *br = netdev_priv(dev); |
223 | struct net_bridge_port *p, *n; | 231 | struct net_bridge_port *p, *n; |
224 | const struct net_device_ops *ops; | 232 | const struct net_device_ops *ops; |
225 | 233 | ||
@@ -235,10 +243,29 @@ void br_netpoll_cleanup(struct net_device *br_dev) | |||
235 | } | 243 | } |
236 | } | 244 | } |
237 | 245 | ||
238 | #else | 246 | void br_netpoll_disable(struct net_bridge *br, |
247 | struct net_device *dev) | ||
248 | { | ||
249 | if (br_devices_support_netpoll(br)) | ||
250 | br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL; | ||
251 | if (dev->netdev_ops->ndo_netpoll_cleanup) | ||
252 | dev->netdev_ops->ndo_netpoll_cleanup(dev); | ||
253 | else | ||
254 | dev->npinfo = NULL; | ||
255 | } | ||
239 | 256 | ||
240 | void br_netpoll_cleanup(struct net_device *br_dev) | 257 | void br_netpoll_enable(struct net_bridge *br, |
258 | struct net_device *dev) | ||
241 | { | 259 | { |
260 | if (br_devices_support_netpoll(br)) { | ||
261 | br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL; | ||
262 | if (br->dev->npinfo) | ||
263 | dev->npinfo = br->dev->npinfo; | ||
264 | } else if (!(br->dev->priv_flags & IFF_DISABLE_NETPOLL)) { | ||
265 | br->dev->priv_flags |= IFF_DISABLE_NETPOLL; | ||
266 | br_info(br,"new device %s does not support netpoll (disabling)", | ||
267 | dev->name); | ||
268 | } | ||
242 | } | 269 | } |
243 | 270 | ||
244 | #endif | 271 | #endif |
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 9101a4e56201..26637439965b 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c | |||
@@ -353,8 +353,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, | |||
353 | */ | 353 | */ |
354 | if (fdb->is_local) | 354 | if (fdb->is_local) |
355 | return 0; | 355 | return 0; |
356 | 356 | br_warn(br, "adding interface %s with same address " | |
357 | printk(KERN_WARNING "%s adding interface with same address " | ||
358 | "as a received packet\n", | 357 | "as a received packet\n", |
359 | source->dev->name); | 358 | source->dev->name); |
360 | fdb_delete(fdb); | 359 | fdb_delete(fdb); |
@@ -397,9 +396,9 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, | |||
397 | /* attempt to update an entry for a local interface */ | 396 | /* attempt to update an entry for a local interface */ |
398 | if (unlikely(fdb->is_local)) { | 397 | if (unlikely(fdb->is_local)) { |
399 | if (net_ratelimit()) | 398 | if (net_ratelimit()) |
400 | printk(KERN_WARNING "%s: received packet with " | 399 | br_warn(br, "received packet on %s with " |
401 | "own address as source address\n", | 400 | "own address as source address\n", |
402 | source->dev->name); | 401 | source->dev->name); |
403 | } else { | 402 | } else { |
404 | /* fastpath: update of existing entry */ | 403 | /* fastpath: update of existing entry */ |
405 | fdb->dst = source; | 404 | fdb->dst = source; |
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 92ad9feb199d..a98ef1393097 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c | |||
@@ -45,7 +45,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb) | |||
45 | if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb)) | 45 | if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb)) |
46 | kfree_skb(skb); | 46 | kfree_skb(skb); |
47 | else { | 47 | else { |
48 | /* ip_refrag calls ip_fragment, doesn't copy the MAC header. */ | 48 | /* ip_fragment doesn't copy the MAC header */ |
49 | if (nf_bridge_maybe_copy_header(skb)) | 49 | if (nf_bridge_maybe_copy_header(skb)) |
50 | kfree_skb(skb); | 50 | kfree_skb(skb); |
51 | else { | 51 | else { |
@@ -66,7 +66,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb) | |||
66 | 66 | ||
67 | int br_forward_finish(struct sk_buff *skb) | 67 | int br_forward_finish(struct sk_buff *skb) |
68 | { | 68 | { |
69 | return NF_HOOK(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev, | 69 | return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev, |
70 | br_dev_queue_push_xmit); | 70 | br_dev_queue_push_xmit); |
71 | 71 | ||
72 | } | 72 | } |
@@ -84,8 +84,8 @@ static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) | |||
84 | } | 84 | } |
85 | #endif | 85 | #endif |
86 | skb->dev = to->dev; | 86 | skb->dev = to->dev; |
87 | NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, | 87 | NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, |
88 | br_forward_finish); | 88 | br_forward_finish); |
89 | #ifdef CONFIG_NET_POLL_CONTROLLER | 89 | #ifdef CONFIG_NET_POLL_CONTROLLER |
90 | if (skb->dev->npinfo) | 90 | if (skb->dev->npinfo) |
91 | skb->dev->npinfo->netpoll->dev = br->dev; | 91 | skb->dev->npinfo->netpoll->dev = br->dev; |
@@ -105,8 +105,8 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb) | |||
105 | skb->dev = to->dev; | 105 | skb->dev = to->dev; |
106 | skb_forward_csum(skb); | 106 | skb_forward_csum(skb); |
107 | 107 | ||
108 | NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev, | 108 | NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev, |
109 | br_forward_finish); | 109 | br_forward_finish); |
110 | } | 110 | } |
111 | 111 | ||
112 | /* called with rcu_read_lock */ | 112 | /* called with rcu_read_lock */ |
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 537bdd60d9b9..18b245e2c00e 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -133,7 +133,7 @@ static void del_nbp(struct net_bridge_port *p) | |||
133 | struct net_bridge *br = p->br; | 133 | struct net_bridge *br = p->br; |
134 | struct net_device *dev = p->dev; | 134 | struct net_device *dev = p->dev; |
135 | 135 | ||
136 | sysfs_remove_link(br->ifobj, dev->name); | 136 | sysfs_remove_link(br->ifobj, p->dev->name); |
137 | 137 | ||
138 | dev_set_promiscuity(dev, -1); | 138 | dev_set_promiscuity(dev, -1); |
139 | 139 | ||
@@ -154,14 +154,7 @@ static void del_nbp(struct net_bridge_port *p) | |||
154 | kobject_uevent(&p->kobj, KOBJ_REMOVE); | 154 | kobject_uevent(&p->kobj, KOBJ_REMOVE); |
155 | kobject_del(&p->kobj); | 155 | kobject_del(&p->kobj); |
156 | 156 | ||
157 | #ifdef CONFIG_NET_POLL_CONTROLLER | 157 | br_netpoll_disable(br, dev); |
158 | if (br_devices_support_netpoll(br)) | ||
159 | br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL; | ||
160 | if (dev->netdev_ops->ndo_netpoll_cleanup) | ||
161 | dev->netdev_ops->ndo_netpoll_cleanup(dev); | ||
162 | else | ||
163 | dev->npinfo = NULL; | ||
164 | #endif | ||
165 | call_rcu(&p->rcu, destroy_nbp_rcu); | 158 | call_rcu(&p->rcu, destroy_nbp_rcu); |
166 | } | 159 | } |
167 | 160 | ||
@@ -455,19 +448,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) | |||
455 | 448 | ||
456 | kobject_uevent(&p->kobj, KOBJ_ADD); | 449 | kobject_uevent(&p->kobj, KOBJ_ADD); |
457 | 450 | ||
458 | #ifdef CONFIG_NET_POLL_CONTROLLER | 451 | br_netpoll_enable(br, dev); |
459 | if (br_devices_support_netpoll(br)) { | ||
460 | br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL; | ||
461 | if (br->dev->npinfo) | ||
462 | dev->npinfo = br->dev->npinfo; | ||
463 | } else if (!(br->dev->priv_flags & IFF_DISABLE_NETPOLL)) { | ||
464 | br->dev->priv_flags |= IFF_DISABLE_NETPOLL; | ||
465 | printk(KERN_INFO "New device %s does not support netpoll\n", | ||
466 | dev->name); | ||
467 | printk(KERN_INFO "Disabling netpoll for %s\n", | ||
468 | br->dev->name); | ||
469 | } | ||
470 | #endif | ||
471 | 452 | ||
472 | return 0; | 453 | return 0; |
473 | err2: | 454 | err2: |
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index e7f4c1d02f57..d36e700f7a26 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c | |||
@@ -33,7 +33,7 @@ static int br_pass_frame_up(struct sk_buff *skb) | |||
33 | indev = skb->dev; | 33 | indev = skb->dev; |
34 | skb->dev = brdev; | 34 | skb->dev = brdev; |
35 | 35 | ||
36 | return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL, | 36 | return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL, |
37 | netif_receive_skb); | 37 | netif_receive_skb); |
38 | } | 38 | } |
39 | 39 | ||
@@ -156,7 +156,7 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb) | |||
156 | if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0) | 156 | if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0) |
157 | goto forward; | 157 | goto forward; |
158 | 158 | ||
159 | if (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev, | 159 | if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev, |
160 | NULL, br_handle_local_finish)) | 160 | NULL, br_handle_local_finish)) |
161 | return NULL; /* frame consumed by filter */ | 161 | return NULL; /* frame consumed by filter */ |
162 | else | 162 | else |
@@ -177,7 +177,7 @@ forward: | |||
177 | if (!compare_ether_addr(p->br->dev->dev_addr, dest)) | 177 | if (!compare_ether_addr(p->br->dev->dev_addr, dest)) |
178 | skb->pkt_type = PACKET_HOST; | 178 | skb->pkt_type = PACKET_HOST; |
179 | 179 | ||
180 | NF_HOOK(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, | 180 | NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, |
181 | br_handle_frame_finish); | 181 | br_handle_frame_finish); |
182 | break; | 182 | break; |
183 | default: | 183 | default: |
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index 995afc4b04dc..cb43312b846e 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c | |||
@@ -412,6 +412,6 @@ int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
412 | 412 | ||
413 | } | 413 | } |
414 | 414 | ||
415 | pr_debug("Bridge does not support ioctl 0x%x\n", cmd); | 415 | br_debug(br, "Bridge does not support ioctl 0x%x\n", cmd); |
416 | return -EOPNOTSUPP; | 416 | return -EOPNOTSUPP; |
417 | } | 417 | } |
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 7128abdce45f..9d21d98ae5fa 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -585,10 +585,9 @@ static struct net_bridge_mdb_entry *br_multicast_get_group( | |||
585 | 585 | ||
586 | if (unlikely(count > br->hash_elasticity && count)) { | 586 | if (unlikely(count > br->hash_elasticity && count)) { |
587 | if (net_ratelimit()) | 587 | if (net_ratelimit()) |
588 | printk(KERN_INFO "%s: Multicast hash table " | 588 | br_info(br, "Multicast hash table " |
589 | "chain limit reached: %s\n", | 589 | "chain limit reached: %s\n", |
590 | br->dev->name, port ? port->dev->name : | 590 | port ? port->dev->name : br->dev->name); |
591 | br->dev->name); | ||
592 | 591 | ||
593 | elasticity = br->hash_elasticity; | 592 | elasticity = br->hash_elasticity; |
594 | } | 593 | } |
@@ -596,11 +595,9 @@ static struct net_bridge_mdb_entry *br_multicast_get_group( | |||
596 | if (mdb->size >= max) { | 595 | if (mdb->size >= max) { |
597 | max *= 2; | 596 | max *= 2; |
598 | if (unlikely(max >= br->hash_max)) { | 597 | if (unlikely(max >= br->hash_max)) { |
599 | printk(KERN_WARNING "%s: Multicast hash table maximum " | 598 | br_warn(br, "Multicast hash table maximum " |
600 | "reached, disabling snooping: %s, %d\n", | 599 | "reached, disabling snooping: %s, %d\n", |
601 | br->dev->name, port ? port->dev->name : | 600 | port ? port->dev->name : br->dev->name, max); |
602 | br->dev->name, | ||
603 | max); | ||
604 | err = -E2BIG; | 601 | err = -E2BIG; |
605 | disable: | 602 | disable: |
606 | br->multicast_disabled = 1; | 603 | br->multicast_disabled = 1; |
@@ -611,22 +608,19 @@ disable: | |||
611 | if (max > mdb->max || elasticity) { | 608 | if (max > mdb->max || elasticity) { |
612 | if (mdb->old) { | 609 | if (mdb->old) { |
613 | if (net_ratelimit()) | 610 | if (net_ratelimit()) |
614 | printk(KERN_INFO "%s: Multicast hash table " | 611 | br_info(br, "Multicast hash table " |
615 | "on fire: %s\n", | 612 | "on fire: %s\n", |
616 | br->dev->name, port ? port->dev->name : | 613 | port ? port->dev->name : br->dev->name); |
617 | br->dev->name); | ||
618 | err = -EEXIST; | 614 | err = -EEXIST; |
619 | goto err; | 615 | goto err; |
620 | } | 616 | } |
621 | 617 | ||
622 | err = br_mdb_rehash(&br->mdb, max, elasticity); | 618 | err = br_mdb_rehash(&br->mdb, max, elasticity); |
623 | if (err) { | 619 | if (err) { |
624 | printk(KERN_WARNING "%s: Cannot rehash multicast " | 620 | br_warn(br, "Cannot rehash multicast " |
625 | "hash table, disabling snooping: " | 621 | "hash table, disabling snooping: %s, %d, %d\n", |
626 | "%s, %d, %d\n", | 622 | port ? port->dev->name : br->dev->name, |
627 | br->dev->name, port ? port->dev->name : | 623 | mdb->size, err); |
628 | br->dev->name, | ||
629 | mdb->size, err); | ||
630 | goto disable; | 624 | goto disable; |
631 | } | 625 | } |
632 | 626 | ||
@@ -814,7 +808,7 @@ static void __br_multicast_send_query(struct net_bridge *br, | |||
814 | if (port) { | 808 | if (port) { |
815 | __skb_push(skb, sizeof(struct ethhdr)); | 809 | __skb_push(skb, sizeof(struct ethhdr)); |
816 | skb->dev = port->dev; | 810 | skb->dev = port->dev; |
817 | NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, | 811 | NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, |
818 | dev_queue_xmit); | 812 | dev_queue_xmit); |
819 | } else | 813 | } else |
820 | netif_rx(skb); | 814 | netif_rx(skb); |
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 4c4977d12fd6..44420992f72f 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c | |||
@@ -3,15 +3,8 @@ | |||
3 | * Linux ethernet bridge | 3 | * Linux ethernet bridge |
4 | * | 4 | * |
5 | * Authors: | 5 | * Authors: |
6 | * Lennert Buytenhek <buytenh@gnu.org> | 6 | * Lennert Buytenhek <buytenh@gnu.org> |
7 | * Bart De Schuymer (maintainer) <bdschuym@pandora.be> | 7 | * Bart De Schuymer <bdschuym@pandora.be> |
8 | * | ||
9 | * Changes: | ||
10 | * Apr 29 2003: physdev module support (bdschuym) | ||
11 | * Jun 19 2003: let arptables see bridged ARP traffic (bdschuym) | ||
12 | * Oct 06 2003: filter encapsulated IP/ARP VLAN traffic on untagged bridge | ||
13 | * (bdschuym) | ||
14 | * Sep 01 2004: add IPv6 filtering (bdschuym) | ||
15 | * | 8 | * |
16 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
17 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
@@ -204,15 +197,24 @@ static inline void nf_bridge_save_header(struct sk_buff *skb) | |||
204 | skb->nf_bridge->data, header_size); | 197 | skb->nf_bridge->data, header_size); |
205 | } | 198 | } |
206 | 199 | ||
207 | /* | 200 | static inline void nf_bridge_update_protocol(struct sk_buff *skb) |
208 | * When forwarding bridge frames, we save a copy of the original | 201 | { |
209 | * header before processing. | 202 | if (skb->nf_bridge->mask & BRNF_8021Q) |
203 | skb->protocol = htons(ETH_P_8021Q); | ||
204 | else if (skb->nf_bridge->mask & BRNF_PPPoE) | ||
205 | skb->protocol = htons(ETH_P_PPP_SES); | ||
206 | } | ||
207 | |||
208 | /* Fill in the header for fragmented IP packets handled by | ||
209 | * the IPv4 connection tracking code. | ||
210 | */ | 210 | */ |
211 | int nf_bridge_copy_header(struct sk_buff *skb) | 211 | int nf_bridge_copy_header(struct sk_buff *skb) |
212 | { | 212 | { |
213 | int err; | 213 | int err; |
214 | int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb); | 214 | unsigned int header_size; |
215 | 215 | ||
216 | nf_bridge_update_protocol(skb); | ||
217 | header_size = ETH_HLEN + nf_bridge_encap_header_len(skb); | ||
216 | err = skb_cow_head(skb, header_size); | 218 | err = skb_cow_head(skb, header_size); |
217 | if (err) | 219 | if (err) |
218 | return err; | 220 | return err; |
@@ -246,27 +248,48 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb) | |||
246 | skb_dst_set(skb, &rt->u.dst); | 248 | skb_dst_set(skb, &rt->u.dst); |
247 | 249 | ||
248 | skb->dev = nf_bridge->physindev; | 250 | skb->dev = nf_bridge->physindev; |
251 | nf_bridge_update_protocol(skb); | ||
249 | nf_bridge_push_encap_header(skb); | 252 | nf_bridge_push_encap_header(skb); |
250 | NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, | 253 | NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, |
251 | br_handle_frame_finish, 1); | 254 | br_handle_frame_finish, 1); |
252 | 255 | ||
253 | return 0; | 256 | return 0; |
254 | } | 257 | } |
255 | 258 | ||
256 | static void __br_dnat_complain(void) | 259 | /* Obtain the correct destination MAC address, while preserving the original |
260 | * source MAC address. If we already know this address, we just copy it. If we | ||
261 | * don't, we use the neighbour framework to find out. In both cases, we make | ||
262 | * sure that br_handle_frame_finish() is called afterwards. | ||
263 | */ | ||
264 | static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb) | ||
257 | { | 265 | { |
258 | static unsigned long last_complaint; | 266 | struct nf_bridge_info *nf_bridge = skb->nf_bridge; |
267 | struct dst_entry *dst; | ||
259 | 268 | ||
260 | if (jiffies - last_complaint >= 5 * HZ) { | 269 | skb->dev = bridge_parent(skb->dev); |
261 | printk(KERN_WARNING "Performing cross-bridge DNAT requires IP " | 270 | if (!skb->dev) |
262 | "forwarding to be enabled\n"); | 271 | goto free_skb; |
263 | last_complaint = jiffies; | 272 | dst = skb_dst(skb); |
273 | if (dst->hh) { | ||
274 | neigh_hh_bridge(dst->hh, skb); | ||
275 | skb->dev = nf_bridge->physindev; | ||
276 | return br_handle_frame_finish(skb); | ||
277 | } else if (dst->neighbour) { | ||
278 | /* the neighbour function below overwrites the complete | ||
279 | * MAC header, so we save the Ethernet source address and | ||
280 | * protocol number. */ | ||
281 | skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), skb->nf_bridge->data, ETH_HLEN-ETH_ALEN); | ||
282 | /* tell br_dev_xmit to continue with forwarding */ | ||
283 | nf_bridge->mask |= BRNF_BRIDGED_DNAT; | ||
284 | return dst->neighbour->output(skb); | ||
264 | } | 285 | } |
286 | free_skb: | ||
287 | kfree_skb(skb); | ||
288 | return 0; | ||
265 | } | 289 | } |
266 | 290 | ||
267 | /* This requires some explaining. If DNAT has taken place, | 291 | /* This requires some explaining. If DNAT has taken place, |
268 | * we will need to fix up the destination Ethernet address, | 292 | * we will need to fix up the destination Ethernet address. |
269 | * and this is a tricky process. | ||
270 | * | 293 | * |
271 | * There are two cases to consider: | 294 | * There are two cases to consider: |
272 | * 1. The packet was DNAT'ed to a device in the same bridge | 295 | * 1. The packet was DNAT'ed to a device in the same bridge |
@@ -280,62 +303,29 @@ static void __br_dnat_complain(void) | |||
280 | * call ip_route_input() and to look at skb->dst->dev, which is | 303 | * call ip_route_input() and to look at skb->dst->dev, which is |
281 | * changed to the destination device if ip_route_input() succeeds. | 304 | * changed to the destination device if ip_route_input() succeeds. |
282 | * | 305 | * |
283 | * Let us first consider the case that ip_route_input() succeeds: | 306 | * Let's first consider the case that ip_route_input() succeeds: |
284 | * | ||
285 | * If skb->dst->dev equals the logical bridge device the packet | ||
286 | * came in on, we can consider this bridging. The packet is passed | ||
287 | * through the neighbour output function to build a new destination | ||
288 | * MAC address, which will make the packet enter br_nf_local_out() | ||
289 | * not much later. In that function it is assured that the iptables | ||
290 | * FORWARD chain is traversed for the packet. | ||
291 | * | 307 | * |
308 | * If the output device equals the logical bridge device the packet | ||
309 | * came in on, we can consider this bridging. The corresponding MAC | ||
310 | * address will be obtained in br_nf_pre_routing_finish_bridge. | ||
292 | * Otherwise, the packet is considered to be routed and we just | 311 | * Otherwise, the packet is considered to be routed and we just |
293 | * change the destination MAC address so that the packet will | 312 | * change the destination MAC address so that the packet will |
294 | * later be passed up to the IP stack to be routed. For a redirected | 313 | * later be passed up to the IP stack to be routed. For a redirected |
295 | * packet, ip_route_input() will give back the localhost as output device, | 314 | * packet, ip_route_input() will give back the localhost as output device, |
296 | * which differs from the bridge device. | 315 | * which differs from the bridge device. |
297 | * | 316 | * |
298 | * Let us now consider the case that ip_route_input() fails: | 317 | * Let's now consider the case that ip_route_input() fails: |
299 | * | 318 | * |
300 | * This can be because the destination address is martian, in which case | 319 | * This can be because the destination address is martian, in which case |
301 | * the packet will be dropped. | 320 | * the packet will be dropped. |
302 | * After a "echo '0' > /proc/sys/net/ipv4/ip_forward" ip_route_input() | 321 | * If IP forwarding is disabled, ip_route_input() will fail, while |
303 | * will fail, while __ip_route_output_key() will return success. The source | 322 | * ip_route_output_key() can return success. The source |
304 | * address for __ip_route_output_key() is set to zero, so __ip_route_output_key | 323 | * address for ip_route_output_key() is set to zero, so ip_route_output_key() |
305 | * thinks we're handling a locally generated packet and won't care | 324 | * thinks we're handling a locally generated packet and won't care |
306 | * if IP forwarding is allowed. We send a warning message to the users's | 325 | * if IP forwarding is enabled. If the output device equals the logical bridge |
307 | * log telling her to put IP forwarding on. | 326 | * device, we proceed as if ip_route_input() succeeded. If it differs from the |
308 | * | 327 | * logical bridge port or if ip_route_output_key() fails we drop the packet. |
309 | * ip_route_input() will also fail if there is no route available. | 328 | */ |
310 | * In that case we just drop the packet. | ||
311 | * | ||
312 | * --Lennert, 20020411 | ||
313 | * --Bart, 20020416 (updated) | ||
314 | * --Bart, 20021007 (updated) | ||
315 | * --Bart, 20062711 (updated) */ | ||
316 | static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb) | ||
317 | { | ||
318 | if (skb->pkt_type == PACKET_OTHERHOST) { | ||
319 | skb->pkt_type = PACKET_HOST; | ||
320 | skb->nf_bridge->mask |= BRNF_PKT_TYPE; | ||
321 | } | ||
322 | skb->nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING; | ||
323 | |||
324 | skb->dev = bridge_parent(skb->dev); | ||
325 | if (skb->dev) { | ||
326 | struct dst_entry *dst = skb_dst(skb); | ||
327 | |||
328 | nf_bridge_pull_encap_header(skb); | ||
329 | |||
330 | if (dst->hh) | ||
331 | return neigh_hh_output(dst->hh, skb); | ||
332 | else if (dst->neighbour) | ||
333 | return dst->neighbour->output(skb); | ||
334 | } | ||
335 | kfree_skb(skb); | ||
336 | return 0; | ||
337 | } | ||
338 | |||
339 | static int br_nf_pre_routing_finish(struct sk_buff *skb) | 329 | static int br_nf_pre_routing_finish(struct sk_buff *skb) |
340 | { | 330 | { |
341 | struct net_device *dev = skb->dev; | 331 | struct net_device *dev = skb->dev; |
@@ -379,11 +369,6 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb) | |||
379 | skb_dst_set(skb, (struct dst_entry *)rt); | 369 | skb_dst_set(skb, (struct dst_entry *)rt); |
380 | goto bridged_dnat; | 370 | goto bridged_dnat; |
381 | } | 371 | } |
382 | /* we are sure that forwarding is disabled, so printing | ||
383 | * this message is no problem. Note that the packet could | ||
384 | * still have a martian destination address, in which case | ||
385 | * the packet could be dropped even if forwarding were enabled */ | ||
386 | __br_dnat_complain(); | ||
387 | dst_release((struct dst_entry *)rt); | 372 | dst_release((struct dst_entry *)rt); |
388 | } | 373 | } |
389 | free_skb: | 374 | free_skb: |
@@ -392,12 +377,11 @@ free_skb: | |||
392 | } else { | 377 | } else { |
393 | if (skb_dst(skb)->dev == dev) { | 378 | if (skb_dst(skb)->dev == dev) { |
394 | bridged_dnat: | 379 | bridged_dnat: |
395 | /* Tell br_nf_local_out this is a | ||
396 | * bridged frame */ | ||
397 | nf_bridge->mask |= BRNF_BRIDGED_DNAT; | ||
398 | skb->dev = nf_bridge->physindev; | 380 | skb->dev = nf_bridge->physindev; |
381 | nf_bridge_update_protocol(skb); | ||
399 | nf_bridge_push_encap_header(skb); | 382 | nf_bridge_push_encap_header(skb); |
400 | NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, | 383 | NF_HOOK_THRESH(NFPROTO_BRIDGE, |
384 | NF_BR_PRE_ROUTING, | ||
401 | skb, skb->dev, NULL, | 385 | skb, skb->dev, NULL, |
402 | br_nf_pre_routing_finish_bridge, | 386 | br_nf_pre_routing_finish_bridge, |
403 | 1); | 387 | 1); |
@@ -417,8 +401,9 @@ bridged_dnat: | |||
417 | } | 401 | } |
418 | 402 | ||
419 | skb->dev = nf_bridge->physindev; | 403 | skb->dev = nf_bridge->physindev; |
404 | nf_bridge_update_protocol(skb); | ||
420 | nf_bridge_push_encap_header(skb); | 405 | nf_bridge_push_encap_header(skb); |
421 | NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, | 406 | NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, |
422 | br_handle_frame_finish, 1); | 407 | br_handle_frame_finish, 1); |
423 | 408 | ||
424 | return 0; | 409 | return 0; |
@@ -437,6 +422,10 @@ static struct net_device *setup_pre_routing(struct sk_buff *skb) | |||
437 | nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING; | 422 | nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING; |
438 | nf_bridge->physindev = skb->dev; | 423 | nf_bridge->physindev = skb->dev; |
439 | skb->dev = bridge_parent(skb->dev); | 424 | skb->dev = bridge_parent(skb->dev); |
425 | if (skb->protocol == htons(ETH_P_8021Q)) | ||
426 | nf_bridge->mask |= BRNF_8021Q; | ||
427 | else if (skb->protocol == htons(ETH_P_PPP_SES)) | ||
428 | nf_bridge->mask |= BRNF_PPPoE; | ||
440 | 429 | ||
441 | return skb->dev; | 430 | return skb->dev; |
442 | } | 431 | } |
@@ -535,7 +524,8 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook, | |||
535 | if (!setup_pre_routing(skb)) | 524 | if (!setup_pre_routing(skb)) |
536 | return NF_DROP; | 525 | return NF_DROP; |
537 | 526 | ||
538 | NF_HOOK(PF_INET6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, | 527 | skb->protocol = htons(ETH_P_IPV6); |
528 | NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, | ||
539 | br_nf_pre_routing_finish_ipv6); | 529 | br_nf_pre_routing_finish_ipv6); |
540 | 530 | ||
541 | return NF_STOLEN; | 531 | return NF_STOLEN; |
@@ -607,8 +597,9 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb, | |||
607 | if (!setup_pre_routing(skb)) | 597 | if (!setup_pre_routing(skb)) |
608 | return NF_DROP; | 598 | return NF_DROP; |
609 | store_orig_dstaddr(skb); | 599 | store_orig_dstaddr(skb); |
600 | skb->protocol = htons(ETH_P_IP); | ||
610 | 601 | ||
611 | NF_HOOK(PF_INET, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, | 602 | NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, |
612 | br_nf_pre_routing_finish); | 603 | br_nf_pre_routing_finish); |
613 | 604 | ||
614 | return NF_STOLEN; | 605 | return NF_STOLEN; |
@@ -652,11 +643,13 @@ static int br_nf_forward_finish(struct sk_buff *skb) | |||
652 | skb->pkt_type = PACKET_OTHERHOST; | 643 | skb->pkt_type = PACKET_OTHERHOST; |
653 | nf_bridge->mask ^= BRNF_PKT_TYPE; | 644 | nf_bridge->mask ^= BRNF_PKT_TYPE; |
654 | } | 645 | } |
646 | nf_bridge_update_protocol(skb); | ||
655 | } else { | 647 | } else { |
656 | in = *((struct net_device **)(skb->cb)); | 648 | in = *((struct net_device **)(skb->cb)); |
657 | } | 649 | } |
658 | nf_bridge_push_encap_header(skb); | 650 | nf_bridge_push_encap_header(skb); |
659 | NF_HOOK_THRESH(PF_BRIDGE, NF_BR_FORWARD, skb, in, | 651 | |
652 | NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in, | ||
660 | skb->dev, br_forward_finish, 1); | 653 | skb->dev, br_forward_finish, 1); |
661 | return 0; | 654 | return 0; |
662 | } | 655 | } |
@@ -707,6 +700,10 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb, | |||
707 | /* The physdev module checks on this */ | 700 | /* The physdev module checks on this */ |
708 | nf_bridge->mask |= BRNF_BRIDGED; | 701 | nf_bridge->mask |= BRNF_BRIDGED; |
709 | nf_bridge->physoutdev = skb->dev; | 702 | nf_bridge->physoutdev = skb->dev; |
703 | if (pf == PF_INET) | ||
704 | skb->protocol = htons(ETH_P_IP); | ||
705 | else | ||
706 | skb->protocol = htons(ETH_P_IPV6); | ||
710 | 707 | ||
711 | NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent, | 708 | NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent, |
712 | br_nf_forward_finish); | 709 | br_nf_forward_finish); |
@@ -744,60 +741,11 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb, | |||
744 | return NF_STOLEN; | 741 | return NF_STOLEN; |
745 | } | 742 | } |
746 | 743 | ||
747 | /* PF_BRIDGE/LOCAL_OUT *********************************************** | ||
748 | * | ||
749 | * This function sees both locally originated IP packets and forwarded | ||
750 | * IP packets (in both cases the destination device is a bridge | ||
751 | * device). It also sees bridged-and-DNAT'ed packets. | ||
752 | * | ||
753 | * If (nf_bridge->mask & BRNF_BRIDGED_DNAT) then the packet is bridged | ||
754 | * and we fake the PF_BRIDGE/FORWARD hook. The function br_nf_forward() | ||
755 | * will then fake the PF_INET/FORWARD hook. br_nf_local_out() has priority | ||
756 | * NF_BR_PRI_FIRST, so no relevant PF_BRIDGE/INPUT functions have been nor | ||
757 | * will be executed. | ||
758 | */ | ||
759 | static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff *skb, | ||
760 | const struct net_device *in, | ||
761 | const struct net_device *out, | ||
762 | int (*okfn)(struct sk_buff *)) | ||
763 | { | ||
764 | struct net_device *realindev; | ||
765 | struct nf_bridge_info *nf_bridge; | ||
766 | |||
767 | if (!skb->nf_bridge) | ||
768 | return NF_ACCEPT; | ||
769 | |||
770 | /* Need exclusive nf_bridge_info since we might have multiple | ||
771 | * different physoutdevs. */ | ||
772 | if (!nf_bridge_unshare(skb)) | ||
773 | return NF_DROP; | ||
774 | |||
775 | nf_bridge = skb->nf_bridge; | ||
776 | if (!(nf_bridge->mask & BRNF_BRIDGED_DNAT)) | ||
777 | return NF_ACCEPT; | ||
778 | |||
779 | /* Bridged, take PF_BRIDGE/FORWARD. | ||
780 | * (see big note in front of br_nf_pre_routing_finish) */ | ||
781 | nf_bridge->physoutdev = skb->dev; | ||
782 | realindev = nf_bridge->physindev; | ||
783 | |||
784 | if (nf_bridge->mask & BRNF_PKT_TYPE) { | ||
785 | skb->pkt_type = PACKET_OTHERHOST; | ||
786 | nf_bridge->mask ^= BRNF_PKT_TYPE; | ||
787 | } | ||
788 | nf_bridge_push_encap_header(skb); | ||
789 | |||
790 | NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, realindev, skb->dev, | ||
791 | br_forward_finish); | ||
792 | return NF_STOLEN; | ||
793 | } | ||
794 | |||
795 | #if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE) | 744 | #if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE) |
796 | static int br_nf_dev_queue_xmit(struct sk_buff *skb) | 745 | static int br_nf_dev_queue_xmit(struct sk_buff *skb) |
797 | { | 746 | { |
798 | if (skb->nfct != NULL && | 747 | if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) && |
799 | (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb)) && | 748 | skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu && |
800 | skb->len > skb->dev->mtu && | ||
801 | !skb_is_gso(skb)) | 749 | !skb_is_gso(skb)) |
802 | return ip_fragment(skb, br_dev_queue_push_xmit); | 750 | return ip_fragment(skb, br_dev_queue_push_xmit); |
803 | else | 751 | else |
@@ -820,21 +768,7 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb, | |||
820 | struct net_device *realoutdev = bridge_parent(skb->dev); | 768 | struct net_device *realoutdev = bridge_parent(skb->dev); |
821 | u_int8_t pf; | 769 | u_int8_t pf; |
822 | 770 | ||
823 | #ifdef CONFIG_NETFILTER_DEBUG | 771 | if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED)) |
824 | /* Be very paranoid. This probably won't happen anymore, but let's | ||
825 | * keep the check just to be sure... */ | ||
826 | if (skb_mac_header(skb) < skb->head || | ||
827 | skb_mac_header(skb) + ETH_HLEN > skb->data) { | ||
828 | printk(KERN_CRIT "br_netfilter: Argh!! br_nf_post_routing: " | ||
829 | "bad mac.raw pointer.\n"); | ||
830 | goto print_error; | ||
831 | } | ||
832 | #endif | ||
833 | |||
834 | if (!nf_bridge) | ||
835 | return NF_ACCEPT; | ||
836 | |||
837 | if (!(nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT))) | ||
838 | return NF_ACCEPT; | 772 | return NF_ACCEPT; |
839 | 773 | ||
840 | if (!realoutdev) | 774 | if (!realoutdev) |
@@ -849,13 +783,6 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb, | |||
849 | else | 783 | else |
850 | return NF_ACCEPT; | 784 | return NF_ACCEPT; |
851 | 785 | ||
852 | #ifdef CONFIG_NETFILTER_DEBUG | ||
853 | if (skb_dst(skb) == NULL) { | ||
854 | printk(KERN_INFO "br_netfilter post_routing: skb->dst == NULL\n"); | ||
855 | goto print_error; | ||
856 | } | ||
857 | #endif | ||
858 | |||
859 | /* We assume any code from br_dev_queue_push_xmit onwards doesn't care | 786 | /* We assume any code from br_dev_queue_push_xmit onwards doesn't care |
860 | * about the value of skb->pkt_type. */ | 787 | * about the value of skb->pkt_type. */ |
861 | if (skb->pkt_type == PACKET_OTHERHOST) { | 788 | if (skb->pkt_type == PACKET_OTHERHOST) { |
@@ -865,24 +792,15 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb, | |||
865 | 792 | ||
866 | nf_bridge_pull_encap_header(skb); | 793 | nf_bridge_pull_encap_header(skb); |
867 | nf_bridge_save_header(skb); | 794 | nf_bridge_save_header(skb); |
795 | if (pf == PF_INET) | ||
796 | skb->protocol = htons(ETH_P_IP); | ||
797 | else | ||
798 | skb->protocol = htons(ETH_P_IPV6); | ||
868 | 799 | ||
869 | NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev, | 800 | NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev, |
870 | br_nf_dev_queue_xmit); | 801 | br_nf_dev_queue_xmit); |
871 | 802 | ||
872 | return NF_STOLEN; | 803 | return NF_STOLEN; |
873 | |||
874 | #ifdef CONFIG_NETFILTER_DEBUG | ||
875 | print_error: | ||
876 | if (skb->dev != NULL) { | ||
877 | printk("[%s]", skb->dev->name); | ||
878 | if (realoutdev) | ||
879 | printk("[%s]", realoutdev->name); | ||
880 | } | ||
881 | printk(" head:%p, raw:%p, data:%p\n", skb->head, skb_mac_header(skb), | ||
882 | skb->data); | ||
883 | dump_stack(); | ||
884 | return NF_ACCEPT; | ||
885 | #endif | ||
886 | } | 804 | } |
887 | 805 | ||
888 | /* IP/SABOTAGE *****************************************************/ | 806 | /* IP/SABOTAGE *****************************************************/ |
@@ -901,10 +819,8 @@ static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb, | |||
901 | return NF_ACCEPT; | 819 | return NF_ACCEPT; |
902 | } | 820 | } |
903 | 821 | ||
904 | /* For br_nf_local_out we need (prio = NF_BR_PRI_FIRST), to insure that innocent | 822 | /* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because |
905 | * PF_BRIDGE/NF_BR_LOCAL_OUT functions don't get bridged traffic as input. | 823 | * br_dev_queue_push_xmit is called afterwards */ |
906 | * For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because | ||
907 | * ip_refrag() can return NF_STOLEN. */ | ||
908 | static struct nf_hook_ops br_nf_ops[] __read_mostly = { | 824 | static struct nf_hook_ops br_nf_ops[] __read_mostly = { |
909 | { | 825 | { |
910 | .hook = br_nf_pre_routing, | 826 | .hook = br_nf_pre_routing, |
@@ -935,13 +851,6 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = { | |||
935 | .priority = NF_BR_PRI_BRNF, | 851 | .priority = NF_BR_PRI_BRNF, |
936 | }, | 852 | }, |
937 | { | 853 | { |
938 | .hook = br_nf_local_out, | ||
939 | .owner = THIS_MODULE, | ||
940 | .pf = PF_BRIDGE, | ||
941 | .hooknum = NF_BR_LOCAL_OUT, | ||
942 | .priority = NF_BR_PRI_FIRST, | ||
943 | }, | ||
944 | { | ||
945 | .hook = br_nf_post_routing, | 854 | .hook = br_nf_post_routing, |
946 | .owner = THIS_MODULE, | 855 | .owner = THIS_MODULE, |
947 | .pf = PF_BRIDGE, | 856 | .pf = PF_BRIDGE, |
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index aa56ac2c8829..fe0a79018ab2 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
@@ -42,8 +42,8 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por | |||
42 | struct nlmsghdr *nlh; | 42 | struct nlmsghdr *nlh; |
43 | u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; | 43 | u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; |
44 | 44 | ||
45 | pr_debug("br_fill_info event %d port %s master %s\n", | 45 | br_debug(br, "br_fill_info event %d port %s master %s\n", |
46 | event, dev->name, br->dev->name); | 46 | event, dev->name, br->dev->name); |
47 | 47 | ||
48 | nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags); | 48 | nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags); |
49 | if (nlh == NULL) | 49 | if (nlh == NULL) |
@@ -87,7 +87,9 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port) | |||
87 | struct sk_buff *skb; | 87 | struct sk_buff *skb; |
88 | int err = -ENOBUFS; | 88 | int err = -ENOBUFS; |
89 | 89 | ||
90 | pr_debug("bridge notify event=%d\n", event); | 90 | br_debug(port->br, "port %u(%s) event %d\n", |
91 | (unsigned)port->port_no, port->dev->name, event); | ||
92 | |||
91 | skb = nlmsg_new(br_nlmsg_size(), GFP_ATOMIC); | 93 | skb = nlmsg_new(br_nlmsg_size(), GFP_ATOMIC); |
92 | if (skb == NULL) | 94 | if (skb == NULL) |
93 | goto errout; | 95 | goto errout; |
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c index 1413b72acc7f..717e1fd6133c 100644 --- a/net/bridge/br_notify.c +++ b/net/bridge/br_notify.c | |||
@@ -34,6 +34,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v | |||
34 | struct net_device *dev = ptr; | 34 | struct net_device *dev = ptr; |
35 | struct net_bridge_port *p = dev->br_port; | 35 | struct net_bridge_port *p = dev->br_port; |
36 | struct net_bridge *br; | 36 | struct net_bridge *br; |
37 | int err; | ||
37 | 38 | ||
38 | /* not a port of a bridge */ | 39 | /* not a port of a bridge */ |
39 | if (p == NULL) | 40 | if (p == NULL) |
@@ -83,6 +84,12 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v | |||
83 | br_del_if(br, dev); | 84 | br_del_if(br, dev); |
84 | break; | 85 | break; |
85 | 86 | ||
87 | case NETDEV_CHANGENAME: | ||
88 | err = br_sysfs_renameif(p); | ||
89 | if (err) | ||
90 | return notifier_from_errno(err); | ||
91 | break; | ||
92 | |||
86 | case NETDEV_PRE_TYPE_CHANGE: | 93 | case NETDEV_PRE_TYPE_CHANGE: |
87 | /* Forbid underlaying device to change its type. */ | 94 | /* Forbid underlaying device to change its type. */ |
88 | return NOTIFY_BAD; | 95 | return NOTIFY_BAD; |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 3d2d3fe0a97e..0f4a74bc6a9b 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -139,6 +139,10 @@ struct net_bridge_port | |||
139 | struct hlist_head mglist; | 139 | struct hlist_head mglist; |
140 | struct hlist_node rlist; | 140 | struct hlist_node rlist; |
141 | #endif | 141 | #endif |
142 | |||
143 | #ifdef CONFIG_SYSFS | ||
144 | char sysfs_name[IFNAMSIZ]; | ||
145 | #endif | ||
142 | }; | 146 | }; |
143 | 147 | ||
144 | struct br_cpu_netstats { | 148 | struct br_cpu_netstats { |
@@ -240,6 +244,21 @@ struct br_input_skb_cb { | |||
240 | # define BR_INPUT_SKB_CB_MROUTERS_ONLY(__skb) (0) | 244 | # define BR_INPUT_SKB_CB_MROUTERS_ONLY(__skb) (0) |
241 | #endif | 245 | #endif |
242 | 246 | ||
247 | #define br_printk(level, br, format, args...) \ | ||
248 | printk(level "%s: " format, (br)->dev->name, ##args) | ||
249 | |||
250 | #define br_err(__br, format, args...) \ | ||
251 | br_printk(KERN_ERR, __br, format, ##args) | ||
252 | #define br_warn(__br, format, args...) \ | ||
253 | br_printk(KERN_WARNING, __br, format, ##args) | ||
254 | #define br_notice(__br, format, args...) \ | ||
255 | br_printk(KERN_NOTICE, __br, format, ##args) | ||
256 | #define br_info(__br, format, args...) \ | ||
257 | br_printk(KERN_INFO, __br, format, ##args) | ||
258 | |||
259 | #define br_debug(br, format, args...) \ | ||
260 | pr_debug("%s: " format, (br)->dev->name, ##args) | ||
261 | |||
243 | extern struct notifier_block br_device_notifier; | 262 | extern struct notifier_block br_device_notifier; |
244 | extern const u8 br_group_address[ETH_ALEN]; | 263 | extern const u8 br_group_address[ETH_ALEN]; |
245 | 264 | ||
@@ -253,8 +272,18 @@ static inline int br_is_root_bridge(const struct net_bridge *br) | |||
253 | extern void br_dev_setup(struct net_device *dev); | 272 | extern void br_dev_setup(struct net_device *dev); |
254 | extern netdev_tx_t br_dev_xmit(struct sk_buff *skb, | 273 | extern netdev_tx_t br_dev_xmit(struct sk_buff *skb, |
255 | struct net_device *dev); | 274 | struct net_device *dev); |
256 | extern bool br_devices_support_netpoll(struct net_bridge *br); | 275 | #ifdef CONFIG_NET_POLL_CONTROLLER |
257 | extern void br_netpoll_cleanup(struct net_device *br_dev); | 276 | extern void br_netpoll_cleanup(struct net_device *dev); |
277 | extern void br_netpoll_enable(struct net_bridge *br, | ||
278 | struct net_device *dev); | ||
279 | extern void br_netpoll_disable(struct net_bridge *br, | ||
280 | struct net_device *dev); | ||
281 | #else | ||
282 | #define br_netpoll_cleanup(br) | ||
283 | #define br_netpoll_enable(br, dev) | ||
284 | #define br_netpoll_disable(br, dev) | ||
285 | |||
286 | #endif | ||
258 | 287 | ||
259 | /* br_fdb.c */ | 288 | /* br_fdb.c */ |
260 | extern int br_fdb_init(void); | 289 | extern int br_fdb_init(void); |
@@ -455,6 +484,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port); | |||
455 | /* br_sysfs_if.c */ | 484 | /* br_sysfs_if.c */ |
456 | extern const struct sysfs_ops brport_sysfs_ops; | 485 | extern const struct sysfs_ops brport_sysfs_ops; |
457 | extern int br_sysfs_addif(struct net_bridge_port *p); | 486 | extern int br_sysfs_addif(struct net_bridge_port *p); |
487 | extern int br_sysfs_renameif(struct net_bridge_port *p); | ||
458 | 488 | ||
459 | /* br_sysfs_br.c */ | 489 | /* br_sysfs_br.c */ |
460 | extern int br_sysfs_addbr(struct net_device *dev); | 490 | extern int br_sysfs_addbr(struct net_device *dev); |
@@ -463,6 +493,7 @@ extern void br_sysfs_delbr(struct net_device *dev); | |||
463 | #else | 493 | #else |
464 | 494 | ||
465 | #define br_sysfs_addif(p) (0) | 495 | #define br_sysfs_addif(p) (0) |
496 | #define br_sysfs_renameif(p) (0) | ||
466 | #define br_sysfs_addbr(dev) (0) | 497 | #define br_sysfs_addbr(dev) (0) |
467 | #define br_sysfs_delbr(dev) do { } while(0) | 498 | #define br_sysfs_delbr(dev) do { } while(0) |
468 | #endif /* CONFIG_SYSFS */ | 499 | #endif /* CONFIG_SYSFS */ |
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index edcf14b560f6..57186d84d2bd 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c | |||
@@ -31,10 +31,9 @@ static const char *const br_port_state_names[] = { | |||
31 | 31 | ||
32 | void br_log_state(const struct net_bridge_port *p) | 32 | void br_log_state(const struct net_bridge_port *p) |
33 | { | 33 | { |
34 | pr_info("%s: port %d(%s) entering %s state\n", | 34 | br_info(p->br, "port %u(%s) entering %s state\n", |
35 | p->br->dev->name, p->port_no, p->dev->name, | 35 | (unsigned) p->port_no, p->dev->name, |
36 | br_port_state_names[p->state]); | 36 | br_port_state_names[p->state]); |
37 | |||
38 | } | 37 | } |
39 | 38 | ||
40 | /* called under bridge lock */ | 39 | /* called under bridge lock */ |
@@ -300,7 +299,7 @@ void br_topology_change_detection(struct net_bridge *br) | |||
300 | if (br->stp_enabled != BR_KERNEL_STP) | 299 | if (br->stp_enabled != BR_KERNEL_STP) |
301 | return; | 300 | return; |
302 | 301 | ||
303 | pr_info("%s: topology change detected, %s\n", br->dev->name, | 302 | br_info(br, "topology change detected, %s\n", |
304 | isroot ? "propagating" : "sending tcn bpdu"); | 303 | isroot ? "propagating" : "sending tcn bpdu"); |
305 | 304 | ||
306 | if (isroot) { | 305 | if (isroot) { |
@@ -469,8 +468,8 @@ void br_received_config_bpdu(struct net_bridge_port *p, struct br_config_bpdu *b | |||
469 | void br_received_tcn_bpdu(struct net_bridge_port *p) | 468 | void br_received_tcn_bpdu(struct net_bridge_port *p) |
470 | { | 469 | { |
471 | if (br_is_designated_port(p)) { | 470 | if (br_is_designated_port(p)) { |
472 | pr_info("%s: received tcn bpdu on port %i(%s)\n", | 471 | br_info(p->br, "port %u(%s) received tcn bpdu\n", |
473 | p->br->dev->name, p->port_no, p->dev->name); | 472 | (unsigned) p->port_no, p->dev->name); |
474 | 473 | ||
475 | br_topology_change_detection(p->br); | 474 | br_topology_change_detection(p->br); |
476 | br_topology_change_acknowledge(p); | 475 | br_topology_change_acknowledge(p); |
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c index d66cce11f3bf..217bd225a42f 100644 --- a/net/bridge/br_stp_bpdu.c +++ b/net/bridge/br_stp_bpdu.c | |||
@@ -50,7 +50,7 @@ static void br_send_bpdu(struct net_bridge_port *p, | |||
50 | 50 | ||
51 | llc_mac_hdr_init(skb, p->dev->dev_addr, p->br->group_addr); | 51 | llc_mac_hdr_init(skb, p->dev->dev_addr, p->br->group_addr); |
52 | 52 | ||
53 | NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, | 53 | NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, |
54 | dev_queue_xmit); | 54 | dev_queue_xmit); |
55 | } | 55 | } |
56 | 56 | ||
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index d527119e9f54..1d8826914cbf 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c | |||
@@ -85,17 +85,16 @@ void br_stp_enable_port(struct net_bridge_port *p) | |||
85 | { | 85 | { |
86 | br_init_port(p); | 86 | br_init_port(p); |
87 | br_port_state_selection(p->br); | 87 | br_port_state_selection(p->br); |
88 | br_log_state(p); | ||
88 | } | 89 | } |
89 | 90 | ||
90 | /* called under bridge lock */ | 91 | /* called under bridge lock */ |
91 | void br_stp_disable_port(struct net_bridge_port *p) | 92 | void br_stp_disable_port(struct net_bridge_port *p) |
92 | { | 93 | { |
93 | struct net_bridge *br; | 94 | struct net_bridge *br = p->br; |
94 | int wasroot; | 95 | int wasroot; |
95 | 96 | ||
96 | br = p->br; | 97 | br_log_state(p); |
97 | printk(KERN_INFO "%s: port %i(%s) entering %s state\n", | ||
98 | br->dev->name, p->port_no, p->dev->name, "disabled"); | ||
99 | 98 | ||
100 | wasroot = br_is_root_bridge(br); | 99 | wasroot = br_is_root_bridge(br); |
101 | br_become_designated_port(p); | 100 | br_become_designated_port(p); |
@@ -127,11 +126,10 @@ static void br_stp_start(struct net_bridge *br) | |||
127 | r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); | 126 | r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); |
128 | if (r == 0) { | 127 | if (r == 0) { |
129 | br->stp_enabled = BR_USER_STP; | 128 | br->stp_enabled = BR_USER_STP; |
130 | printk(KERN_INFO "%s: userspace STP started\n", br->dev->name); | 129 | br_debug(br, "userspace STP started\n"); |
131 | } else { | 130 | } else { |
132 | br->stp_enabled = BR_KERNEL_STP; | 131 | br->stp_enabled = BR_KERNEL_STP; |
133 | printk(KERN_INFO "%s: starting userspace STP failed, " | 132 | br_debug(br, "using kernel STP\n"); |
134 | "starting kernel STP\n", br->dev->name); | ||
135 | 133 | ||
136 | /* To start timers on any ports left in blocking */ | 134 | /* To start timers on any ports left in blocking */ |
137 | spin_lock_bh(&br->lock); | 135 | spin_lock_bh(&br->lock); |
@@ -148,9 +146,7 @@ static void br_stp_stop(struct net_bridge *br) | |||
148 | 146 | ||
149 | if (br->stp_enabled == BR_USER_STP) { | 147 | if (br->stp_enabled == BR_USER_STP) { |
150 | r = call_usermodehelper(BR_STP_PROG, argv, envp, 1); | 148 | r = call_usermodehelper(BR_STP_PROG, argv, envp, 1); |
151 | printk(KERN_INFO "%s: userspace STP stopped, return code %d\n", | 149 | br_info(br, "userspace STP stopped, return code %d\n", r); |
152 | br->dev->name, r); | ||
153 | |||
154 | 150 | ||
155 | /* To start timers on any ports left in blocking */ | 151 | /* To start timers on any ports left in blocking */ |
156 | spin_lock_bh(&br->lock); | 152 | spin_lock_bh(&br->lock); |
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c index 772a140bfdf0..7b22456023c5 100644 --- a/net/bridge/br_stp_timer.c +++ b/net/bridge/br_stp_timer.c | |||
@@ -35,7 +35,7 @@ static void br_hello_timer_expired(unsigned long arg) | |||
35 | { | 35 | { |
36 | struct net_bridge *br = (struct net_bridge *)arg; | 36 | struct net_bridge *br = (struct net_bridge *)arg; |
37 | 37 | ||
38 | pr_debug("%s: hello timer expired\n", br->dev->name); | 38 | br_debug(br, "hello timer expired\n"); |
39 | spin_lock(&br->lock); | 39 | spin_lock(&br->lock); |
40 | if (br->dev->flags & IFF_UP) { | 40 | if (br->dev->flags & IFF_UP) { |
41 | br_config_bpdu_generation(br); | 41 | br_config_bpdu_generation(br); |
@@ -55,13 +55,9 @@ static void br_message_age_timer_expired(unsigned long arg) | |||
55 | if (p->state == BR_STATE_DISABLED) | 55 | if (p->state == BR_STATE_DISABLED) |
56 | return; | 56 | return; |
57 | 57 | ||
58 | 58 | br_info(br, "port %u(%s) neighbor %.2x%.2x.%pM lost\n", | |
59 | pr_info("%s: neighbor %.2x%.2x.%.2x:%.2x:%.2x:%.2x:%.2x:%.2x lost on port %d(%s)\n", | 59 | (unsigned) p->port_no, p->dev->name, |
60 | br->dev->name, | 60 | id->prio[0], id->prio[1], &id->addr); |
61 | id->prio[0], id->prio[1], | ||
62 | id->addr[0], id->addr[1], id->addr[2], | ||
63 | id->addr[3], id->addr[4], id->addr[5], | ||
64 | p->port_no, p->dev->name); | ||
65 | 61 | ||
66 | /* | 62 | /* |
67 | * According to the spec, the message age timer cannot be | 63 | * According to the spec, the message age timer cannot be |
@@ -87,8 +83,8 @@ static void br_forward_delay_timer_expired(unsigned long arg) | |||
87 | struct net_bridge_port *p = (struct net_bridge_port *) arg; | 83 | struct net_bridge_port *p = (struct net_bridge_port *) arg; |
88 | struct net_bridge *br = p->br; | 84 | struct net_bridge *br = p->br; |
89 | 85 | ||
90 | pr_debug("%s: %d(%s) forward delay timer\n", | 86 | br_debug(br, "port %u(%s) forward delay timer\n", |
91 | br->dev->name, p->port_no, p->dev->name); | 87 | (unsigned) p->port_no, p->dev->name); |
92 | spin_lock(&br->lock); | 88 | spin_lock(&br->lock); |
93 | if (p->state == BR_STATE_LISTENING) { | 89 | if (p->state == BR_STATE_LISTENING) { |
94 | p->state = BR_STATE_LEARNING; | 90 | p->state = BR_STATE_LEARNING; |
@@ -107,7 +103,7 @@ static void br_tcn_timer_expired(unsigned long arg) | |||
107 | { | 103 | { |
108 | struct net_bridge *br = (struct net_bridge *) arg; | 104 | struct net_bridge *br = (struct net_bridge *) arg; |
109 | 105 | ||
110 | pr_debug("%s: tcn timer expired\n", br->dev->name); | 106 | br_debug(br, "tcn timer expired\n"); |
111 | spin_lock(&br->lock); | 107 | spin_lock(&br->lock); |
112 | if (br->dev->flags & IFF_UP) { | 108 | if (br->dev->flags & IFF_UP) { |
113 | br_transmit_tcn(br); | 109 | br_transmit_tcn(br); |
@@ -121,7 +117,7 @@ static void br_topology_change_timer_expired(unsigned long arg) | |||
121 | { | 117 | { |
122 | struct net_bridge *br = (struct net_bridge *) arg; | 118 | struct net_bridge *br = (struct net_bridge *) arg; |
123 | 119 | ||
124 | pr_debug("%s: topo change timer expired\n", br->dev->name); | 120 | br_debug(br, "topo change timer expired\n"); |
125 | spin_lock(&br->lock); | 121 | spin_lock(&br->lock); |
126 | br->topology_change_detected = 0; | 122 | br->topology_change_detected = 0; |
127 | br->topology_change = 0; | 123 | br->topology_change = 0; |
@@ -132,8 +128,8 @@ static void br_hold_timer_expired(unsigned long arg) | |||
132 | { | 128 | { |
133 | struct net_bridge_port *p = (struct net_bridge_port *) arg; | 129 | struct net_bridge_port *p = (struct net_bridge_port *) arg; |
134 | 130 | ||
135 | pr_debug("%s: %d(%s) hold timer expired\n", | 131 | br_debug(p->br, "port %u(%s) hold timer expired\n", |
136 | p->br->dev->name, p->port_no, p->dev->name); | 132 | (unsigned) p->port_no, p->dev->name); |
137 | 133 | ||
138 | spin_lock(&p->br->lock); | 134 | spin_lock(&p->br->lock); |
139 | if (p->config_pending) | 135 | if (p->config_pending) |
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index dd321e39e621..486b8f3861d2 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c | |||
@@ -659,7 +659,7 @@ static struct attribute_group bridge_group = { | |||
659 | * | 659 | * |
660 | * Returns the number of bytes read. | 660 | * Returns the number of bytes read. |
661 | */ | 661 | */ |
662 | static ssize_t brforward_read(struct kobject *kobj, | 662 | static ssize_t brforward_read(struct file *filp, struct kobject *kobj, |
663 | struct bin_attribute *bin_attr, | 663 | struct bin_attribute *bin_attr, |
664 | char *buf, loff_t off, size_t count) | 664 | char *buf, loff_t off, size_t count) |
665 | { | 665 | { |
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index 0b9916489d6b..fd5799c9bc8d 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c | |||
@@ -246,7 +246,7 @@ const struct sysfs_ops brport_sysfs_ops = { | |||
246 | /* | 246 | /* |
247 | * Add sysfs entries to ethernet device added to a bridge. | 247 | * Add sysfs entries to ethernet device added to a bridge. |
248 | * Creates a brport subdirectory with bridge attributes. | 248 | * Creates a brport subdirectory with bridge attributes. |
249 | * Puts symlink in bridge's brport subdirectory | 249 | * Puts symlink in bridge's brif subdirectory |
250 | */ | 250 | */ |
251 | int br_sysfs_addif(struct net_bridge_port *p) | 251 | int br_sysfs_addif(struct net_bridge_port *p) |
252 | { | 252 | { |
@@ -257,15 +257,37 @@ int br_sysfs_addif(struct net_bridge_port *p) | |||
257 | err = sysfs_create_link(&p->kobj, &br->dev->dev.kobj, | 257 | err = sysfs_create_link(&p->kobj, &br->dev->dev.kobj, |
258 | SYSFS_BRIDGE_PORT_LINK); | 258 | SYSFS_BRIDGE_PORT_LINK); |
259 | if (err) | 259 | if (err) |
260 | goto out2; | 260 | return err; |
261 | 261 | ||
262 | for (a = brport_attrs; *a; ++a) { | 262 | for (a = brport_attrs; *a; ++a) { |
263 | err = sysfs_create_file(&p->kobj, &((*a)->attr)); | 263 | err = sysfs_create_file(&p->kobj, &((*a)->attr)); |
264 | if (err) | 264 | if (err) |
265 | goto out2; | 265 | return err; |
266 | } | 266 | } |
267 | 267 | ||
268 | err = sysfs_create_link(br->ifobj, &p->kobj, p->dev->name); | 268 | strlcpy(p->sysfs_name, p->dev->name, IFNAMSIZ); |
269 | out2: | 269 | return sysfs_create_link(br->ifobj, &p->kobj, p->sysfs_name); |
270 | } | ||
271 | |||
272 | /* Rename bridge's brif symlink */ | ||
273 | int br_sysfs_renameif(struct net_bridge_port *p) | ||
274 | { | ||
275 | struct net_bridge *br = p->br; | ||
276 | int err; | ||
277 | |||
278 | /* If a rename fails, the rollback will cause another | ||
279 | * rename call with the existing name. | ||
280 | */ | ||
281 | if (!strncmp(p->sysfs_name, p->dev->name, IFNAMSIZ)) | ||
282 | return 0; | ||
283 | |||
284 | err = sysfs_rename_link(br->ifobj, &p->kobj, | ||
285 | p->sysfs_name, p->dev->name); | ||
286 | if (err) | ||
287 | netdev_notice(br->dev, "unable to rename link %s to %s", | ||
288 | p->sysfs_name, p->dev->name); | ||
289 | else | ||
290 | strlcpy(p->sysfs_name, p->dev->name, IFNAMSIZ); | ||
291 | |||
270 | return err; | 292 | return err; |
271 | } | 293 | } |
diff --git a/net/bridge/netfilter/ebt_802_3.c b/net/bridge/netfilter/ebt_802_3.c index 5d1176758ca5..2a449b7ab8fa 100644 --- a/net/bridge/netfilter/ebt_802_3.c +++ b/net/bridge/netfilter/ebt_802_3.c | |||
@@ -13,7 +13,7 @@ | |||
13 | #include <linux/netfilter_bridge/ebt_802_3.h> | 13 | #include <linux/netfilter_bridge/ebt_802_3.h> |
14 | 14 | ||
15 | static bool | 15 | static bool |
16 | ebt_802_3_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 16 | ebt_802_3_mt(const struct sk_buff *skb, struct xt_action_param *par) |
17 | { | 17 | { |
18 | const struct ebt_802_3_info *info = par->matchinfo; | 18 | const struct ebt_802_3_info *info = par->matchinfo; |
19 | const struct ebt_802_3_hdr *hdr = ebt_802_3_hdr(skb); | 19 | const struct ebt_802_3_hdr *hdr = ebt_802_3_hdr(skb); |
@@ -36,14 +36,14 @@ ebt_802_3_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
36 | return true; | 36 | return true; |
37 | } | 37 | } |
38 | 38 | ||
39 | static bool ebt_802_3_mt_check(const struct xt_mtchk_param *par) | 39 | static int ebt_802_3_mt_check(const struct xt_mtchk_param *par) |
40 | { | 40 | { |
41 | const struct ebt_802_3_info *info = par->matchinfo; | 41 | const struct ebt_802_3_info *info = par->matchinfo; |
42 | 42 | ||
43 | if (info->bitmask & ~EBT_802_3_MASK || info->invflags & ~EBT_802_3_MASK) | 43 | if (info->bitmask & ~EBT_802_3_MASK || info->invflags & ~EBT_802_3_MASK) |
44 | return false; | 44 | return -EINVAL; |
45 | 45 | ||
46 | return true; | 46 | return 0; |
47 | } | 47 | } |
48 | 48 | ||
49 | static struct xt_match ebt_802_3_mt_reg __read_mostly = { | 49 | static struct xt_match ebt_802_3_mt_reg __read_mostly = { |
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c index b595f091f35b..8b84c581be30 100644 --- a/net/bridge/netfilter/ebt_among.c +++ b/net/bridge/netfilter/ebt_among.c | |||
@@ -7,6 +7,7 @@ | |||
7 | * August, 2003 | 7 | * August, 2003 |
8 | * | 8 | * |
9 | */ | 9 | */ |
10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
10 | #include <linux/ip.h> | 11 | #include <linux/ip.h> |
11 | #include <linux/if_arp.h> | 12 | #include <linux/if_arp.h> |
12 | #include <linux/module.h> | 13 | #include <linux/module.h> |
@@ -128,7 +129,7 @@ static int get_ip_src(const struct sk_buff *skb, __be32 *addr) | |||
128 | } | 129 | } |
129 | 130 | ||
130 | static bool | 131 | static bool |
131 | ebt_among_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 132 | ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par) |
132 | { | 133 | { |
133 | const struct ebt_among_info *info = par->matchinfo; | 134 | const struct ebt_among_info *info = par->matchinfo; |
134 | const char *dmac, *smac; | 135 | const char *dmac, *smac; |
@@ -171,7 +172,7 @@ ebt_among_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
171 | return true; | 172 | return true; |
172 | } | 173 | } |
173 | 174 | ||
174 | static bool ebt_among_mt_check(const struct xt_mtchk_param *par) | 175 | static int ebt_among_mt_check(const struct xt_mtchk_param *par) |
175 | { | 176 | { |
176 | const struct ebt_among_info *info = par->matchinfo; | 177 | const struct ebt_among_info *info = par->matchinfo; |
177 | const struct ebt_entry_match *em = | 178 | const struct ebt_entry_match *em = |
@@ -186,24 +187,20 @@ static bool ebt_among_mt_check(const struct xt_mtchk_param *par) | |||
186 | expected_length += ebt_mac_wormhash_size(wh_src); | 187 | expected_length += ebt_mac_wormhash_size(wh_src); |
187 | 188 | ||
188 | if (em->match_size != EBT_ALIGN(expected_length)) { | 189 | if (em->match_size != EBT_ALIGN(expected_length)) { |
189 | printk(KERN_WARNING | 190 | pr_info("wrong size: %d against expected %d, rounded to %Zd\n", |
190 | "ebtables: among: wrong size: %d " | 191 | em->match_size, expected_length, |
191 | "against expected %d, rounded to %Zd\n", | 192 | EBT_ALIGN(expected_length)); |
192 | em->match_size, expected_length, | 193 | return -EINVAL; |
193 | EBT_ALIGN(expected_length)); | ||
194 | return false; | ||
195 | } | 194 | } |
196 | if (wh_dst && (err = ebt_mac_wormhash_check_integrity(wh_dst))) { | 195 | if (wh_dst && (err = ebt_mac_wormhash_check_integrity(wh_dst))) { |
197 | printk(KERN_WARNING | 196 | pr_info("dst integrity fail: %x\n", -err); |
198 | "ebtables: among: dst integrity fail: %x\n", -err); | 197 | return -EINVAL; |
199 | return false; | ||
200 | } | 198 | } |
201 | if (wh_src && (err = ebt_mac_wormhash_check_integrity(wh_src))) { | 199 | if (wh_src && (err = ebt_mac_wormhash_check_integrity(wh_src))) { |
202 | printk(KERN_WARNING | 200 | pr_info("src integrity fail: %x\n", -err); |
203 | "ebtables: among: src integrity fail: %x\n", -err); | 201 | return -EINVAL; |
204 | return false; | ||
205 | } | 202 | } |
206 | return true; | 203 | return 0; |
207 | } | 204 | } |
208 | 205 | ||
209 | static struct xt_match ebt_among_mt_reg __read_mostly = { | 206 | static struct xt_match ebt_among_mt_reg __read_mostly = { |
diff --git a/net/bridge/netfilter/ebt_arp.c b/net/bridge/netfilter/ebt_arp.c index e727697c5847..cd457b891b27 100644 --- a/net/bridge/netfilter/ebt_arp.c +++ b/net/bridge/netfilter/ebt_arp.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/netfilter_bridge/ebt_arp.h> | 16 | #include <linux/netfilter_bridge/ebt_arp.h> |
17 | 17 | ||
18 | static bool | 18 | static bool |
19 | ebt_arp_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 19 | ebt_arp_mt(const struct sk_buff *skb, struct xt_action_param *par) |
20 | { | 20 | { |
21 | const struct ebt_arp_info *info = par->matchinfo; | 21 | const struct ebt_arp_info *info = par->matchinfo; |
22 | const struct arphdr *ah; | 22 | const struct arphdr *ah; |
@@ -100,7 +100,7 @@ ebt_arp_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
100 | return true; | 100 | return true; |
101 | } | 101 | } |
102 | 102 | ||
103 | static bool ebt_arp_mt_check(const struct xt_mtchk_param *par) | 103 | static int ebt_arp_mt_check(const struct xt_mtchk_param *par) |
104 | { | 104 | { |
105 | const struct ebt_arp_info *info = par->matchinfo; | 105 | const struct ebt_arp_info *info = par->matchinfo; |
106 | const struct ebt_entry *e = par->entryinfo; | 106 | const struct ebt_entry *e = par->entryinfo; |
@@ -108,10 +108,10 @@ static bool ebt_arp_mt_check(const struct xt_mtchk_param *par) | |||
108 | if ((e->ethproto != htons(ETH_P_ARP) && | 108 | if ((e->ethproto != htons(ETH_P_ARP) && |
109 | e->ethproto != htons(ETH_P_RARP)) || | 109 | e->ethproto != htons(ETH_P_RARP)) || |
110 | e->invflags & EBT_IPROTO) | 110 | e->invflags & EBT_IPROTO) |
111 | return false; | 111 | return -EINVAL; |
112 | if (info->bitmask & ~EBT_ARP_MASK || info->invflags & ~EBT_ARP_MASK) | 112 | if (info->bitmask & ~EBT_ARP_MASK || info->invflags & ~EBT_ARP_MASK) |
113 | return false; | 113 | return -EINVAL; |
114 | return true; | 114 | return 0; |
115 | } | 115 | } |
116 | 116 | ||
117 | static struct xt_match ebt_arp_mt_reg __read_mostly = { | 117 | static struct xt_match ebt_arp_mt_reg __read_mostly = { |
diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c index f392e9d93f53..070cf134a22f 100644 --- a/net/bridge/netfilter/ebt_arpreply.c +++ b/net/bridge/netfilter/ebt_arpreply.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/netfilter_bridge/ebt_arpreply.h> | 16 | #include <linux/netfilter_bridge/ebt_arpreply.h> |
17 | 17 | ||
18 | static unsigned int | 18 | static unsigned int |
19 | ebt_arpreply_tg(struct sk_buff *skb, const struct xt_target_param *par) | 19 | ebt_arpreply_tg(struct sk_buff *skb, const struct xt_action_param *par) |
20 | { | 20 | { |
21 | const struct ebt_arpreply_info *info = par->targinfo; | 21 | const struct ebt_arpreply_info *info = par->targinfo; |
22 | const __be32 *siptr, *diptr; | 22 | const __be32 *siptr, *diptr; |
@@ -57,17 +57,17 @@ ebt_arpreply_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
57 | return info->target; | 57 | return info->target; |
58 | } | 58 | } |
59 | 59 | ||
60 | static bool ebt_arpreply_tg_check(const struct xt_tgchk_param *par) | 60 | static int ebt_arpreply_tg_check(const struct xt_tgchk_param *par) |
61 | { | 61 | { |
62 | const struct ebt_arpreply_info *info = par->targinfo; | 62 | const struct ebt_arpreply_info *info = par->targinfo; |
63 | const struct ebt_entry *e = par->entryinfo; | 63 | const struct ebt_entry *e = par->entryinfo; |
64 | 64 | ||
65 | if (BASE_CHAIN && info->target == EBT_RETURN) | 65 | if (BASE_CHAIN && info->target == EBT_RETURN) |
66 | return false; | 66 | return -EINVAL; |
67 | if (e->ethproto != htons(ETH_P_ARP) || | 67 | if (e->ethproto != htons(ETH_P_ARP) || |
68 | e->invflags & EBT_IPROTO) | 68 | e->invflags & EBT_IPROTO) |
69 | return false; | 69 | return -EINVAL; |
70 | return true; | 70 | return 0; |
71 | } | 71 | } |
72 | 72 | ||
73 | static struct xt_target ebt_arpreply_tg_reg __read_mostly = { | 73 | static struct xt_target ebt_arpreply_tg_reg __read_mostly = { |
diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c index 2bb40d728a35..c59f7bfae6e2 100644 --- a/net/bridge/netfilter/ebt_dnat.c +++ b/net/bridge/netfilter/ebt_dnat.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <linux/netfilter_bridge/ebt_nat.h> | 15 | #include <linux/netfilter_bridge/ebt_nat.h> |
16 | 16 | ||
17 | static unsigned int | 17 | static unsigned int |
18 | ebt_dnat_tg(struct sk_buff *skb, const struct xt_target_param *par) | 18 | ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par) |
19 | { | 19 | { |
20 | const struct ebt_nat_info *info = par->targinfo; | 20 | const struct ebt_nat_info *info = par->targinfo; |
21 | 21 | ||
@@ -26,13 +26,13 @@ ebt_dnat_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
26 | return info->target; | 26 | return info->target; |
27 | } | 27 | } |
28 | 28 | ||
29 | static bool ebt_dnat_tg_check(const struct xt_tgchk_param *par) | 29 | static int ebt_dnat_tg_check(const struct xt_tgchk_param *par) |
30 | { | 30 | { |
31 | const struct ebt_nat_info *info = par->targinfo; | 31 | const struct ebt_nat_info *info = par->targinfo; |
32 | unsigned int hook_mask; | 32 | unsigned int hook_mask; |
33 | 33 | ||
34 | if (BASE_CHAIN && info->target == EBT_RETURN) | 34 | if (BASE_CHAIN && info->target == EBT_RETURN) |
35 | return false; | 35 | return -EINVAL; |
36 | 36 | ||
37 | hook_mask = par->hook_mask & ~(1 << NF_BR_NUMHOOKS); | 37 | hook_mask = par->hook_mask & ~(1 << NF_BR_NUMHOOKS); |
38 | if ((strcmp(par->table, "nat") != 0 || | 38 | if ((strcmp(par->table, "nat") != 0 || |
@@ -40,10 +40,10 @@ static bool ebt_dnat_tg_check(const struct xt_tgchk_param *par) | |||
40 | (1 << NF_BR_LOCAL_OUT)))) && | 40 | (1 << NF_BR_LOCAL_OUT)))) && |
41 | (strcmp(par->table, "broute") != 0 || | 41 | (strcmp(par->table, "broute") != 0 || |
42 | hook_mask & ~(1 << NF_BR_BROUTING))) | 42 | hook_mask & ~(1 << NF_BR_BROUTING))) |
43 | return false; | 43 | return -EINVAL; |
44 | if (INVALID_TARGET) | 44 | if (INVALID_TARGET) |
45 | return false; | 45 | return -EINVAL; |
46 | return true; | 46 | return 0; |
47 | } | 47 | } |
48 | 48 | ||
49 | static struct xt_target ebt_dnat_tg_reg __read_mostly = { | 49 | static struct xt_target ebt_dnat_tg_reg __read_mostly = { |
diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c index 5de6df6f86b8..23bca62d58d2 100644 --- a/net/bridge/netfilter/ebt_ip.c +++ b/net/bridge/netfilter/ebt_ip.c | |||
@@ -25,7 +25,7 @@ struct tcpudphdr { | |||
25 | }; | 25 | }; |
26 | 26 | ||
27 | static bool | 27 | static bool |
28 | ebt_ip_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 28 | ebt_ip_mt(const struct sk_buff *skb, struct xt_action_param *par) |
29 | { | 29 | { |
30 | const struct ebt_ip_info *info = par->matchinfo; | 30 | const struct ebt_ip_info *info = par->matchinfo; |
31 | const struct iphdr *ih; | 31 | const struct iphdr *ih; |
@@ -77,31 +77,31 @@ ebt_ip_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
77 | return true; | 77 | return true; |
78 | } | 78 | } |
79 | 79 | ||
80 | static bool ebt_ip_mt_check(const struct xt_mtchk_param *par) | 80 | static int ebt_ip_mt_check(const struct xt_mtchk_param *par) |
81 | { | 81 | { |
82 | const struct ebt_ip_info *info = par->matchinfo; | 82 | const struct ebt_ip_info *info = par->matchinfo; |
83 | const struct ebt_entry *e = par->entryinfo; | 83 | const struct ebt_entry *e = par->entryinfo; |
84 | 84 | ||
85 | if (e->ethproto != htons(ETH_P_IP) || | 85 | if (e->ethproto != htons(ETH_P_IP) || |
86 | e->invflags & EBT_IPROTO) | 86 | e->invflags & EBT_IPROTO) |
87 | return false; | 87 | return -EINVAL; |
88 | if (info->bitmask & ~EBT_IP_MASK || info->invflags & ~EBT_IP_MASK) | 88 | if (info->bitmask & ~EBT_IP_MASK || info->invflags & ~EBT_IP_MASK) |
89 | return false; | 89 | return -EINVAL; |
90 | if (info->bitmask & (EBT_IP_DPORT | EBT_IP_SPORT)) { | 90 | if (info->bitmask & (EBT_IP_DPORT | EBT_IP_SPORT)) { |
91 | if (info->invflags & EBT_IP_PROTO) | 91 | if (info->invflags & EBT_IP_PROTO) |
92 | return false; | 92 | return -EINVAL; |
93 | if (info->protocol != IPPROTO_TCP && | 93 | if (info->protocol != IPPROTO_TCP && |
94 | info->protocol != IPPROTO_UDP && | 94 | info->protocol != IPPROTO_UDP && |
95 | info->protocol != IPPROTO_UDPLITE && | 95 | info->protocol != IPPROTO_UDPLITE && |
96 | info->protocol != IPPROTO_SCTP && | 96 | info->protocol != IPPROTO_SCTP && |
97 | info->protocol != IPPROTO_DCCP) | 97 | info->protocol != IPPROTO_DCCP) |
98 | return false; | 98 | return -EINVAL; |
99 | } | 99 | } |
100 | if (info->bitmask & EBT_IP_DPORT && info->dport[0] > info->dport[1]) | 100 | if (info->bitmask & EBT_IP_DPORT && info->dport[0] > info->dport[1]) |
101 | return false; | 101 | return -EINVAL; |
102 | if (info->bitmask & EBT_IP_SPORT && info->sport[0] > info->sport[1]) | 102 | if (info->bitmask & EBT_IP_SPORT && info->sport[0] > info->sport[1]) |
103 | return false; | 103 | return -EINVAL; |
104 | return true; | 104 | return 0; |
105 | } | 105 | } |
106 | 106 | ||
107 | static struct xt_match ebt_ip_mt_reg __read_mostly = { | 107 | static struct xt_match ebt_ip_mt_reg __read_mostly = { |
diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c index bbf2534ef026..50a46afc2bcc 100644 --- a/net/bridge/netfilter/ebt_ip6.c +++ b/net/bridge/netfilter/ebt_ip6.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * Authors: | 4 | * Authors: |
5 | * Manohar Castelino <manohar.r.castelino@intel.com> | 5 | * Manohar Castelino <manohar.r.castelino@intel.com> |
6 | * Kuo-Lang Tseng <kuo-lang.tseng@intel.com> | 6 | * Kuo-Lang Tseng <kuo-lang.tseng@intel.com> |
7 | * Jan Engelhardt <jengelh@computergmbh.de> | 7 | * Jan Engelhardt <jengelh@medozas.de> |
8 | * | 8 | * |
9 | * Summary: | 9 | * Summary: |
10 | * This is just a modification of the IPv4 code written by | 10 | * This is just a modification of the IPv4 code written by |
@@ -28,15 +28,13 @@ struct tcpudphdr { | |||
28 | }; | 28 | }; |
29 | 29 | ||
30 | static bool | 30 | static bool |
31 | ebt_ip6_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 31 | ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par) |
32 | { | 32 | { |
33 | const struct ebt_ip6_info *info = par->matchinfo; | 33 | const struct ebt_ip6_info *info = par->matchinfo; |
34 | const struct ipv6hdr *ih6; | 34 | const struct ipv6hdr *ih6; |
35 | struct ipv6hdr _ip6h; | 35 | struct ipv6hdr _ip6h; |
36 | const struct tcpudphdr *pptr; | 36 | const struct tcpudphdr *pptr; |
37 | struct tcpudphdr _ports; | 37 | struct tcpudphdr _ports; |
38 | struct in6_addr tmp_addr; | ||
39 | int i; | ||
40 | 38 | ||
41 | ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h); | 39 | ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h); |
42 | if (ih6 == NULL) | 40 | if (ih6 == NULL) |
@@ -44,18 +42,10 @@ ebt_ip6_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
44 | if (info->bitmask & EBT_IP6_TCLASS && | 42 | if (info->bitmask & EBT_IP6_TCLASS && |
45 | FWINV(info->tclass != ipv6_get_dsfield(ih6), EBT_IP6_TCLASS)) | 43 | FWINV(info->tclass != ipv6_get_dsfield(ih6), EBT_IP6_TCLASS)) |
46 | return false; | 44 | return false; |
47 | for (i = 0; i < 4; i++) | 45 | if (FWINV(ipv6_masked_addr_cmp(&ih6->saddr, &info->smsk, |
48 | tmp_addr.in6_u.u6_addr32[i] = ih6->saddr.in6_u.u6_addr32[i] & | 46 | &info->saddr), EBT_IP6_SOURCE) || |
49 | info->smsk.in6_u.u6_addr32[i]; | 47 | FWINV(ipv6_masked_addr_cmp(&ih6->daddr, &info->dmsk, |
50 | if (info->bitmask & EBT_IP6_SOURCE && | 48 | &info->daddr), EBT_IP6_DEST)) |
51 | FWINV((ipv6_addr_cmp(&tmp_addr, &info->saddr) != 0), | ||
52 | EBT_IP6_SOURCE)) | ||
53 | return false; | ||
54 | for (i = 0; i < 4; i++) | ||
55 | tmp_addr.in6_u.u6_addr32[i] = ih6->daddr.in6_u.u6_addr32[i] & | ||
56 | info->dmsk.in6_u.u6_addr32[i]; | ||
57 | if (info->bitmask & EBT_IP6_DEST && | ||
58 | FWINV((ipv6_addr_cmp(&tmp_addr, &info->daddr) != 0), EBT_IP6_DEST)) | ||
59 | return false; | 49 | return false; |
60 | if (info->bitmask & EBT_IP6_PROTO) { | 50 | if (info->bitmask & EBT_IP6_PROTO) { |
61 | uint8_t nexthdr = ih6->nexthdr; | 51 | uint8_t nexthdr = ih6->nexthdr; |
@@ -90,30 +80,30 @@ ebt_ip6_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
90 | return true; | 80 | return true; |
91 | } | 81 | } |
92 | 82 | ||
93 | static bool ebt_ip6_mt_check(const struct xt_mtchk_param *par) | 83 | static int ebt_ip6_mt_check(const struct xt_mtchk_param *par) |
94 | { | 84 | { |
95 | const struct ebt_entry *e = par->entryinfo; | 85 | const struct ebt_entry *e = par->entryinfo; |
96 | struct ebt_ip6_info *info = par->matchinfo; | 86 | struct ebt_ip6_info *info = par->matchinfo; |
97 | 87 | ||
98 | if (e->ethproto != htons(ETH_P_IPV6) || e->invflags & EBT_IPROTO) | 88 | if (e->ethproto != htons(ETH_P_IPV6) || e->invflags & EBT_IPROTO) |
99 | return false; | 89 | return -EINVAL; |
100 | if (info->bitmask & ~EBT_IP6_MASK || info->invflags & ~EBT_IP6_MASK) | 90 | if (info->bitmask & ~EBT_IP6_MASK || info->invflags & ~EBT_IP6_MASK) |
101 | return false; | 91 | return -EINVAL; |
102 | if (info->bitmask & (EBT_IP6_DPORT | EBT_IP6_SPORT)) { | 92 | if (info->bitmask & (EBT_IP6_DPORT | EBT_IP6_SPORT)) { |
103 | if (info->invflags & EBT_IP6_PROTO) | 93 | if (info->invflags & EBT_IP6_PROTO) |
104 | return false; | 94 | return -EINVAL; |
105 | if (info->protocol != IPPROTO_TCP && | 95 | if (info->protocol != IPPROTO_TCP && |
106 | info->protocol != IPPROTO_UDP && | 96 | info->protocol != IPPROTO_UDP && |
107 | info->protocol != IPPROTO_UDPLITE && | 97 | info->protocol != IPPROTO_UDPLITE && |
108 | info->protocol != IPPROTO_SCTP && | 98 | info->protocol != IPPROTO_SCTP && |
109 | info->protocol != IPPROTO_DCCP) | 99 | info->protocol != IPPROTO_DCCP) |
110 | return false; | 100 | return -EINVAL; |
111 | } | 101 | } |
112 | if (info->bitmask & EBT_IP6_DPORT && info->dport[0] > info->dport[1]) | 102 | if (info->bitmask & EBT_IP6_DPORT && info->dport[0] > info->dport[1]) |
113 | return false; | 103 | return -EINVAL; |
114 | if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1]) | 104 | if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1]) |
115 | return false; | 105 | return -EINVAL; |
116 | return true; | 106 | return 0; |
117 | } | 107 | } |
118 | 108 | ||
119 | static struct xt_match ebt_ip6_mt_reg __read_mostly = { | 109 | static struct xt_match ebt_ip6_mt_reg __read_mostly = { |
@@ -139,4 +129,5 @@ static void __exit ebt_ip6_fini(void) | |||
139 | module_init(ebt_ip6_init); | 129 | module_init(ebt_ip6_init); |
140 | module_exit(ebt_ip6_fini); | 130 | module_exit(ebt_ip6_fini); |
141 | MODULE_DESCRIPTION("Ebtables: IPv6 protocol packet match"); | 131 | MODULE_DESCRIPTION("Ebtables: IPv6 protocol packet match"); |
132 | MODULE_AUTHOR("Kuo-Lang Tseng <kuo-lang.tseng@intel.com>"); | ||
142 | MODULE_LICENSE("GPL"); | 133 | MODULE_LICENSE("GPL"); |
diff --git a/net/bridge/netfilter/ebt_limit.c b/net/bridge/netfilter/ebt_limit.c index 7a8182710eb3..517e78befcb2 100644 --- a/net/bridge/netfilter/ebt_limit.c +++ b/net/bridge/netfilter/ebt_limit.c | |||
@@ -10,6 +10,7 @@ | |||
10 | * September, 2003 | 10 | * September, 2003 |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
13 | #include <linux/module.h> | 14 | #include <linux/module.h> |
14 | #include <linux/netdevice.h> | 15 | #include <linux/netdevice.h> |
15 | #include <linux/spinlock.h> | 16 | #include <linux/spinlock.h> |
@@ -31,7 +32,7 @@ static DEFINE_SPINLOCK(limit_lock); | |||
31 | #define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ) | 32 | #define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ) |
32 | 33 | ||
33 | static bool | 34 | static bool |
34 | ebt_limit_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 35 | ebt_limit_mt(const struct sk_buff *skb, struct xt_action_param *par) |
35 | { | 36 | { |
36 | struct ebt_limit_info *info = (void *)par->matchinfo; | 37 | struct ebt_limit_info *info = (void *)par->matchinfo; |
37 | unsigned long now = jiffies; | 38 | unsigned long now = jiffies; |
@@ -64,16 +65,16 @@ user2credits(u_int32_t user) | |||
64 | return (user * HZ * CREDITS_PER_JIFFY) / EBT_LIMIT_SCALE; | 65 | return (user * HZ * CREDITS_PER_JIFFY) / EBT_LIMIT_SCALE; |
65 | } | 66 | } |
66 | 67 | ||
67 | static bool ebt_limit_mt_check(const struct xt_mtchk_param *par) | 68 | static int ebt_limit_mt_check(const struct xt_mtchk_param *par) |
68 | { | 69 | { |
69 | struct ebt_limit_info *info = par->matchinfo; | 70 | struct ebt_limit_info *info = par->matchinfo; |
70 | 71 | ||
71 | /* Check for overflow. */ | 72 | /* Check for overflow. */ |
72 | if (info->burst == 0 || | 73 | if (info->burst == 0 || |
73 | user2credits(info->avg * info->burst) < user2credits(info->avg)) { | 74 | user2credits(info->avg * info->burst) < user2credits(info->avg)) { |
74 | printk("Overflow in ebt_limit, try lower: %u/%u\n", | 75 | pr_info("overflow, try lower: %u/%u\n", |
75 | info->avg, info->burst); | 76 | info->avg, info->burst); |
76 | return false; | 77 | return -EINVAL; |
77 | } | 78 | } |
78 | 79 | ||
79 | /* User avg in seconds * EBT_LIMIT_SCALE: convert to jiffies * 128. */ | 80 | /* User avg in seconds * EBT_LIMIT_SCALE: convert to jiffies * 128. */ |
@@ -81,7 +82,7 @@ static bool ebt_limit_mt_check(const struct xt_mtchk_param *par) | |||
81 | info->credit = user2credits(info->avg * info->burst); | 82 | info->credit = user2credits(info->avg * info->burst); |
82 | info->credit_cap = user2credits(info->avg * info->burst); | 83 | info->credit_cap = user2credits(info->avg * info->burst); |
83 | info->cost = user2credits(info->avg); | 84 | info->cost = user2credits(info->avg); |
84 | return true; | 85 | return 0; |
85 | } | 86 | } |
86 | 87 | ||
87 | 88 | ||
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c index e873924ddb5d..6e5a8bb9b940 100644 --- a/net/bridge/netfilter/ebt_log.c +++ b/net/bridge/netfilter/ebt_log.c | |||
@@ -24,16 +24,16 @@ | |||
24 | 24 | ||
25 | static DEFINE_SPINLOCK(ebt_log_lock); | 25 | static DEFINE_SPINLOCK(ebt_log_lock); |
26 | 26 | ||
27 | static bool ebt_log_tg_check(const struct xt_tgchk_param *par) | 27 | static int ebt_log_tg_check(const struct xt_tgchk_param *par) |
28 | { | 28 | { |
29 | struct ebt_log_info *info = par->targinfo; | 29 | struct ebt_log_info *info = par->targinfo; |
30 | 30 | ||
31 | if (info->bitmask & ~EBT_LOG_MASK) | 31 | if (info->bitmask & ~EBT_LOG_MASK) |
32 | return false; | 32 | return -EINVAL; |
33 | if (info->loglevel >= 8) | 33 | if (info->loglevel >= 8) |
34 | return false; | 34 | return -EINVAL; |
35 | info->prefix[EBT_LOG_PREFIX_SIZE - 1] = '\0'; | 35 | info->prefix[EBT_LOG_PREFIX_SIZE - 1] = '\0'; |
36 | return true; | 36 | return 0; |
37 | } | 37 | } |
38 | 38 | ||
39 | struct tcpudphdr | 39 | struct tcpudphdr |
@@ -171,7 +171,7 @@ out: | |||
171 | } | 171 | } |
172 | 172 | ||
173 | static unsigned int | 173 | static unsigned int |
174 | ebt_log_tg(struct sk_buff *skb, const struct xt_target_param *par) | 174 | ebt_log_tg(struct sk_buff *skb, const struct xt_action_param *par) |
175 | { | 175 | { |
176 | const struct ebt_log_info *info = par->targinfo; | 176 | const struct ebt_log_info *info = par->targinfo; |
177 | struct nf_loginfo li; | 177 | struct nf_loginfo li; |
diff --git a/net/bridge/netfilter/ebt_mark.c b/net/bridge/netfilter/ebt_mark.c index 2b5ce533d6b9..66697cbd0a8b 100644 --- a/net/bridge/netfilter/ebt_mark.c +++ b/net/bridge/netfilter/ebt_mark.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #include <linux/netfilter_bridge/ebt_mark_t.h> | 19 | #include <linux/netfilter_bridge/ebt_mark_t.h> |
20 | 20 | ||
21 | static unsigned int | 21 | static unsigned int |
22 | ebt_mark_tg(struct sk_buff *skb, const struct xt_target_param *par) | 22 | ebt_mark_tg(struct sk_buff *skb, const struct xt_action_param *par) |
23 | { | 23 | { |
24 | const struct ebt_mark_t_info *info = par->targinfo; | 24 | const struct ebt_mark_t_info *info = par->targinfo; |
25 | int action = info->target & -16; | 25 | int action = info->target & -16; |
@@ -36,21 +36,21 @@ ebt_mark_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
36 | return info->target | ~EBT_VERDICT_BITS; | 36 | return info->target | ~EBT_VERDICT_BITS; |
37 | } | 37 | } |
38 | 38 | ||
39 | static bool ebt_mark_tg_check(const struct xt_tgchk_param *par) | 39 | static int ebt_mark_tg_check(const struct xt_tgchk_param *par) |
40 | { | 40 | { |
41 | const struct ebt_mark_t_info *info = par->targinfo; | 41 | const struct ebt_mark_t_info *info = par->targinfo; |
42 | int tmp; | 42 | int tmp; |
43 | 43 | ||
44 | tmp = info->target | ~EBT_VERDICT_BITS; | 44 | tmp = info->target | ~EBT_VERDICT_BITS; |
45 | if (BASE_CHAIN && tmp == EBT_RETURN) | 45 | if (BASE_CHAIN && tmp == EBT_RETURN) |
46 | return false; | 46 | return -EINVAL; |
47 | if (tmp < -NUM_STANDARD_TARGETS || tmp >= 0) | 47 | if (tmp < -NUM_STANDARD_TARGETS || tmp >= 0) |
48 | return false; | 48 | return -EINVAL; |
49 | tmp = info->target & ~EBT_VERDICT_BITS; | 49 | tmp = info->target & ~EBT_VERDICT_BITS; |
50 | if (tmp != MARK_SET_VALUE && tmp != MARK_OR_VALUE && | 50 | if (tmp != MARK_SET_VALUE && tmp != MARK_OR_VALUE && |
51 | tmp != MARK_AND_VALUE && tmp != MARK_XOR_VALUE) | 51 | tmp != MARK_AND_VALUE && tmp != MARK_XOR_VALUE) |
52 | return false; | 52 | return -EINVAL; |
53 | return true; | 53 | return 0; |
54 | } | 54 | } |
55 | #ifdef CONFIG_COMPAT | 55 | #ifdef CONFIG_COMPAT |
56 | struct compat_ebt_mark_t_info { | 56 | struct compat_ebt_mark_t_info { |
diff --git a/net/bridge/netfilter/ebt_mark_m.c b/net/bridge/netfilter/ebt_mark_m.c index 8de8c396d913..d98baefc4c7e 100644 --- a/net/bridge/netfilter/ebt_mark_m.c +++ b/net/bridge/netfilter/ebt_mark_m.c | |||
@@ -13,7 +13,7 @@ | |||
13 | #include <linux/netfilter_bridge/ebt_mark_m.h> | 13 | #include <linux/netfilter_bridge/ebt_mark_m.h> |
14 | 14 | ||
15 | static bool | 15 | static bool |
16 | ebt_mark_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 16 | ebt_mark_mt(const struct sk_buff *skb, struct xt_action_param *par) |
17 | { | 17 | { |
18 | const struct ebt_mark_m_info *info = par->matchinfo; | 18 | const struct ebt_mark_m_info *info = par->matchinfo; |
19 | 19 | ||
@@ -22,17 +22,17 @@ ebt_mark_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
22 | return ((skb->mark & info->mask) == info->mark) ^ info->invert; | 22 | return ((skb->mark & info->mask) == info->mark) ^ info->invert; |
23 | } | 23 | } |
24 | 24 | ||
25 | static bool ebt_mark_mt_check(const struct xt_mtchk_param *par) | 25 | static int ebt_mark_mt_check(const struct xt_mtchk_param *par) |
26 | { | 26 | { |
27 | const struct ebt_mark_m_info *info = par->matchinfo; | 27 | const struct ebt_mark_m_info *info = par->matchinfo; |
28 | 28 | ||
29 | if (info->bitmask & ~EBT_MARK_MASK) | 29 | if (info->bitmask & ~EBT_MARK_MASK) |
30 | return false; | 30 | return -EINVAL; |
31 | if ((info->bitmask & EBT_MARK_OR) && (info->bitmask & EBT_MARK_AND)) | 31 | if ((info->bitmask & EBT_MARK_OR) && (info->bitmask & EBT_MARK_AND)) |
32 | return false; | 32 | return -EINVAL; |
33 | if (!info->bitmask) | 33 | if (!info->bitmask) |
34 | return false; | 34 | return -EINVAL; |
35 | return true; | 35 | return 0; |
36 | } | 36 | } |
37 | 37 | ||
38 | 38 | ||
diff --git a/net/bridge/netfilter/ebt_nflog.c b/net/bridge/netfilter/ebt_nflog.c index 40dbd248b9ae..5be68bbcc341 100644 --- a/net/bridge/netfilter/ebt_nflog.c +++ b/net/bridge/netfilter/ebt_nflog.c | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <net/netfilter/nf_log.h> | 20 | #include <net/netfilter/nf_log.h> |
21 | 21 | ||
22 | static unsigned int | 22 | static unsigned int |
23 | ebt_nflog_tg(struct sk_buff *skb, const struct xt_target_param *par) | 23 | ebt_nflog_tg(struct sk_buff *skb, const struct xt_action_param *par) |
24 | { | 24 | { |
25 | const struct ebt_nflog_info *info = par->targinfo; | 25 | const struct ebt_nflog_info *info = par->targinfo; |
26 | struct nf_loginfo li; | 26 | struct nf_loginfo li; |
@@ -35,14 +35,14 @@ ebt_nflog_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
35 | return EBT_CONTINUE; | 35 | return EBT_CONTINUE; |
36 | } | 36 | } |
37 | 37 | ||
38 | static bool ebt_nflog_tg_check(const struct xt_tgchk_param *par) | 38 | static int ebt_nflog_tg_check(const struct xt_tgchk_param *par) |
39 | { | 39 | { |
40 | struct ebt_nflog_info *info = par->targinfo; | 40 | struct ebt_nflog_info *info = par->targinfo; |
41 | 41 | ||
42 | if (info->flags & ~EBT_NFLOG_MASK) | 42 | if (info->flags & ~EBT_NFLOG_MASK) |
43 | return false; | 43 | return -EINVAL; |
44 | info->prefix[EBT_NFLOG_PREFIX_SIZE - 1] = '\0'; | 44 | info->prefix[EBT_NFLOG_PREFIX_SIZE - 1] = '\0'; |
45 | return true; | 45 | return 0; |
46 | } | 46 | } |
47 | 47 | ||
48 | static struct xt_target ebt_nflog_tg_reg __read_mostly = { | 48 | static struct xt_target ebt_nflog_tg_reg __read_mostly = { |
diff --git a/net/bridge/netfilter/ebt_pkttype.c b/net/bridge/netfilter/ebt_pkttype.c index e2a07e6cbef3..496a56515307 100644 --- a/net/bridge/netfilter/ebt_pkttype.c +++ b/net/bridge/netfilter/ebt_pkttype.c | |||
@@ -13,21 +13,21 @@ | |||
13 | #include <linux/netfilter_bridge/ebt_pkttype.h> | 13 | #include <linux/netfilter_bridge/ebt_pkttype.h> |
14 | 14 | ||
15 | static bool | 15 | static bool |
16 | ebt_pkttype_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 16 | ebt_pkttype_mt(const struct sk_buff *skb, struct xt_action_param *par) |
17 | { | 17 | { |
18 | const struct ebt_pkttype_info *info = par->matchinfo; | 18 | const struct ebt_pkttype_info *info = par->matchinfo; |
19 | 19 | ||
20 | return (skb->pkt_type == info->pkt_type) ^ info->invert; | 20 | return (skb->pkt_type == info->pkt_type) ^ info->invert; |
21 | } | 21 | } |
22 | 22 | ||
23 | static bool ebt_pkttype_mt_check(const struct xt_mtchk_param *par) | 23 | static int ebt_pkttype_mt_check(const struct xt_mtchk_param *par) |
24 | { | 24 | { |
25 | const struct ebt_pkttype_info *info = par->matchinfo; | 25 | const struct ebt_pkttype_info *info = par->matchinfo; |
26 | 26 | ||
27 | if (info->invert != 0 && info->invert != 1) | 27 | if (info->invert != 0 && info->invert != 1) |
28 | return false; | 28 | return -EINVAL; |
29 | /* Allow any pkt_type value */ | 29 | /* Allow any pkt_type value */ |
30 | return true; | 30 | return 0; |
31 | } | 31 | } |
32 | 32 | ||
33 | static struct xt_match ebt_pkttype_mt_reg __read_mostly = { | 33 | static struct xt_match ebt_pkttype_mt_reg __read_mostly = { |
diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c index 9be8fbcd370b..9e19166ba453 100644 --- a/net/bridge/netfilter/ebt_redirect.c +++ b/net/bridge/netfilter/ebt_redirect.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/netfilter_bridge/ebt_redirect.h> | 16 | #include <linux/netfilter_bridge/ebt_redirect.h> |
17 | 17 | ||
18 | static unsigned int | 18 | static unsigned int |
19 | ebt_redirect_tg(struct sk_buff *skb, const struct xt_target_param *par) | 19 | ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par) |
20 | { | 20 | { |
21 | const struct ebt_redirect_info *info = par->targinfo; | 21 | const struct ebt_redirect_info *info = par->targinfo; |
22 | 22 | ||
@@ -32,23 +32,23 @@ ebt_redirect_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
32 | return info->target; | 32 | return info->target; |
33 | } | 33 | } |
34 | 34 | ||
35 | static bool ebt_redirect_tg_check(const struct xt_tgchk_param *par) | 35 | static int ebt_redirect_tg_check(const struct xt_tgchk_param *par) |
36 | { | 36 | { |
37 | const struct ebt_redirect_info *info = par->targinfo; | 37 | const struct ebt_redirect_info *info = par->targinfo; |
38 | unsigned int hook_mask; | 38 | unsigned int hook_mask; |
39 | 39 | ||
40 | if (BASE_CHAIN && info->target == EBT_RETURN) | 40 | if (BASE_CHAIN && info->target == EBT_RETURN) |
41 | return false; | 41 | return -EINVAL; |
42 | 42 | ||
43 | hook_mask = par->hook_mask & ~(1 << NF_BR_NUMHOOKS); | 43 | hook_mask = par->hook_mask & ~(1 << NF_BR_NUMHOOKS); |
44 | if ((strcmp(par->table, "nat") != 0 || | 44 | if ((strcmp(par->table, "nat") != 0 || |
45 | hook_mask & ~(1 << NF_BR_PRE_ROUTING)) && | 45 | hook_mask & ~(1 << NF_BR_PRE_ROUTING)) && |
46 | (strcmp(par->table, "broute") != 0 || | 46 | (strcmp(par->table, "broute") != 0 || |
47 | hook_mask & ~(1 << NF_BR_BROUTING))) | 47 | hook_mask & ~(1 << NF_BR_BROUTING))) |
48 | return false; | 48 | return -EINVAL; |
49 | if (INVALID_TARGET) | 49 | if (INVALID_TARGET) |
50 | return false; | 50 | return -EINVAL; |
51 | return true; | 51 | return 0; |
52 | } | 52 | } |
53 | 53 | ||
54 | static struct xt_target ebt_redirect_tg_reg __read_mostly = { | 54 | static struct xt_target ebt_redirect_tg_reg __read_mostly = { |
diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c index 9c7b520765a2..f8f0bd1a1d51 100644 --- a/net/bridge/netfilter/ebt_snat.c +++ b/net/bridge/netfilter/ebt_snat.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/netfilter_bridge/ebt_nat.h> | 17 | #include <linux/netfilter_bridge/ebt_nat.h> |
18 | 18 | ||
19 | static unsigned int | 19 | static unsigned int |
20 | ebt_snat_tg(struct sk_buff *skb, const struct xt_target_param *par) | 20 | ebt_snat_tg(struct sk_buff *skb, const struct xt_action_param *par) |
21 | { | 21 | { |
22 | const struct ebt_nat_info *info = par->targinfo; | 22 | const struct ebt_nat_info *info = par->targinfo; |
23 | 23 | ||
@@ -42,21 +42,21 @@ out: | |||
42 | return info->target | ~EBT_VERDICT_BITS; | 42 | return info->target | ~EBT_VERDICT_BITS; |
43 | } | 43 | } |
44 | 44 | ||
45 | static bool ebt_snat_tg_check(const struct xt_tgchk_param *par) | 45 | static int ebt_snat_tg_check(const struct xt_tgchk_param *par) |
46 | { | 46 | { |
47 | const struct ebt_nat_info *info = par->targinfo; | 47 | const struct ebt_nat_info *info = par->targinfo; |
48 | int tmp; | 48 | int tmp; |
49 | 49 | ||
50 | tmp = info->target | ~EBT_VERDICT_BITS; | 50 | tmp = info->target | ~EBT_VERDICT_BITS; |
51 | if (BASE_CHAIN && tmp == EBT_RETURN) | 51 | if (BASE_CHAIN && tmp == EBT_RETURN) |
52 | return false; | 52 | return -EINVAL; |
53 | 53 | ||
54 | if (tmp < -NUM_STANDARD_TARGETS || tmp >= 0) | 54 | if (tmp < -NUM_STANDARD_TARGETS || tmp >= 0) |
55 | return false; | 55 | return -EINVAL; |
56 | tmp = info->target | EBT_VERDICT_BITS; | 56 | tmp = info->target | EBT_VERDICT_BITS; |
57 | if ((tmp & ~NAT_ARP_BIT) != ~NAT_ARP_BIT) | 57 | if ((tmp & ~NAT_ARP_BIT) != ~NAT_ARP_BIT) |
58 | return false; | 58 | return -EINVAL; |
59 | return true; | 59 | return 0; |
60 | } | 60 | } |
61 | 61 | ||
62 | static struct xt_target ebt_snat_tg_reg __read_mostly = { | 62 | static struct xt_target ebt_snat_tg_reg __read_mostly = { |
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c index 92a93d363765..5b33a2e634a6 100644 --- a/net/bridge/netfilter/ebt_stp.c +++ b/net/bridge/netfilter/ebt_stp.c | |||
@@ -120,7 +120,7 @@ static bool ebt_filter_config(const struct ebt_stp_info *info, | |||
120 | } | 120 | } |
121 | 121 | ||
122 | static bool | 122 | static bool |
123 | ebt_stp_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 123 | ebt_stp_mt(const struct sk_buff *skb, struct xt_action_param *par) |
124 | { | 124 | { |
125 | const struct ebt_stp_info *info = par->matchinfo; | 125 | const struct ebt_stp_info *info = par->matchinfo; |
126 | const struct stp_header *sp; | 126 | const struct stp_header *sp; |
@@ -153,7 +153,7 @@ ebt_stp_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
153 | return true; | 153 | return true; |
154 | } | 154 | } |
155 | 155 | ||
156 | static bool ebt_stp_mt_check(const struct xt_mtchk_param *par) | 156 | static int ebt_stp_mt_check(const struct xt_mtchk_param *par) |
157 | { | 157 | { |
158 | const struct ebt_stp_info *info = par->matchinfo; | 158 | const struct ebt_stp_info *info = par->matchinfo; |
159 | const uint8_t bridge_ula[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00}; | 159 | const uint8_t bridge_ula[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00}; |
@@ -162,13 +162,13 @@ static bool ebt_stp_mt_check(const struct xt_mtchk_param *par) | |||
162 | 162 | ||
163 | if (info->bitmask & ~EBT_STP_MASK || info->invflags & ~EBT_STP_MASK || | 163 | if (info->bitmask & ~EBT_STP_MASK || info->invflags & ~EBT_STP_MASK || |
164 | !(info->bitmask & EBT_STP_MASK)) | 164 | !(info->bitmask & EBT_STP_MASK)) |
165 | return false; | 165 | return -EINVAL; |
166 | /* Make sure the match only receives stp frames */ | 166 | /* Make sure the match only receives stp frames */ |
167 | if (compare_ether_addr(e->destmac, bridge_ula) || | 167 | if (compare_ether_addr(e->destmac, bridge_ula) || |
168 | compare_ether_addr(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC)) | 168 | compare_ether_addr(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC)) |
169 | return false; | 169 | return -EINVAL; |
170 | 170 | ||
171 | return true; | 171 | return 0; |
172 | } | 172 | } |
173 | 173 | ||
174 | static struct xt_match ebt_stp_mt_reg __read_mostly = { | 174 | static struct xt_match ebt_stp_mt_reg __read_mostly = { |
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c index f9560f3dbdc7..ae3c7cef1484 100644 --- a/net/bridge/netfilter/ebt_ulog.c +++ b/net/bridge/netfilter/ebt_ulog.c | |||
@@ -27,7 +27,7 @@ | |||
27 | * flushed even if it is not full yet. | 27 | * flushed even if it is not full yet. |
28 | * | 28 | * |
29 | */ | 29 | */ |
30 | 30 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
31 | #include <linux/module.h> | 31 | #include <linux/module.h> |
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/spinlock.h> | 33 | #include <linux/spinlock.h> |
@@ -44,9 +44,6 @@ | |||
44 | #include <net/sock.h> | 44 | #include <net/sock.h> |
45 | #include "../br_private.h" | 45 | #include "../br_private.h" |
46 | 46 | ||
47 | #define PRINTR(format, args...) do { if (net_ratelimit()) \ | ||
48 | printk(format , ## args); } while (0) | ||
49 | |||
50 | static unsigned int nlbufsiz = NLMSG_GOODSIZE; | 47 | static unsigned int nlbufsiz = NLMSG_GOODSIZE; |
51 | module_param(nlbufsiz, uint, 0600); | 48 | module_param(nlbufsiz, uint, 0600); |
52 | MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) " | 49 | MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) " |
@@ -107,15 +104,14 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size) | |||
107 | n = max(size, nlbufsiz); | 104 | n = max(size, nlbufsiz); |
108 | skb = alloc_skb(n, GFP_ATOMIC); | 105 | skb = alloc_skb(n, GFP_ATOMIC); |
109 | if (!skb) { | 106 | if (!skb) { |
110 | PRINTR(KERN_ERR "ebt_ulog: can't alloc whole buffer " | 107 | pr_debug("cannot alloc whole buffer of size %ub!\n", n); |
111 | "of size %ub!\n", n); | ||
112 | if (n > size) { | 108 | if (n > size) { |
113 | /* try to allocate only as much as we need for | 109 | /* try to allocate only as much as we need for |
114 | * current packet */ | 110 | * current packet */ |
115 | skb = alloc_skb(size, GFP_ATOMIC); | 111 | skb = alloc_skb(size, GFP_ATOMIC); |
116 | if (!skb) | 112 | if (!skb) |
117 | PRINTR(KERN_ERR "ebt_ulog: can't even allocate " | 113 | pr_debug("cannot even allocate " |
118 | "buffer of size %ub\n", size); | 114 | "buffer of size %ub\n", size); |
119 | } | 115 | } |
120 | } | 116 | } |
121 | 117 | ||
@@ -142,8 +138,7 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb, | |||
142 | 138 | ||
143 | size = NLMSG_SPACE(sizeof(*pm) + copy_len); | 139 | size = NLMSG_SPACE(sizeof(*pm) + copy_len); |
144 | if (size > nlbufsiz) { | 140 | if (size > nlbufsiz) { |
145 | PRINTR("ebt_ulog: Size %Zd needed, but nlbufsiz=%d\n", | 141 | pr_debug("Size %Zd needed, but nlbufsiz=%d\n", size, nlbufsiz); |
146 | size, nlbufsiz); | ||
147 | return; | 142 | return; |
148 | } | 143 | } |
149 | 144 | ||
@@ -217,8 +212,8 @@ unlock: | |||
217 | return; | 212 | return; |
218 | 213 | ||
219 | nlmsg_failure: | 214 | nlmsg_failure: |
220 | printk(KERN_CRIT "ebt_ulog: error during NLMSG_PUT. This should " | 215 | pr_debug("error during NLMSG_PUT. This should " |
221 | "not happen, please report to author.\n"); | 216 | "not happen, please report to author.\n"); |
222 | goto unlock; | 217 | goto unlock; |
223 | alloc_failure: | 218 | alloc_failure: |
224 | goto unlock; | 219 | goto unlock; |
@@ -248,26 +243,26 @@ static void ebt_log_packet(u_int8_t pf, unsigned int hooknum, | |||
248 | } | 243 | } |
249 | 244 | ||
250 | static unsigned int | 245 | static unsigned int |
251 | ebt_ulog_tg(struct sk_buff *skb, const struct xt_target_param *par) | 246 | ebt_ulog_tg(struct sk_buff *skb, const struct xt_action_param *par) |
252 | { | 247 | { |
253 | ebt_ulog_packet(par->hooknum, skb, par->in, par->out, | 248 | ebt_ulog_packet(par->hooknum, skb, par->in, par->out, |
254 | par->targinfo, NULL); | 249 | par->targinfo, NULL); |
255 | return EBT_CONTINUE; | 250 | return EBT_CONTINUE; |
256 | } | 251 | } |
257 | 252 | ||
258 | static bool ebt_ulog_tg_check(const struct xt_tgchk_param *par) | 253 | static int ebt_ulog_tg_check(const struct xt_tgchk_param *par) |
259 | { | 254 | { |
260 | struct ebt_ulog_info *uloginfo = par->targinfo; | 255 | struct ebt_ulog_info *uloginfo = par->targinfo; |
261 | 256 | ||
262 | if (uloginfo->nlgroup > 31) | 257 | if (uloginfo->nlgroup > 31) |
263 | return false; | 258 | return -EINVAL; |
264 | 259 | ||
265 | uloginfo->prefix[EBT_ULOG_PREFIX_LEN - 1] = '\0'; | 260 | uloginfo->prefix[EBT_ULOG_PREFIX_LEN - 1] = '\0'; |
266 | 261 | ||
267 | if (uloginfo->qthreshold > EBT_ULOG_MAX_QLEN) | 262 | if (uloginfo->qthreshold > EBT_ULOG_MAX_QLEN) |
268 | uloginfo->qthreshold = EBT_ULOG_MAX_QLEN; | 263 | uloginfo->qthreshold = EBT_ULOG_MAX_QLEN; |
269 | 264 | ||
270 | return true; | 265 | return 0; |
271 | } | 266 | } |
272 | 267 | ||
273 | static struct xt_target ebt_ulog_tg_reg __read_mostly = { | 268 | static struct xt_target ebt_ulog_tg_reg __read_mostly = { |
@@ -292,8 +287,8 @@ static int __init ebt_ulog_init(void) | |||
292 | int i; | 287 | int i; |
293 | 288 | ||
294 | if (nlbufsiz >= 128*1024) { | 289 | if (nlbufsiz >= 128*1024) { |
295 | printk(KERN_NOTICE "ebt_ulog: Netlink buffer has to be <= 128kB," | 290 | pr_warning("Netlink buffer has to be <= 128kB," |
296 | " please try a smaller nlbufsiz parameter.\n"); | 291 | " please try a smaller nlbufsiz parameter.\n"); |
297 | return -EINVAL; | 292 | return -EINVAL; |
298 | } | 293 | } |
299 | 294 | ||
@@ -306,13 +301,10 @@ static int __init ebt_ulog_init(void) | |||
306 | ebtulognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, | 301 | ebtulognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, |
307 | EBT_ULOG_MAXNLGROUPS, NULL, NULL, | 302 | EBT_ULOG_MAXNLGROUPS, NULL, NULL, |
308 | THIS_MODULE); | 303 | THIS_MODULE); |
309 | if (!ebtulognl) { | 304 | if (!ebtulognl) |
310 | printk(KERN_WARNING KBUILD_MODNAME ": out of memory trying to " | ||
311 | "call netlink_kernel_create\n"); | ||
312 | ret = -ENOMEM; | 305 | ret = -ENOMEM; |
313 | } else if ((ret = xt_register_target(&ebt_ulog_tg_reg)) != 0) { | 306 | else if ((ret = xt_register_target(&ebt_ulog_tg_reg)) != 0) |
314 | netlink_kernel_release(ebtulognl); | 307 | netlink_kernel_release(ebtulognl); |
315 | } | ||
316 | 308 | ||
317 | if (ret == 0) | 309 | if (ret == 0) |
318 | nf_log_register(NFPROTO_BRIDGE, &ebt_ulog_logger); | 310 | nf_log_register(NFPROTO_BRIDGE, &ebt_ulog_logger); |
diff --git a/net/bridge/netfilter/ebt_vlan.c b/net/bridge/netfilter/ebt_vlan.c index be1dd2e1f615..87b53b3a921d 100644 --- a/net/bridge/netfilter/ebt_vlan.c +++ b/net/bridge/netfilter/ebt_vlan.c | |||
@@ -26,22 +26,17 @@ | |||
26 | #include <linux/netfilter_bridge/ebtables.h> | 26 | #include <linux/netfilter_bridge/ebtables.h> |
27 | #include <linux/netfilter_bridge/ebt_vlan.h> | 27 | #include <linux/netfilter_bridge/ebt_vlan.h> |
28 | 28 | ||
29 | static int debug; | ||
30 | #define MODULE_VERS "0.6" | 29 | #define MODULE_VERS "0.6" |
31 | 30 | ||
32 | module_param(debug, int, 0); | ||
33 | MODULE_PARM_DESC(debug, "debug=1 is turn on debug messages"); | ||
34 | MODULE_AUTHOR("Nick Fedchik <nick@fedchik.org.ua>"); | 31 | MODULE_AUTHOR("Nick Fedchik <nick@fedchik.org.ua>"); |
35 | MODULE_DESCRIPTION("Ebtables: 802.1Q VLAN tag match"); | 32 | MODULE_DESCRIPTION("Ebtables: 802.1Q VLAN tag match"); |
36 | MODULE_LICENSE("GPL"); | 33 | MODULE_LICENSE("GPL"); |
37 | 34 | ||
38 | |||
39 | #define DEBUG_MSG(args...) if (debug) printk (KERN_DEBUG "ebt_vlan: " args) | ||
40 | #define GET_BITMASK(_BIT_MASK_) info->bitmask & _BIT_MASK_ | 35 | #define GET_BITMASK(_BIT_MASK_) info->bitmask & _BIT_MASK_ |
41 | #define EXIT_ON_MISMATCH(_MATCH_,_MASK_) {if (!((info->_MATCH_ == _MATCH_)^!!(info->invflags & _MASK_))) return false; } | 36 | #define EXIT_ON_MISMATCH(_MATCH_,_MASK_) {if (!((info->_MATCH_ == _MATCH_)^!!(info->invflags & _MASK_))) return false; } |
42 | 37 | ||
43 | static bool | 38 | static bool |
44 | ebt_vlan_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 39 | ebt_vlan_mt(const struct sk_buff *skb, struct xt_action_param *par) |
45 | { | 40 | { |
46 | const struct ebt_vlan_info *info = par->matchinfo; | 41 | const struct ebt_vlan_info *info = par->matchinfo; |
47 | const struct vlan_hdr *fp; | 42 | const struct vlan_hdr *fp; |
@@ -84,32 +79,31 @@ ebt_vlan_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
84 | return true; | 79 | return true; |
85 | } | 80 | } |
86 | 81 | ||
87 | static bool ebt_vlan_mt_check(const struct xt_mtchk_param *par) | 82 | static int ebt_vlan_mt_check(const struct xt_mtchk_param *par) |
88 | { | 83 | { |
89 | struct ebt_vlan_info *info = par->matchinfo; | 84 | struct ebt_vlan_info *info = par->matchinfo; |
90 | const struct ebt_entry *e = par->entryinfo; | 85 | const struct ebt_entry *e = par->entryinfo; |
91 | 86 | ||
92 | /* Is it 802.1Q frame checked? */ | 87 | /* Is it 802.1Q frame checked? */ |
93 | if (e->ethproto != htons(ETH_P_8021Q)) { | 88 | if (e->ethproto != htons(ETH_P_8021Q)) { |
94 | DEBUG_MSG | 89 | pr_debug("passed entry proto %2.4X is not 802.1Q (8100)\n", |
95 | ("passed entry proto %2.4X is not 802.1Q (8100)\n", | 90 | ntohs(e->ethproto)); |
96 | (unsigned short) ntohs(e->ethproto)); | 91 | return -EINVAL; |
97 | return false; | ||
98 | } | 92 | } |
99 | 93 | ||
100 | /* Check for bitmask range | 94 | /* Check for bitmask range |
101 | * True if even one bit is out of mask */ | 95 | * True if even one bit is out of mask */ |
102 | if (info->bitmask & ~EBT_VLAN_MASK) { | 96 | if (info->bitmask & ~EBT_VLAN_MASK) { |
103 | DEBUG_MSG("bitmask %2X is out of mask (%2X)\n", | 97 | pr_debug("bitmask %2X is out of mask (%2X)\n", |
104 | info->bitmask, EBT_VLAN_MASK); | 98 | info->bitmask, EBT_VLAN_MASK); |
105 | return false; | 99 | return -EINVAL; |
106 | } | 100 | } |
107 | 101 | ||
108 | /* Check for inversion flags range */ | 102 | /* Check for inversion flags range */ |
109 | if (info->invflags & ~EBT_VLAN_MASK) { | 103 | if (info->invflags & ~EBT_VLAN_MASK) { |
110 | DEBUG_MSG("inversion flags %2X is out of mask (%2X)\n", | 104 | pr_debug("inversion flags %2X is out of mask (%2X)\n", |
111 | info->invflags, EBT_VLAN_MASK); | 105 | info->invflags, EBT_VLAN_MASK); |
112 | return false; | 106 | return -EINVAL; |
113 | } | 107 | } |
114 | 108 | ||
115 | /* Reserved VLAN ID (VID) values | 109 | /* Reserved VLAN ID (VID) values |
@@ -121,10 +115,9 @@ static bool ebt_vlan_mt_check(const struct xt_mtchk_param *par) | |||
121 | if (GET_BITMASK(EBT_VLAN_ID)) { | 115 | if (GET_BITMASK(EBT_VLAN_ID)) { |
122 | if (!!info->id) { /* if id!=0 => check vid range */ | 116 | if (!!info->id) { /* if id!=0 => check vid range */ |
123 | if (info->id > VLAN_GROUP_ARRAY_LEN) { | 117 | if (info->id > VLAN_GROUP_ARRAY_LEN) { |
124 | DEBUG_MSG | 118 | pr_debug("id %d is out of range (1-4096)\n", |
125 | ("id %d is out of range (1-4096)\n", | 119 | info->id); |
126 | info->id); | 120 | return -EINVAL; |
127 | return false; | ||
128 | } | 121 | } |
129 | /* Note: This is valid VLAN-tagged frame point. | 122 | /* Note: This is valid VLAN-tagged frame point. |
130 | * Any value of user_priority are acceptable, | 123 | * Any value of user_priority are acceptable, |
@@ -137,9 +130,9 @@ static bool ebt_vlan_mt_check(const struct xt_mtchk_param *par) | |||
137 | 130 | ||
138 | if (GET_BITMASK(EBT_VLAN_PRIO)) { | 131 | if (GET_BITMASK(EBT_VLAN_PRIO)) { |
139 | if ((unsigned char) info->prio > 7) { | 132 | if ((unsigned char) info->prio > 7) { |
140 | DEBUG_MSG("prio %d is out of range (0-7)\n", | 133 | pr_debug("prio %d is out of range (0-7)\n", |
141 | info->prio); | 134 | info->prio); |
142 | return false; | 135 | return -EINVAL; |
143 | } | 136 | } |
144 | } | 137 | } |
145 | /* Check for encapsulated proto range - it is possible to be | 138 | /* Check for encapsulated proto range - it is possible to be |
@@ -147,14 +140,13 @@ static bool ebt_vlan_mt_check(const struct xt_mtchk_param *par) | |||
147 | * if_ether.h: ETH_ZLEN 60 - Min. octets in frame sans FCS */ | 140 | * if_ether.h: ETH_ZLEN 60 - Min. octets in frame sans FCS */ |
148 | if (GET_BITMASK(EBT_VLAN_ENCAP)) { | 141 | if (GET_BITMASK(EBT_VLAN_ENCAP)) { |
149 | if ((unsigned short) ntohs(info->encap) < ETH_ZLEN) { | 142 | if ((unsigned short) ntohs(info->encap) < ETH_ZLEN) { |
150 | DEBUG_MSG | 143 | pr_debug("encap frame length %d is less than " |
151 | ("encap frame length %d is less than minimal\n", | 144 | "minimal\n", ntohs(info->encap)); |
152 | ntohs(info->encap)); | 145 | return -EINVAL; |
153 | return false; | ||
154 | } | 146 | } |
155 | } | 147 | } |
156 | 148 | ||
157 | return true; | 149 | return 0; |
158 | } | 150 | } |
159 | 151 | ||
160 | static struct xt_match ebt_vlan_mt_reg __read_mostly = { | 152 | static struct xt_match ebt_vlan_mt_reg __read_mostly = { |
@@ -169,9 +161,7 @@ static struct xt_match ebt_vlan_mt_reg __read_mostly = { | |||
169 | 161 | ||
170 | static int __init ebt_vlan_init(void) | 162 | static int __init ebt_vlan_init(void) |
171 | { | 163 | { |
172 | DEBUG_MSG("ebtables 802.1Q extension module v" | 164 | pr_debug("ebtables 802.1Q extension module v" MODULE_VERS "\n"); |
173 | MODULE_VERS "\n"); | ||
174 | DEBUG_MSG("module debug=%d\n", !!debug); | ||
175 | return xt_register_match(&ebt_vlan_mt_reg); | 165 | return xt_register_match(&ebt_vlan_mt_reg); |
176 | } | 166 | } |
177 | 167 | ||
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index f0865fd1e3ec..59ca00e40dec 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
@@ -14,8 +14,7 @@ | |||
14 | * as published by the Free Software Foundation; either version | 14 | * as published by the Free Software Foundation; either version |
15 | * 2 of the License, or (at your option) any later version. | 15 | * 2 of the License, or (at your option) any later version. |
16 | */ | 16 | */ |
17 | 17 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
18 | |||
19 | #include <linux/kmod.h> | 18 | #include <linux/kmod.h> |
20 | #include <linux/module.h> | 19 | #include <linux/module.h> |
21 | #include <linux/vmalloc.h> | 20 | #include <linux/vmalloc.h> |
@@ -87,7 +86,7 @@ static struct xt_target ebt_standard_target = { | |||
87 | 86 | ||
88 | static inline int | 87 | static inline int |
89 | ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb, | 88 | ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb, |
90 | struct xt_target_param *par) | 89 | struct xt_action_param *par) |
91 | { | 90 | { |
92 | par->target = w->u.watcher; | 91 | par->target = w->u.watcher; |
93 | par->targinfo = w->data; | 92 | par->targinfo = w->data; |
@@ -96,8 +95,9 @@ ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb, | |||
96 | return 0; | 95 | return 0; |
97 | } | 96 | } |
98 | 97 | ||
99 | static inline int ebt_do_match (struct ebt_entry_match *m, | 98 | static inline int |
100 | const struct sk_buff *skb, struct xt_match_param *par) | 99 | ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb, |
100 | struct xt_action_param *par) | ||
101 | { | 101 | { |
102 | par->match = m->u.match; | 102 | par->match = m->u.match; |
103 | par->matchinfo = m->data; | 103 | par->matchinfo = m->data; |
@@ -186,15 +186,13 @@ unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb, | |||
186 | struct ebt_entries *chaininfo; | 186 | struct ebt_entries *chaininfo; |
187 | const char *base; | 187 | const char *base; |
188 | const struct ebt_table_info *private; | 188 | const struct ebt_table_info *private; |
189 | bool hotdrop = false; | 189 | struct xt_action_param acpar; |
190 | struct xt_match_param mtpar; | ||
191 | struct xt_target_param tgpar; | ||
192 | 190 | ||
193 | mtpar.family = tgpar.family = NFPROTO_BRIDGE; | 191 | acpar.family = NFPROTO_BRIDGE; |
194 | mtpar.in = tgpar.in = in; | 192 | acpar.in = in; |
195 | mtpar.out = tgpar.out = out; | 193 | acpar.out = out; |
196 | mtpar.hotdrop = &hotdrop; | 194 | acpar.hotdrop = false; |
197 | mtpar.hooknum = tgpar.hooknum = hook; | 195 | acpar.hooknum = hook; |
198 | 196 | ||
199 | read_lock_bh(&table->lock); | 197 | read_lock_bh(&table->lock); |
200 | private = table->private; | 198 | private = table->private; |
@@ -215,9 +213,9 @@ unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb, | |||
215 | if (ebt_basic_match(point, eth_hdr(skb), in, out)) | 213 | if (ebt_basic_match(point, eth_hdr(skb), in, out)) |
216 | goto letscontinue; | 214 | goto letscontinue; |
217 | 215 | ||
218 | if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &mtpar) != 0) | 216 | if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0) |
219 | goto letscontinue; | 217 | goto letscontinue; |
220 | if (hotdrop) { | 218 | if (acpar.hotdrop) { |
221 | read_unlock_bh(&table->lock); | 219 | read_unlock_bh(&table->lock); |
222 | return NF_DROP; | 220 | return NF_DROP; |
223 | } | 221 | } |
@@ -228,7 +226,7 @@ unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb, | |||
228 | 226 | ||
229 | /* these should only watch: not modify, nor tell us | 227 | /* these should only watch: not modify, nor tell us |
230 | what to do with the packet */ | 228 | what to do with the packet */ |
231 | EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &tgpar); | 229 | EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar); |
232 | 230 | ||
233 | t = (struct ebt_entry_target *) | 231 | t = (struct ebt_entry_target *) |
234 | (((char *)point) + point->target_offset); | 232 | (((char *)point) + point->target_offset); |
@@ -236,9 +234,9 @@ unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb, | |||
236 | if (!t->u.target->target) | 234 | if (!t->u.target->target) |
237 | verdict = ((struct ebt_standard_target *)t)->verdict; | 235 | verdict = ((struct ebt_standard_target *)t)->verdict; |
238 | else { | 236 | else { |
239 | tgpar.target = t->u.target; | 237 | acpar.target = t->u.target; |
240 | tgpar.targinfo = t->data; | 238 | acpar.targinfo = t->data; |
241 | verdict = t->u.target->target(skb, &tgpar); | 239 | verdict = t->u.target->target(skb, &acpar); |
242 | } | 240 | } |
243 | if (verdict == EBT_ACCEPT) { | 241 | if (verdict == EBT_ACCEPT) { |
244 | read_unlock_bh(&table->lock); | 242 | read_unlock_bh(&table->lock); |
@@ -363,12 +361,9 @@ ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par, | |||
363 | left - sizeof(struct ebt_entry_match) < m->match_size) | 361 | left - sizeof(struct ebt_entry_match) < m->match_size) |
364 | return -EINVAL; | 362 | return -EINVAL; |
365 | 363 | ||
366 | match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE, | 364 | match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0); |
367 | m->u.name, 0), "ebt_%s", m->u.name); | ||
368 | if (IS_ERR(match)) | 365 | if (IS_ERR(match)) |
369 | return PTR_ERR(match); | 366 | return PTR_ERR(match); |
370 | if (match == NULL) | ||
371 | return -ENOENT; | ||
372 | m->u.match = match; | 367 | m->u.match = match; |
373 | 368 | ||
374 | par->match = match; | 369 | par->match = match; |
@@ -397,13 +392,9 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par, | |||
397 | left - sizeof(struct ebt_entry_watcher) < w->watcher_size) | 392 | left - sizeof(struct ebt_entry_watcher) < w->watcher_size) |
398 | return -EINVAL; | 393 | return -EINVAL; |
399 | 394 | ||
400 | watcher = try_then_request_module( | 395 | watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0); |
401 | xt_find_target(NFPROTO_BRIDGE, w->u.name, 0), | ||
402 | "ebt_%s", w->u.name); | ||
403 | if (IS_ERR(watcher)) | 396 | if (IS_ERR(watcher)) |
404 | return PTR_ERR(watcher); | 397 | return PTR_ERR(watcher); |
405 | if (watcher == NULL) | ||
406 | return -ENOENT; | ||
407 | w->u.watcher = watcher; | 398 | w->u.watcher = watcher; |
408 | 399 | ||
409 | par->target = watcher; | 400 | par->target = watcher; |
@@ -716,15 +707,10 @@ ebt_check_entry(struct ebt_entry *e, struct net *net, | |||
716 | t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); | 707 | t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); |
717 | gap = e->next_offset - e->target_offset; | 708 | gap = e->next_offset - e->target_offset; |
718 | 709 | ||
719 | target = try_then_request_module( | 710 | target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0); |
720 | xt_find_target(NFPROTO_BRIDGE, t->u.name, 0), | ||
721 | "ebt_%s", t->u.name); | ||
722 | if (IS_ERR(target)) { | 711 | if (IS_ERR(target)) { |
723 | ret = PTR_ERR(target); | 712 | ret = PTR_ERR(target); |
724 | goto cleanup_watchers; | 713 | goto cleanup_watchers; |
725 | } else if (target == NULL) { | ||
726 | ret = -ENOENT; | ||
727 | goto cleanup_watchers; | ||
728 | } | 714 | } |
729 | 715 | ||
730 | t->u.target = target; | 716 | t->u.target = target; |
@@ -2128,7 +2114,7 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, | |||
2128 | return ret; | 2114 | return ret; |
2129 | new_offset += ret; | 2115 | new_offset += ret; |
2130 | if (offsets_update && new_offset) { | 2116 | if (offsets_update && new_offset) { |
2131 | pr_debug("ebtables: change offset %d to %d\n", | 2117 | pr_debug("change offset %d to %d\n", |
2132 | offsets_update[i], offsets[j] + new_offset); | 2118 | offsets_update[i], offsets[j] + new_offset); |
2133 | offsets_update[i] = offsets[j] + new_offset; | 2119 | offsets_update[i] = offsets[j] + new_offset; |
2134 | } | 2120 | } |
diff --git a/net/caif/Kconfig b/net/caif/Kconfig index cd1daf6008bd..ed651786f16b 100644 --- a/net/caif/Kconfig +++ b/net/caif/Kconfig | |||
@@ -2,10 +2,8 @@ | |||
2 | # CAIF net configurations | 2 | # CAIF net configurations |
3 | # | 3 | # |
4 | 4 | ||
5 | #menu "CAIF Support" | ||
6 | comment "CAIF Support" | ||
7 | menuconfig CAIF | 5 | menuconfig CAIF |
8 | tristate "Enable CAIF support" | 6 | tristate "CAIF support" |
9 | select CRC_CCITT | 7 | select CRC_CCITT |
10 | default n | 8 | default n |
11 | ---help--- | 9 | ---help--- |
@@ -45,4 +43,3 @@ config CAIF_NETDEV | |||
45 | If unsure say Y. | 43 | If unsure say Y. |
46 | 44 | ||
47 | endif | 45 | endif |
48 | #endmenu | ||
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index 024fd5bb2d39..e2b86f1f5a47 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c | |||
@@ -112,7 +112,6 @@ static void caif_device_destroy(struct net_device *dev) | |||
112 | spin_unlock_bh(&caifdevs->lock); | 112 | spin_unlock_bh(&caifdevs->lock); |
113 | 113 | ||
114 | kfree(caifd); | 114 | kfree(caifd); |
115 | return; | ||
116 | } | 115 | } |
117 | 116 | ||
118 | static int transmit(struct cflayer *layer, struct cfpkt *pkt) | 117 | static int transmit(struct cflayer *layer, struct cfpkt *pkt) |
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index c3a70c5c893a..3d0e09584fae 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c | |||
@@ -60,7 +60,7 @@ struct debug_fs_counter { | |||
60 | atomic_t num_rx_flow_off; | 60 | atomic_t num_rx_flow_off; |
61 | atomic_t num_rx_flow_on; | 61 | atomic_t num_rx_flow_on; |
62 | }; | 62 | }; |
63 | struct debug_fs_counter cnt; | 63 | static struct debug_fs_counter cnt; |
64 | #define dbfs_atomic_inc(v) atomic_inc(v) | 64 | #define dbfs_atomic_inc(v) atomic_inc(v) |
65 | #define dbfs_atomic_dec(v) atomic_dec(v) | 65 | #define dbfs_atomic_dec(v) atomic_dec(v) |
66 | #else | 66 | #else |
@@ -128,17 +128,17 @@ static void caif_read_unlock(struct sock *sk) | |||
128 | mutex_unlock(&cf_sk->readlock); | 128 | mutex_unlock(&cf_sk->readlock); |
129 | } | 129 | } |
130 | 130 | ||
131 | int sk_rcvbuf_lowwater(struct caifsock *cf_sk) | 131 | static int sk_rcvbuf_lowwater(struct caifsock *cf_sk) |
132 | { | 132 | { |
133 | /* A quarter of full buffer is used a low water mark */ | 133 | /* A quarter of full buffer is used a low water mark */ |
134 | return cf_sk->sk.sk_rcvbuf / 4; | 134 | return cf_sk->sk.sk_rcvbuf / 4; |
135 | } | 135 | } |
136 | 136 | ||
137 | void caif_flow_ctrl(struct sock *sk, int mode) | 137 | static void caif_flow_ctrl(struct sock *sk, int mode) |
138 | { | 138 | { |
139 | struct caifsock *cf_sk; | 139 | struct caifsock *cf_sk; |
140 | cf_sk = container_of(sk, struct caifsock, sk); | 140 | cf_sk = container_of(sk, struct caifsock, sk); |
141 | if (cf_sk->layer.dn) | 141 | if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd) |
142 | cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode); | 142 | cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode); |
143 | } | 143 | } |
144 | 144 | ||
@@ -146,7 +146,7 @@ void caif_flow_ctrl(struct sock *sk, int mode) | |||
146 | * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are | 146 | * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are |
147 | * not dropped, but CAIF is sending flow off instead. | 147 | * not dropped, but CAIF is sending flow off instead. |
148 | */ | 148 | */ |
149 | int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 149 | static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
150 | { | 150 | { |
151 | int err; | 151 | int err; |
152 | int skb_len; | 152 | int skb_len; |
@@ -162,9 +162,8 @@ int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
162 | atomic_read(&cf_sk->sk.sk_rmem_alloc), | 162 | atomic_read(&cf_sk->sk.sk_rmem_alloc), |
163 | sk_rcvbuf_lowwater(cf_sk)); | 163 | sk_rcvbuf_lowwater(cf_sk)); |
164 | set_rx_flow_off(cf_sk); | 164 | set_rx_flow_off(cf_sk); |
165 | if (cf_sk->layer.dn) | 165 | dbfs_atomic_inc(&cnt.num_rx_flow_off); |
166 | cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, | 166 | caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); |
167 | CAIF_MODEMCMD_FLOW_OFF_REQ); | ||
168 | } | 167 | } |
169 | 168 | ||
170 | err = sk_filter(sk, skb); | 169 | err = sk_filter(sk, skb); |
@@ -175,9 +174,8 @@ int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
175 | trace_printk("CAIF: %s():" | 174 | trace_printk("CAIF: %s():" |
176 | " sending flow OFF due to rmem_schedule\n", | 175 | " sending flow OFF due to rmem_schedule\n", |
177 | __func__); | 176 | __func__); |
178 | if (cf_sk->layer.dn) | 177 | dbfs_atomic_inc(&cnt.num_rx_flow_off); |
179 | cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, | 178 | caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); |
180 | CAIF_MODEMCMD_FLOW_OFF_REQ); | ||
181 | } | 179 | } |
182 | skb->dev = NULL; | 180 | skb->dev = NULL; |
183 | skb_set_owner_r(skb, sk); | 181 | skb_set_owner_r(skb, sk); |
@@ -285,65 +283,51 @@ static void caif_check_flow_release(struct sock *sk) | |||
285 | { | 283 | { |
286 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); | 284 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); |
287 | 285 | ||
288 | if (cf_sk->layer.dn == NULL || cf_sk->layer.dn->modemcmd == NULL) | ||
289 | return; | ||
290 | if (rx_flow_is_on(cf_sk)) | 286 | if (rx_flow_is_on(cf_sk)) |
291 | return; | 287 | return; |
292 | 288 | ||
293 | if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) { | 289 | if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) { |
294 | dbfs_atomic_inc(&cnt.num_rx_flow_on); | 290 | dbfs_atomic_inc(&cnt.num_rx_flow_on); |
295 | set_rx_flow_on(cf_sk); | 291 | set_rx_flow_on(cf_sk); |
296 | cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, | 292 | caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ); |
297 | CAIF_MODEMCMD_FLOW_ON_REQ); | ||
298 | } | 293 | } |
299 | } | 294 | } |
295 | |||
300 | /* | 296 | /* |
301 | * Copied from sock.c:sock_queue_rcv_skb(), and added check that user buffer | 297 | * Copied from unix_dgram_recvmsg, but removed credit checks, |
302 | * has sufficient size. | 298 | * changed locking, address handling and added MSG_TRUNC. |
303 | */ | 299 | */ |
304 | |||
305 | static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, | 300 | static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, |
306 | struct msghdr *m, size_t buf_len, int flags) | 301 | struct msghdr *m, size_t len, int flags) |
307 | 302 | ||
308 | { | 303 | { |
309 | struct sock *sk = sock->sk; | 304 | struct sock *sk = sock->sk; |
310 | struct sk_buff *skb; | 305 | struct sk_buff *skb; |
311 | int ret = 0; | 306 | int ret; |
312 | int len; | 307 | int copylen; |
313 | 308 | ||
314 | if (unlikely(!buf_len)) | 309 | ret = -EOPNOTSUPP; |
315 | return -EINVAL; | 310 | if (m->msg_flags&MSG_OOB) |
311 | goto read_error; | ||
316 | 312 | ||
317 | skb = skb_recv_datagram(sk, flags, 0 , &ret); | 313 | skb = skb_recv_datagram(sk, flags, 0 , &ret); |
318 | if (!skb) | 314 | if (!skb) |
319 | goto read_error; | 315 | goto read_error; |
320 | 316 | copylen = skb->len; | |
321 | len = skb->len; | 317 | if (len < copylen) { |
322 | 318 | m->msg_flags |= MSG_TRUNC; | |
323 | if (skb && skb->len > buf_len && !(flags & MSG_PEEK)) { | 319 | copylen = len; |
324 | len = buf_len; | ||
325 | /* | ||
326 | * Push skb back on receive queue if buffer too small. | ||
327 | * This has a built-in race where multi-threaded receive | ||
328 | * may get packet in wrong order, but multiple read does | ||
329 | * not really guarantee ordered delivery anyway. | ||
330 | * Let's optimize for speed without taking locks. | ||
331 | */ | ||
332 | |||
333 | skb_queue_head(&sk->sk_receive_queue, skb); | ||
334 | ret = -EMSGSIZE; | ||
335 | goto read_error; | ||
336 | } | 320 | } |
337 | 321 | ||
338 | ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len); | 322 | ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen); |
339 | if (ret) | 323 | if (ret) |
340 | goto read_error; | 324 | goto out_free; |
341 | 325 | ||
326 | ret = (flags & MSG_TRUNC) ? skb->len : copylen; | ||
327 | out_free: | ||
342 | skb_free_datagram(sk, skb); | 328 | skb_free_datagram(sk, skb); |
343 | |||
344 | caif_check_flow_release(sk); | 329 | caif_check_flow_release(sk); |
345 | 330 | return ret; | |
346 | return len; | ||
347 | 331 | ||
348 | read_error: | 332 | read_error: |
349 | return ret; | 333 | return ret; |
@@ -920,17 +904,17 @@ wait_connect: | |||
920 | timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); | 904 | timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); |
921 | 905 | ||
922 | release_sock(sk); | 906 | release_sock(sk); |
923 | err = wait_event_interruptible_timeout(*sk_sleep(sk), | 907 | err = -ERESTARTSYS; |
908 | timeo = wait_event_interruptible_timeout(*sk_sleep(sk), | ||
924 | sk->sk_state != CAIF_CONNECTING, | 909 | sk->sk_state != CAIF_CONNECTING, |
925 | timeo); | 910 | timeo); |
926 | lock_sock(sk); | 911 | lock_sock(sk); |
927 | if (err < 0) | 912 | if (timeo < 0) |
928 | goto out; /* -ERESTARTSYS */ | 913 | goto out; /* -ERESTARTSYS */ |
929 | if (err == 0 && sk->sk_state != CAIF_CONNECTED) { | ||
930 | err = -ETIMEDOUT; | ||
931 | goto out; | ||
932 | } | ||
933 | 914 | ||
915 | err = -ETIMEDOUT; | ||
916 | if (timeo == 0 && sk->sk_state != CAIF_CONNECTED) | ||
917 | goto out; | ||
934 | if (sk->sk_state != CAIF_CONNECTED) { | 918 | if (sk->sk_state != CAIF_CONNECTED) { |
935 | sock->state = SS_UNCONNECTED; | 919 | sock->state = SS_UNCONNECTED; |
936 | err = sock_error(sk); | 920 | err = sock_error(sk); |
@@ -945,7 +929,6 @@ out: | |||
945 | return err; | 929 | return err; |
946 | } | 930 | } |
947 | 931 | ||
948 | |||
949 | /* | 932 | /* |
950 | * caif_release() - Disconnect a CAIF Socket | 933 | * caif_release() - Disconnect a CAIF Socket |
951 | * Copied and modified af_irda.c:irda_release(). | 934 | * Copied and modified af_irda.c:irda_release(). |
@@ -1019,10 +1002,6 @@ static unsigned int caif_poll(struct file *file, | |||
1019 | (sk->sk_shutdown & RCV_SHUTDOWN)) | 1002 | (sk->sk_shutdown & RCV_SHUTDOWN)) |
1020 | mask |= POLLIN | POLLRDNORM; | 1003 | mask |= POLLIN | POLLRDNORM; |
1021 | 1004 | ||
1022 | /* Connection-based need to check for termination and startup */ | ||
1023 | if (sk->sk_state == CAIF_DISCONNECTED) | ||
1024 | mask |= POLLHUP; | ||
1025 | |||
1026 | /* | 1005 | /* |
1027 | * we set writable also when the other side has shut down the | 1006 | * we set writable also when the other side has shut down the |
1028 | * connection. This prevents stuck sockets. | 1007 | * connection. This prevents stuck sockets. |
@@ -1194,7 +1173,7 @@ static struct net_proto_family caif_family_ops = { | |||
1194 | .owner = THIS_MODULE, | 1173 | .owner = THIS_MODULE, |
1195 | }; | 1174 | }; |
1196 | 1175 | ||
1197 | int af_caif_init(void) | 1176 | static int af_caif_init(void) |
1198 | { | 1177 | { |
1199 | int err = sock_register(&caif_family_ops); | 1178 | int err = sock_register(&caif_family_ops); |
1200 | if (!err) | 1179 | if (!err) |
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c index 471c62939fad..df43f264d9fb 100644 --- a/net/caif/cfcnfg.c +++ b/net/caif/cfcnfg.c | |||
@@ -65,12 +65,11 @@ struct cfcnfg *cfcnfg_create(void) | |||
65 | struct cfcnfg *this; | 65 | struct cfcnfg *this; |
66 | struct cfctrl_rsp *resp; | 66 | struct cfctrl_rsp *resp; |
67 | /* Initiate this layer */ | 67 | /* Initiate this layer */ |
68 | this = kmalloc(sizeof(struct cfcnfg), GFP_ATOMIC); | 68 | this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC); |
69 | if (!this) { | 69 | if (!this) { |
70 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | 70 | pr_warning("CAIF: %s(): Out of memory\n", __func__); |
71 | return NULL; | 71 | return NULL; |
72 | } | 72 | } |
73 | memset(this, 0, sizeof(struct cfcnfg)); | ||
74 | this->mux = cfmuxl_create(); | 73 | this->mux = cfmuxl_create(); |
75 | if (!this->mux) | 74 | if (!this->mux) |
76 | goto out_of_mem; | 75 | goto out_of_mem; |
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c index a521d32cfe56..fcfda98a5e6d 100644 --- a/net/caif/cfctrl.c +++ b/net/caif/cfctrl.c | |||
@@ -44,13 +44,14 @@ struct cflayer *cfctrl_create(void) | |||
44 | dev_info.id = 0xff; | 44 | dev_info.id = 0xff; |
45 | memset(this, 0, sizeof(*this)); | 45 | memset(this, 0, sizeof(*this)); |
46 | cfsrvl_init(&this->serv, 0, &dev_info); | 46 | cfsrvl_init(&this->serv, 0, &dev_info); |
47 | spin_lock_init(&this->info_list_lock); | ||
48 | atomic_set(&this->req_seq_no, 1); | 47 | atomic_set(&this->req_seq_no, 1); |
49 | atomic_set(&this->rsp_seq_no, 1); | 48 | atomic_set(&this->rsp_seq_no, 1); |
50 | this->serv.layer.receive = cfctrl_recv; | 49 | this->serv.layer.receive = cfctrl_recv; |
51 | sprintf(this->serv.layer.name, "ctrl"); | 50 | sprintf(this->serv.layer.name, "ctrl"); |
52 | this->serv.layer.ctrlcmd = cfctrl_ctrlcmd; | 51 | this->serv.layer.ctrlcmd = cfctrl_ctrlcmd; |
53 | spin_lock_init(&this->loop_linkid_lock); | 52 | spin_lock_init(&this->loop_linkid_lock); |
53 | spin_lock_init(&this->info_list_lock); | ||
54 | INIT_LIST_HEAD(&this->list); | ||
54 | this->loop_linkid = 1; | 55 | this->loop_linkid = 1; |
55 | return &this->serv.layer; | 56 | return &this->serv.layer; |
56 | } | 57 | } |
@@ -112,20 +113,10 @@ bool cfctrl_req_eq(struct cfctrl_request_info *r1, | |||
112 | void cfctrl_insert_req(struct cfctrl *ctrl, | 113 | void cfctrl_insert_req(struct cfctrl *ctrl, |
113 | struct cfctrl_request_info *req) | 114 | struct cfctrl_request_info *req) |
114 | { | 115 | { |
115 | struct cfctrl_request_info *p; | ||
116 | spin_lock(&ctrl->info_list_lock); | 116 | spin_lock(&ctrl->info_list_lock); |
117 | req->next = NULL; | ||
118 | atomic_inc(&ctrl->req_seq_no); | 117 | atomic_inc(&ctrl->req_seq_no); |
119 | req->sequence_no = atomic_read(&ctrl->req_seq_no); | 118 | req->sequence_no = atomic_read(&ctrl->req_seq_no); |
120 | if (ctrl->first_req == NULL) { | 119 | list_add_tail(&req->list, &ctrl->list); |
121 | ctrl->first_req = req; | ||
122 | spin_unlock(&ctrl->info_list_lock); | ||
123 | return; | ||
124 | } | ||
125 | p = ctrl->first_req; | ||
126 | while (p->next != NULL) | ||
127 | p = p->next; | ||
128 | p->next = req; | ||
129 | spin_unlock(&ctrl->info_list_lock); | 120 | spin_unlock(&ctrl->info_list_lock); |
130 | } | 121 | } |
131 | 122 | ||
@@ -133,46 +124,28 @@ void cfctrl_insert_req(struct cfctrl *ctrl, | |||
133 | struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl, | 124 | struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl, |
134 | struct cfctrl_request_info *req) | 125 | struct cfctrl_request_info *req) |
135 | { | 126 | { |
136 | struct cfctrl_request_info *p; | 127 | struct cfctrl_request_info *p, *tmp, *first; |
137 | struct cfctrl_request_info *ret; | ||
138 | 128 | ||
139 | spin_lock(&ctrl->info_list_lock); | 129 | spin_lock(&ctrl->info_list_lock); |
140 | if (ctrl->first_req == NULL) { | 130 | first = list_first_entry(&ctrl->list, struct cfctrl_request_info, list); |
141 | spin_unlock(&ctrl->info_list_lock); | ||
142 | return NULL; | ||
143 | } | ||
144 | |||
145 | if (cfctrl_req_eq(req, ctrl->first_req)) { | ||
146 | ret = ctrl->first_req; | ||
147 | caif_assert(ctrl->first_req); | ||
148 | atomic_set(&ctrl->rsp_seq_no, | ||
149 | ctrl->first_req->sequence_no); | ||
150 | ctrl->first_req = ctrl->first_req->next; | ||
151 | spin_unlock(&ctrl->info_list_lock); | ||
152 | return ret; | ||
153 | } | ||
154 | 131 | ||
155 | p = ctrl->first_req; | 132 | list_for_each_entry_safe(p, tmp, &ctrl->list, list) { |
156 | 133 | if (cfctrl_req_eq(req, p)) { | |
157 | while (p->next != NULL) { | 134 | if (p != first) |
158 | if (cfctrl_req_eq(req, p->next)) { | 135 | pr_warning("CAIF: %s(): Requests are not " |
159 | pr_warning("CAIF: %s(): Requests are not " | ||
160 | "received in order\n", | 136 | "received in order\n", |
161 | __func__); | 137 | __func__); |
162 | ret = p->next; | 138 | |
163 | atomic_set(&ctrl->rsp_seq_no, | 139 | atomic_set(&ctrl->rsp_seq_no, |
164 | p->next->sequence_no); | 140 | p->sequence_no); |
165 | p->next = p->next->next; | 141 | list_del(&p->list); |
166 | spin_unlock(&ctrl->info_list_lock); | 142 | goto out; |
167 | return ret; | ||
168 | } | 143 | } |
169 | p = p->next; | ||
170 | } | 144 | } |
145 | p = NULL; | ||
146 | out: | ||
171 | spin_unlock(&ctrl->info_list_lock); | 147 | spin_unlock(&ctrl->info_list_lock); |
172 | 148 | return p; | |
173 | pr_warning("CAIF: %s(): Request does not match\n", | ||
174 | __func__); | ||
175 | return NULL; | ||
176 | } | 149 | } |
177 | 150 | ||
178 | struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer) | 151 | struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer) |
@@ -284,12 +257,11 @@ int cfctrl_linkup_request(struct cflayer *layer, | |||
284 | __func__, param->linktype); | 257 | __func__, param->linktype); |
285 | return -EINVAL; | 258 | return -EINVAL; |
286 | } | 259 | } |
287 | req = kmalloc(sizeof(*req), GFP_KERNEL); | 260 | req = kzalloc(sizeof(*req), GFP_KERNEL); |
288 | if (!req) { | 261 | if (!req) { |
289 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | 262 | pr_warning("CAIF: %s(): Out of memory\n", __func__); |
290 | return -ENOMEM; | 263 | return -ENOMEM; |
291 | } | 264 | } |
292 | memset(req, 0, sizeof(*req)); | ||
293 | req->client_layer = user_layer; | 265 | req->client_layer = user_layer; |
294 | req->cmd = CFCTRL_CMD_LINK_SETUP; | 266 | req->cmd = CFCTRL_CMD_LINK_SETUP; |
295 | req->param = *param; | 267 | req->param = *param; |
@@ -389,31 +361,18 @@ void cfctrl_getstartreason_req(struct cflayer *layer) | |||
389 | 361 | ||
390 | void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer) | 362 | void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer) |
391 | { | 363 | { |
392 | struct cfctrl_request_info *p, *req; | 364 | struct cfctrl_request_info *p, *tmp; |
393 | struct cfctrl *ctrl = container_obj(layr); | 365 | struct cfctrl *ctrl = container_obj(layr); |
394 | spin_lock(&ctrl->info_list_lock); | 366 | spin_lock(&ctrl->info_list_lock); |
395 | 367 | pr_warning("CAIF: %s(): enter\n", __func__); | |
396 | if (ctrl->first_req == NULL) { | 368 | |
397 | spin_unlock(&ctrl->info_list_lock); | 369 | list_for_each_entry_safe(p, tmp, &ctrl->list, list) { |
398 | return; | 370 | if (p->client_layer == adap_layer) { |
399 | } | 371 | pr_warning("CAIF: %s(): cancel req :%d\n", __func__, |
400 | 372 | p->sequence_no); | |
401 | if (ctrl->first_req->client_layer == adap_layer) { | 373 | list_del(&p->list); |
402 | 374 | kfree(p); | |
403 | req = ctrl->first_req; | ||
404 | ctrl->first_req = ctrl->first_req->next; | ||
405 | kfree(req); | ||
406 | } | ||
407 | |||
408 | p = ctrl->first_req; | ||
409 | while (p != NULL && p->next != NULL) { | ||
410 | if (p->next->client_layer == adap_layer) { | ||
411 | |||
412 | req = p->next; | ||
413 | p->next = p->next->next; | ||
414 | kfree(p->next); | ||
415 | } | 375 | } |
416 | p = p->next; | ||
417 | } | 376 | } |
418 | 377 | ||
419 | spin_unlock(&ctrl->info_list_lock); | 378 | spin_unlock(&ctrl->info_list_lock); |
@@ -635,7 +594,7 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | |||
635 | case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND: | 594 | case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND: |
636 | case CAIF_CTRLCMD_FLOW_OFF_IND: | 595 | case CAIF_CTRLCMD_FLOW_OFF_IND: |
637 | spin_lock(&this->info_list_lock); | 596 | spin_lock(&this->info_list_lock); |
638 | if (this->first_req != NULL) { | 597 | if (!list_empty(&this->list)) { |
639 | pr_debug("CAIF: %s(): Received flow off in " | 598 | pr_debug("CAIF: %s(): Received flow off in " |
640 | "control layer", __func__); | 599 | "control layer", __func__); |
641 | } | 600 | } |
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c index 7372f27f1d32..80c8d332b258 100644 --- a/net/caif/cfmuxl.c +++ b/net/caif/cfmuxl.c | |||
@@ -174,10 +174,11 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id) | |||
174 | spin_lock(&muxl->receive_lock); | 174 | spin_lock(&muxl->receive_lock); |
175 | up = get_up(muxl, id); | 175 | up = get_up(muxl, id); |
176 | if (up == NULL) | 176 | if (up == NULL) |
177 | return NULL; | 177 | goto out; |
178 | memset(muxl->up_cache, 0, sizeof(muxl->up_cache)); | 178 | memset(muxl->up_cache, 0, sizeof(muxl->up_cache)); |
179 | list_del(&up->node); | 179 | list_del(&up->node); |
180 | cfsrvl_put(up); | 180 | cfsrvl_put(up); |
181 | out: | ||
181 | spin_unlock(&muxl->receive_lock); | 182 | spin_unlock(&muxl->receive_lock); |
182 | return up; | 183 | return up; |
183 | } | 184 | } |
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c index 83fff2ff6658..a6fdf899741a 100644 --- a/net/caif/cfpkt_skbuff.c +++ b/net/caif/cfpkt_skbuff.c | |||
@@ -238,6 +238,7 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len) | |||
238 | struct sk_buff *lastskb; | 238 | struct sk_buff *lastskb; |
239 | u8 *to; | 239 | u8 *to; |
240 | const u8 *data = data2; | 240 | const u8 *data = data2; |
241 | int ret; | ||
241 | if (unlikely(is_erronous(pkt))) | 242 | if (unlikely(is_erronous(pkt))) |
242 | return -EPROTO; | 243 | return -EPROTO; |
243 | if (unlikely(skb_headroom(skb) < len)) { | 244 | if (unlikely(skb_headroom(skb) < len)) { |
@@ -246,9 +247,10 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len) | |||
246 | } | 247 | } |
247 | 248 | ||
248 | /* Make sure data is writable */ | 249 | /* Make sure data is writable */ |
249 | if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) { | 250 | ret = skb_cow_data(skb, 0, &lastskb); |
251 | if (unlikely(ret < 0)) { | ||
250 | PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n"); | 252 | PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n"); |
251 | return -EPROTO; | 253 | return ret; |
252 | } | 254 | } |
253 | 255 | ||
254 | to = skb_push(skb, len); | 256 | to = skb_push(skb, len); |
@@ -316,6 +318,8 @@ EXPORT_SYMBOL(cfpkt_setlen); | |||
316 | struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len) | 318 | struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len) |
317 | { | 319 | { |
318 | struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX); | 320 | struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX); |
321 | if (!pkt) | ||
322 | return NULL; | ||
319 | if (unlikely(data != NULL)) | 323 | if (unlikely(data != NULL)) |
320 | cfpkt_add_body(pkt, data, len); | 324 | cfpkt_add_body(pkt, data, len); |
321 | return pkt; | 325 | return pkt; |
@@ -344,12 +348,13 @@ struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, | |||
344 | 348 | ||
345 | if (dst->tail + neededtailspace > dst->end) { | 349 | if (dst->tail + neededtailspace > dst->end) { |
346 | /* Create a dumplicate of 'dst' with more tail space */ | 350 | /* Create a dumplicate of 'dst' with more tail space */ |
351 | struct cfpkt *tmppkt; | ||
347 | dstlen = skb_headlen(dst); | 352 | dstlen = skb_headlen(dst); |
348 | createlen = dstlen + neededtailspace; | 353 | createlen = dstlen + neededtailspace; |
349 | tmp = pkt_to_skb( | 354 | tmppkt = cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX); |
350 | cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX)); | 355 | if (tmppkt == NULL) |
351 | if (!tmp) | ||
352 | return NULL; | 356 | return NULL; |
357 | tmp = pkt_to_skb(tmppkt); | ||
353 | skb_set_tail_pointer(tmp, dstlen); | 358 | skb_set_tail_pointer(tmp, dstlen); |
354 | tmp->len = dstlen; | 359 | tmp->len = dstlen; |
355 | memcpy(tmp->data, dst->data, dstlen); | 360 | memcpy(tmp->data, dst->data, dstlen); |
@@ -368,6 +373,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos) | |||
368 | { | 373 | { |
369 | struct sk_buff *skb2; | 374 | struct sk_buff *skb2; |
370 | struct sk_buff *skb = pkt_to_skb(pkt); | 375 | struct sk_buff *skb = pkt_to_skb(pkt); |
376 | struct cfpkt *tmppkt; | ||
371 | u8 *split = skb->data + pos; | 377 | u8 *split = skb->data + pos; |
372 | u16 len2nd = skb_tail_pointer(skb) - split; | 378 | u16 len2nd = skb_tail_pointer(skb) - split; |
373 | 379 | ||
@@ -381,9 +387,12 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos) | |||
381 | } | 387 | } |
382 | 388 | ||
383 | /* Create a new packet for the second part of the data */ | 389 | /* Create a new packet for the second part of the data */ |
384 | skb2 = pkt_to_skb( | 390 | tmppkt = cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX, |
385 | cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX, | 391 | PKT_PREFIX); |
386 | PKT_PREFIX)); | 392 | if (tmppkt == NULL) |
393 | return NULL; | ||
394 | skb2 = pkt_to_skb(tmppkt); | ||
395 | |||
387 | 396 | ||
388 | if (skb2 == NULL) | 397 | if (skb2 == NULL) |
389 | return NULL; | 398 | return NULL; |
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c index cd2830fec935..fd27b172fb5d 100644 --- a/net/caif/cfrfml.c +++ b/net/caif/cfrfml.c | |||
@@ -83,7 +83,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt) | |||
83 | if (!cfsrvl_ready(service, &ret)) | 83 | if (!cfsrvl_ready(service, &ret)) |
84 | return ret; | 84 | return ret; |
85 | 85 | ||
86 | if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { | 86 | if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { |
87 | pr_err("CAIF: %s():Packet too large - size=%d\n", | 87 | pr_err("CAIF: %s():Packet too large - size=%d\n", |
88 | __func__, cfpkt_getlen(pkt)); | 88 | __func__, cfpkt_getlen(pkt)); |
89 | return -EOVERFLOW; | 89 | return -EOVERFLOW; |
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c index 06029ea2da2f..965c5baace40 100644 --- a/net/caif/cfserl.c +++ b/net/caif/cfserl.c | |||
@@ -59,14 +59,18 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt) | |||
59 | u8 stx = CFSERL_STX; | 59 | u8 stx = CFSERL_STX; |
60 | int ret; | 60 | int ret; |
61 | u16 expectlen = 0; | 61 | u16 expectlen = 0; |
62 | |||
62 | caif_assert(newpkt != NULL); | 63 | caif_assert(newpkt != NULL); |
63 | spin_lock(&layr->sync); | 64 | spin_lock(&layr->sync); |
64 | 65 | ||
65 | if (layr->incomplete_frm != NULL) { | 66 | if (layr->incomplete_frm != NULL) { |
66 | |||
67 | layr->incomplete_frm = | 67 | layr->incomplete_frm = |
68 | cfpkt_append(layr->incomplete_frm, newpkt, expectlen); | 68 | cfpkt_append(layr->incomplete_frm, newpkt, expectlen); |
69 | pkt = layr->incomplete_frm; | 69 | pkt = layr->incomplete_frm; |
70 | if (pkt == NULL) { | ||
71 | spin_unlock(&layr->sync); | ||
72 | return -ENOMEM; | ||
73 | } | ||
70 | } else { | 74 | } else { |
71 | pkt = newpkt; | 75 | pkt = newpkt; |
72 | } | 76 | } |
@@ -154,7 +158,6 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt) | |||
154 | if (layr->usestx) { | 158 | if (layr->usestx) { |
155 | if (tail_pkt != NULL) | 159 | if (tail_pkt != NULL) |
156 | pkt = cfpkt_append(pkt, tail_pkt, 0); | 160 | pkt = cfpkt_append(pkt, tail_pkt, 0); |
157 | |||
158 | /* Start search for next STX if frame failed */ | 161 | /* Start search for next STX if frame failed */ |
159 | continue; | 162 | continue; |
160 | } else { | 163 | } else { |
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c index aff31f34528f..6e5b7079a684 100644 --- a/net/caif/cfsrvl.c +++ b/net/caif/cfsrvl.c | |||
@@ -123,6 +123,12 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) | |||
123 | struct caif_payload_info *info; | 123 | struct caif_payload_info *info; |
124 | u8 flow_off = SRVL_FLOW_OFF; | 124 | u8 flow_off = SRVL_FLOW_OFF; |
125 | pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE); | 125 | pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE); |
126 | if (!pkt) { | ||
127 | pr_warning("CAIF: %s(): Out of memory\n", | ||
128 | __func__); | ||
129 | return -ENOMEM; | ||
130 | } | ||
131 | |||
126 | if (cfpkt_add_head(pkt, &flow_off, 1) < 0) { | 132 | if (cfpkt_add_head(pkt, &flow_off, 1) < 0) { |
127 | pr_err("CAIF: %s(): Packet is erroneous!\n", | 133 | pr_err("CAIF: %s(): Packet is erroneous!\n", |
128 | __func__); | 134 | __func__); |
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c index 0fd827f49491..e04f7d964e83 100644 --- a/net/caif/cfveil.c +++ b/net/caif/cfveil.c | |||
@@ -84,7 +84,7 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt) | |||
84 | return ret; | 84 | return ret; |
85 | caif_assert(layr->dn != NULL); | 85 | caif_assert(layr->dn != NULL); |
86 | caif_assert(layr->dn->transmit != NULL); | 86 | caif_assert(layr->dn->transmit != NULL); |
87 | if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { | 87 | if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { |
88 | pr_warning("CAIF: %s(): Packet too large - size=%d\n", | 88 | pr_warning("CAIF: %s(): Packet too large - size=%d\n", |
89 | __func__, cfpkt_getlen(pkt)); | 89 | __func__, cfpkt_getlen(pkt)); |
90 | return -EOVERFLOW; | 90 | return -EOVERFLOW; |
diff --git a/net/can/bcm.c b/net/can/bcm.c index 907dc871fac8..9c65e9deb9c3 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c | |||
@@ -713,8 +713,6 @@ static void bcm_remove_op(struct bcm_op *op) | |||
713 | kfree(op->last_frames); | 713 | kfree(op->last_frames); |
714 | 714 | ||
715 | kfree(op); | 715 | kfree(op); |
716 | |||
717 | return; | ||
718 | } | 716 | } |
719 | 717 | ||
720 | static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op) | 718 | static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op) |
diff --git a/net/core/datagram.c b/net/core/datagram.c index e0097531417a..f5b6f43a4c2e 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c | |||
@@ -229,15 +229,17 @@ EXPORT_SYMBOL(skb_free_datagram); | |||
229 | 229 | ||
230 | void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb) | 230 | void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb) |
231 | { | 231 | { |
232 | bool slow; | ||
233 | |||
232 | if (likely(atomic_read(&skb->users) == 1)) | 234 | if (likely(atomic_read(&skb->users) == 1)) |
233 | smp_rmb(); | 235 | smp_rmb(); |
234 | else if (likely(!atomic_dec_and_test(&skb->users))) | 236 | else if (likely(!atomic_dec_and_test(&skb->users))) |
235 | return; | 237 | return; |
236 | 238 | ||
237 | lock_sock_bh(sk); | 239 | slow = lock_sock_fast(sk); |
238 | skb_orphan(skb); | 240 | skb_orphan(skb); |
239 | sk_mem_reclaim_partial(sk); | 241 | sk_mem_reclaim_partial(sk); |
240 | unlock_sock_bh(sk); | 242 | unlock_sock_fast(sk, slow); |
241 | 243 | ||
242 | /* skb is now orphaned, can be freed outside of locked section */ | 244 | /* skb is now orphaned, can be freed outside of locked section */ |
243 | __kfree_skb(skb); | 245 | __kfree_skb(skb); |
diff --git a/net/core/dev.c b/net/core/dev.c index 32611c8f1219..2b3bf53bc687 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -954,18 +954,22 @@ int dev_alloc_name(struct net_device *dev, const char *name) | |||
954 | } | 954 | } |
955 | EXPORT_SYMBOL(dev_alloc_name); | 955 | EXPORT_SYMBOL(dev_alloc_name); |
956 | 956 | ||
957 | static int dev_get_valid_name(struct net *net, const char *name, char *buf, | 957 | static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt) |
958 | bool fmt) | ||
959 | { | 958 | { |
959 | struct net *net; | ||
960 | |||
961 | BUG_ON(!dev_net(dev)); | ||
962 | net = dev_net(dev); | ||
963 | |||
960 | if (!dev_valid_name(name)) | 964 | if (!dev_valid_name(name)) |
961 | return -EINVAL; | 965 | return -EINVAL; |
962 | 966 | ||
963 | if (fmt && strchr(name, '%')) | 967 | if (fmt && strchr(name, '%')) |
964 | return __dev_alloc_name(net, name, buf); | 968 | return dev_alloc_name(dev, name); |
965 | else if (__dev_get_by_name(net, name)) | 969 | else if (__dev_get_by_name(net, name)) |
966 | return -EEXIST; | 970 | return -EEXIST; |
967 | else if (buf != name) | 971 | else if (dev->name != name) |
968 | strlcpy(buf, name, IFNAMSIZ); | 972 | strlcpy(dev->name, name, IFNAMSIZ); |
969 | 973 | ||
970 | return 0; | 974 | return 0; |
971 | } | 975 | } |
@@ -997,20 +1001,15 @@ int dev_change_name(struct net_device *dev, const char *newname) | |||
997 | 1001 | ||
998 | memcpy(oldname, dev->name, IFNAMSIZ); | 1002 | memcpy(oldname, dev->name, IFNAMSIZ); |
999 | 1003 | ||
1000 | err = dev_get_valid_name(net, newname, dev->name, 1); | 1004 | err = dev_get_valid_name(dev, newname, 1); |
1001 | if (err < 0) | 1005 | if (err < 0) |
1002 | return err; | 1006 | return err; |
1003 | 1007 | ||
1004 | rollback: | 1008 | rollback: |
1005 | /* For now only devices in the initial network namespace | 1009 | ret = device_rename(&dev->dev, dev->name); |
1006 | * are in sysfs. | 1010 | if (ret) { |
1007 | */ | 1011 | memcpy(dev->name, oldname, IFNAMSIZ); |
1008 | if (net_eq(net, &init_net)) { | 1012 | return ret; |
1009 | ret = device_rename(&dev->dev, dev->name); | ||
1010 | if (ret) { | ||
1011 | memcpy(dev->name, oldname, IFNAMSIZ); | ||
1012 | return ret; | ||
1013 | } | ||
1014 | } | 1013 | } |
1015 | 1014 | ||
1016 | write_lock_bh(&dev_base_lock); | 1015 | write_lock_bh(&dev_base_lock); |
@@ -1454,7 +1453,7 @@ void net_disable_timestamp(void) | |||
1454 | } | 1453 | } |
1455 | EXPORT_SYMBOL(net_disable_timestamp); | 1454 | EXPORT_SYMBOL(net_disable_timestamp); |
1456 | 1455 | ||
1457 | static inline void net_timestamp(struct sk_buff *skb) | 1456 | static inline void net_timestamp_set(struct sk_buff *skb) |
1458 | { | 1457 | { |
1459 | if (atomic_read(&netstamp_needed)) | 1458 | if (atomic_read(&netstamp_needed)) |
1460 | __net_timestamp(skb); | 1459 | __net_timestamp(skb); |
@@ -1462,6 +1461,12 @@ static inline void net_timestamp(struct sk_buff *skb) | |||
1462 | skb->tstamp.tv64 = 0; | 1461 | skb->tstamp.tv64 = 0; |
1463 | } | 1462 | } |
1464 | 1463 | ||
1464 | static inline void net_timestamp_check(struct sk_buff *skb) | ||
1465 | { | ||
1466 | if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed)) | ||
1467 | __net_timestamp(skb); | ||
1468 | } | ||
1469 | |||
1465 | /** | 1470 | /** |
1466 | * dev_forward_skb - loopback an skb to another netif | 1471 | * dev_forward_skb - loopback an skb to another netif |
1467 | * | 1472 | * |
@@ -1470,7 +1475,7 @@ static inline void net_timestamp(struct sk_buff *skb) | |||
1470 | * | 1475 | * |
1471 | * return values: | 1476 | * return values: |
1472 | * NET_RX_SUCCESS (no congestion) | 1477 | * NET_RX_SUCCESS (no congestion) |
1473 | * NET_RX_DROP (packet was dropped) | 1478 | * NET_RX_DROP (packet was dropped, but freed) |
1474 | * | 1479 | * |
1475 | * dev_forward_skb can be used for injecting an skb from the | 1480 | * dev_forward_skb can be used for injecting an skb from the |
1476 | * start_xmit function of one device into the receive queue | 1481 | * start_xmit function of one device into the receive queue |
@@ -1484,12 +1489,11 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) | |||
1484 | { | 1489 | { |
1485 | skb_orphan(skb); | 1490 | skb_orphan(skb); |
1486 | 1491 | ||
1487 | if (!(dev->flags & IFF_UP)) | 1492 | if (!(dev->flags & IFF_UP) || |
1488 | return NET_RX_DROP; | 1493 | (skb->len > (dev->mtu + dev->hard_header_len))) { |
1489 | 1494 | kfree_skb(skb); | |
1490 | if (skb->len > (dev->mtu + dev->hard_header_len)) | ||
1491 | return NET_RX_DROP; | 1495 | return NET_RX_DROP; |
1492 | 1496 | } | |
1493 | skb_set_dev(skb, dev); | 1497 | skb_set_dev(skb, dev); |
1494 | skb->tstamp.tv64 = 0; | 1498 | skb->tstamp.tv64 = 0; |
1495 | skb->pkt_type = PACKET_HOST; | 1499 | skb->pkt_type = PACKET_HOST; |
@@ -1509,9 +1513,9 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) | |||
1509 | 1513 | ||
1510 | #ifdef CONFIG_NET_CLS_ACT | 1514 | #ifdef CONFIG_NET_CLS_ACT |
1511 | if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS))) | 1515 | if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS))) |
1512 | net_timestamp(skb); | 1516 | net_timestamp_set(skb); |
1513 | #else | 1517 | #else |
1514 | net_timestamp(skb); | 1518 | net_timestamp_set(skb); |
1515 | #endif | 1519 | #endif |
1516 | 1520 | ||
1517 | rcu_read_lock(); | 1521 | rcu_read_lock(); |
@@ -2047,6 +2051,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, | |||
2047 | * waiting to be sent out; and the qdisc is not running - | 2051 | * waiting to be sent out; and the qdisc is not running - |
2048 | * xmit the skb directly. | 2052 | * xmit the skb directly. |
2049 | */ | 2053 | */ |
2054 | if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) | ||
2055 | skb_dst_force(skb); | ||
2050 | __qdisc_update_bstats(q, skb->len); | 2056 | __qdisc_update_bstats(q, skb->len); |
2051 | if (sch_direct_xmit(skb, q, dev, txq, root_lock)) | 2057 | if (sch_direct_xmit(skb, q, dev, txq, root_lock)) |
2052 | __qdisc_run(q); | 2058 | __qdisc_run(q); |
@@ -2055,6 +2061,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, | |||
2055 | 2061 | ||
2056 | rc = NET_XMIT_SUCCESS; | 2062 | rc = NET_XMIT_SUCCESS; |
2057 | } else { | 2063 | } else { |
2064 | skb_dst_force(skb); | ||
2058 | rc = qdisc_enqueue_root(skb, q); | 2065 | rc = qdisc_enqueue_root(skb, q); |
2059 | qdisc_run(q); | 2066 | qdisc_run(q); |
2060 | } | 2067 | } |
@@ -2202,6 +2209,7 @@ EXPORT_SYMBOL(dev_queue_xmit); | |||
2202 | =======================================================================*/ | 2209 | =======================================================================*/ |
2203 | 2210 | ||
2204 | int netdev_max_backlog __read_mostly = 1000; | 2211 | int netdev_max_backlog __read_mostly = 1000; |
2212 | int netdev_tstamp_prequeue __read_mostly = 1; | ||
2205 | int netdev_budget __read_mostly = 300; | 2213 | int netdev_budget __read_mostly = 300; |
2206 | int weight_p __read_mostly = 64; /* old backlog weight */ | 2214 | int weight_p __read_mostly = 64; /* old backlog weight */ |
2207 | 2215 | ||
@@ -2245,11 +2253,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, | |||
2245 | if (skb_rx_queue_recorded(skb)) { | 2253 | if (skb_rx_queue_recorded(skb)) { |
2246 | u16 index = skb_get_rx_queue(skb); | 2254 | u16 index = skb_get_rx_queue(skb); |
2247 | if (unlikely(index >= dev->num_rx_queues)) { | 2255 | if (unlikely(index >= dev->num_rx_queues)) { |
2248 | if (net_ratelimit()) { | 2256 | WARN_ONCE(dev->num_rx_queues > 1, "%s received packet " |
2249 | pr_warning("%s received packet on queue " | 2257 | "on queue %u, but number of RX queues is %u\n", |
2250 | "%u, but number of RX queues is %u\n", | 2258 | dev->name, index, dev->num_rx_queues); |
2251 | dev->name, index, dev->num_rx_queues); | ||
2252 | } | ||
2253 | goto done; | 2259 | goto done; |
2254 | } | 2260 | } |
2255 | rxqueue = dev->_rx + index; | 2261 | rxqueue = dev->_rx + index; |
@@ -2417,17 +2423,16 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, | |||
2417 | if (skb_queue_len(&sd->input_pkt_queue)) { | 2423 | if (skb_queue_len(&sd->input_pkt_queue)) { |
2418 | enqueue: | 2424 | enqueue: |
2419 | __skb_queue_tail(&sd->input_pkt_queue, skb); | 2425 | __skb_queue_tail(&sd->input_pkt_queue, skb); |
2420 | #ifdef CONFIG_RPS | 2426 | input_queue_tail_incr_save(sd, qtail); |
2421 | *qtail = sd->input_queue_head + | ||
2422 | skb_queue_len(&sd->input_pkt_queue); | ||
2423 | #endif | ||
2424 | rps_unlock(sd); | 2427 | rps_unlock(sd); |
2425 | local_irq_restore(flags); | 2428 | local_irq_restore(flags); |
2426 | return NET_RX_SUCCESS; | 2429 | return NET_RX_SUCCESS; |
2427 | } | 2430 | } |
2428 | 2431 | ||
2429 | /* Schedule NAPI for backlog device */ | 2432 | /* Schedule NAPI for backlog device |
2430 | if (napi_schedule_prep(&sd->backlog)) { | 2433 | * We can use non atomic operation since we own the queue lock |
2434 | */ | ||
2435 | if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) { | ||
2431 | if (!rps_ipi_queued(sd)) | 2436 | if (!rps_ipi_queued(sd)) |
2432 | ____napi_schedule(sd, &sd->backlog); | 2437 | ____napi_schedule(sd, &sd->backlog); |
2433 | } | 2438 | } |
@@ -2466,8 +2471,8 @@ int netif_rx(struct sk_buff *skb) | |||
2466 | if (netpoll_rx(skb)) | 2471 | if (netpoll_rx(skb)) |
2467 | return NET_RX_DROP; | 2472 | return NET_RX_DROP; |
2468 | 2473 | ||
2469 | if (!skb->tstamp.tv64) | 2474 | if (netdev_tstamp_prequeue) |
2470 | net_timestamp(skb); | 2475 | net_timestamp_check(skb); |
2471 | 2476 | ||
2472 | #ifdef CONFIG_RPS | 2477 | #ifdef CONFIG_RPS |
2473 | { | 2478 | { |
@@ -2613,7 +2618,8 @@ static inline struct sk_buff *handle_bridge(struct sk_buff *skb, | |||
2613 | #endif | 2618 | #endif |
2614 | 2619 | ||
2615 | #if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE) | 2620 | #if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE) |
2616 | struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly; | 2621 | struct sk_buff *(*macvlan_handle_frame_hook)(struct macvlan_port *p, |
2622 | struct sk_buff *skb) __read_mostly; | ||
2617 | EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook); | 2623 | EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook); |
2618 | 2624 | ||
2619 | static inline struct sk_buff *handle_macvlan(struct sk_buff *skb, | 2625 | static inline struct sk_buff *handle_macvlan(struct sk_buff *skb, |
@@ -2621,14 +2627,17 @@ static inline struct sk_buff *handle_macvlan(struct sk_buff *skb, | |||
2621 | int *ret, | 2627 | int *ret, |
2622 | struct net_device *orig_dev) | 2628 | struct net_device *orig_dev) |
2623 | { | 2629 | { |
2624 | if (skb->dev->macvlan_port == NULL) | 2630 | struct macvlan_port *port; |
2631 | |||
2632 | port = rcu_dereference(skb->dev->macvlan_port); | ||
2633 | if (!port) | ||
2625 | return skb; | 2634 | return skb; |
2626 | 2635 | ||
2627 | if (*pt_prev) { | 2636 | if (*pt_prev) { |
2628 | *ret = deliver_skb(skb, *pt_prev, orig_dev); | 2637 | *ret = deliver_skb(skb, *pt_prev, orig_dev); |
2629 | *pt_prev = NULL; | 2638 | *pt_prev = NULL; |
2630 | } | 2639 | } |
2631 | return macvlan_handle_frame_hook(skb); | 2640 | return macvlan_handle_frame_hook(port, skb); |
2632 | } | 2641 | } |
2633 | #else | 2642 | #else |
2634 | #define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb) | 2643 | #define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb) |
@@ -2784,12 +2793,12 @@ static int __netif_receive_skb(struct sk_buff *skb) | |||
2784 | struct net_device *orig_dev; | 2793 | struct net_device *orig_dev; |
2785 | struct net_device *master; | 2794 | struct net_device *master; |
2786 | struct net_device *null_or_orig; | 2795 | struct net_device *null_or_orig; |
2787 | struct net_device *null_or_bond; | 2796 | struct net_device *orig_or_bond; |
2788 | int ret = NET_RX_DROP; | 2797 | int ret = NET_RX_DROP; |
2789 | __be16 type; | 2798 | __be16 type; |
2790 | 2799 | ||
2791 | if (!skb->tstamp.tv64) | 2800 | if (!netdev_tstamp_prequeue) |
2792 | net_timestamp(skb); | 2801 | net_timestamp_check(skb); |
2793 | 2802 | ||
2794 | if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb)) | 2803 | if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb)) |
2795 | return NET_RX_SUCCESS; | 2804 | return NET_RX_SUCCESS; |
@@ -2801,13 +2810,24 @@ static int __netif_receive_skb(struct sk_buff *skb) | |||
2801 | if (!skb->skb_iif) | 2810 | if (!skb->skb_iif) |
2802 | skb->skb_iif = skb->dev->ifindex; | 2811 | skb->skb_iif = skb->dev->ifindex; |
2803 | 2812 | ||
2813 | /* | ||
2814 | * bonding note: skbs received on inactive slaves should only | ||
2815 | * be delivered to pkt handlers that are exact matches. Also | ||
2816 | * the deliver_no_wcard flag will be set. If packet handlers | ||
2817 | * are sensitive to duplicate packets these skbs will need to | ||
2818 | * be dropped at the handler. The vlan accel path may have | ||
2819 | * already set the deliver_no_wcard flag. | ||
2820 | */ | ||
2804 | null_or_orig = NULL; | 2821 | null_or_orig = NULL; |
2805 | orig_dev = skb->dev; | 2822 | orig_dev = skb->dev; |
2806 | master = ACCESS_ONCE(orig_dev->master); | 2823 | master = ACCESS_ONCE(orig_dev->master); |
2807 | if (master) { | 2824 | if (skb->deliver_no_wcard) |
2808 | if (skb_bond_should_drop(skb, master)) | 2825 | null_or_orig = orig_dev; |
2826 | else if (master) { | ||
2827 | if (skb_bond_should_drop(skb, master)) { | ||
2828 | skb->deliver_no_wcard = 1; | ||
2809 | null_or_orig = orig_dev; /* deliver only exact match */ | 2829 | null_or_orig = orig_dev; /* deliver only exact match */ |
2810 | else | 2830 | } else |
2811 | skb->dev = master; | 2831 | skb->dev = master; |
2812 | } | 2832 | } |
2813 | 2833 | ||
@@ -2857,10 +2877,10 @@ ncls: | |||
2857 | * device that may have registered for a specific ptype. The | 2877 | * device that may have registered for a specific ptype. The |
2858 | * handler may have to adjust skb->dev and orig_dev. | 2878 | * handler may have to adjust skb->dev and orig_dev. |
2859 | */ | 2879 | */ |
2860 | null_or_bond = NULL; | 2880 | orig_or_bond = orig_dev; |
2861 | if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) && | 2881 | if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) && |
2862 | (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) { | 2882 | (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) { |
2863 | null_or_bond = vlan_dev_real_dev(skb->dev); | 2883 | orig_or_bond = vlan_dev_real_dev(skb->dev); |
2864 | } | 2884 | } |
2865 | 2885 | ||
2866 | type = skb->protocol; | 2886 | type = skb->protocol; |
@@ -2868,7 +2888,7 @@ ncls: | |||
2868 | &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { | 2888 | &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { |
2869 | if (ptype->type == type && (ptype->dev == null_or_orig || | 2889 | if (ptype->type == type && (ptype->dev == null_or_orig || |
2870 | ptype->dev == skb->dev || ptype->dev == orig_dev || | 2890 | ptype->dev == skb->dev || ptype->dev == orig_dev || |
2871 | ptype->dev == null_or_bond)) { | 2891 | ptype->dev == orig_or_bond)) { |
2872 | if (pt_prev) | 2892 | if (pt_prev) |
2873 | ret = deliver_skb(skb, pt_prev, orig_dev); | 2893 | ret = deliver_skb(skb, pt_prev, orig_dev); |
2874 | pt_prev = ptype; | 2894 | pt_prev = ptype; |
@@ -2907,23 +2927,28 @@ out: | |||
2907 | */ | 2927 | */ |
2908 | int netif_receive_skb(struct sk_buff *skb) | 2928 | int netif_receive_skb(struct sk_buff *skb) |
2909 | { | 2929 | { |
2930 | if (netdev_tstamp_prequeue) | ||
2931 | net_timestamp_check(skb); | ||
2932 | |||
2910 | #ifdef CONFIG_RPS | 2933 | #ifdef CONFIG_RPS |
2911 | struct rps_dev_flow voidflow, *rflow = &voidflow; | 2934 | { |
2912 | int cpu, ret; | 2935 | struct rps_dev_flow voidflow, *rflow = &voidflow; |
2936 | int cpu, ret; | ||
2913 | 2937 | ||
2914 | rcu_read_lock(); | 2938 | rcu_read_lock(); |
2915 | 2939 | ||
2916 | cpu = get_rps_cpu(skb->dev, skb, &rflow); | 2940 | cpu = get_rps_cpu(skb->dev, skb, &rflow); |
2917 | 2941 | ||
2918 | if (cpu >= 0) { | 2942 | if (cpu >= 0) { |
2919 | ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); | 2943 | ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); |
2920 | rcu_read_unlock(); | 2944 | rcu_read_unlock(); |
2921 | } else { | 2945 | } else { |
2922 | rcu_read_unlock(); | 2946 | rcu_read_unlock(); |
2923 | ret = __netif_receive_skb(skb); | 2947 | ret = __netif_receive_skb(skb); |
2924 | } | 2948 | } |
2925 | 2949 | ||
2926 | return ret; | 2950 | return ret; |
2951 | } | ||
2927 | #else | 2952 | #else |
2928 | return __netif_receive_skb(skb); | 2953 | return __netif_receive_skb(skb); |
2929 | #endif | 2954 | #endif |
@@ -2944,7 +2969,7 @@ static void flush_backlog(void *arg) | |||
2944 | if (skb->dev == dev) { | 2969 | if (skb->dev == dev) { |
2945 | __skb_unlink(skb, &sd->input_pkt_queue); | 2970 | __skb_unlink(skb, &sd->input_pkt_queue); |
2946 | kfree_skb(skb); | 2971 | kfree_skb(skb); |
2947 | input_queue_head_add(sd, 1); | 2972 | input_queue_head_incr(sd); |
2948 | } | 2973 | } |
2949 | } | 2974 | } |
2950 | rps_unlock(sd); | 2975 | rps_unlock(sd); |
@@ -2953,6 +2978,7 @@ static void flush_backlog(void *arg) | |||
2953 | if (skb->dev == dev) { | 2978 | if (skb->dev == dev) { |
2954 | __skb_unlink(skb, &sd->process_queue); | 2979 | __skb_unlink(skb, &sd->process_queue); |
2955 | kfree_skb(skb); | 2980 | kfree_skb(skb); |
2981 | input_queue_head_incr(sd); | ||
2956 | } | 2982 | } |
2957 | } | 2983 | } |
2958 | } | 2984 | } |
@@ -3308,18 +3334,20 @@ static int process_backlog(struct napi_struct *napi, int quota) | |||
3308 | while ((skb = __skb_dequeue(&sd->process_queue))) { | 3334 | while ((skb = __skb_dequeue(&sd->process_queue))) { |
3309 | local_irq_enable(); | 3335 | local_irq_enable(); |
3310 | __netif_receive_skb(skb); | 3336 | __netif_receive_skb(skb); |
3311 | if (++work >= quota) | ||
3312 | return work; | ||
3313 | local_irq_disable(); | 3337 | local_irq_disable(); |
3338 | input_queue_head_incr(sd); | ||
3339 | if (++work >= quota) { | ||
3340 | local_irq_enable(); | ||
3341 | return work; | ||
3342 | } | ||
3314 | } | 3343 | } |
3315 | 3344 | ||
3316 | rps_lock(sd); | 3345 | rps_lock(sd); |
3317 | qlen = skb_queue_len(&sd->input_pkt_queue); | 3346 | qlen = skb_queue_len(&sd->input_pkt_queue); |
3318 | if (qlen) { | 3347 | if (qlen) |
3319 | input_queue_head_add(sd, qlen); | ||
3320 | skb_queue_splice_tail_init(&sd->input_pkt_queue, | 3348 | skb_queue_splice_tail_init(&sd->input_pkt_queue, |
3321 | &sd->process_queue); | 3349 | &sd->process_queue); |
3322 | } | 3350 | |
3323 | if (qlen < quota - work) { | 3351 | if (qlen < quota - work) { |
3324 | /* | 3352 | /* |
3325 | * Inline a custom version of __napi_complete(). | 3353 | * Inline a custom version of __napi_complete(). |
@@ -4945,7 +4973,7 @@ int register_netdevice(struct net_device *dev) | |||
4945 | } | 4973 | } |
4946 | } | 4974 | } |
4947 | 4975 | ||
4948 | ret = dev_get_valid_name(net, dev->name, dev->name, 0); | 4976 | ret = dev_get_valid_name(dev, dev->name, 0); |
4949 | if (ret) | 4977 | if (ret) |
4950 | goto err_uninit; | 4978 | goto err_uninit; |
4951 | 4979 | ||
@@ -4974,8 +5002,6 @@ int register_netdevice(struct net_device *dev) | |||
4974 | if (dev->features & NETIF_F_SG) | 5002 | if (dev->features & NETIF_F_SG) |
4975 | dev->features |= NETIF_F_GSO; | 5003 | dev->features |= NETIF_F_GSO; |
4976 | 5004 | ||
4977 | netdev_initialize_kobject(dev); | ||
4978 | |||
4979 | ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); | 5005 | ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); |
4980 | ret = notifier_to_errno(ret); | 5006 | ret = notifier_to_errno(ret); |
4981 | if (ret) | 5007 | if (ret) |
@@ -5527,15 +5553,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char | |||
5527 | if (dev->features & NETIF_F_NETNS_LOCAL) | 5553 | if (dev->features & NETIF_F_NETNS_LOCAL) |
5528 | goto out; | 5554 | goto out; |
5529 | 5555 | ||
5530 | #ifdef CONFIG_SYSFS | ||
5531 | /* Don't allow real devices to be moved when sysfs | ||
5532 | * is enabled. | ||
5533 | */ | ||
5534 | err = -EINVAL; | ||
5535 | if (dev->dev.parent) | ||
5536 | goto out; | ||
5537 | #endif | ||
5538 | |||
5539 | /* Ensure the device has been registrered */ | 5556 | /* Ensure the device has been registrered */ |
5540 | err = -EINVAL; | 5557 | err = -EINVAL; |
5541 | if (dev->reg_state != NETREG_REGISTERED) | 5558 | if (dev->reg_state != NETREG_REGISTERED) |
@@ -5554,7 +5571,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char | |||
5554 | /* We get here if we can't use the current device name */ | 5571 | /* We get here if we can't use the current device name */ |
5555 | if (!pat) | 5572 | if (!pat) |
5556 | goto out; | 5573 | goto out; |
5557 | if (dev_get_valid_name(net, pat, dev->name, 1)) | 5574 | if (dev_get_valid_name(dev, pat, 1)) |
5558 | goto out; | 5575 | goto out; |
5559 | } | 5576 | } |
5560 | 5577 | ||
@@ -5586,8 +5603,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char | |||
5586 | dev_uc_flush(dev); | 5603 | dev_uc_flush(dev); |
5587 | dev_mc_flush(dev); | 5604 | dev_mc_flush(dev); |
5588 | 5605 | ||
5589 | netdev_unregister_kobject(dev); | ||
5590 | |||
5591 | /* Actually switch the network namespace */ | 5606 | /* Actually switch the network namespace */ |
5592 | dev_net_set(dev, net); | 5607 | dev_net_set(dev, net); |
5593 | 5608 | ||
@@ -5600,7 +5615,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char | |||
5600 | } | 5615 | } |
5601 | 5616 | ||
5602 | /* Fixup kobjects */ | 5617 | /* Fixup kobjects */ |
5603 | err = netdev_register_kobject(dev); | 5618 | err = device_rename(&dev->dev, dev->name); |
5604 | WARN_ON(err); | 5619 | WARN_ON(err); |
5605 | 5620 | ||
5606 | /* Add the device back in the hashes */ | 5621 | /* Add the device back in the hashes */ |
@@ -5659,12 +5674,14 @@ static int dev_cpu_callback(struct notifier_block *nfb, | |||
5659 | local_irq_enable(); | 5674 | local_irq_enable(); |
5660 | 5675 | ||
5661 | /* Process offline CPU's input_pkt_queue */ | 5676 | /* Process offline CPU's input_pkt_queue */ |
5662 | while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { | 5677 | while ((skb = __skb_dequeue(&oldsd->process_queue))) { |
5663 | netif_rx(skb); | 5678 | netif_rx(skb); |
5664 | input_queue_head_add(oldsd, 1); | 5679 | input_queue_head_incr(oldsd); |
5665 | } | 5680 | } |
5666 | while ((skb = __skb_dequeue(&oldsd->process_queue))) | 5681 | while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { |
5667 | netif_rx(skb); | 5682 | netif_rx(skb); |
5683 | input_queue_head_incr(oldsd); | ||
5684 | } | ||
5668 | 5685 | ||
5669 | return NOTIFY_OK; | 5686 | return NOTIFY_OK; |
5670 | } | 5687 | } |
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index cf208d8042b1..ad41529fb60f 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c | |||
@@ -172,12 +172,12 @@ out: | |||
172 | return; | 172 | return; |
173 | } | 173 | } |
174 | 174 | ||
175 | static void trace_kfree_skb_hit(struct sk_buff *skb, void *location) | 175 | static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location) |
176 | { | 176 | { |
177 | trace_drop_common(skb, location); | 177 | trace_drop_common(skb, location); |
178 | } | 178 | } |
179 | 179 | ||
180 | static void trace_napi_poll_hit(struct napi_struct *napi) | 180 | static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi) |
181 | { | 181 | { |
182 | struct dm_hw_stat_delta *new_stat; | 182 | struct dm_hw_stat_delta *new_stat; |
183 | 183 | ||
@@ -225,12 +225,12 @@ static int set_all_monitor_traces(int state) | |||
225 | 225 | ||
226 | switch (state) { | 226 | switch (state) { |
227 | case TRACE_ON: | 227 | case TRACE_ON: |
228 | rc |= register_trace_kfree_skb(trace_kfree_skb_hit); | 228 | rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL); |
229 | rc |= register_trace_napi_poll(trace_napi_poll_hit); | 229 | rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL); |
230 | break; | 230 | break; |
231 | case TRACE_OFF: | 231 | case TRACE_OFF: |
232 | rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit); | 232 | rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL); |
233 | rc |= unregister_trace_napi_poll(trace_napi_poll_hit); | 233 | rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL); |
234 | 234 | ||
235 | tracepoint_synchronize_unregister(); | 235 | tracepoint_synchronize_unregister(); |
236 | 236 | ||
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 1a7db92037fa..a0f4964033d2 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -522,7 +522,7 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr) | |||
522 | p += ETH_GSTRING_LEN; | 522 | p += ETH_GSTRING_LEN; |
523 | num_strings++; | 523 | num_strings++; |
524 | goto unknown_filter; | 524 | goto unknown_filter; |
525 | }; | 525 | } |
526 | 526 | ||
527 | /* now the rest of the filters */ | 527 | /* now the rest of the filters */ |
528 | switch (fsc->fs.flow_type) { | 528 | switch (fsc->fs.flow_type) { |
@@ -646,7 +646,7 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr) | |||
646 | p += ETH_GSTRING_LEN; | 646 | p += ETH_GSTRING_LEN; |
647 | num_strings++; | 647 | num_strings++; |
648 | break; | 648 | break; |
649 | }; | 649 | } |
650 | sprintf(p, "\tVLAN: %d, mask: 0x%x\n", | 650 | sprintf(p, "\tVLAN: %d, mask: 0x%x\n", |
651 | fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask); | 651 | fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask); |
652 | p += ETH_GSTRING_LEN; | 652 | p += ETH_GSTRING_LEN; |
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index cf8e70392fe0..785e5276a300 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c | |||
@@ -107,6 +107,7 @@ static DEFINE_RWLOCK(est_lock); | |||
107 | 107 | ||
108 | /* Protects against soft lockup during large deletion */ | 108 | /* Protects against soft lockup during large deletion */ |
109 | static struct rb_root est_root = RB_ROOT; | 109 | static struct rb_root est_root = RB_ROOT; |
110 | static DEFINE_SPINLOCK(est_tree_lock); | ||
110 | 111 | ||
111 | static void est_timer(unsigned long arg) | 112 | static void est_timer(unsigned long arg) |
112 | { | 113 | { |
@@ -201,7 +202,6 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats | |||
201 | * | 202 | * |
202 | * Returns 0 on success or a negative error code. | 203 | * Returns 0 on success or a negative error code. |
203 | * | 204 | * |
204 | * NOTE: Called under rtnl_mutex | ||
205 | */ | 205 | */ |
206 | int gen_new_estimator(struct gnet_stats_basic_packed *bstats, | 206 | int gen_new_estimator(struct gnet_stats_basic_packed *bstats, |
207 | struct gnet_stats_rate_est *rate_est, | 207 | struct gnet_stats_rate_est *rate_est, |
@@ -232,6 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, | |||
232 | est->last_packets = bstats->packets; | 232 | est->last_packets = bstats->packets; |
233 | est->avpps = rate_est->pps<<10; | 233 | est->avpps = rate_est->pps<<10; |
234 | 234 | ||
235 | spin_lock(&est_tree_lock); | ||
235 | if (!elist[idx].timer.function) { | 236 | if (!elist[idx].timer.function) { |
236 | INIT_LIST_HEAD(&elist[idx].list); | 237 | INIT_LIST_HEAD(&elist[idx].list); |
237 | setup_timer(&elist[idx].timer, est_timer, idx); | 238 | setup_timer(&elist[idx].timer, est_timer, idx); |
@@ -242,6 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, | |||
242 | 243 | ||
243 | list_add_rcu(&est->list, &elist[idx].list); | 244 | list_add_rcu(&est->list, &elist[idx].list); |
244 | gen_add_node(est); | 245 | gen_add_node(est); |
246 | spin_unlock(&est_tree_lock); | ||
245 | 247 | ||
246 | return 0; | 248 | return 0; |
247 | } | 249 | } |
@@ -261,13 +263,13 @@ static void __gen_kill_estimator(struct rcu_head *head) | |||
261 | * | 263 | * |
262 | * Removes the rate estimator specified by &bstats and &rate_est. | 264 | * Removes the rate estimator specified by &bstats and &rate_est. |
263 | * | 265 | * |
264 | * NOTE: Called under rtnl_mutex | ||
265 | */ | 266 | */ |
266 | void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, | 267 | void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, |
267 | struct gnet_stats_rate_est *rate_est) | 268 | struct gnet_stats_rate_est *rate_est) |
268 | { | 269 | { |
269 | struct gen_estimator *e; | 270 | struct gen_estimator *e; |
270 | 271 | ||
272 | spin_lock(&est_tree_lock); | ||
271 | while ((e = gen_find_node(bstats, rate_est))) { | 273 | while ((e = gen_find_node(bstats, rate_est))) { |
272 | rb_erase(&e->node, &est_root); | 274 | rb_erase(&e->node, &est_root); |
273 | 275 | ||
@@ -278,6 +280,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, | |||
278 | list_del_rcu(&e->list); | 280 | list_del_rcu(&e->list); |
279 | call_rcu(&e->e_rcu, __gen_kill_estimator); | 281 | call_rcu(&e->e_rcu, __gen_kill_estimator); |
280 | } | 282 | } |
283 | spin_unlock(&est_tree_lock); | ||
281 | } | 284 | } |
282 | EXPORT_SYMBOL(gen_kill_estimator); | 285 | EXPORT_SYMBOL(gen_kill_estimator); |
283 | 286 | ||
@@ -312,8 +315,14 @@ EXPORT_SYMBOL(gen_replace_estimator); | |||
312 | bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats, | 315 | bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats, |
313 | const struct gnet_stats_rate_est *rate_est) | 316 | const struct gnet_stats_rate_est *rate_est) |
314 | { | 317 | { |
318 | bool res; | ||
319 | |||
315 | ASSERT_RTNL(); | 320 | ASSERT_RTNL(); |
316 | 321 | ||
317 | return gen_find_node(bstats, rate_est) != NULL; | 322 | spin_lock(&est_tree_lock); |
323 | res = gen_find_node(bstats, rate_est) != NULL; | ||
324 | spin_unlock(&est_tree_lock); | ||
325 | |||
326 | return res; | ||
318 | } | 327 | } |
319 | EXPORT_SYMBOL(gen_estimator_active); | 328 | EXPORT_SYMBOL(gen_estimator_active); |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index bff37908bd55..6ba1c0eece03 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -934,6 +934,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) | |||
934 | kfree_skb(buff); | 934 | kfree_skb(buff); |
935 | NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards); | 935 | NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards); |
936 | } | 936 | } |
937 | skb_dst_force(skb); | ||
937 | __skb_queue_tail(&neigh->arp_queue, skb); | 938 | __skb_queue_tail(&neigh->arp_queue, skb); |
938 | } | 939 | } |
939 | rc = 1; | 940 | rc = 1; |
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index c57c4b228bb5..99e7052d7323 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -14,7 +14,9 @@ | |||
14 | #include <linux/netdevice.h> | 14 | #include <linux/netdevice.h> |
15 | #include <linux/if_arp.h> | 15 | #include <linux/if_arp.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/nsproxy.h> | ||
17 | #include <net/sock.h> | 18 | #include <net/sock.h> |
19 | #include <net/net_namespace.h> | ||
18 | #include <linux/rtnetlink.h> | 20 | #include <linux/rtnetlink.h> |
19 | #include <linux/wireless.h> | 21 | #include <linux/wireless.h> |
20 | #include <linux/vmalloc.h> | 22 | #include <linux/vmalloc.h> |
@@ -467,6 +469,7 @@ static struct attribute_group wireless_group = { | |||
467 | .attrs = wireless_attrs, | 469 | .attrs = wireless_attrs, |
468 | }; | 470 | }; |
469 | #endif | 471 | #endif |
472 | #endif /* CONFIG_SYSFS */ | ||
470 | 473 | ||
471 | #ifdef CONFIG_RPS | 474 | #ifdef CONFIG_RPS |
472 | /* | 475 | /* |
@@ -766,7 +769,38 @@ static void rx_queue_remove_kobjects(struct net_device *net) | |||
766 | kset_unregister(net->queues_kset); | 769 | kset_unregister(net->queues_kset); |
767 | } | 770 | } |
768 | #endif /* CONFIG_RPS */ | 771 | #endif /* CONFIG_RPS */ |
769 | #endif /* CONFIG_SYSFS */ | 772 | |
773 | static const void *net_current_ns(void) | ||
774 | { | ||
775 | return current->nsproxy->net_ns; | ||
776 | } | ||
777 | |||
778 | static const void *net_initial_ns(void) | ||
779 | { | ||
780 | return &init_net; | ||
781 | } | ||
782 | |||
783 | static const void *net_netlink_ns(struct sock *sk) | ||
784 | { | ||
785 | return sock_net(sk); | ||
786 | } | ||
787 | |||
788 | static struct kobj_ns_type_operations net_ns_type_operations = { | ||
789 | .type = KOBJ_NS_TYPE_NET, | ||
790 | .current_ns = net_current_ns, | ||
791 | .netlink_ns = net_netlink_ns, | ||
792 | .initial_ns = net_initial_ns, | ||
793 | }; | ||
794 | |||
795 | static void net_kobj_ns_exit(struct net *net) | ||
796 | { | ||
797 | kobj_ns_exit(KOBJ_NS_TYPE_NET, net); | ||
798 | } | ||
799 | |||
800 | static struct pernet_operations kobj_net_ops = { | ||
801 | .exit = net_kobj_ns_exit, | ||
802 | }; | ||
803 | |||
770 | 804 | ||
771 | #ifdef CONFIG_HOTPLUG | 805 | #ifdef CONFIG_HOTPLUG |
772 | static int netdev_uevent(struct device *d, struct kobj_uevent_env *env) | 806 | static int netdev_uevent(struct device *d, struct kobj_uevent_env *env) |
@@ -774,9 +808,6 @@ static int netdev_uevent(struct device *d, struct kobj_uevent_env *env) | |||
774 | struct net_device *dev = to_net_dev(d); | 808 | struct net_device *dev = to_net_dev(d); |
775 | int retval; | 809 | int retval; |
776 | 810 | ||
777 | if (!net_eq(dev_net(dev), &init_net)) | ||
778 | return 0; | ||
779 | |||
780 | /* pass interface to uevent. */ | 811 | /* pass interface to uevent. */ |
781 | retval = add_uevent_var(env, "INTERFACE=%s", dev->name); | 812 | retval = add_uevent_var(env, "INTERFACE=%s", dev->name); |
782 | if (retval) | 813 | if (retval) |
@@ -806,6 +837,13 @@ static void netdev_release(struct device *d) | |||
806 | kfree((char *)dev - dev->padded); | 837 | kfree((char *)dev - dev->padded); |
807 | } | 838 | } |
808 | 839 | ||
840 | static const void *net_namespace(struct device *d) | ||
841 | { | ||
842 | struct net_device *dev; | ||
843 | dev = container_of(d, struct net_device, dev); | ||
844 | return dev_net(dev); | ||
845 | } | ||
846 | |||
809 | static struct class net_class = { | 847 | static struct class net_class = { |
810 | .name = "net", | 848 | .name = "net", |
811 | .dev_release = netdev_release, | 849 | .dev_release = netdev_release, |
@@ -815,6 +853,8 @@ static struct class net_class = { | |||
815 | #ifdef CONFIG_HOTPLUG | 853 | #ifdef CONFIG_HOTPLUG |
816 | .dev_uevent = netdev_uevent, | 854 | .dev_uevent = netdev_uevent, |
817 | #endif | 855 | #endif |
856 | .ns_type = &net_ns_type_operations, | ||
857 | .namespace = net_namespace, | ||
818 | }; | 858 | }; |
819 | 859 | ||
820 | /* Delete sysfs entries but hold kobject reference until after all | 860 | /* Delete sysfs entries but hold kobject reference until after all |
@@ -826,9 +866,6 @@ void netdev_unregister_kobject(struct net_device * net) | |||
826 | 866 | ||
827 | kobject_get(&dev->kobj); | 867 | kobject_get(&dev->kobj); |
828 | 868 | ||
829 | if (!net_eq(dev_net(net), &init_net)) | ||
830 | return; | ||
831 | |||
832 | #ifdef CONFIG_RPS | 869 | #ifdef CONFIG_RPS |
833 | rx_queue_remove_kobjects(net); | 870 | rx_queue_remove_kobjects(net); |
834 | #endif | 871 | #endif |
@@ -843,6 +880,7 @@ int netdev_register_kobject(struct net_device *net) | |||
843 | const struct attribute_group **groups = net->sysfs_groups; | 880 | const struct attribute_group **groups = net->sysfs_groups; |
844 | int error = 0; | 881 | int error = 0; |
845 | 882 | ||
883 | device_initialize(dev); | ||
846 | dev->class = &net_class; | 884 | dev->class = &net_class; |
847 | dev->platform_data = net; | 885 | dev->platform_data = net; |
848 | dev->groups = groups; | 886 | dev->groups = groups; |
@@ -865,9 +903,6 @@ int netdev_register_kobject(struct net_device *net) | |||
865 | #endif | 903 | #endif |
866 | #endif /* CONFIG_SYSFS */ | 904 | #endif /* CONFIG_SYSFS */ |
867 | 905 | ||
868 | if (!net_eq(dev_net(net), &init_net)) | ||
869 | return 0; | ||
870 | |||
871 | error = device_add(dev); | 906 | error = device_add(dev); |
872 | if (error) | 907 | if (error) |
873 | return error; | 908 | return error; |
@@ -896,13 +931,9 @@ void netdev_class_remove_file(struct class_attribute *class_attr) | |||
896 | EXPORT_SYMBOL(netdev_class_create_file); | 931 | EXPORT_SYMBOL(netdev_class_create_file); |
897 | EXPORT_SYMBOL(netdev_class_remove_file); | 932 | EXPORT_SYMBOL(netdev_class_remove_file); |
898 | 933 | ||
899 | void netdev_initialize_kobject(struct net_device *net) | ||
900 | { | ||
901 | struct device *device = &(net->dev); | ||
902 | device_initialize(device); | ||
903 | } | ||
904 | |||
905 | int netdev_kobject_init(void) | 934 | int netdev_kobject_init(void) |
906 | { | 935 | { |
936 | kobj_ns_type_register(&net_ns_type_operations); | ||
937 | register_pernet_subsys(&kobj_net_ops); | ||
907 | return class_register(&net_class); | 938 | return class_register(&net_class); |
908 | } | 939 | } |
diff --git a/net/core/net-sysfs.h b/net/core/net-sysfs.h index 14e7524260b3..805555e8b187 100644 --- a/net/core/net-sysfs.h +++ b/net/core/net-sysfs.h | |||
@@ -4,5 +4,4 @@ | |||
4 | int netdev_kobject_init(void); | 4 | int netdev_kobject_init(void); |
5 | int netdev_register_kobject(struct net_device *); | 5 | int netdev_register_kobject(struct net_device *); |
6 | void netdev_unregister_kobject(struct net_device *); | 6 | void netdev_unregister_kobject(struct net_device *); |
7 | void netdev_initialize_kobject(struct net_device *); | ||
8 | #endif | 7 | #endif |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 2ad68da418df..1dacd7ba8dbb 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -2170,7 +2170,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) | |||
2170 | end_time = ktime_now(); | 2170 | end_time = ktime_now(); |
2171 | 2171 | ||
2172 | pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time)); | 2172 | pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time)); |
2173 | pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay); | 2173 | pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay); |
2174 | } | 2174 | } |
2175 | 2175 | ||
2176 | static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev) | 2176 | static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev) |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 23a71cb21273..1a2af24e9e3d 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -644,15 +644,48 @@ static void copy_rtnl_link_stats64(void *v, const struct net_device_stats *b) | |||
644 | memcpy(v, &a, sizeof(a)); | 644 | memcpy(v, &a, sizeof(a)); |
645 | } | 645 | } |
646 | 646 | ||
647 | /* All VF info */ | ||
647 | static inline int rtnl_vfinfo_size(const struct net_device *dev) | 648 | static inline int rtnl_vfinfo_size(const struct net_device *dev) |
648 | { | 649 | { |
649 | if (dev->dev.parent && dev_is_pci(dev->dev.parent)) | 650 | if (dev->dev.parent && dev_is_pci(dev->dev.parent)) { |
650 | return dev_num_vf(dev->dev.parent) * | 651 | |
651 | sizeof(struct ifla_vf_info); | 652 | int num_vfs = dev_num_vf(dev->dev.parent); |
652 | else | 653 | size_t size = nla_total_size(sizeof(struct nlattr)); |
654 | size += nla_total_size(num_vfs * sizeof(struct nlattr)); | ||
655 | size += num_vfs * | ||
656 | (nla_total_size(sizeof(struct ifla_vf_mac)) + | ||
657 | nla_total_size(sizeof(struct ifla_vf_vlan)) + | ||
658 | nla_total_size(sizeof(struct ifla_vf_tx_rate))); | ||
659 | return size; | ||
660 | } else | ||
653 | return 0; | 661 | return 0; |
654 | } | 662 | } |
655 | 663 | ||
664 | static size_t rtnl_port_size(const struct net_device *dev) | ||
665 | { | ||
666 | size_t port_size = nla_total_size(4) /* PORT_VF */ | ||
667 | + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */ | ||
668 | + nla_total_size(sizeof(struct ifla_port_vsi)) | ||
669 | /* PORT_VSI_TYPE */ | ||
670 | + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */ | ||
671 | + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */ | ||
672 | + nla_total_size(1) /* PROT_VDP_REQUEST */ | ||
673 | + nla_total_size(2); /* PORT_VDP_RESPONSE */ | ||
674 | size_t vf_ports_size = nla_total_size(sizeof(struct nlattr)); | ||
675 | size_t vf_port_size = nla_total_size(sizeof(struct nlattr)) | ||
676 | + port_size; | ||
677 | size_t port_self_size = nla_total_size(sizeof(struct nlattr)) | ||
678 | + port_size; | ||
679 | |||
680 | if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent) | ||
681 | return 0; | ||
682 | if (dev_num_vf(dev->dev.parent)) | ||
683 | return port_self_size + vf_ports_size + | ||
684 | vf_port_size * dev_num_vf(dev->dev.parent); | ||
685 | else | ||
686 | return port_self_size; | ||
687 | } | ||
688 | |||
656 | static inline size_t if_nlmsg_size(const struct net_device *dev) | 689 | static inline size_t if_nlmsg_size(const struct net_device *dev) |
657 | { | 690 | { |
658 | return NLMSG_ALIGN(sizeof(struct ifinfomsg)) | 691 | return NLMSG_ALIGN(sizeof(struct ifinfomsg)) |
@@ -672,10 +705,86 @@ static inline size_t if_nlmsg_size(const struct net_device *dev) | |||
672 | + nla_total_size(1) /* IFLA_OPERSTATE */ | 705 | + nla_total_size(1) /* IFLA_OPERSTATE */ |
673 | + nla_total_size(1) /* IFLA_LINKMODE */ | 706 | + nla_total_size(1) /* IFLA_LINKMODE */ |
674 | + nla_total_size(4) /* IFLA_NUM_VF */ | 707 | + nla_total_size(4) /* IFLA_NUM_VF */ |
675 | + nla_total_size(rtnl_vfinfo_size(dev)) /* IFLA_VFINFO */ | 708 | + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */ |
709 | + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */ | ||
676 | + rtnl_link_get_size(dev); /* IFLA_LINKINFO */ | 710 | + rtnl_link_get_size(dev); /* IFLA_LINKINFO */ |
677 | } | 711 | } |
678 | 712 | ||
713 | static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev) | ||
714 | { | ||
715 | struct nlattr *vf_ports; | ||
716 | struct nlattr *vf_port; | ||
717 | int vf; | ||
718 | int err; | ||
719 | |||
720 | vf_ports = nla_nest_start(skb, IFLA_VF_PORTS); | ||
721 | if (!vf_ports) | ||
722 | return -EMSGSIZE; | ||
723 | |||
724 | for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) { | ||
725 | vf_port = nla_nest_start(skb, IFLA_VF_PORT); | ||
726 | if (!vf_port) | ||
727 | goto nla_put_failure; | ||
728 | NLA_PUT_U32(skb, IFLA_PORT_VF, vf); | ||
729 | err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb); | ||
730 | if (err == -EMSGSIZE) | ||
731 | goto nla_put_failure; | ||
732 | if (err) { | ||
733 | nla_nest_cancel(skb, vf_port); | ||
734 | continue; | ||
735 | } | ||
736 | nla_nest_end(skb, vf_port); | ||
737 | } | ||
738 | |||
739 | nla_nest_end(skb, vf_ports); | ||
740 | |||
741 | return 0; | ||
742 | |||
743 | nla_put_failure: | ||
744 | nla_nest_cancel(skb, vf_ports); | ||
745 | return -EMSGSIZE; | ||
746 | } | ||
747 | |||
748 | static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev) | ||
749 | { | ||
750 | struct nlattr *port_self; | ||
751 | int err; | ||
752 | |||
753 | port_self = nla_nest_start(skb, IFLA_PORT_SELF); | ||
754 | if (!port_self) | ||
755 | return -EMSGSIZE; | ||
756 | |||
757 | err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb); | ||
758 | if (err) { | ||
759 | nla_nest_cancel(skb, port_self); | ||
760 | return (err == -EMSGSIZE) ? err : 0; | ||
761 | } | ||
762 | |||
763 | nla_nest_end(skb, port_self); | ||
764 | |||
765 | return 0; | ||
766 | } | ||
767 | |||
768 | static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev) | ||
769 | { | ||
770 | int err; | ||
771 | |||
772 | if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent) | ||
773 | return 0; | ||
774 | |||
775 | err = rtnl_port_self_fill(skb, dev); | ||
776 | if (err) | ||
777 | return err; | ||
778 | |||
779 | if (dev_num_vf(dev->dev.parent)) { | ||
780 | err = rtnl_vf_ports_fill(skb, dev); | ||
781 | if (err) | ||
782 | return err; | ||
783 | } | ||
784 | |||
785 | return 0; | ||
786 | } | ||
787 | |||
679 | static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | 788 | static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, |
680 | int type, u32 pid, u32 seq, u32 change, | 789 | int type, u32 pid, u32 seq, u32 change, |
681 | unsigned int flags) | 790 | unsigned int flags) |
@@ -747,17 +856,46 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | |||
747 | goto nla_put_failure; | 856 | goto nla_put_failure; |
748 | copy_rtnl_link_stats64(nla_data(attr), stats); | 857 | copy_rtnl_link_stats64(nla_data(attr), stats); |
749 | 858 | ||
859 | if (dev->dev.parent) | ||
860 | NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)); | ||
861 | |||
750 | if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) { | 862 | if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) { |
751 | int i; | 863 | int i; |
752 | struct ifla_vf_info ivi; | ||
753 | 864 | ||
754 | NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)); | 865 | struct nlattr *vfinfo, *vf; |
755 | for (i = 0; i < dev_num_vf(dev->dev.parent); i++) { | 866 | int num_vfs = dev_num_vf(dev->dev.parent); |
867 | |||
868 | vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST); | ||
869 | if (!vfinfo) | ||
870 | goto nla_put_failure; | ||
871 | for (i = 0; i < num_vfs; i++) { | ||
872 | struct ifla_vf_info ivi; | ||
873 | struct ifla_vf_mac vf_mac; | ||
874 | struct ifla_vf_vlan vf_vlan; | ||
875 | struct ifla_vf_tx_rate vf_tx_rate; | ||
756 | if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) | 876 | if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) |
757 | break; | 877 | break; |
758 | NLA_PUT(skb, IFLA_VFINFO, sizeof(ivi), &ivi); | 878 | vf_mac.vf = vf_vlan.vf = vf_tx_rate.vf = ivi.vf; |
879 | memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac)); | ||
880 | vf_vlan.vlan = ivi.vlan; | ||
881 | vf_vlan.qos = ivi.qos; | ||
882 | vf_tx_rate.rate = ivi.tx_rate; | ||
883 | vf = nla_nest_start(skb, IFLA_VF_INFO); | ||
884 | if (!vf) { | ||
885 | nla_nest_cancel(skb, vfinfo); | ||
886 | goto nla_put_failure; | ||
887 | } | ||
888 | NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac); | ||
889 | NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan); | ||
890 | NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), &vf_tx_rate); | ||
891 | nla_nest_end(skb, vf); | ||
759 | } | 892 | } |
893 | nla_nest_end(skb, vfinfo); | ||
760 | } | 894 | } |
895 | |||
896 | if (rtnl_port_fill(skb, dev)) | ||
897 | goto nla_put_failure; | ||
898 | |||
761 | if (dev->rtnl_link_ops) { | 899 | if (dev->rtnl_link_ops) { |
762 | if (rtnl_link_fill(skb, dev) < 0) | 900 | if (rtnl_link_fill(skb, dev) < 0) |
763 | goto nla_put_failure; | 901 | goto nla_put_failure; |
@@ -818,6 +956,22 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = { | |||
818 | [IFLA_LINKINFO] = { .type = NLA_NESTED }, | 956 | [IFLA_LINKINFO] = { .type = NLA_NESTED }, |
819 | [IFLA_NET_NS_PID] = { .type = NLA_U32 }, | 957 | [IFLA_NET_NS_PID] = { .type = NLA_U32 }, |
820 | [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, | 958 | [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, |
959 | [IFLA_VFINFO_LIST] = {. type = NLA_NESTED }, | ||
960 | [IFLA_VF_PORTS] = { .type = NLA_NESTED }, | ||
961 | [IFLA_PORT_SELF] = { .type = NLA_NESTED }, | ||
962 | }; | ||
963 | EXPORT_SYMBOL(ifla_policy); | ||
964 | |||
965 | static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { | ||
966 | [IFLA_INFO_KIND] = { .type = NLA_STRING }, | ||
967 | [IFLA_INFO_DATA] = { .type = NLA_NESTED }, | ||
968 | }; | ||
969 | |||
970 | static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = { | ||
971 | [IFLA_VF_INFO] = { .type = NLA_NESTED }, | ||
972 | }; | ||
973 | |||
974 | static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { | ||
821 | [IFLA_VF_MAC] = { .type = NLA_BINARY, | 975 | [IFLA_VF_MAC] = { .type = NLA_BINARY, |
822 | .len = sizeof(struct ifla_vf_mac) }, | 976 | .len = sizeof(struct ifla_vf_mac) }, |
823 | [IFLA_VF_VLAN] = { .type = NLA_BINARY, | 977 | [IFLA_VF_VLAN] = { .type = NLA_BINARY, |
@@ -825,11 +979,19 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = { | |||
825 | [IFLA_VF_TX_RATE] = { .type = NLA_BINARY, | 979 | [IFLA_VF_TX_RATE] = { .type = NLA_BINARY, |
826 | .len = sizeof(struct ifla_vf_tx_rate) }, | 980 | .len = sizeof(struct ifla_vf_tx_rate) }, |
827 | }; | 981 | }; |
828 | EXPORT_SYMBOL(ifla_policy); | ||
829 | 982 | ||
830 | static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { | 983 | static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { |
831 | [IFLA_INFO_KIND] = { .type = NLA_STRING }, | 984 | [IFLA_PORT_VF] = { .type = NLA_U32 }, |
832 | [IFLA_INFO_DATA] = { .type = NLA_NESTED }, | 985 | [IFLA_PORT_PROFILE] = { .type = NLA_STRING, |
986 | .len = PORT_PROFILE_MAX }, | ||
987 | [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY, | ||
988 | .len = sizeof(struct ifla_port_vsi)}, | ||
989 | [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY, | ||
990 | .len = PORT_UUID_MAX }, | ||
991 | [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING, | ||
992 | .len = PORT_UUID_MAX }, | ||
993 | [IFLA_PORT_REQUEST] = { .type = NLA_U8, }, | ||
994 | [IFLA_PORT_RESPONSE] = { .type = NLA_U16, }, | ||
833 | }; | 995 | }; |
834 | 996 | ||
835 | struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) | 997 | struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) |
@@ -861,6 +1023,52 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) | |||
861 | return 0; | 1023 | return 0; |
862 | } | 1024 | } |
863 | 1025 | ||
1026 | static int do_setvfinfo(struct net_device *dev, struct nlattr *attr) | ||
1027 | { | ||
1028 | int rem, err = -EINVAL; | ||
1029 | struct nlattr *vf; | ||
1030 | const struct net_device_ops *ops = dev->netdev_ops; | ||
1031 | |||
1032 | nla_for_each_nested(vf, attr, rem) { | ||
1033 | switch (nla_type(vf)) { | ||
1034 | case IFLA_VF_MAC: { | ||
1035 | struct ifla_vf_mac *ivm; | ||
1036 | ivm = nla_data(vf); | ||
1037 | err = -EOPNOTSUPP; | ||
1038 | if (ops->ndo_set_vf_mac) | ||
1039 | err = ops->ndo_set_vf_mac(dev, ivm->vf, | ||
1040 | ivm->mac); | ||
1041 | break; | ||
1042 | } | ||
1043 | case IFLA_VF_VLAN: { | ||
1044 | struct ifla_vf_vlan *ivv; | ||
1045 | ivv = nla_data(vf); | ||
1046 | err = -EOPNOTSUPP; | ||
1047 | if (ops->ndo_set_vf_vlan) | ||
1048 | err = ops->ndo_set_vf_vlan(dev, ivv->vf, | ||
1049 | ivv->vlan, | ||
1050 | ivv->qos); | ||
1051 | break; | ||
1052 | } | ||
1053 | case IFLA_VF_TX_RATE: { | ||
1054 | struct ifla_vf_tx_rate *ivt; | ||
1055 | ivt = nla_data(vf); | ||
1056 | err = -EOPNOTSUPP; | ||
1057 | if (ops->ndo_set_vf_tx_rate) | ||
1058 | err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, | ||
1059 | ivt->rate); | ||
1060 | break; | ||
1061 | } | ||
1062 | default: | ||
1063 | err = -EINVAL; | ||
1064 | break; | ||
1065 | } | ||
1066 | if (err) | ||
1067 | break; | ||
1068 | } | ||
1069 | return err; | ||
1070 | } | ||
1071 | |||
864 | static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, | 1072 | static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, |
865 | struct nlattr **tb, char *ifname, int modified) | 1073 | struct nlattr **tb, char *ifname, int modified) |
866 | { | 1074 | { |
@@ -991,37 +1199,63 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, | |||
991 | write_unlock_bh(&dev_base_lock); | 1199 | write_unlock_bh(&dev_base_lock); |
992 | } | 1200 | } |
993 | 1201 | ||
994 | if (tb[IFLA_VF_MAC]) { | 1202 | if (tb[IFLA_VFINFO_LIST]) { |
995 | struct ifla_vf_mac *ivm; | 1203 | struct nlattr *attr; |
996 | ivm = nla_data(tb[IFLA_VF_MAC]); | 1204 | int rem; |
997 | err = -EOPNOTSUPP; | 1205 | nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { |
998 | if (ops->ndo_set_vf_mac) | 1206 | if (nla_type(attr) != IFLA_VF_INFO) { |
999 | err = ops->ndo_set_vf_mac(dev, ivm->vf, ivm->mac); | 1207 | err = -EINVAL; |
1000 | if (err < 0) | 1208 | goto errout; |
1001 | goto errout; | 1209 | } |
1002 | modified = 1; | 1210 | err = do_setvfinfo(dev, attr); |
1211 | if (err < 0) | ||
1212 | goto errout; | ||
1213 | modified = 1; | ||
1214 | } | ||
1003 | } | 1215 | } |
1216 | err = 0; | ||
1217 | |||
1218 | if (tb[IFLA_VF_PORTS]) { | ||
1219 | struct nlattr *port[IFLA_PORT_MAX+1]; | ||
1220 | struct nlattr *attr; | ||
1221 | int vf; | ||
1222 | int rem; | ||
1004 | 1223 | ||
1005 | if (tb[IFLA_VF_VLAN]) { | ||
1006 | struct ifla_vf_vlan *ivv; | ||
1007 | ivv = nla_data(tb[IFLA_VF_VLAN]); | ||
1008 | err = -EOPNOTSUPP; | 1224 | err = -EOPNOTSUPP; |
1009 | if (ops->ndo_set_vf_vlan) | 1225 | if (!ops->ndo_set_vf_port) |
1010 | err = ops->ndo_set_vf_vlan(dev, ivv->vf, | ||
1011 | ivv->vlan, | ||
1012 | ivv->qos); | ||
1013 | if (err < 0) | ||
1014 | goto errout; | 1226 | goto errout; |
1015 | modified = 1; | 1227 | |
1228 | nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { | ||
1229 | if (nla_type(attr) != IFLA_VF_PORT) | ||
1230 | continue; | ||
1231 | err = nla_parse_nested(port, IFLA_PORT_MAX, | ||
1232 | attr, ifla_port_policy); | ||
1233 | if (err < 0) | ||
1234 | goto errout; | ||
1235 | if (!port[IFLA_PORT_VF]) { | ||
1236 | err = -EOPNOTSUPP; | ||
1237 | goto errout; | ||
1238 | } | ||
1239 | vf = nla_get_u32(port[IFLA_PORT_VF]); | ||
1240 | err = ops->ndo_set_vf_port(dev, vf, port); | ||
1241 | if (err < 0) | ||
1242 | goto errout; | ||
1243 | modified = 1; | ||
1244 | } | ||
1016 | } | 1245 | } |
1017 | err = 0; | 1246 | err = 0; |
1018 | 1247 | ||
1019 | if (tb[IFLA_VF_TX_RATE]) { | 1248 | if (tb[IFLA_PORT_SELF]) { |
1020 | struct ifla_vf_tx_rate *ivt; | 1249 | struct nlattr *port[IFLA_PORT_MAX+1]; |
1021 | ivt = nla_data(tb[IFLA_VF_TX_RATE]); | 1250 | |
1251 | err = nla_parse_nested(port, IFLA_PORT_MAX, | ||
1252 | tb[IFLA_PORT_SELF], ifla_port_policy); | ||
1253 | if (err < 0) | ||
1254 | goto errout; | ||
1255 | |||
1022 | err = -EOPNOTSUPP; | 1256 | err = -EOPNOTSUPP; |
1023 | if (ops->ndo_set_vf_tx_rate) | 1257 | if (ops->ndo_set_vf_port) |
1024 | err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, ivt->rate); | 1258 | err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port); |
1025 | if (err < 0) | 1259 | if (err < 0) |
1026 | goto errout; | 1260 | goto errout; |
1027 | modified = 1; | 1261 | modified = 1; |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index a9b0e1f77806..9f07e749d7b1 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -482,22 +482,22 @@ EXPORT_SYMBOL(consume_skb); | |||
482 | * reference count dropping and cleans up the skbuff as if it | 482 | * reference count dropping and cleans up the skbuff as if it |
483 | * just came from __alloc_skb(). | 483 | * just came from __alloc_skb(). |
484 | */ | 484 | */ |
485 | int skb_recycle_check(struct sk_buff *skb, int skb_size) | 485 | bool skb_recycle_check(struct sk_buff *skb, int skb_size) |
486 | { | 486 | { |
487 | struct skb_shared_info *shinfo; | 487 | struct skb_shared_info *shinfo; |
488 | 488 | ||
489 | if (irqs_disabled()) | 489 | if (irqs_disabled()) |
490 | return 0; | 490 | return false; |
491 | 491 | ||
492 | if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE) | 492 | if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE) |
493 | return 0; | 493 | return false; |
494 | 494 | ||
495 | skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD); | 495 | skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD); |
496 | if (skb_end_pointer(skb) - skb->head < skb_size) | 496 | if (skb_end_pointer(skb) - skb->head < skb_size) |
497 | return 0; | 497 | return false; |
498 | 498 | ||
499 | if (skb_shared(skb) || skb_cloned(skb)) | 499 | if (skb_shared(skb) || skb_cloned(skb)) |
500 | return 0; | 500 | return false; |
501 | 501 | ||
502 | skb_release_head_state(skb); | 502 | skb_release_head_state(skb); |
503 | 503 | ||
@@ -509,7 +509,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size) | |||
509 | skb->data = skb->head + NET_SKB_PAD; | 509 | skb->data = skb->head + NET_SKB_PAD; |
510 | skb_reset_tail_pointer(skb); | 510 | skb_reset_tail_pointer(skb); |
511 | 511 | ||
512 | return 1; | 512 | return true; |
513 | } | 513 | } |
514 | EXPORT_SYMBOL(skb_recycle_check); | 514 | EXPORT_SYMBOL(skb_recycle_check); |
515 | 515 | ||
@@ -520,7 +520,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | |||
520 | new->transport_header = old->transport_header; | 520 | new->transport_header = old->transport_header; |
521 | new->network_header = old->network_header; | 521 | new->network_header = old->network_header; |
522 | new->mac_header = old->mac_header; | 522 | new->mac_header = old->mac_header; |
523 | skb_dst_set(new, dst_clone(skb_dst(old))); | 523 | skb_dst_copy(new, old); |
524 | new->rxhash = old->rxhash; | 524 | new->rxhash = old->rxhash; |
525 | #ifdef CONFIG_XFRM | 525 | #ifdef CONFIG_XFRM |
526 | new->sp = secpath_get(old->sp); | 526 | new->sp = secpath_get(old->sp); |
@@ -1406,12 +1406,13 @@ new_page: | |||
1406 | /* | 1406 | /* |
1407 | * Fill page/offset/length into spd, if it can hold more pages. | 1407 | * Fill page/offset/length into spd, if it can hold more pages. |
1408 | */ | 1408 | */ |
1409 | static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page, | 1409 | static inline int spd_fill_page(struct splice_pipe_desc *spd, |
1410 | struct pipe_inode_info *pipe, struct page *page, | ||
1410 | unsigned int *len, unsigned int offset, | 1411 | unsigned int *len, unsigned int offset, |
1411 | struct sk_buff *skb, int linear, | 1412 | struct sk_buff *skb, int linear, |
1412 | struct sock *sk) | 1413 | struct sock *sk) |
1413 | { | 1414 | { |
1414 | if (unlikely(spd->nr_pages == PIPE_BUFFERS)) | 1415 | if (unlikely(spd->nr_pages == pipe->buffers)) |
1415 | return 1; | 1416 | return 1; |
1416 | 1417 | ||
1417 | if (linear) { | 1418 | if (linear) { |
@@ -1447,7 +1448,8 @@ static inline int __splice_segment(struct page *page, unsigned int poff, | |||
1447 | unsigned int plen, unsigned int *off, | 1448 | unsigned int plen, unsigned int *off, |
1448 | unsigned int *len, struct sk_buff *skb, | 1449 | unsigned int *len, struct sk_buff *skb, |
1449 | struct splice_pipe_desc *spd, int linear, | 1450 | struct splice_pipe_desc *spd, int linear, |
1450 | struct sock *sk) | 1451 | struct sock *sk, |
1452 | struct pipe_inode_info *pipe) | ||
1451 | { | 1453 | { |
1452 | if (!*len) | 1454 | if (!*len) |
1453 | return 1; | 1455 | return 1; |
@@ -1470,7 +1472,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff, | |||
1470 | /* the linear region may spread across several pages */ | 1472 | /* the linear region may spread across several pages */ |
1471 | flen = min_t(unsigned int, flen, PAGE_SIZE - poff); | 1473 | flen = min_t(unsigned int, flen, PAGE_SIZE - poff); |
1472 | 1474 | ||
1473 | if (spd_fill_page(spd, page, &flen, poff, skb, linear, sk)) | 1475 | if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk)) |
1474 | return 1; | 1476 | return 1; |
1475 | 1477 | ||
1476 | __segment_seek(&page, &poff, &plen, flen); | 1478 | __segment_seek(&page, &poff, &plen, flen); |
@@ -1485,9 +1487,9 @@ static inline int __splice_segment(struct page *page, unsigned int poff, | |||
1485 | * Map linear and fragment data from the skb to spd. It reports failure if the | 1487 | * Map linear and fragment data from the skb to spd. It reports failure if the |
1486 | * pipe is full or if we already spliced the requested length. | 1488 | * pipe is full or if we already spliced the requested length. |
1487 | */ | 1489 | */ |
1488 | static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, | 1490 | static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, |
1489 | unsigned int *len, struct splice_pipe_desc *spd, | 1491 | unsigned int *offset, unsigned int *len, |
1490 | struct sock *sk) | 1492 | struct splice_pipe_desc *spd, struct sock *sk) |
1491 | { | 1493 | { |
1492 | int seg; | 1494 | int seg; |
1493 | 1495 | ||
@@ -1497,7 +1499,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, | |||
1497 | if (__splice_segment(virt_to_page(skb->data), | 1499 | if (__splice_segment(virt_to_page(skb->data), |
1498 | (unsigned long) skb->data & (PAGE_SIZE - 1), | 1500 | (unsigned long) skb->data & (PAGE_SIZE - 1), |
1499 | skb_headlen(skb), | 1501 | skb_headlen(skb), |
1500 | offset, len, skb, spd, 1, sk)) | 1502 | offset, len, skb, spd, 1, sk, pipe)) |
1501 | return 1; | 1503 | return 1; |
1502 | 1504 | ||
1503 | /* | 1505 | /* |
@@ -1507,7 +1509,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, | |||
1507 | const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; | 1509 | const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; |
1508 | 1510 | ||
1509 | if (__splice_segment(f->page, f->page_offset, f->size, | 1511 | if (__splice_segment(f->page, f->page_offset, f->size, |
1510 | offset, len, skb, spd, 0, sk)) | 1512 | offset, len, skb, spd, 0, sk, pipe)) |
1511 | return 1; | 1513 | return 1; |
1512 | } | 1514 | } |
1513 | 1515 | ||
@@ -1524,8 +1526,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, | |||
1524 | struct pipe_inode_info *pipe, unsigned int tlen, | 1526 | struct pipe_inode_info *pipe, unsigned int tlen, |
1525 | unsigned int flags) | 1527 | unsigned int flags) |
1526 | { | 1528 | { |
1527 | struct partial_page partial[PIPE_BUFFERS]; | 1529 | struct partial_page partial[PIPE_DEF_BUFFERS]; |
1528 | struct page *pages[PIPE_BUFFERS]; | 1530 | struct page *pages[PIPE_DEF_BUFFERS]; |
1529 | struct splice_pipe_desc spd = { | 1531 | struct splice_pipe_desc spd = { |
1530 | .pages = pages, | 1532 | .pages = pages, |
1531 | .partial = partial, | 1533 | .partial = partial, |
@@ -1535,12 +1537,16 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, | |||
1535 | }; | 1537 | }; |
1536 | struct sk_buff *frag_iter; | 1538 | struct sk_buff *frag_iter; |
1537 | struct sock *sk = skb->sk; | 1539 | struct sock *sk = skb->sk; |
1540 | int ret = 0; | ||
1541 | |||
1542 | if (splice_grow_spd(pipe, &spd)) | ||
1543 | return -ENOMEM; | ||
1538 | 1544 | ||
1539 | /* | 1545 | /* |
1540 | * __skb_splice_bits() only fails if the output has no room left, | 1546 | * __skb_splice_bits() only fails if the output has no room left, |
1541 | * so no point in going over the frag_list for the error case. | 1547 | * so no point in going over the frag_list for the error case. |
1542 | */ | 1548 | */ |
1543 | if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk)) | 1549 | if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk)) |
1544 | goto done; | 1550 | goto done; |
1545 | else if (!tlen) | 1551 | else if (!tlen) |
1546 | goto done; | 1552 | goto done; |
@@ -1551,14 +1557,12 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, | |||
1551 | skb_walk_frags(skb, frag_iter) { | 1557 | skb_walk_frags(skb, frag_iter) { |
1552 | if (!tlen) | 1558 | if (!tlen) |
1553 | break; | 1559 | break; |
1554 | if (__skb_splice_bits(frag_iter, &offset, &tlen, &spd, sk)) | 1560 | if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk)) |
1555 | break; | 1561 | break; |
1556 | } | 1562 | } |
1557 | 1563 | ||
1558 | done: | 1564 | done: |
1559 | if (spd.nr_pages) { | 1565 | if (spd.nr_pages) { |
1560 | int ret; | ||
1561 | |||
1562 | /* | 1566 | /* |
1563 | * Drop the socket lock, otherwise we have reverse | 1567 | * Drop the socket lock, otherwise we have reverse |
1564 | * locking dependencies between sk_lock and i_mutex | 1568 | * locking dependencies between sk_lock and i_mutex |
@@ -1571,10 +1575,10 @@ done: | |||
1571 | release_sock(sk); | 1575 | release_sock(sk); |
1572 | ret = splice_to_pipe(pipe, &spd); | 1576 | ret = splice_to_pipe(pipe, &spd); |
1573 | lock_sock(sk); | 1577 | lock_sock(sk); |
1574 | return ret; | ||
1575 | } | 1578 | } |
1576 | 1579 | ||
1577 | return 0; | 1580 | splice_shrink_spd(pipe, &spd); |
1581 | return ret; | ||
1578 | } | 1582 | } |
1579 | 1583 | ||
1580 | /** | 1584 | /** |
@@ -2718,6 +2722,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
2718 | *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); | 2722 | *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); |
2719 | skb_shinfo(nskb)->frag_list = p; | 2723 | skb_shinfo(nskb)->frag_list = p; |
2720 | skb_shinfo(nskb)->gso_size = pinfo->gso_size; | 2724 | skb_shinfo(nskb)->gso_size = pinfo->gso_size; |
2725 | pinfo->gso_size = 0; | ||
2721 | skb_header_release(p); | 2726 | skb_header_release(p); |
2722 | nskb->prev = p; | 2727 | nskb->prev = p; |
2723 | 2728 | ||
@@ -2960,6 +2965,34 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) | |||
2960 | } | 2965 | } |
2961 | EXPORT_SYMBOL_GPL(skb_cow_data); | 2966 | EXPORT_SYMBOL_GPL(skb_cow_data); |
2962 | 2967 | ||
2968 | static void sock_rmem_free(struct sk_buff *skb) | ||
2969 | { | ||
2970 | struct sock *sk = skb->sk; | ||
2971 | |||
2972 | atomic_sub(skb->truesize, &sk->sk_rmem_alloc); | ||
2973 | } | ||
2974 | |||
2975 | /* | ||
2976 | * Note: We dont mem charge error packets (no sk_forward_alloc changes) | ||
2977 | */ | ||
2978 | int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) | ||
2979 | { | ||
2980 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= | ||
2981 | (unsigned)sk->sk_rcvbuf) | ||
2982 | return -ENOMEM; | ||
2983 | |||
2984 | skb_orphan(skb); | ||
2985 | skb->sk = sk; | ||
2986 | skb->destructor = sock_rmem_free; | ||
2987 | atomic_add(skb->truesize, &sk->sk_rmem_alloc); | ||
2988 | |||
2989 | skb_queue_tail(&sk->sk_error_queue, skb); | ||
2990 | if (!sock_flag(sk, SOCK_DEAD)) | ||
2991 | sk->sk_data_ready(sk, skb->len); | ||
2992 | return 0; | ||
2993 | } | ||
2994 | EXPORT_SYMBOL(sock_queue_err_skb); | ||
2995 | |||
2963 | void skb_tstamp_tx(struct sk_buff *orig_skb, | 2996 | void skb_tstamp_tx(struct sk_buff *orig_skb, |
2964 | struct skb_shared_hwtstamps *hwtstamps) | 2997 | struct skb_shared_hwtstamps *hwtstamps) |
2965 | { | 2998 | { |
@@ -2991,7 +3024,9 @@ void skb_tstamp_tx(struct sk_buff *orig_skb, | |||
2991 | memset(serr, 0, sizeof(*serr)); | 3024 | memset(serr, 0, sizeof(*serr)); |
2992 | serr->ee.ee_errno = ENOMSG; | 3025 | serr->ee.ee_errno = ENOMSG; |
2993 | serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; | 3026 | serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; |
3027 | |||
2994 | err = sock_queue_err_skb(sk, skb); | 3028 | err = sock_queue_err_skb(sk, skb); |
3029 | |||
2995 | if (err) | 3030 | if (err) |
2996 | kfree_skb(skb); | 3031 | kfree_skb(skb); |
2997 | } | 3032 | } |
diff --git a/net/core/sock.c b/net/core/sock.c index 94c4affdda9b..2cf7f9f7e775 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -123,6 +123,7 @@ | |||
123 | #include <linux/net_tstamp.h> | 123 | #include <linux/net_tstamp.h> |
124 | #include <net/xfrm.h> | 124 | #include <net/xfrm.h> |
125 | #include <linux/ipsec.h> | 125 | #include <linux/ipsec.h> |
126 | #include <net/cls_cgroup.h> | ||
126 | 127 | ||
127 | #include <linux/filter.h> | 128 | #include <linux/filter.h> |
128 | 129 | ||
@@ -217,6 +218,11 @@ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX; | |||
217 | int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); | 218 | int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); |
218 | EXPORT_SYMBOL(sysctl_optmem_max); | 219 | EXPORT_SYMBOL(sysctl_optmem_max); |
219 | 220 | ||
221 | #if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP) | ||
222 | int net_cls_subsys_id = -1; | ||
223 | EXPORT_SYMBOL_GPL(net_cls_subsys_id); | ||
224 | #endif | ||
225 | |||
220 | static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) | 226 | static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) |
221 | { | 227 | { |
222 | struct timeval tv; | 228 | struct timeval tv; |
@@ -307,6 +313,11 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
307 | */ | 313 | */ |
308 | skb_len = skb->len; | 314 | skb_len = skb->len; |
309 | 315 | ||
316 | /* we escape from rcu protected region, make sure we dont leak | ||
317 | * a norefcounted dst | ||
318 | */ | ||
319 | skb_dst_force(skb); | ||
320 | |||
310 | spin_lock_irqsave(&list->lock, flags); | 321 | spin_lock_irqsave(&list->lock, flags); |
311 | skb->dropcount = atomic_read(&sk->sk_drops); | 322 | skb->dropcount = atomic_read(&sk->sk_drops); |
312 | __skb_queue_tail(list, skb); | 323 | __skb_queue_tail(list, skb); |
@@ -1045,6 +1056,17 @@ static void sk_prot_free(struct proto *prot, struct sock *sk) | |||
1045 | module_put(owner); | 1056 | module_put(owner); |
1046 | } | 1057 | } |
1047 | 1058 | ||
1059 | #ifdef CONFIG_CGROUPS | ||
1060 | void sock_update_classid(struct sock *sk) | ||
1061 | { | ||
1062 | u32 classid = task_cls_classid(current); | ||
1063 | |||
1064 | if (classid && classid != sk->sk_classid) | ||
1065 | sk->sk_classid = classid; | ||
1066 | } | ||
1067 | EXPORT_SYMBOL(sock_update_classid); | ||
1068 | #endif | ||
1069 | |||
1048 | /** | 1070 | /** |
1049 | * sk_alloc - All socket objects are allocated here | 1071 | * sk_alloc - All socket objects are allocated here |
1050 | * @net: the applicable net namespace | 1072 | * @net: the applicable net namespace |
@@ -1068,6 +1090,8 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, | |||
1068 | sock_lock_init(sk); | 1090 | sock_lock_init(sk); |
1069 | sock_net_set(sk, get_net(net)); | 1091 | sock_net_set(sk, get_net(net)); |
1070 | atomic_set(&sk->sk_wmem_alloc, 1); | 1092 | atomic_set(&sk->sk_wmem_alloc, 1); |
1093 | |||
1094 | sock_update_classid(sk); | ||
1071 | } | 1095 | } |
1072 | 1096 | ||
1073 | return sk; | 1097 | return sk; |
@@ -1231,6 +1255,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) | |||
1231 | sk->sk_route_caps = dst->dev->features; | 1255 | sk->sk_route_caps = dst->dev->features; |
1232 | if (sk->sk_route_caps & NETIF_F_GSO) | 1256 | if (sk->sk_route_caps & NETIF_F_GSO) |
1233 | sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; | 1257 | sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; |
1258 | sk->sk_route_caps &= ~sk->sk_route_nocaps; | ||
1234 | if (sk_can_gso(sk)) { | 1259 | if (sk_can_gso(sk)) { |
1235 | if (dst->header_len) { | 1260 | if (dst->header_len) { |
1236 | sk->sk_route_caps &= ~NETIF_F_GSO_MASK; | 1261 | sk->sk_route_caps &= ~NETIF_F_GSO_MASK; |
@@ -1535,6 +1560,7 @@ static void __release_sock(struct sock *sk) | |||
1535 | do { | 1560 | do { |
1536 | struct sk_buff *next = skb->next; | 1561 | struct sk_buff *next = skb->next; |
1537 | 1562 | ||
1563 | WARN_ON_ONCE(skb_dst_is_noref(skb)); | ||
1538 | skb->next = NULL; | 1564 | skb->next = NULL; |
1539 | sk_backlog_rcv(sk, skb); | 1565 | sk_backlog_rcv(sk, skb); |
1540 | 1566 | ||
@@ -1981,6 +2007,39 @@ void release_sock(struct sock *sk) | |||
1981 | } | 2007 | } |
1982 | EXPORT_SYMBOL(release_sock); | 2008 | EXPORT_SYMBOL(release_sock); |
1983 | 2009 | ||
2010 | /** | ||
2011 | * lock_sock_fast - fast version of lock_sock | ||
2012 | * @sk: socket | ||
2013 | * | ||
2014 | * This version should be used for very small section, where process wont block | ||
2015 | * return false if fast path is taken | ||
2016 | * sk_lock.slock locked, owned = 0, BH disabled | ||
2017 | * return true if slow path is taken | ||
2018 | * sk_lock.slock unlocked, owned = 1, BH enabled | ||
2019 | */ | ||
2020 | bool lock_sock_fast(struct sock *sk) | ||
2021 | { | ||
2022 | might_sleep(); | ||
2023 | spin_lock_bh(&sk->sk_lock.slock); | ||
2024 | |||
2025 | if (!sk->sk_lock.owned) | ||
2026 | /* | ||
2027 | * Note : We must disable BH | ||
2028 | */ | ||
2029 | return false; | ||
2030 | |||
2031 | __lock_sock(sk); | ||
2032 | sk->sk_lock.owned = 1; | ||
2033 | spin_unlock(&sk->sk_lock.slock); | ||
2034 | /* | ||
2035 | * The sk_lock has mutex_lock() semantics here: | ||
2036 | */ | ||
2037 | mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_); | ||
2038 | local_bh_enable(); | ||
2039 | return true; | ||
2040 | } | ||
2041 | EXPORT_SYMBOL(lock_sock_fast); | ||
2042 | |||
1984 | int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) | 2043 | int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) |
1985 | { | 2044 | { |
1986 | struct timeval tv; | 2045 | struct timeval tv; |
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index dcc7d25996ab..01eee5d984be 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c | |||
@@ -122,6 +122,13 @@ static struct ctl_table net_core_table[] = { | |||
122 | .proc_handler = proc_dointvec | 122 | .proc_handler = proc_dointvec |
123 | }, | 123 | }, |
124 | { | 124 | { |
125 | .procname = "netdev_tstamp_prequeue", | ||
126 | .data = &netdev_tstamp_prequeue, | ||
127 | .maxlen = sizeof(int), | ||
128 | .mode = 0644, | ||
129 | .proc_handler = proc_dointvec | ||
130 | }, | ||
131 | { | ||
125 | .procname = "message_cost", | 132 | .procname = "message_cost", |
126 | .data = &net_ratelimit_state.interval, | 133 | .data = &net_ratelimit_state.interval, |
127 | .maxlen = sizeof(int), | 134 | .maxlen = sizeof(int), |
diff --git a/net/dccp/input.c b/net/dccp/input.c index 58f7bc156850..6beb6a7d6fba 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c | |||
@@ -124,9 +124,9 @@ static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb) | |||
124 | return queued; | 124 | return queued; |
125 | } | 125 | } |
126 | 126 | ||
127 | static u8 dccp_reset_code_convert(const u8 code) | 127 | static u16 dccp_reset_code_convert(const u8 code) |
128 | { | 128 | { |
129 | const u8 error_code[] = { | 129 | const u16 error_code[] = { |
130 | [DCCP_RESET_CODE_CLOSED] = 0, /* normal termination */ | 130 | [DCCP_RESET_CODE_CLOSED] = 0, /* normal termination */ |
131 | [DCCP_RESET_CODE_UNSPECIFIED] = 0, /* nothing known */ | 131 | [DCCP_RESET_CODE_UNSPECIFIED] = 0, /* nothing known */ |
132 | [DCCP_RESET_CODE_ABORTED] = ECONNRESET, | 132 | [DCCP_RESET_CODE_ABORTED] = ECONNRESET, |
@@ -148,7 +148,7 @@ static u8 dccp_reset_code_convert(const u8 code) | |||
148 | 148 | ||
149 | static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb) | 149 | static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb) |
150 | { | 150 | { |
151 | u8 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code); | 151 | u16 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code); |
152 | 152 | ||
153 | sk->sk_err = err; | 153 | sk->sk_err = err; |
154 | 154 | ||
diff --git a/net/dccp/options.c b/net/dccp/options.c index 1b08cae9c65b..07395f861d35 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c | |||
@@ -296,7 +296,7 @@ static inline u8 dccp_ndp_len(const u64 ndp) | |||
296 | { | 296 | { |
297 | if (likely(ndp <= 0xFF)) | 297 | if (likely(ndp <= 0xFF)) |
298 | return 1; | 298 | return 1; |
299 | return likely(ndp <= USHORT_MAX) ? 2 : (ndp <= UINT_MAX ? 4 : 6); | 299 | return likely(ndp <= USHRT_MAX) ? 2 : (ndp <= UINT_MAX ? 4 : 6); |
300 | } | 300 | } |
301 | 301 | ||
302 | int dccp_insert_option(struct sock *sk, struct sk_buff *skb, | 302 | int dccp_insert_option(struct sock *sk, struct sk_buff *skb, |
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 615dbe3b43f9..4c409b46aa35 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c | |||
@@ -1220,17 +1220,14 @@ void dn_dev_down(struct net_device *dev) | |||
1220 | 1220 | ||
1221 | void dn_dev_init_pkt(struct sk_buff *skb) | 1221 | void dn_dev_init_pkt(struct sk_buff *skb) |
1222 | { | 1222 | { |
1223 | return; | ||
1224 | } | 1223 | } |
1225 | 1224 | ||
1226 | void dn_dev_veri_pkt(struct sk_buff *skb) | 1225 | void dn_dev_veri_pkt(struct sk_buff *skb) |
1227 | { | 1226 | { |
1228 | return; | ||
1229 | } | 1227 | } |
1230 | 1228 | ||
1231 | void dn_dev_hello(struct sk_buff *skb) | 1229 | void dn_dev_hello(struct sk_buff *skb) |
1232 | { | 1230 | { |
1233 | return; | ||
1234 | } | 1231 | } |
1235 | 1232 | ||
1236 | void dn_dev_devices_off(void) | 1233 | void dn_dev_devices_off(void) |
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c index deb723dba44b..0363bb95cc7d 100644 --- a/net/decnet/dn_neigh.c +++ b/net/decnet/dn_neigh.c | |||
@@ -266,7 +266,8 @@ static int dn_long_output(struct sk_buff *skb) | |||
266 | 266 | ||
267 | skb_reset_network_header(skb); | 267 | skb_reset_network_header(skb); |
268 | 268 | ||
269 | return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet); | 269 | return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL, |
270 | neigh->dev, dn_neigh_output_packet); | ||
270 | } | 271 | } |
271 | 272 | ||
272 | static int dn_short_output(struct sk_buff *skb) | 273 | static int dn_short_output(struct sk_buff *skb) |
@@ -305,7 +306,8 @@ static int dn_short_output(struct sk_buff *skb) | |||
305 | 306 | ||
306 | skb_reset_network_header(skb); | 307 | skb_reset_network_header(skb); |
307 | 308 | ||
308 | return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet); | 309 | return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL, |
310 | neigh->dev, dn_neigh_output_packet); | ||
309 | } | 311 | } |
310 | 312 | ||
311 | /* | 313 | /* |
@@ -347,7 +349,8 @@ static int dn_phase3_output(struct sk_buff *skb) | |||
347 | 349 | ||
348 | skb_reset_network_header(skb); | 350 | skb_reset_network_header(skb); |
349 | 351 | ||
350 | return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet); | 352 | return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL, |
353 | neigh->dev, dn_neigh_output_packet); | ||
351 | } | 354 | } |
352 | 355 | ||
353 | /* | 356 | /* |
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c index 25a37299bc65..b430549e2b91 100644 --- a/net/decnet/dn_nsp_in.c +++ b/net/decnet/dn_nsp_in.c | |||
@@ -810,7 +810,8 @@ free_out: | |||
810 | 810 | ||
811 | int dn_nsp_rx(struct sk_buff *skb) | 811 | int dn_nsp_rx(struct sk_buff *skb) |
812 | { | 812 | { |
813 | return NF_HOOK(PF_DECnet, NF_DN_LOCAL_IN, skb, skb->dev, NULL, dn_nsp_rx_packet); | 813 | return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_IN, skb, skb->dev, NULL, |
814 | dn_nsp_rx_packet); | ||
814 | } | 815 | } |
815 | 816 | ||
816 | /* | 817 | /* |
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 70ebe74027d5..812e6dff6067 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c | |||
@@ -264,7 +264,6 @@ static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst) | |||
264 | 264 | ||
265 | static void dn_dst_link_failure(struct sk_buff *skb) | 265 | static void dn_dst_link_failure(struct sk_buff *skb) |
266 | { | 266 | { |
267 | return; | ||
268 | } | 267 | } |
269 | 268 | ||
270 | static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) | 269 | static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) |
@@ -518,7 +517,8 @@ static int dn_route_rx_long(struct sk_buff *skb) | |||
518 | ptr++; | 517 | ptr++; |
519 | cb->hops = *ptr++; /* Visit Count */ | 518 | cb->hops = *ptr++; /* Visit Count */ |
520 | 519 | ||
521 | return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet); | 520 | return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, |
521 | dn_route_rx_packet); | ||
522 | 522 | ||
523 | drop_it: | 523 | drop_it: |
524 | kfree_skb(skb); | 524 | kfree_skb(skb); |
@@ -544,7 +544,8 @@ static int dn_route_rx_short(struct sk_buff *skb) | |||
544 | ptr += 2; | 544 | ptr += 2; |
545 | cb->hops = *ptr & 0x3f; | 545 | cb->hops = *ptr & 0x3f; |
546 | 546 | ||
547 | return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet); | 547 | return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, |
548 | dn_route_rx_packet); | ||
548 | 549 | ||
549 | drop_it: | 550 | drop_it: |
550 | kfree_skb(skb); | 551 | kfree_skb(skb); |
@@ -646,16 +647,24 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type | |||
646 | 647 | ||
647 | switch(flags & DN_RT_CNTL_MSK) { | 648 | switch(flags & DN_RT_CNTL_MSK) { |
648 | case DN_RT_PKT_HELO: | 649 | case DN_RT_PKT_HELO: |
649 | return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_route_ptp_hello); | 650 | return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO, |
651 | skb, skb->dev, NULL, | ||
652 | dn_route_ptp_hello); | ||
650 | 653 | ||
651 | case DN_RT_PKT_L1RT: | 654 | case DN_RT_PKT_L1RT: |
652 | case DN_RT_PKT_L2RT: | 655 | case DN_RT_PKT_L2RT: |
653 | return NF_HOOK(PF_DECnet, NF_DN_ROUTE, skb, skb->dev, NULL, dn_route_discard); | 656 | return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE, |
657 | skb, skb->dev, NULL, | ||
658 | dn_route_discard); | ||
654 | case DN_RT_PKT_ERTH: | 659 | case DN_RT_PKT_ERTH: |
655 | return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_router_hello); | 660 | return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO, |
661 | skb, skb->dev, NULL, | ||
662 | dn_neigh_router_hello); | ||
656 | 663 | ||
657 | case DN_RT_PKT_EEDH: | 664 | case DN_RT_PKT_EEDH: |
658 | return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_endnode_hello); | 665 | return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO, |
666 | skb, skb->dev, NULL, | ||
667 | dn_neigh_endnode_hello); | ||
659 | } | 668 | } |
660 | } else { | 669 | } else { |
661 | if (dn->parms.state != DN_DEV_S_RU) | 670 | if (dn->parms.state != DN_DEV_S_RU) |
@@ -704,7 +713,8 @@ static int dn_output(struct sk_buff *skb) | |||
704 | cb->rt_flags |= DN_RT_F_IE; | 713 | cb->rt_flags |= DN_RT_F_IE; |
705 | cb->hops = 0; | 714 | cb->hops = 0; |
706 | 715 | ||
707 | return NF_HOOK(PF_DECnet, NF_DN_LOCAL_OUT, skb, NULL, dev, neigh->output); | 716 | return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, skb, NULL, dev, |
717 | neigh->output); | ||
708 | 718 | ||
709 | error: | 719 | error: |
710 | if (net_ratelimit()) | 720 | if (net_ratelimit()) |
@@ -753,7 +763,8 @@ static int dn_forward(struct sk_buff *skb) | |||
753 | if (rt->rt_flags & RTCF_DOREDIRECT) | 763 | if (rt->rt_flags & RTCF_DOREDIRECT) |
754 | cb->rt_flags |= DN_RT_F_IE; | 764 | cb->rt_flags |= DN_RT_F_IE; |
755 | 765 | ||
756 | return NF_HOOK(PF_DECnet, NF_DN_FORWARD, skb, dev, skb->dev, neigh->output); | 766 | return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, skb, dev, skb->dev, |
767 | neigh->output); | ||
757 | 768 | ||
758 | drop: | 769 | drop: |
759 | kfree_skb(skb); | 770 | kfree_skb(skb); |
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c index 3d803a1b9fb6..1627ef2e8522 100644 --- a/net/ieee802154/wpan-class.c +++ b/net/ieee802154/wpan-class.c | |||
@@ -147,13 +147,15 @@ struct wpan_phy *wpan_phy_alloc(size_t priv_size) | |||
147 | struct wpan_phy *phy = kzalloc(sizeof(*phy) + priv_size, | 147 | struct wpan_phy *phy = kzalloc(sizeof(*phy) + priv_size, |
148 | GFP_KERNEL); | 148 | GFP_KERNEL); |
149 | 149 | ||
150 | if (!phy) | ||
151 | goto out; | ||
150 | mutex_lock(&wpan_phy_mutex); | 152 | mutex_lock(&wpan_phy_mutex); |
151 | phy->idx = wpan_phy_idx++; | 153 | phy->idx = wpan_phy_idx++; |
152 | if (unlikely(!wpan_phy_idx_valid(phy->idx))) { | 154 | if (unlikely(!wpan_phy_idx_valid(phy->idx))) { |
153 | wpan_phy_idx--; | 155 | wpan_phy_idx--; |
154 | mutex_unlock(&wpan_phy_mutex); | 156 | mutex_unlock(&wpan_phy_mutex); |
155 | kfree(phy); | 157 | kfree(phy); |
156 | return NULL; | 158 | goto out; |
157 | } | 159 | } |
158 | mutex_unlock(&wpan_phy_mutex); | 160 | mutex_unlock(&wpan_phy_mutex); |
159 | 161 | ||
@@ -168,6 +170,9 @@ struct wpan_phy *wpan_phy_alloc(size_t priv_size) | |||
168 | phy->current_page = 0; /* for compatibility */ | 170 | phy->current_page = 0; /* for compatibility */ |
169 | 171 | ||
170 | return phy; | 172 | return phy; |
173 | |||
174 | out: | ||
175 | return NULL; | ||
171 | } | 176 | } |
172 | EXPORT_SYMBOL(wpan_phy_alloc); | 177 | EXPORT_SYMBOL(wpan_phy_alloc); |
173 | 178 | ||
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 8e3a1fd938ab..7c3a7d191249 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig | |||
@@ -303,7 +303,7 @@ config ARPD | |||
303 | If unsure, say N. | 303 | If unsure, say N. |
304 | 304 | ||
305 | config SYN_COOKIES | 305 | config SYN_COOKIES |
306 | bool "IP: TCP syncookie support (disabled per default)" | 306 | bool "IP: TCP syncookie support" |
307 | ---help--- | 307 | ---help--- |
308 | Normal TCP/IP networking is open to an attack known as "SYN | 308 | Normal TCP/IP networking is open to an attack known as "SYN |
309 | flooding". This denial-of-service attack prevents legitimate remote | 309 | flooding". This denial-of-service attack prevents legitimate remote |
@@ -328,13 +328,13 @@ config SYN_COOKIES | |||
328 | server is really overloaded. If this happens frequently better turn | 328 | server is really overloaded. If this happens frequently better turn |
329 | them off. | 329 | them off. |
330 | 330 | ||
331 | If you say Y here, note that SYN cookies aren't enabled by default; | 331 | If you say Y here, you can disable SYN cookies at run time by |
332 | you can enable them by saying Y to "/proc file system support" and | 332 | saying Y to "/proc file system support" and |
333 | "Sysctl support" below and executing the command | 333 | "Sysctl support" below and executing the command |
334 | 334 | ||
335 | echo 1 >/proc/sys/net/ipv4/tcp_syncookies | 335 | echo 0 > /proc/sys/net/ipv4/tcp_syncookies |
336 | 336 | ||
337 | at boot time after the /proc file system has been mounted. | 337 | after the /proc file system has been mounted. |
338 | 338 | ||
339 | If unsure, say N. | 339 | If unsure, say N. |
340 | 340 | ||
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index c6c43bcd1c6f..551ce564b035 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -1573,9 +1573,13 @@ static int __init inet_init(void) | |||
1573 | 1573 | ||
1574 | BUILD_BUG_ON(sizeof(struct inet_skb_parm) > sizeof(dummy_skb->cb)); | 1574 | BUILD_BUG_ON(sizeof(struct inet_skb_parm) > sizeof(dummy_skb->cb)); |
1575 | 1575 | ||
1576 | sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL); | ||
1577 | if (!sysctl_local_reserved_ports) | ||
1578 | goto out; | ||
1579 | |||
1576 | rc = proto_register(&tcp_prot, 1); | 1580 | rc = proto_register(&tcp_prot, 1); |
1577 | if (rc) | 1581 | if (rc) |
1578 | goto out; | 1582 | goto out_free_reserved_ports; |
1579 | 1583 | ||
1580 | rc = proto_register(&udp_prot, 1); | 1584 | rc = proto_register(&udp_prot, 1); |
1581 | if (rc) | 1585 | if (rc) |
@@ -1674,6 +1678,8 @@ out_unregister_udp_proto: | |||
1674 | proto_unregister(&udp_prot); | 1678 | proto_unregister(&udp_prot); |
1675 | out_unregister_tcp_proto: | 1679 | out_unregister_tcp_proto: |
1676 | proto_unregister(&tcp_prot); | 1680 | proto_unregister(&tcp_prot); |
1681 | out_free_reserved_ports: | ||
1682 | kfree(sysctl_local_reserved_ports); | ||
1677 | goto out; | 1683 | goto out; |
1678 | } | 1684 | } |
1679 | 1685 | ||
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 6e747065c202..f094b75810db 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -661,13 +661,13 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, | |||
661 | #endif | 661 | #endif |
662 | #endif | 662 | #endif |
663 | 663 | ||
664 | #ifdef CONFIG_FDDI | 664 | #if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE) |
665 | case ARPHRD_FDDI: | 665 | case ARPHRD_FDDI: |
666 | arp->ar_hrd = htons(ARPHRD_ETHER); | 666 | arp->ar_hrd = htons(ARPHRD_ETHER); |
667 | arp->ar_pro = htons(ETH_P_IP); | 667 | arp->ar_pro = htons(ETH_P_IP); |
668 | break; | 668 | break; |
669 | #endif | 669 | #endif |
670 | #ifdef CONFIG_TR | 670 | #if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE) |
671 | case ARPHRD_IEEE802_TR: | 671 | case ARPHRD_IEEE802_TR: |
672 | arp->ar_hrd = htons(ARPHRD_IEEE802); | 672 | arp->ar_hrd = htons(ARPHRD_IEEE802); |
673 | arp->ar_pro = htons(ETH_P_IP); | 673 | arp->ar_pro = htons(ETH_P_IP); |
@@ -854,7 +854,7 @@ static int arp_process(struct sk_buff *skb) | |||
854 | } | 854 | } |
855 | 855 | ||
856 | if (arp->ar_op == htons(ARPOP_REQUEST) && | 856 | if (arp->ar_op == htons(ARPOP_REQUEST) && |
857 | ip_route_input(skb, tip, sip, 0, dev) == 0) { | 857 | ip_route_input_noref(skb, tip, sip, 0, dev) == 0) { |
858 | 858 | ||
859 | rt = skb_rtable(skb); | 859 | rt = skb_rtable(skb); |
860 | addr_type = rt->rt_type; | 860 | addr_type = rt->rt_type; |
@@ -1051,7 +1051,7 @@ static int arp_req_set(struct net *net, struct arpreq *r, | |||
1051 | return -EINVAL; | 1051 | return -EINVAL; |
1052 | } | 1052 | } |
1053 | switch (dev->type) { | 1053 | switch (dev->type) { |
1054 | #ifdef CONFIG_FDDI | 1054 | #if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE) |
1055 | case ARPHRD_FDDI: | 1055 | case ARPHRD_FDDI: |
1056 | /* | 1056 | /* |
1057 | * According to RFC 1390, FDDI devices should accept ARP | 1057 | * According to RFC 1390, FDDI devices should accept ARP |
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index c97cd9ff697e..3a92a76ae41d 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c | |||
@@ -290,8 +290,6 @@ void cipso_v4_cache_invalidate(void) | |||
290 | cipso_v4_cache[iter].size = 0; | 290 | cipso_v4_cache[iter].size = 0; |
291 | spin_unlock_bh(&cipso_v4_cache[iter].lock); | 291 | spin_unlock_bh(&cipso_v4_cache[iter].lock); |
292 | } | 292 | } |
293 | |||
294 | return; | ||
295 | } | 293 | } |
296 | 294 | ||
297 | /** | 295 | /** |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index c98f115fb0fd..79d057a939ba 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -1022,8 +1022,6 @@ static void trie_rebalance(struct trie *t, struct tnode *tn) | |||
1022 | 1022 | ||
1023 | rcu_assign_pointer(t->trie, (struct node *)tn); | 1023 | rcu_assign_pointer(t->trie, (struct node *)tn); |
1024 | tnode_free_flush(); | 1024 | tnode_free_flush(); |
1025 | |||
1026 | return; | ||
1027 | } | 1025 | } |
1028 | 1026 | ||
1029 | /* only used from updater-side */ | 1027 | /* only used from updater-side */ |
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index f3d339f728b0..d65e9215bcd7 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
@@ -587,20 +587,20 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) | |||
587 | err = __ip_route_output_key(net, &rt2, &fl); | 587 | err = __ip_route_output_key(net, &rt2, &fl); |
588 | else { | 588 | else { |
589 | struct flowi fl2 = {}; | 589 | struct flowi fl2 = {}; |
590 | struct dst_entry *odst; | 590 | unsigned long orefdst; |
591 | 591 | ||
592 | fl2.fl4_dst = fl.fl4_src; | 592 | fl2.fl4_dst = fl.fl4_src; |
593 | if (ip_route_output_key(net, &rt2, &fl2)) | 593 | if (ip_route_output_key(net, &rt2, &fl2)) |
594 | goto relookup_failed; | 594 | goto relookup_failed; |
595 | 595 | ||
596 | /* Ugh! */ | 596 | /* Ugh! */ |
597 | odst = skb_dst(skb_in); | 597 | orefdst = skb_in->_skb_refdst; /* save old refdst */ |
598 | err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src, | 598 | err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src, |
599 | RT_TOS(tos), rt2->u.dst.dev); | 599 | RT_TOS(tos), rt2->u.dst.dev); |
600 | 600 | ||
601 | dst_release(&rt2->u.dst); | 601 | dst_release(&rt2->u.dst); |
602 | rt2 = skb_rtable(skb_in); | 602 | rt2 = skb_rtable(skb_in); |
603 | skb_dst_set(skb_in, odst); | 603 | skb_in->_skb_refdst = orefdst; /* restore old refdst */ |
604 | } | 604 | } |
605 | 605 | ||
606 | if (err) | 606 | if (err) |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index e0a3e3537b14..70eb3507c406 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -37,6 +37,9 @@ struct local_ports sysctl_local_ports __read_mostly = { | |||
37 | .range = { 32768, 61000 }, | 37 | .range = { 32768, 61000 }, |
38 | }; | 38 | }; |
39 | 39 | ||
40 | unsigned long *sysctl_local_reserved_ports; | ||
41 | EXPORT_SYMBOL(sysctl_local_reserved_ports); | ||
42 | |||
40 | void inet_get_local_port_range(int *low, int *high) | 43 | void inet_get_local_port_range(int *low, int *high) |
41 | { | 44 | { |
42 | unsigned seq; | 45 | unsigned seq; |
@@ -108,6 +111,8 @@ again: | |||
108 | 111 | ||
109 | smallest_size = -1; | 112 | smallest_size = -1; |
110 | do { | 113 | do { |
114 | if (inet_is_reserved_local_port(rover)) | ||
115 | goto next_nolock; | ||
111 | head = &hashinfo->bhash[inet_bhashfn(net, rover, | 116 | head = &hashinfo->bhash[inet_bhashfn(net, rover, |
112 | hashinfo->bhash_size)]; | 117 | hashinfo->bhash_size)]; |
113 | spin_lock(&head->lock); | 118 | spin_lock(&head->lock); |
@@ -130,6 +135,7 @@ again: | |||
130 | break; | 135 | break; |
131 | next: | 136 | next: |
132 | spin_unlock(&head->lock); | 137 | spin_unlock(&head->lock); |
138 | next_nolock: | ||
133 | if (++rover > high) | 139 | if (++rover > high) |
134 | rover = low; | 140 | rover = low; |
135 | } while (--remaining > 0); | 141 | } while (--remaining > 0); |
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 2b79377b468d..d3e160a88219 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c | |||
@@ -456,6 +456,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, | |||
456 | local_bh_disable(); | 456 | local_bh_disable(); |
457 | for (i = 1; i <= remaining; i++) { | 457 | for (i = 1; i <= remaining; i++) { |
458 | port = low + (i + offset) % remaining; | 458 | port = low + (i + offset) % remaining; |
459 | if (inet_is_reserved_local_port(port)) | ||
460 | continue; | ||
459 | head = &hinfo->bhash[inet_bhashfn(net, port, | 461 | head = &hinfo->bhash[inet_bhashfn(net, port, |
460 | hinfo->bhash_size)]; | 462 | hinfo->bhash_size)]; |
461 | spin_lock(&head->lock); | 463 | spin_lock(&head->lock); |
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index af10942b326c..56cdf68a074c 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c | |||
@@ -112,8 +112,8 @@ int ip_forward(struct sk_buff *skb) | |||
112 | 112 | ||
113 | skb->priority = rt_tos2priority(iph->tos); | 113 | skb->priority = rt_tos2priority(iph->tos); |
114 | 114 | ||
115 | return NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev, rt->u.dst.dev, | 115 | return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, |
116 | ip_forward_finish); | 116 | rt->u.dst.dev, ip_forward_finish); |
117 | 117 | ||
118 | sr_failed: | 118 | sr_failed: |
119 | /* | 119 | /* |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index fe381d12ecdd..32618e11076d 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -502,7 +502,6 @@ static void ipgre_err(struct sk_buff *skb, u32 info) | |||
502 | t->err_time = jiffies; | 502 | t->err_time = jiffies; |
503 | out: | 503 | out: |
504 | rcu_read_unlock(); | 504 | rcu_read_unlock(); |
505 | return; | ||
506 | } | 505 | } |
507 | 506 | ||
508 | static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) | 507 | static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) |
@@ -538,7 +537,6 @@ static int ipgre_rcv(struct sk_buff *skb) | |||
538 | struct ip_tunnel *tunnel; | 537 | struct ip_tunnel *tunnel; |
539 | int offset = 4; | 538 | int offset = 4; |
540 | __be16 gre_proto; | 539 | __be16 gre_proto; |
541 | unsigned int len; | ||
542 | 540 | ||
543 | if (!pskb_may_pull(skb, 16)) | 541 | if (!pskb_may_pull(skb, 16)) |
544 | goto drop_nolock; | 542 | goto drop_nolock; |
@@ -629,8 +627,6 @@ static int ipgre_rcv(struct sk_buff *skb) | |||
629 | tunnel->i_seqno = seqno + 1; | 627 | tunnel->i_seqno = seqno + 1; |
630 | } | 628 | } |
631 | 629 | ||
632 | len = skb->len; | ||
633 | |||
634 | /* Warning: All skb pointers will be invalidated! */ | 630 | /* Warning: All skb pointers will be invalidated! */ |
635 | if (tunnel->dev->type == ARPHRD_ETHER) { | 631 | if (tunnel->dev->type == ARPHRD_ETHER) { |
636 | if (!pskb_may_pull(skb, ETH_HLEN)) { | 632 | if (!pskb_may_pull(skb, ETH_HLEN)) { |
@@ -644,11 +640,7 @@ static int ipgre_rcv(struct sk_buff *skb) | |||
644 | skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); | 640 | skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); |
645 | } | 641 | } |
646 | 642 | ||
647 | stats->rx_packets++; | 643 | skb_tunnel_rx(skb, tunnel->dev); |
648 | stats->rx_bytes += len; | ||
649 | skb->dev = tunnel->dev; | ||
650 | skb_dst_drop(skb); | ||
651 | nf_reset(skb); | ||
652 | 644 | ||
653 | skb_reset_network_header(skb); | 645 | skb_reset_network_header(skb); |
654 | ipgre_ecn_decapsulate(iph, skb); | 646 | ipgre_ecn_decapsulate(iph, skb); |
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index f8ab7a380d4a..d930dc5e4d85 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
@@ -266,7 +266,7 @@ int ip_local_deliver(struct sk_buff *skb) | |||
266 | return 0; | 266 | return 0; |
267 | } | 267 | } |
268 | 268 | ||
269 | return NF_HOOK(PF_INET, NF_INET_LOCAL_IN, skb, skb->dev, NULL, | 269 | return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, skb, skb->dev, NULL, |
270 | ip_local_deliver_finish); | 270 | ip_local_deliver_finish); |
271 | } | 271 | } |
272 | 272 | ||
@@ -331,8 +331,8 @@ static int ip_rcv_finish(struct sk_buff *skb) | |||
331 | * how the packet travels inside Linux networking. | 331 | * how the packet travels inside Linux networking. |
332 | */ | 332 | */ |
333 | if (skb_dst(skb) == NULL) { | 333 | if (skb_dst(skb) == NULL) { |
334 | int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, | 334 | int err = ip_route_input_noref(skb, iph->daddr, iph->saddr, |
335 | skb->dev); | 335 | iph->tos, skb->dev); |
336 | if (unlikely(err)) { | 336 | if (unlikely(err)) { |
337 | if (err == -EHOSTUNREACH) | 337 | if (err == -EHOSTUNREACH) |
338 | IP_INC_STATS_BH(dev_net(skb->dev), | 338 | IP_INC_STATS_BH(dev_net(skb->dev), |
@@ -444,7 +444,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, | |||
444 | /* Must drop socket now because of tproxy. */ | 444 | /* Must drop socket now because of tproxy. */ |
445 | skb_orphan(skb); | 445 | skb_orphan(skb); |
446 | 446 | ||
447 | return NF_HOOK(PF_INET, NF_INET_PRE_ROUTING, skb, dev, NULL, | 447 | return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, dev, NULL, |
448 | ip_rcv_finish); | 448 | ip_rcv_finish); |
449 | 449 | ||
450 | inhdr_error: | 450 | inhdr_error: |
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 4c09a31fd140..ba9836c488ed 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c | |||
@@ -238,7 +238,6 @@ void ip_options_fragment(struct sk_buff * skb) | |||
238 | opt->rr_needaddr = 0; | 238 | opt->rr_needaddr = 0; |
239 | opt->ts_needaddr = 0; | 239 | opt->ts_needaddr = 0; |
240 | opt->ts_needtime = 0; | 240 | opt->ts_needtime = 0; |
241 | return; | ||
242 | } | 241 | } |
243 | 242 | ||
244 | /* | 243 | /* |
@@ -601,6 +600,7 @@ int ip_options_rcv_srr(struct sk_buff *skb) | |||
601 | unsigned char *optptr = skb_network_header(skb) + opt->srr; | 600 | unsigned char *optptr = skb_network_header(skb) + opt->srr; |
602 | struct rtable *rt = skb_rtable(skb); | 601 | struct rtable *rt = skb_rtable(skb); |
603 | struct rtable *rt2; | 602 | struct rtable *rt2; |
603 | unsigned long orefdst; | ||
604 | int err; | 604 | int err; |
605 | 605 | ||
606 | if (!opt->srr) | 606 | if (!opt->srr) |
@@ -624,16 +624,16 @@ int ip_options_rcv_srr(struct sk_buff *skb) | |||
624 | } | 624 | } |
625 | memcpy(&nexthop, &optptr[srrptr-1], 4); | 625 | memcpy(&nexthop, &optptr[srrptr-1], 4); |
626 | 626 | ||
627 | rt = skb_rtable(skb); | 627 | orefdst = skb->_skb_refdst; |
628 | skb_dst_set(skb, NULL); | 628 | skb_dst_set(skb, NULL); |
629 | err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev); | 629 | err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev); |
630 | rt2 = skb_rtable(skb); | 630 | rt2 = skb_rtable(skb); |
631 | if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { | 631 | if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { |
632 | ip_rt_put(rt2); | 632 | skb_dst_drop(skb); |
633 | skb_dst_set(skb, &rt->u.dst); | 633 | skb->_skb_refdst = orefdst; |
634 | return -EINVAL; | 634 | return -EINVAL; |
635 | } | 635 | } |
636 | ip_rt_put(rt); | 636 | refdst_drop(orefdst); |
637 | if (rt2->rt_type != RTN_LOCAL) | 637 | if (rt2->rt_type != RTN_LOCAL) |
638 | break; | 638 | break; |
639 | /* Superfast 8) loopback forward */ | 639 | /* Superfast 8) loopback forward */ |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index f0392191740b..9a4a6c96cb0d 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -96,8 +96,8 @@ int __ip_local_out(struct sk_buff *skb) | |||
96 | 96 | ||
97 | iph->tot_len = htons(skb->len); | 97 | iph->tot_len = htons(skb->len); |
98 | ip_send_check(iph); | 98 | ip_send_check(iph); |
99 | return nf_hook(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev, | 99 | return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL, |
100 | dst_output); | 100 | skb_dst(skb)->dev, dst_output); |
101 | } | 101 | } |
102 | 102 | ||
103 | int ip_local_out(struct sk_buff *skb) | 103 | int ip_local_out(struct sk_buff *skb) |
@@ -272,8 +272,8 @@ int ip_mc_output(struct sk_buff *skb) | |||
272 | ) { | 272 | ) { |
273 | struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); | 273 | struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); |
274 | if (newskb) | 274 | if (newskb) |
275 | NF_HOOK(PF_INET, NF_INET_POST_ROUTING, newskb, | 275 | NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, |
276 | NULL, newskb->dev, | 276 | newskb, NULL, newskb->dev, |
277 | ip_dev_loopback_xmit); | 277 | ip_dev_loopback_xmit); |
278 | } | 278 | } |
279 | 279 | ||
@@ -288,12 +288,12 @@ int ip_mc_output(struct sk_buff *skb) | |||
288 | if (rt->rt_flags&RTCF_BROADCAST) { | 288 | if (rt->rt_flags&RTCF_BROADCAST) { |
289 | struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); | 289 | struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); |
290 | if (newskb) | 290 | if (newskb) |
291 | NF_HOOK(PF_INET, NF_INET_POST_ROUTING, newskb, NULL, | 291 | NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb, |
292 | newskb->dev, ip_dev_loopback_xmit); | 292 | NULL, newskb->dev, ip_dev_loopback_xmit); |
293 | } | 293 | } |
294 | 294 | ||
295 | return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb, NULL, skb->dev, | 295 | return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, |
296 | ip_finish_output, | 296 | skb->dev, ip_finish_output, |
297 | !(IPCB(skb)->flags & IPSKB_REROUTED)); | 297 | !(IPCB(skb)->flags & IPSKB_REROUTED)); |
298 | } | 298 | } |
299 | 299 | ||
@@ -306,7 +306,7 @@ int ip_output(struct sk_buff *skb) | |||
306 | skb->dev = dev; | 306 | skb->dev = dev; |
307 | skb->protocol = htons(ETH_P_IP); | 307 | skb->protocol = htons(ETH_P_IP); |
308 | 308 | ||
309 | return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb, NULL, dev, | 309 | return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev, |
310 | ip_finish_output, | 310 | ip_finish_output, |
311 | !(IPCB(skb)->flags & IPSKB_REROUTED)); | 311 | !(IPCB(skb)->flags & IPSKB_REROUTED)); |
312 | } | 312 | } |
@@ -318,10 +318,12 @@ int ip_queue_xmit(struct sk_buff *skb) | |||
318 | struct ip_options *opt = inet->opt; | 318 | struct ip_options *opt = inet->opt; |
319 | struct rtable *rt; | 319 | struct rtable *rt; |
320 | struct iphdr *iph; | 320 | struct iphdr *iph; |
321 | int res; | ||
321 | 322 | ||
322 | /* Skip all of this if the packet is already routed, | 323 | /* Skip all of this if the packet is already routed, |
323 | * f.e. by something like SCTP. | 324 | * f.e. by something like SCTP. |
324 | */ | 325 | */ |
326 | rcu_read_lock(); | ||
325 | rt = skb_rtable(skb); | 327 | rt = skb_rtable(skb); |
326 | if (rt != NULL) | 328 | if (rt != NULL) |
327 | goto packet_routed; | 329 | goto packet_routed; |
@@ -359,7 +361,7 @@ int ip_queue_xmit(struct sk_buff *skb) | |||
359 | } | 361 | } |
360 | sk_setup_caps(sk, &rt->u.dst); | 362 | sk_setup_caps(sk, &rt->u.dst); |
361 | } | 363 | } |
362 | skb_dst_set(skb, dst_clone(&rt->u.dst)); | 364 | skb_dst_set_noref(skb, &rt->u.dst); |
363 | 365 | ||
364 | packet_routed: | 366 | packet_routed: |
365 | if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) | 367 | if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) |
@@ -391,9 +393,12 @@ packet_routed: | |||
391 | skb->priority = sk->sk_priority; | 393 | skb->priority = sk->sk_priority; |
392 | skb->mark = sk->sk_mark; | 394 | skb->mark = sk->sk_mark; |
393 | 395 | ||
394 | return ip_local_out(skb); | 396 | res = ip_local_out(skb); |
397 | rcu_read_unlock(); | ||
398 | return res; | ||
395 | 399 | ||
396 | no_route: | 400 | no_route: |
401 | rcu_read_unlock(); | ||
397 | IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); | 402 | IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); |
398 | kfree_skb(skb); | 403 | kfree_skb(skb); |
399 | return -EHOSTUNREACH; | 404 | return -EHOSTUNREACH; |
@@ -469,6 +474,10 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
469 | 474 | ||
470 | hlen = iph->ihl * 4; | 475 | hlen = iph->ihl * 4; |
471 | mtu = dst_mtu(&rt->u.dst) - hlen; /* Size of data space */ | 476 | mtu = dst_mtu(&rt->u.dst) - hlen; /* Size of data space */ |
477 | #ifdef CONFIG_BRIDGE_NETFILTER | ||
478 | if (skb->nf_bridge) | ||
479 | mtu -= nf_bridge_mtu_reduction(skb); | ||
480 | #endif | ||
472 | IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE; | 481 | IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE; |
473 | 482 | ||
474 | /* When frag_list is given, use it. First, check its validity: | 483 | /* When frag_list is given, use it. First, check its validity: |
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 0b27b14dcc9d..7fd636711037 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -374,11 +374,8 @@ static int ipip_rcv(struct sk_buff *skb) | |||
374 | skb->protocol = htons(ETH_P_IP); | 374 | skb->protocol = htons(ETH_P_IP); |
375 | skb->pkt_type = PACKET_HOST; | 375 | skb->pkt_type = PACKET_HOST; |
376 | 376 | ||
377 | tunnel->dev->stats.rx_packets++; | 377 | skb_tunnel_rx(skb, tunnel->dev); |
378 | tunnel->dev->stats.rx_bytes += skb->len; | 378 | |
379 | skb->dev = tunnel->dev; | ||
380 | skb_dst_drop(skb); | ||
381 | nf_reset(skb); | ||
382 | ipip_ecn_decapsulate(iph, skb); | 379 | ipip_ecn_decapsulate(iph, skb); |
383 | netif_rx(skb); | 380 | netif_rx(skb); |
384 | rcu_read_unlock(); | 381 | rcu_read_unlock(); |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index eddfd12f55b8..757f25eb9b4b 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -22,7 +22,7 @@ | |||
22 | * overflow. | 22 | * overflow. |
23 | * Carlos Picoto : PIMv1 Support | 23 | * Carlos Picoto : PIMv1 Support |
24 | * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header | 24 | * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header |
25 | * Relax this requrement to work with older peers. | 25 | * Relax this requirement to work with older peers. |
26 | * | 26 | * |
27 | */ | 27 | */ |
28 | 28 | ||
@@ -267,8 +267,10 @@ static void __net_exit ipmr_rules_exit(struct net *net) | |||
267 | { | 267 | { |
268 | struct mr_table *mrt, *next; | 268 | struct mr_table *mrt, *next; |
269 | 269 | ||
270 | list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) | 270 | list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { |
271 | list_del(&mrt->list); | ||
271 | kfree(mrt); | 272 | kfree(mrt); |
273 | } | ||
272 | fib_rules_unregister(net->ipv4.mr_rules_ops); | 274 | fib_rules_unregister(net->ipv4.mr_rules_ops); |
273 | } | 275 | } |
274 | #else | 276 | #else |
@@ -998,7 +1000,8 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb) | |||
998 | atomic_inc(&mrt->cache_resolve_queue_len); | 1000 | atomic_inc(&mrt->cache_resolve_queue_len); |
999 | list_add(&c->list, &mrt->mfc_unres_queue); | 1001 | list_add(&c->list, &mrt->mfc_unres_queue); |
1000 | 1002 | ||
1001 | mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires); | 1003 | if (atomic_read(&mrt->cache_resolve_queue_len) == 1) |
1004 | mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires); | ||
1002 | } | 1005 | } |
1003 | 1006 | ||
1004 | /* | 1007 | /* |
@@ -1599,13 +1602,12 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, | |||
1599 | * not mrouter) cannot join to more than one interface - it will | 1602 | * not mrouter) cannot join to more than one interface - it will |
1600 | * result in receiving multiple packets. | 1603 | * result in receiving multiple packets. |
1601 | */ | 1604 | */ |
1602 | NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev, dev, | 1605 | NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev, |
1603 | ipmr_forward_finish); | 1606 | ipmr_forward_finish); |
1604 | return; | 1607 | return; |
1605 | 1608 | ||
1606 | out_free: | 1609 | out_free: |
1607 | kfree_skb(skb); | 1610 | kfree_skb(skb); |
1608 | return; | ||
1609 | } | 1611 | } |
1610 | 1612 | ||
1611 | static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev) | 1613 | static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev) |
@@ -1830,14 +1832,12 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb, | |||
1830 | skb->mac_header = skb->network_header; | 1832 | skb->mac_header = skb->network_header; |
1831 | skb_pull(skb, (u8*)encap - skb->data); | 1833 | skb_pull(skb, (u8*)encap - skb->data); |
1832 | skb_reset_network_header(skb); | 1834 | skb_reset_network_header(skb); |
1833 | skb->dev = reg_dev; | ||
1834 | skb->protocol = htons(ETH_P_IP); | 1835 | skb->protocol = htons(ETH_P_IP); |
1835 | skb->ip_summed = 0; | 1836 | skb->ip_summed = 0; |
1836 | skb->pkt_type = PACKET_HOST; | 1837 | skb->pkt_type = PACKET_HOST; |
1837 | skb_dst_drop(skb); | 1838 | |
1838 | reg_dev->stats.rx_bytes += skb->len; | 1839 | skb_tunnel_rx(skb, reg_dev); |
1839 | reg_dev->stats.rx_packets++; | 1840 | |
1840 | nf_reset(skb); | ||
1841 | netif_rx(skb); | 1841 | netif_rx(skb); |
1842 | dev_put(reg_dev); | 1842 | dev_put(reg_dev); |
1843 | 1843 | ||
@@ -1913,7 +1913,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, | |||
1913 | struct rtattr *mp_head; | 1913 | struct rtattr *mp_head; |
1914 | 1914 | ||
1915 | /* If cache is unresolved, don't try to parse IIF and OIF */ | 1915 | /* If cache is unresolved, don't try to parse IIF and OIF */ |
1916 | if (c->mfc_parent > MAXVIFS) | 1916 | if (c->mfc_parent >= MAXVIFS) |
1917 | return -ENOENT; | 1917 | return -ENOENT; |
1918 | 1918 | ||
1919 | if (VIF_EXISTS(mrt, c->mfc_parent)) | 1919 | if (VIF_EXISTS(mrt, c->mfc_parent)) |
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index 82fb43c5c59e..07de855e2175 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c | |||
@@ -17,7 +17,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) | |||
17 | const struct iphdr *iph = ip_hdr(skb); | 17 | const struct iphdr *iph = ip_hdr(skb); |
18 | struct rtable *rt; | 18 | struct rtable *rt; |
19 | struct flowi fl = {}; | 19 | struct flowi fl = {}; |
20 | struct dst_entry *odst; | 20 | unsigned long orefdst; |
21 | unsigned int hh_len; | 21 | unsigned int hh_len; |
22 | unsigned int type; | 22 | unsigned int type; |
23 | 23 | ||
@@ -51,14 +51,14 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) | |||
51 | if (ip_route_output_key(net, &rt, &fl) != 0) | 51 | if (ip_route_output_key(net, &rt, &fl) != 0) |
52 | return -1; | 52 | return -1; |
53 | 53 | ||
54 | odst = skb_dst(skb); | 54 | orefdst = skb->_skb_refdst; |
55 | if (ip_route_input(skb, iph->daddr, iph->saddr, | 55 | if (ip_route_input(skb, iph->daddr, iph->saddr, |
56 | RT_TOS(iph->tos), rt->u.dst.dev) != 0) { | 56 | RT_TOS(iph->tos), rt->u.dst.dev) != 0) { |
57 | dst_release(&rt->u.dst); | 57 | dst_release(&rt->u.dst); |
58 | return -1; | 58 | return -1; |
59 | } | 59 | } |
60 | dst_release(&rt->u.dst); | 60 | dst_release(&rt->u.dst); |
61 | dst_release(odst); | 61 | refdst_drop(orefdst); |
62 | } | 62 | } |
63 | 63 | ||
64 | if (skb_dst(skb)->error) | 64 | if (skb_dst(skb)->error) |
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index f07d77f65751..1ac01b128621 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
@@ -49,12 +49,7 @@ MODULE_DESCRIPTION("arptables core"); | |||
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | #ifdef CONFIG_NETFILTER_DEBUG | 51 | #ifdef CONFIG_NETFILTER_DEBUG |
52 | #define ARP_NF_ASSERT(x) \ | 52 | #define ARP_NF_ASSERT(x) WARN_ON(!(x)) |
53 | do { \ | ||
54 | if (!(x)) \ | ||
55 | printk("ARP_NF_ASSERT: %s:%s:%u\n", \ | ||
56 | __func__, __FILE__, __LINE__); \ | ||
57 | } while(0) | ||
58 | #else | 53 | #else |
59 | #define ARP_NF_ASSERT(x) | 54 | #define ARP_NF_ASSERT(x) |
60 | #endif | 55 | #endif |
@@ -224,10 +219,10 @@ static inline int arp_checkentry(const struct arpt_arp *arp) | |||
224 | } | 219 | } |
225 | 220 | ||
226 | static unsigned int | 221 | static unsigned int |
227 | arpt_error(struct sk_buff *skb, const struct xt_target_param *par) | 222 | arpt_error(struct sk_buff *skb, const struct xt_action_param *par) |
228 | { | 223 | { |
229 | if (net_ratelimit()) | 224 | if (net_ratelimit()) |
230 | printk("arp_tables: error: '%s'\n", | 225 | pr_err("arp_tables: error: '%s'\n", |
231 | (const char *)par->targinfo); | 226 | (const char *)par->targinfo); |
232 | 227 | ||
233 | return NF_DROP; | 228 | return NF_DROP; |
@@ -260,12 +255,11 @@ unsigned int arpt_do_table(struct sk_buff *skb, | |||
260 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); | 255 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); |
261 | unsigned int verdict = NF_DROP; | 256 | unsigned int verdict = NF_DROP; |
262 | const struct arphdr *arp; | 257 | const struct arphdr *arp; |
263 | bool hotdrop = false; | ||
264 | struct arpt_entry *e, *back; | 258 | struct arpt_entry *e, *back; |
265 | const char *indev, *outdev; | 259 | const char *indev, *outdev; |
266 | void *table_base; | 260 | void *table_base; |
267 | const struct xt_table_info *private; | 261 | const struct xt_table_info *private; |
268 | struct xt_target_param tgpar; | 262 | struct xt_action_param acpar; |
269 | 263 | ||
270 | if (!pskb_may_pull(skb, arp_hdr_len(skb->dev))) | 264 | if (!pskb_may_pull(skb, arp_hdr_len(skb->dev))) |
271 | return NF_DROP; | 265 | return NF_DROP; |
@@ -280,10 +274,11 @@ unsigned int arpt_do_table(struct sk_buff *skb, | |||
280 | e = get_entry(table_base, private->hook_entry[hook]); | 274 | e = get_entry(table_base, private->hook_entry[hook]); |
281 | back = get_entry(table_base, private->underflow[hook]); | 275 | back = get_entry(table_base, private->underflow[hook]); |
282 | 276 | ||
283 | tgpar.in = in; | 277 | acpar.in = in; |
284 | tgpar.out = out; | 278 | acpar.out = out; |
285 | tgpar.hooknum = hook; | 279 | acpar.hooknum = hook; |
286 | tgpar.family = NFPROTO_ARP; | 280 | acpar.family = NFPROTO_ARP; |
281 | acpar.hotdrop = false; | ||
287 | 282 | ||
288 | arp = arp_hdr(skb); | 283 | arp = arp_hdr(skb); |
289 | do { | 284 | do { |
@@ -333,9 +328,9 @@ unsigned int arpt_do_table(struct sk_buff *skb, | |||
333 | /* Targets which reenter must return | 328 | /* Targets which reenter must return |
334 | * abs. verdicts | 329 | * abs. verdicts |
335 | */ | 330 | */ |
336 | tgpar.target = t->u.kernel.target; | 331 | acpar.target = t->u.kernel.target; |
337 | tgpar.targinfo = t->data; | 332 | acpar.targinfo = t->data; |
338 | verdict = t->u.kernel.target->target(skb, &tgpar); | 333 | verdict = t->u.kernel.target->target(skb, &acpar); |
339 | 334 | ||
340 | /* Target might have changed stuff. */ | 335 | /* Target might have changed stuff. */ |
341 | arp = arp_hdr(skb); | 336 | arp = arp_hdr(skb); |
@@ -345,10 +340,10 @@ unsigned int arpt_do_table(struct sk_buff *skb, | |||
345 | else | 340 | else |
346 | /* Verdict */ | 341 | /* Verdict */ |
347 | break; | 342 | break; |
348 | } while (!hotdrop); | 343 | } while (!acpar.hotdrop); |
349 | xt_info_rdunlock_bh(); | 344 | xt_info_rdunlock_bh(); |
350 | 345 | ||
351 | if (hotdrop) | 346 | if (acpar.hotdrop) |
352 | return NF_DROP; | 347 | return NF_DROP; |
353 | else | 348 | else |
354 | return verdict; | 349 | return verdict; |
@@ -390,7 +385,7 @@ static int mark_source_chains(const struct xt_table_info *newinfo, | |||
390 | int visited = e->comefrom & (1 << hook); | 385 | int visited = e->comefrom & (1 << hook); |
391 | 386 | ||
392 | if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) { | 387 | if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) { |
393 | printk("arptables: loop hook %u pos %u %08X.\n", | 388 | pr_notice("arptables: loop hook %u pos %u %08X.\n", |
394 | hook, pos, e->comefrom); | 389 | hook, pos, e->comefrom); |
395 | return 0; | 390 | return 0; |
396 | } | 391 | } |
@@ -523,13 +518,11 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size) | |||
523 | return ret; | 518 | return ret; |
524 | 519 | ||
525 | t = arpt_get_target(e); | 520 | t = arpt_get_target(e); |
526 | target = try_then_request_module(xt_find_target(NFPROTO_ARP, | 521 | target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, |
527 | t->u.user.name, | 522 | t->u.user.revision); |
528 | t->u.user.revision), | 523 | if (IS_ERR(target)) { |
529 | "arpt_%s", t->u.user.name); | ||
530 | if (IS_ERR(target) || !target) { | ||
531 | duprintf("find_check_entry: `%s' not found\n", t->u.user.name); | 524 | duprintf("find_check_entry: `%s' not found\n", t->u.user.name); |
532 | ret = target ? PTR_ERR(target) : -ENOENT; | 525 | ret = PTR_ERR(target); |
533 | goto out; | 526 | goto out; |
534 | } | 527 | } |
535 | t->u.kernel.target = target; | 528 | t->u.kernel.target = target; |
@@ -651,6 +644,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, | |||
651 | if (ret != 0) | 644 | if (ret != 0) |
652 | break; | 645 | break; |
653 | ++i; | 646 | ++i; |
647 | if (strcmp(arpt_get_target(iter)->u.user.name, | ||
648 | XT_ERROR_TARGET) == 0) | ||
649 | ++newinfo->stacksize; | ||
654 | } | 650 | } |
655 | duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret); | 651 | duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret); |
656 | if (ret != 0) | 652 | if (ret != 0) |
@@ -1252,14 +1248,12 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, | |||
1252 | entry_offset = (void *)e - (void *)base; | 1248 | entry_offset = (void *)e - (void *)base; |
1253 | 1249 | ||
1254 | t = compat_arpt_get_target(e); | 1250 | t = compat_arpt_get_target(e); |
1255 | target = try_then_request_module(xt_find_target(NFPROTO_ARP, | 1251 | target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, |
1256 | t->u.user.name, | 1252 | t->u.user.revision); |
1257 | t->u.user.revision), | 1253 | if (IS_ERR(target)) { |
1258 | "arpt_%s", t->u.user.name); | ||
1259 | if (IS_ERR(target) || !target) { | ||
1260 | duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", | 1254 | duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", |
1261 | t->u.user.name); | 1255 | t->u.user.name); |
1262 | ret = target ? PTR_ERR(target) : -ENOENT; | 1256 | ret = PTR_ERR(target); |
1263 | goto out; | 1257 | goto out; |
1264 | } | 1258 | } |
1265 | t->u.kernel.target = target; | 1259 | t->u.kernel.target = target; |
@@ -1778,8 +1772,7 @@ struct xt_table *arpt_register_table(struct net *net, | |||
1778 | { | 1772 | { |
1779 | int ret; | 1773 | int ret; |
1780 | struct xt_table_info *newinfo; | 1774 | struct xt_table_info *newinfo; |
1781 | struct xt_table_info bootstrap | 1775 | struct xt_table_info bootstrap = {0}; |
1782 | = { 0, 0, 0, { 0 }, { 0 }, { } }; | ||
1783 | void *loc_cpu_entry; | 1776 | void *loc_cpu_entry; |
1784 | struct xt_table *new_table; | 1777 | struct xt_table *new_table; |
1785 | 1778 | ||
@@ -1830,22 +1823,23 @@ void arpt_unregister_table(struct xt_table *table) | |||
1830 | } | 1823 | } |
1831 | 1824 | ||
1832 | /* The built-in targets: standard (NULL) and error. */ | 1825 | /* The built-in targets: standard (NULL) and error. */ |
1833 | static struct xt_target arpt_standard_target __read_mostly = { | 1826 | static struct xt_target arpt_builtin_tg[] __read_mostly = { |
1834 | .name = ARPT_STANDARD_TARGET, | 1827 | { |
1835 | .targetsize = sizeof(int), | 1828 | .name = ARPT_STANDARD_TARGET, |
1836 | .family = NFPROTO_ARP, | 1829 | .targetsize = sizeof(int), |
1830 | .family = NFPROTO_ARP, | ||
1837 | #ifdef CONFIG_COMPAT | 1831 | #ifdef CONFIG_COMPAT |
1838 | .compatsize = sizeof(compat_int_t), | 1832 | .compatsize = sizeof(compat_int_t), |
1839 | .compat_from_user = compat_standard_from_user, | 1833 | .compat_from_user = compat_standard_from_user, |
1840 | .compat_to_user = compat_standard_to_user, | 1834 | .compat_to_user = compat_standard_to_user, |
1841 | #endif | 1835 | #endif |
1842 | }; | 1836 | }, |
1843 | 1837 | { | |
1844 | static struct xt_target arpt_error_target __read_mostly = { | 1838 | .name = ARPT_ERROR_TARGET, |
1845 | .name = ARPT_ERROR_TARGET, | 1839 | .target = arpt_error, |
1846 | .target = arpt_error, | 1840 | .targetsize = ARPT_FUNCTION_MAXNAMELEN, |
1847 | .targetsize = ARPT_FUNCTION_MAXNAMELEN, | 1841 | .family = NFPROTO_ARP, |
1848 | .family = NFPROTO_ARP, | 1842 | }, |
1849 | }; | 1843 | }; |
1850 | 1844 | ||
1851 | static struct nf_sockopt_ops arpt_sockopts = { | 1845 | static struct nf_sockopt_ops arpt_sockopts = { |
@@ -1889,12 +1883,9 @@ static int __init arp_tables_init(void) | |||
1889 | goto err1; | 1883 | goto err1; |
1890 | 1884 | ||
1891 | /* Noone else will be downing sem now, so we won't sleep */ | 1885 | /* Noone else will be downing sem now, so we won't sleep */ |
1892 | ret = xt_register_target(&arpt_standard_target); | 1886 | ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); |
1893 | if (ret < 0) | 1887 | if (ret < 0) |
1894 | goto err2; | 1888 | goto err2; |
1895 | ret = xt_register_target(&arpt_error_target); | ||
1896 | if (ret < 0) | ||
1897 | goto err3; | ||
1898 | 1889 | ||
1899 | /* Register setsockopt */ | 1890 | /* Register setsockopt */ |
1900 | ret = nf_register_sockopt(&arpt_sockopts); | 1891 | ret = nf_register_sockopt(&arpt_sockopts); |
@@ -1905,9 +1896,7 @@ static int __init arp_tables_init(void) | |||
1905 | return 0; | 1896 | return 0; |
1906 | 1897 | ||
1907 | err4: | 1898 | err4: |
1908 | xt_unregister_target(&arpt_error_target); | 1899 | xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); |
1909 | err3: | ||
1910 | xt_unregister_target(&arpt_standard_target); | ||
1911 | err2: | 1900 | err2: |
1912 | unregister_pernet_subsys(&arp_tables_net_ops); | 1901 | unregister_pernet_subsys(&arp_tables_net_ops); |
1913 | err1: | 1902 | err1: |
@@ -1917,8 +1906,7 @@ err1: | |||
1917 | static void __exit arp_tables_fini(void) | 1906 | static void __exit arp_tables_fini(void) |
1918 | { | 1907 | { |
1919 | nf_unregister_sockopt(&arpt_sockopts); | 1908 | nf_unregister_sockopt(&arpt_sockopts); |
1920 | xt_unregister_target(&arpt_error_target); | 1909 | xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); |
1921 | xt_unregister_target(&arpt_standard_target); | ||
1922 | unregister_pernet_subsys(&arp_tables_net_ops); | 1910 | unregister_pernet_subsys(&arp_tables_net_ops); |
1923 | } | 1911 | } |
1924 | 1912 | ||
diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c index b0d5b1d0a769..e1be7dd1171b 100644 --- a/net/ipv4/netfilter/arpt_mangle.c +++ b/net/ipv4/netfilter/arpt_mangle.c | |||
@@ -9,7 +9,7 @@ MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>"); | |||
9 | MODULE_DESCRIPTION("arptables arp payload mangle target"); | 9 | MODULE_DESCRIPTION("arptables arp payload mangle target"); |
10 | 10 | ||
11 | static unsigned int | 11 | static unsigned int |
12 | target(struct sk_buff *skb, const struct xt_target_param *par) | 12 | target(struct sk_buff *skb, const struct xt_action_param *par) |
13 | { | 13 | { |
14 | const struct arpt_mangle *mangle = par->targinfo; | 14 | const struct arpt_mangle *mangle = par->targinfo; |
15 | const struct arphdr *arp; | 15 | const struct arphdr *arp; |
@@ -54,7 +54,7 @@ target(struct sk_buff *skb, const struct xt_target_param *par) | |||
54 | return mangle->target; | 54 | return mangle->target; |
55 | } | 55 | } |
56 | 56 | ||
57 | static bool checkentry(const struct xt_tgchk_param *par) | 57 | static int checkentry(const struct xt_tgchk_param *par) |
58 | { | 58 | { |
59 | const struct arpt_mangle *mangle = par->targinfo; | 59 | const struct arpt_mangle *mangle = par->targinfo; |
60 | 60 | ||
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index e2787048aa0a..a4e5fc5df4bf 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c | |||
@@ -161,8 +161,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp) | |||
161 | break; | 161 | break; |
162 | 162 | ||
163 | case IPQ_COPY_PACKET: | 163 | case IPQ_COPY_PACKET: |
164 | if ((entry->skb->ip_summed == CHECKSUM_PARTIAL || | 164 | if (entry->skb->ip_summed == CHECKSUM_PARTIAL && |
165 | entry->skb->ip_summed == CHECKSUM_COMPLETE) && | ||
166 | (*errp = skb_checksum_help(entry->skb))) { | 165 | (*errp = skb_checksum_help(entry->skb))) { |
167 | read_unlock_bh(&queue_lock); | 166 | read_unlock_bh(&queue_lock); |
168 | return NULL; | 167 | return NULL; |
@@ -462,7 +461,6 @@ __ipq_rcv_skb(struct sk_buff *skb) | |||
462 | 461 | ||
463 | if (flags & NLM_F_ACK) | 462 | if (flags & NLM_F_ACK) |
464 | netlink_ack(skb, nlh, 0); | 463 | netlink_ack(skb, nlh, 0); |
465 | return; | ||
466 | } | 464 | } |
467 | 465 | ||
468 | static void | 466 | static void |
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index b29c66df8d1f..4b6c5ca610fc 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -39,24 +39,19 @@ MODULE_DESCRIPTION("IPv4 packet filter"); | |||
39 | /*#define DEBUG_IP_FIREWALL_USER*/ | 39 | /*#define DEBUG_IP_FIREWALL_USER*/ |
40 | 40 | ||
41 | #ifdef DEBUG_IP_FIREWALL | 41 | #ifdef DEBUG_IP_FIREWALL |
42 | #define dprintf(format, args...) printk(format , ## args) | 42 | #define dprintf(format, args...) pr_info(format , ## args) |
43 | #else | 43 | #else |
44 | #define dprintf(format, args...) | 44 | #define dprintf(format, args...) |
45 | #endif | 45 | #endif |
46 | 46 | ||
47 | #ifdef DEBUG_IP_FIREWALL_USER | 47 | #ifdef DEBUG_IP_FIREWALL_USER |
48 | #define duprintf(format, args...) printk(format , ## args) | 48 | #define duprintf(format, args...) pr_info(format , ## args) |
49 | #else | 49 | #else |
50 | #define duprintf(format, args...) | 50 | #define duprintf(format, args...) |
51 | #endif | 51 | #endif |
52 | 52 | ||
53 | #ifdef CONFIG_NETFILTER_DEBUG | 53 | #ifdef CONFIG_NETFILTER_DEBUG |
54 | #define IP_NF_ASSERT(x) \ | 54 | #define IP_NF_ASSERT(x) WARN_ON(!(x)) |
55 | do { \ | ||
56 | if (!(x)) \ | ||
57 | printk("IP_NF_ASSERT: %s:%s:%u\n", \ | ||
58 | __func__, __FILE__, __LINE__); \ | ||
59 | } while(0) | ||
60 | #else | 55 | #else |
61 | #define IP_NF_ASSERT(x) | 56 | #define IP_NF_ASSERT(x) |
62 | #endif | 57 | #endif |
@@ -165,30 +160,14 @@ ip_checkentry(const struct ipt_ip *ip) | |||
165 | } | 160 | } |
166 | 161 | ||
167 | static unsigned int | 162 | static unsigned int |
168 | ipt_error(struct sk_buff *skb, const struct xt_target_param *par) | 163 | ipt_error(struct sk_buff *skb, const struct xt_action_param *par) |
169 | { | 164 | { |
170 | if (net_ratelimit()) | 165 | if (net_ratelimit()) |
171 | printk("ip_tables: error: `%s'\n", | 166 | pr_info("error: `%s'\n", (const char *)par->targinfo); |
172 | (const char *)par->targinfo); | ||
173 | 167 | ||
174 | return NF_DROP; | 168 | return NF_DROP; |
175 | } | 169 | } |
176 | 170 | ||
177 | /* Performance critical - called for every packet */ | ||
178 | static inline bool | ||
179 | do_match(const struct ipt_entry_match *m, const struct sk_buff *skb, | ||
180 | struct xt_match_param *par) | ||
181 | { | ||
182 | par->match = m->u.kernel.match; | ||
183 | par->matchinfo = m->data; | ||
184 | |||
185 | /* Stop iteration if it doesn't match */ | ||
186 | if (!m->u.kernel.match->match(skb, par)) | ||
187 | return true; | ||
188 | else | ||
189 | return false; | ||
190 | } | ||
191 | |||
192 | /* Performance critical */ | 171 | /* Performance critical */ |
193 | static inline struct ipt_entry * | 172 | static inline struct ipt_entry * |
194 | get_entry(const void *base, unsigned int offset) | 173 | get_entry(const void *base, unsigned int offset) |
@@ -322,19 +301,16 @@ ipt_do_table(struct sk_buff *skb, | |||
322 | const struct net_device *out, | 301 | const struct net_device *out, |
323 | struct xt_table *table) | 302 | struct xt_table *table) |
324 | { | 303 | { |
325 | #define tb_comefrom ((struct ipt_entry *)table_base)->comefrom | ||
326 | |||
327 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); | 304 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); |
328 | const struct iphdr *ip; | 305 | const struct iphdr *ip; |
329 | bool hotdrop = false; | ||
330 | /* Initializing verdict to NF_DROP keeps gcc happy. */ | 306 | /* Initializing verdict to NF_DROP keeps gcc happy. */ |
331 | unsigned int verdict = NF_DROP; | 307 | unsigned int verdict = NF_DROP; |
332 | const char *indev, *outdev; | 308 | const char *indev, *outdev; |
333 | const void *table_base; | 309 | const void *table_base; |
334 | struct ipt_entry *e, *back; | 310 | struct ipt_entry *e, **jumpstack; |
311 | unsigned int *stackptr, origptr, cpu; | ||
335 | const struct xt_table_info *private; | 312 | const struct xt_table_info *private; |
336 | struct xt_match_param mtpar; | 313 | struct xt_action_param acpar; |
337 | struct xt_target_param tgpar; | ||
338 | 314 | ||
339 | /* Initialization */ | 315 | /* Initialization */ |
340 | ip = ip_hdr(skb); | 316 | ip = ip_hdr(skb); |
@@ -346,40 +322,47 @@ ipt_do_table(struct sk_buff *skb, | |||
346 | * things we don't know, ie. tcp syn flag or ports). If the | 322 | * things we don't know, ie. tcp syn flag or ports). If the |
347 | * rule is also a fragment-specific rule, non-fragments won't | 323 | * rule is also a fragment-specific rule, non-fragments won't |
348 | * match it. */ | 324 | * match it. */ |
349 | mtpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET; | 325 | acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET; |
350 | mtpar.thoff = ip_hdrlen(skb); | 326 | acpar.thoff = ip_hdrlen(skb); |
351 | mtpar.hotdrop = &hotdrop; | 327 | acpar.hotdrop = false; |
352 | mtpar.in = tgpar.in = in; | 328 | acpar.in = in; |
353 | mtpar.out = tgpar.out = out; | 329 | acpar.out = out; |
354 | mtpar.family = tgpar.family = NFPROTO_IPV4; | 330 | acpar.family = NFPROTO_IPV4; |
355 | mtpar.hooknum = tgpar.hooknum = hook; | 331 | acpar.hooknum = hook; |
356 | 332 | ||
357 | IP_NF_ASSERT(table->valid_hooks & (1 << hook)); | 333 | IP_NF_ASSERT(table->valid_hooks & (1 << hook)); |
358 | xt_info_rdlock_bh(); | 334 | xt_info_rdlock_bh(); |
359 | private = table->private; | 335 | private = table->private; |
360 | table_base = private->entries[smp_processor_id()]; | 336 | cpu = smp_processor_id(); |
337 | table_base = private->entries[cpu]; | ||
338 | jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; | ||
339 | stackptr = per_cpu_ptr(private->stackptr, cpu); | ||
340 | origptr = *stackptr; | ||
361 | 341 | ||
362 | e = get_entry(table_base, private->hook_entry[hook]); | 342 | e = get_entry(table_base, private->hook_entry[hook]); |
363 | 343 | ||
364 | /* For return from builtin chain */ | 344 | pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n", |
365 | back = get_entry(table_base, private->underflow[hook]); | 345 | table->name, hook, origptr, |
346 | get_entry(table_base, private->underflow[hook])); | ||
366 | 347 | ||
367 | do { | 348 | do { |
368 | const struct ipt_entry_target *t; | 349 | const struct ipt_entry_target *t; |
369 | const struct xt_entry_match *ematch; | 350 | const struct xt_entry_match *ematch; |
370 | 351 | ||
371 | IP_NF_ASSERT(e); | 352 | IP_NF_ASSERT(e); |
372 | IP_NF_ASSERT(back); | ||
373 | if (!ip_packet_match(ip, indev, outdev, | 353 | if (!ip_packet_match(ip, indev, outdev, |
374 | &e->ip, mtpar.fragoff)) { | 354 | &e->ip, acpar.fragoff)) { |
375 | no_match: | 355 | no_match: |
376 | e = ipt_next_entry(e); | 356 | e = ipt_next_entry(e); |
377 | continue; | 357 | continue; |
378 | } | 358 | } |
379 | 359 | ||
380 | xt_ematch_foreach(ematch, e) | 360 | xt_ematch_foreach(ematch, e) { |
381 | if (do_match(ematch, skb, &mtpar) != 0) | 361 | acpar.match = ematch->u.kernel.match; |
362 | acpar.matchinfo = ematch->data; | ||
363 | if (!acpar.match->match(skb, &acpar)) | ||
382 | goto no_match; | 364 | goto no_match; |
365 | } | ||
383 | 366 | ||
384 | ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1); | 367 | ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1); |
385 | 368 | ||
@@ -404,41 +387,38 @@ ipt_do_table(struct sk_buff *skb, | |||
404 | verdict = (unsigned)(-v) - 1; | 387 | verdict = (unsigned)(-v) - 1; |
405 | break; | 388 | break; |
406 | } | 389 | } |
407 | e = back; | 390 | if (*stackptr == 0) { |
408 | back = get_entry(table_base, back->comefrom); | 391 | e = get_entry(table_base, |
392 | private->underflow[hook]); | ||
393 | pr_debug("Underflow (this is normal) " | ||
394 | "to %p\n", e); | ||
395 | } else { | ||
396 | e = jumpstack[--*stackptr]; | ||
397 | pr_debug("Pulled %p out from pos %u\n", | ||
398 | e, *stackptr); | ||
399 | e = ipt_next_entry(e); | ||
400 | } | ||
409 | continue; | 401 | continue; |
410 | } | 402 | } |
411 | if (table_base + v != ipt_next_entry(e) && | 403 | if (table_base + v != ipt_next_entry(e) && |
412 | !(e->ip.flags & IPT_F_GOTO)) { | 404 | !(e->ip.flags & IPT_F_GOTO)) { |
413 | /* Save old back ptr in next entry */ | 405 | if (*stackptr >= private->stacksize) { |
414 | struct ipt_entry *next = ipt_next_entry(e); | 406 | verdict = NF_DROP; |
415 | next->comefrom = (void *)back - table_base; | 407 | break; |
416 | /* set back pointer to next entry */ | 408 | } |
417 | back = next; | 409 | jumpstack[(*stackptr)++] = e; |
410 | pr_debug("Pushed %p into pos %u\n", | ||
411 | e, *stackptr - 1); | ||
418 | } | 412 | } |
419 | 413 | ||
420 | e = get_entry(table_base, v); | 414 | e = get_entry(table_base, v); |
421 | continue; | 415 | continue; |
422 | } | 416 | } |
423 | 417 | ||
424 | /* Targets which reenter must return | 418 | acpar.target = t->u.kernel.target; |
425 | abs. verdicts */ | 419 | acpar.targinfo = t->data; |
426 | tgpar.target = t->u.kernel.target; | ||
427 | tgpar.targinfo = t->data; | ||
428 | |||
429 | 420 | ||
430 | #ifdef CONFIG_NETFILTER_DEBUG | 421 | verdict = t->u.kernel.target->target(skb, &acpar); |
431 | tb_comefrom = 0xeeeeeeec; | ||
432 | #endif | ||
433 | verdict = t->u.kernel.target->target(skb, &tgpar); | ||
434 | #ifdef CONFIG_NETFILTER_DEBUG | ||
435 | if (tb_comefrom != 0xeeeeeeec && verdict == IPT_CONTINUE) { | ||
436 | printk("Target %s reentered!\n", | ||
437 | t->u.kernel.target->name); | ||
438 | verdict = NF_DROP; | ||
439 | } | ||
440 | tb_comefrom = 0x57acc001; | ||
441 | #endif | ||
442 | /* Target might have changed stuff. */ | 422 | /* Target might have changed stuff. */ |
443 | ip = ip_hdr(skb); | 423 | ip = ip_hdr(skb); |
444 | if (verdict == IPT_CONTINUE) | 424 | if (verdict == IPT_CONTINUE) |
@@ -446,18 +426,18 @@ ipt_do_table(struct sk_buff *skb, | |||
446 | else | 426 | else |
447 | /* Verdict */ | 427 | /* Verdict */ |
448 | break; | 428 | break; |
449 | } while (!hotdrop); | 429 | } while (!acpar.hotdrop); |
450 | xt_info_rdunlock_bh(); | 430 | xt_info_rdunlock_bh(); |
451 | 431 | pr_debug("Exiting %s; resetting sp from %u to %u\n", | |
432 | __func__, *stackptr, origptr); | ||
433 | *stackptr = origptr; | ||
452 | #ifdef DEBUG_ALLOW_ALL | 434 | #ifdef DEBUG_ALLOW_ALL |
453 | return NF_ACCEPT; | 435 | return NF_ACCEPT; |
454 | #else | 436 | #else |
455 | if (hotdrop) | 437 | if (acpar.hotdrop) |
456 | return NF_DROP; | 438 | return NF_DROP; |
457 | else return verdict; | 439 | else return verdict; |
458 | #endif | 440 | #endif |
459 | |||
460 | #undef tb_comefrom | ||
461 | } | 441 | } |
462 | 442 | ||
463 | /* Figures out from what hook each rule can be called: returns 0 if | 443 | /* Figures out from what hook each rule can be called: returns 0 if |
@@ -486,7 +466,7 @@ mark_source_chains(const struct xt_table_info *newinfo, | |||
486 | int visited = e->comefrom & (1 << hook); | 466 | int visited = e->comefrom & (1 << hook); |
487 | 467 | ||
488 | if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { | 468 | if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { |
489 | printk("iptables: loop hook %u pos %u %08X.\n", | 469 | pr_err("iptables: loop hook %u pos %u %08X.\n", |
490 | hook, pos, e->comefrom); | 470 | hook, pos, e->comefrom); |
491 | return 0; | 471 | return 0; |
492 | } | 472 | } |
@@ -591,7 +571,7 @@ check_entry(const struct ipt_entry *e, const char *name) | |||
591 | const struct ipt_entry_target *t; | 571 | const struct ipt_entry_target *t; |
592 | 572 | ||
593 | if (!ip_checkentry(&e->ip)) { | 573 | if (!ip_checkentry(&e->ip)) { |
594 | duprintf("ip_tables: ip check failed %p %s.\n", e, name); | 574 | duprintf("ip check failed %p %s.\n", e, par->match->name); |
595 | return -EINVAL; | 575 | return -EINVAL; |
596 | } | 576 | } |
597 | 577 | ||
@@ -618,8 +598,7 @@ check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par) | |||
618 | ret = xt_check_match(par, m->u.match_size - sizeof(*m), | 598 | ret = xt_check_match(par, m->u.match_size - sizeof(*m), |
619 | ip->proto, ip->invflags & IPT_INV_PROTO); | 599 | ip->proto, ip->invflags & IPT_INV_PROTO); |
620 | if (ret < 0) { | 600 | if (ret < 0) { |
621 | duprintf("ip_tables: check failed for `%s'.\n", | 601 | duprintf("check failed for `%s'.\n", par->match->name); |
622 | par.match->name); | ||
623 | return ret; | 602 | return ret; |
624 | } | 603 | } |
625 | return 0; | 604 | return 0; |
@@ -631,12 +610,11 @@ find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par) | |||
631 | struct xt_match *match; | 610 | struct xt_match *match; |
632 | int ret; | 611 | int ret; |
633 | 612 | ||
634 | match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name, | 613 | match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name, |
635 | m->u.user.revision), | 614 | m->u.user.revision); |
636 | "ipt_%s", m->u.user.name); | 615 | if (IS_ERR(match)) { |
637 | if (IS_ERR(match) || !match) { | ||
638 | duprintf("find_check_match: `%s' not found\n", m->u.user.name); | 616 | duprintf("find_check_match: `%s' not found\n", m->u.user.name); |
639 | return match ? PTR_ERR(match) : -ENOENT; | 617 | return PTR_ERR(match); |
640 | } | 618 | } |
641 | m->u.kernel.match = match; | 619 | m->u.kernel.match = match; |
642 | 620 | ||
@@ -667,7 +645,7 @@ static int check_target(struct ipt_entry *e, struct net *net, const char *name) | |||
667 | ret = xt_check_target(&par, t->u.target_size - sizeof(*t), | 645 | ret = xt_check_target(&par, t->u.target_size - sizeof(*t), |
668 | e->ip.proto, e->ip.invflags & IPT_INV_PROTO); | 646 | e->ip.proto, e->ip.invflags & IPT_INV_PROTO); |
669 | if (ret < 0) { | 647 | if (ret < 0) { |
670 | duprintf("ip_tables: check failed for `%s'.\n", | 648 | duprintf("check failed for `%s'.\n", |
671 | t->u.kernel.target->name); | 649 | t->u.kernel.target->name); |
672 | return ret; | 650 | return ret; |
673 | } | 651 | } |
@@ -703,13 +681,11 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name, | |||
703 | } | 681 | } |
704 | 682 | ||
705 | t = ipt_get_target(e); | 683 | t = ipt_get_target(e); |
706 | target = try_then_request_module(xt_find_target(AF_INET, | 684 | target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name, |
707 | t->u.user.name, | 685 | t->u.user.revision); |
708 | t->u.user.revision), | 686 | if (IS_ERR(target)) { |
709 | "ipt_%s", t->u.user.name); | ||
710 | if (IS_ERR(target) || !target) { | ||
711 | duprintf("find_check_entry: `%s' not found\n", t->u.user.name); | 687 | duprintf("find_check_entry: `%s' not found\n", t->u.user.name); |
712 | ret = target ? PTR_ERR(target) : -ENOENT; | 688 | ret = PTR_ERR(target); |
713 | goto cleanup_matches; | 689 | goto cleanup_matches; |
714 | } | 690 | } |
715 | t->u.kernel.target = target; | 691 | t->u.kernel.target = target; |
@@ -843,6 +819,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, | |||
843 | if (ret != 0) | 819 | if (ret != 0) |
844 | return ret; | 820 | return ret; |
845 | ++i; | 821 | ++i; |
822 | if (strcmp(ipt_get_target(iter)->u.user.name, | ||
823 | XT_ERROR_TARGET) == 0) | ||
824 | ++newinfo->stacksize; | ||
846 | } | 825 | } |
847 | 826 | ||
848 | if (i != repl->num_entries) { | 827 | if (i != repl->num_entries) { |
@@ -1311,7 +1290,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len) | |||
1311 | if (ret != 0) | 1290 | if (ret != 0) |
1312 | goto free_newinfo; | 1291 | goto free_newinfo; |
1313 | 1292 | ||
1314 | duprintf("ip_tables: Translated table\n"); | 1293 | duprintf("Translated table\n"); |
1315 | 1294 | ||
1316 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, | 1295 | ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, |
1317 | tmp.num_counters, tmp.counters); | 1296 | tmp.num_counters, tmp.counters); |
@@ -1476,13 +1455,12 @@ compat_find_calc_match(struct ipt_entry_match *m, | |||
1476 | { | 1455 | { |
1477 | struct xt_match *match; | 1456 | struct xt_match *match; |
1478 | 1457 | ||
1479 | match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name, | 1458 | match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name, |
1480 | m->u.user.revision), | 1459 | m->u.user.revision); |
1481 | "ipt_%s", m->u.user.name); | 1460 | if (IS_ERR(match)) { |
1482 | if (IS_ERR(match) || !match) { | ||
1483 | duprintf("compat_check_calc_match: `%s' not found\n", | 1461 | duprintf("compat_check_calc_match: `%s' not found\n", |
1484 | m->u.user.name); | 1462 | m->u.user.name); |
1485 | return match ? PTR_ERR(match) : -ENOENT; | 1463 | return PTR_ERR(match); |
1486 | } | 1464 | } |
1487 | m->u.kernel.match = match; | 1465 | m->u.kernel.match = match; |
1488 | *size += xt_compat_match_offset(match); | 1466 | *size += xt_compat_match_offset(match); |
@@ -1549,14 +1527,12 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, | |||
1549 | } | 1527 | } |
1550 | 1528 | ||
1551 | t = compat_ipt_get_target(e); | 1529 | t = compat_ipt_get_target(e); |
1552 | target = try_then_request_module(xt_find_target(AF_INET, | 1530 | target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name, |
1553 | t->u.user.name, | 1531 | t->u.user.revision); |
1554 | t->u.user.revision), | 1532 | if (IS_ERR(target)) { |
1555 | "ipt_%s", t->u.user.name); | ||
1556 | if (IS_ERR(target) || !target) { | ||
1557 | duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", | 1533 | duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", |
1558 | t->u.user.name); | 1534 | t->u.user.name); |
1559 | ret = target ? PTR_ERR(target) : -ENOENT; | 1535 | ret = PTR_ERR(target); |
1560 | goto release_matches; | 1536 | goto release_matches; |
1561 | } | 1537 | } |
1562 | t->u.kernel.target = target; | 1538 | t->u.kernel.target = target; |
@@ -2094,8 +2070,7 @@ struct xt_table *ipt_register_table(struct net *net, | |||
2094 | { | 2070 | { |
2095 | int ret; | 2071 | int ret; |
2096 | struct xt_table_info *newinfo; | 2072 | struct xt_table_info *newinfo; |
2097 | struct xt_table_info bootstrap | 2073 | struct xt_table_info bootstrap = {0}; |
2098 | = { 0, 0, 0, { 0 }, { 0 }, { } }; | ||
2099 | void *loc_cpu_entry; | 2074 | void *loc_cpu_entry; |
2100 | struct xt_table *new_table; | 2075 | struct xt_table *new_table; |
2101 | 2076 | ||
@@ -2157,7 +2132,7 @@ icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code, | |||
2157 | } | 2132 | } |
2158 | 2133 | ||
2159 | static bool | 2134 | static bool |
2160 | icmp_match(const struct sk_buff *skb, const struct xt_match_param *par) | 2135 | icmp_match(const struct sk_buff *skb, struct xt_action_param *par) |
2161 | { | 2136 | { |
2162 | const struct icmphdr *ic; | 2137 | const struct icmphdr *ic; |
2163 | struct icmphdr _icmph; | 2138 | struct icmphdr _icmph; |
@@ -2173,7 +2148,7 @@ icmp_match(const struct sk_buff *skb, const struct xt_match_param *par) | |||
2173 | * can't. Hence, no choice but to drop. | 2148 | * can't. Hence, no choice but to drop. |
2174 | */ | 2149 | */ |
2175 | duprintf("Dropping evil ICMP tinygram.\n"); | 2150 | duprintf("Dropping evil ICMP tinygram.\n"); |
2176 | *par->hotdrop = true; | 2151 | par->hotdrop = true; |
2177 | return false; | 2152 | return false; |
2178 | } | 2153 | } |
2179 | 2154 | ||
@@ -2184,31 +2159,31 @@ icmp_match(const struct sk_buff *skb, const struct xt_match_param *par) | |||
2184 | !!(icmpinfo->invflags&IPT_ICMP_INV)); | 2159 | !!(icmpinfo->invflags&IPT_ICMP_INV)); |
2185 | } | 2160 | } |
2186 | 2161 | ||
2187 | static bool icmp_checkentry(const struct xt_mtchk_param *par) | 2162 | static int icmp_checkentry(const struct xt_mtchk_param *par) |
2188 | { | 2163 | { |
2189 | const struct ipt_icmp *icmpinfo = par->matchinfo; | 2164 | const struct ipt_icmp *icmpinfo = par->matchinfo; |
2190 | 2165 | ||
2191 | /* Must specify no unknown invflags */ | 2166 | /* Must specify no unknown invflags */ |
2192 | return !(icmpinfo->invflags & ~IPT_ICMP_INV); | 2167 | return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0; |
2193 | } | 2168 | } |
2194 | 2169 | ||
2195 | /* The built-in targets: standard (NULL) and error. */ | 2170 | static struct xt_target ipt_builtin_tg[] __read_mostly = { |
2196 | static struct xt_target ipt_standard_target __read_mostly = { | 2171 | { |
2197 | .name = IPT_STANDARD_TARGET, | 2172 | .name = IPT_STANDARD_TARGET, |
2198 | .targetsize = sizeof(int), | 2173 | .targetsize = sizeof(int), |
2199 | .family = NFPROTO_IPV4, | 2174 | .family = NFPROTO_IPV4, |
2200 | #ifdef CONFIG_COMPAT | 2175 | #ifdef CONFIG_COMPAT |
2201 | .compatsize = sizeof(compat_int_t), | 2176 | .compatsize = sizeof(compat_int_t), |
2202 | .compat_from_user = compat_standard_from_user, | 2177 | .compat_from_user = compat_standard_from_user, |
2203 | .compat_to_user = compat_standard_to_user, | 2178 | .compat_to_user = compat_standard_to_user, |
2204 | #endif | 2179 | #endif |
2205 | }; | 2180 | }, |
2206 | 2181 | { | |
2207 | static struct xt_target ipt_error_target __read_mostly = { | 2182 | .name = IPT_ERROR_TARGET, |
2208 | .name = IPT_ERROR_TARGET, | 2183 | .target = ipt_error, |
2209 | .target = ipt_error, | 2184 | .targetsize = IPT_FUNCTION_MAXNAMELEN, |
2210 | .targetsize = IPT_FUNCTION_MAXNAMELEN, | 2185 | .family = NFPROTO_IPV4, |
2211 | .family = NFPROTO_IPV4, | 2186 | }, |
2212 | }; | 2187 | }; |
2213 | 2188 | ||
2214 | static struct nf_sockopt_ops ipt_sockopts = { | 2189 | static struct nf_sockopt_ops ipt_sockopts = { |
@@ -2228,13 +2203,15 @@ static struct nf_sockopt_ops ipt_sockopts = { | |||
2228 | .owner = THIS_MODULE, | 2203 | .owner = THIS_MODULE, |
2229 | }; | 2204 | }; |
2230 | 2205 | ||
2231 | static struct xt_match icmp_matchstruct __read_mostly = { | 2206 | static struct xt_match ipt_builtin_mt[] __read_mostly = { |
2232 | .name = "icmp", | 2207 | { |
2233 | .match = icmp_match, | 2208 | .name = "icmp", |
2234 | .matchsize = sizeof(struct ipt_icmp), | 2209 | .match = icmp_match, |
2235 | .checkentry = icmp_checkentry, | 2210 | .matchsize = sizeof(struct ipt_icmp), |
2236 | .proto = IPPROTO_ICMP, | 2211 | .checkentry = icmp_checkentry, |
2237 | .family = NFPROTO_IPV4, | 2212 | .proto = IPPROTO_ICMP, |
2213 | .family = NFPROTO_IPV4, | ||
2214 | }, | ||
2238 | }; | 2215 | }; |
2239 | 2216 | ||
2240 | static int __net_init ip_tables_net_init(struct net *net) | 2217 | static int __net_init ip_tables_net_init(struct net *net) |
@@ -2261,13 +2238,10 @@ static int __init ip_tables_init(void) | |||
2261 | goto err1; | 2238 | goto err1; |
2262 | 2239 | ||
2263 | /* Noone else will be downing sem now, so we won't sleep */ | 2240 | /* Noone else will be downing sem now, so we won't sleep */ |
2264 | ret = xt_register_target(&ipt_standard_target); | 2241 | ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); |
2265 | if (ret < 0) | 2242 | if (ret < 0) |
2266 | goto err2; | 2243 | goto err2; |
2267 | ret = xt_register_target(&ipt_error_target); | 2244 | ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); |
2268 | if (ret < 0) | ||
2269 | goto err3; | ||
2270 | ret = xt_register_match(&icmp_matchstruct); | ||
2271 | if (ret < 0) | 2245 | if (ret < 0) |
2272 | goto err4; | 2246 | goto err4; |
2273 | 2247 | ||
@@ -2276,15 +2250,13 @@ static int __init ip_tables_init(void) | |||
2276 | if (ret < 0) | 2250 | if (ret < 0) |
2277 | goto err5; | 2251 | goto err5; |
2278 | 2252 | ||
2279 | printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n"); | 2253 | pr_info("(C) 2000-2006 Netfilter Core Team\n"); |
2280 | return 0; | 2254 | return 0; |
2281 | 2255 | ||
2282 | err5: | 2256 | err5: |
2283 | xt_unregister_match(&icmp_matchstruct); | 2257 | xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); |
2284 | err4: | 2258 | err4: |
2285 | xt_unregister_target(&ipt_error_target); | 2259 | xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); |
2286 | err3: | ||
2287 | xt_unregister_target(&ipt_standard_target); | ||
2288 | err2: | 2260 | err2: |
2289 | unregister_pernet_subsys(&ip_tables_net_ops); | 2261 | unregister_pernet_subsys(&ip_tables_net_ops); |
2290 | err1: | 2262 | err1: |
@@ -2295,10 +2267,8 @@ static void __exit ip_tables_fini(void) | |||
2295 | { | 2267 | { |
2296 | nf_unregister_sockopt(&ipt_sockopts); | 2268 | nf_unregister_sockopt(&ipt_sockopts); |
2297 | 2269 | ||
2298 | xt_unregister_match(&icmp_matchstruct); | 2270 | xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); |
2299 | xt_unregister_target(&ipt_error_target); | 2271 | xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); |
2300 | xt_unregister_target(&ipt_standard_target); | ||
2301 | |||
2302 | unregister_pernet_subsys(&ip_tables_net_ops); | 2272 | unregister_pernet_subsys(&ip_tables_net_ops); |
2303 | } | 2273 | } |
2304 | 2274 | ||
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index a992dc826f1c..f91c94b9a790 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c | |||
@@ -9,6 +9,7 @@ | |||
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
12 | #include <linux/module.h> | 13 | #include <linux/module.h> |
13 | #include <linux/proc_fs.h> | 14 | #include <linux/proc_fs.h> |
14 | #include <linux/jhash.h> | 15 | #include <linux/jhash.h> |
@@ -239,8 +240,7 @@ clusterip_hashfn(const struct sk_buff *skb, | |||
239 | break; | 240 | break; |
240 | default: | 241 | default: |
241 | if (net_ratelimit()) | 242 | if (net_ratelimit()) |
242 | printk(KERN_NOTICE "CLUSTERIP: unknown protocol `%u'\n", | 243 | pr_info("unknown protocol %u\n", iph->protocol); |
243 | iph->protocol); | ||
244 | sport = dport = 0; | 244 | sport = dport = 0; |
245 | } | 245 | } |
246 | 246 | ||
@@ -262,7 +262,7 @@ clusterip_hashfn(const struct sk_buff *skb, | |||
262 | hashval = 0; | 262 | hashval = 0; |
263 | /* This cannot happen, unless the check function wasn't called | 263 | /* This cannot happen, unless the check function wasn't called |
264 | * at rule load time */ | 264 | * at rule load time */ |
265 | printk("CLUSTERIP: unknown mode `%u'\n", config->hash_mode); | 265 | pr_info("unknown mode %u\n", config->hash_mode); |
266 | BUG(); | 266 | BUG(); |
267 | break; | 267 | break; |
268 | } | 268 | } |
@@ -282,7 +282,7 @@ clusterip_responsible(const struct clusterip_config *config, u_int32_t hash) | |||
282 | ***********************************************************************/ | 282 | ***********************************************************************/ |
283 | 283 | ||
284 | static unsigned int | 284 | static unsigned int |
285 | clusterip_tg(struct sk_buff *skb, const struct xt_target_param *par) | 285 | clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par) |
286 | { | 286 | { |
287 | const struct ipt_clusterip_tgt_info *cipinfo = par->targinfo; | 287 | const struct ipt_clusterip_tgt_info *cipinfo = par->targinfo; |
288 | struct nf_conn *ct; | 288 | struct nf_conn *ct; |
@@ -295,7 +295,7 @@ clusterip_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
295 | 295 | ||
296 | ct = nf_ct_get(skb, &ctinfo); | 296 | ct = nf_ct_get(skb, &ctinfo); |
297 | if (ct == NULL) { | 297 | if (ct == NULL) { |
298 | printk(KERN_ERR "CLUSTERIP: no conntrack!\n"); | 298 | pr_info("no conntrack!\n"); |
299 | /* FIXME: need to drop invalid ones, since replies | 299 | /* FIXME: need to drop invalid ones, since replies |
300 | * to outgoing connections of other nodes will be | 300 | * to outgoing connections of other nodes will be |
301 | * marked as INVALID */ | 301 | * marked as INVALID */ |
@@ -348,25 +348,24 @@ clusterip_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
348 | return XT_CONTINUE; | 348 | return XT_CONTINUE; |
349 | } | 349 | } |
350 | 350 | ||
351 | static bool clusterip_tg_check(const struct xt_tgchk_param *par) | 351 | static int clusterip_tg_check(const struct xt_tgchk_param *par) |
352 | { | 352 | { |
353 | struct ipt_clusterip_tgt_info *cipinfo = par->targinfo; | 353 | struct ipt_clusterip_tgt_info *cipinfo = par->targinfo; |
354 | const struct ipt_entry *e = par->entryinfo; | 354 | const struct ipt_entry *e = par->entryinfo; |
355 | |||
356 | struct clusterip_config *config; | 355 | struct clusterip_config *config; |
356 | int ret; | ||
357 | 357 | ||
358 | if (cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP && | 358 | if (cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP && |
359 | cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT && | 359 | cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT && |
360 | cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT_DPT) { | 360 | cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT_DPT) { |
361 | printk(KERN_WARNING "CLUSTERIP: unknown mode `%u'\n", | 361 | pr_info("unknown mode %u\n", cipinfo->hash_mode); |
362 | cipinfo->hash_mode); | 362 | return -EINVAL; |
363 | return false; | ||
364 | 363 | ||
365 | } | 364 | } |
366 | if (e->ip.dmsk.s_addr != htonl(0xffffffff) || | 365 | if (e->ip.dmsk.s_addr != htonl(0xffffffff) || |
367 | e->ip.dst.s_addr == 0) { | 366 | e->ip.dst.s_addr == 0) { |
368 | printk(KERN_ERR "CLUSTERIP: Please specify destination IP\n"); | 367 | pr_info("Please specify destination IP\n"); |
369 | return false; | 368 | return -EINVAL; |
370 | } | 369 | } |
371 | 370 | ||
372 | /* FIXME: further sanity checks */ | 371 | /* FIXME: further sanity checks */ |
@@ -374,41 +373,41 @@ static bool clusterip_tg_check(const struct xt_tgchk_param *par) | |||
374 | config = clusterip_config_find_get(e->ip.dst.s_addr, 1); | 373 | config = clusterip_config_find_get(e->ip.dst.s_addr, 1); |
375 | if (!config) { | 374 | if (!config) { |
376 | if (!(cipinfo->flags & CLUSTERIP_FLAG_NEW)) { | 375 | if (!(cipinfo->flags & CLUSTERIP_FLAG_NEW)) { |
377 | printk(KERN_WARNING "CLUSTERIP: no config found for %pI4, need 'new'\n", &e->ip.dst.s_addr); | 376 | pr_info("no config found for %pI4, need 'new'\n", |
378 | return false; | 377 | &e->ip.dst.s_addr); |
378 | return -EINVAL; | ||
379 | } else { | 379 | } else { |
380 | struct net_device *dev; | 380 | struct net_device *dev; |
381 | 381 | ||
382 | if (e->ip.iniface[0] == '\0') { | 382 | if (e->ip.iniface[0] == '\0') { |
383 | printk(KERN_WARNING "CLUSTERIP: Please specify an interface name\n"); | 383 | pr_info("Please specify an interface name\n"); |
384 | return false; | 384 | return -EINVAL; |
385 | } | 385 | } |
386 | 386 | ||
387 | dev = dev_get_by_name(&init_net, e->ip.iniface); | 387 | dev = dev_get_by_name(&init_net, e->ip.iniface); |
388 | if (!dev) { | 388 | if (!dev) { |
389 | printk(KERN_WARNING "CLUSTERIP: no such interface %s\n", e->ip.iniface); | 389 | pr_info("no such interface %s\n", |
390 | return false; | 390 | e->ip.iniface); |
391 | return -ENOENT; | ||
391 | } | 392 | } |
392 | 393 | ||
393 | config = clusterip_config_init(cipinfo, | 394 | config = clusterip_config_init(cipinfo, |
394 | e->ip.dst.s_addr, dev); | 395 | e->ip.dst.s_addr, dev); |
395 | if (!config) { | 396 | if (!config) { |
396 | printk(KERN_WARNING "CLUSTERIP: cannot allocate config\n"); | 397 | pr_info("cannot allocate config\n"); |
397 | dev_put(dev); | 398 | dev_put(dev); |
398 | return false; | 399 | return -ENOMEM; |
399 | } | 400 | } |
400 | dev_mc_add(config->dev, config->clustermac); | 401 | dev_mc_add(config->dev, config->clustermac); |
401 | } | 402 | } |
402 | } | 403 | } |
403 | cipinfo->config = config; | 404 | cipinfo->config = config; |
404 | 405 | ||
405 | if (nf_ct_l3proto_try_module_get(par->target->family) < 0) { | 406 | ret = nf_ct_l3proto_try_module_get(par->family); |
406 | printk(KERN_WARNING "can't load conntrack support for " | 407 | if (ret < 0) |
407 | "proto=%u\n", par->target->family); | 408 | pr_info("cannot load conntrack support for proto=%u\n", |
408 | return false; | 409 | par->family); |
409 | } | 410 | return ret; |
410 | |||
411 | return true; | ||
412 | } | 411 | } |
413 | 412 | ||
414 | /* drop reference count of cluster config when rule is deleted */ | 413 | /* drop reference count of cluster config when rule is deleted */ |
@@ -422,7 +421,7 @@ static void clusterip_tg_destroy(const struct xt_tgdtor_param *par) | |||
422 | 421 | ||
423 | clusterip_config_put(cipinfo->config); | 422 | clusterip_config_put(cipinfo->config); |
424 | 423 | ||
425 | nf_ct_l3proto_module_put(par->target->family); | 424 | nf_ct_l3proto_module_put(par->family); |
426 | } | 425 | } |
427 | 426 | ||
428 | #ifdef CONFIG_COMPAT | 427 | #ifdef CONFIG_COMPAT |
@@ -479,8 +478,8 @@ static void arp_print(struct arp_payload *payload) | |||
479 | } | 478 | } |
480 | hbuffer[--k]='\0'; | 479 | hbuffer[--k]='\0'; |
481 | 480 | ||
482 | printk("src %pI4@%s, dst %pI4\n", | 481 | pr_debug("src %pI4@%s, dst %pI4\n", |
483 | &payload->src_ip, hbuffer, &payload->dst_ip); | 482 | &payload->src_ip, hbuffer, &payload->dst_ip); |
484 | } | 483 | } |
485 | #endif | 484 | #endif |
486 | 485 | ||
@@ -519,7 +518,7 @@ arp_mangle(unsigned int hook, | |||
519 | * this wouldn't work, since we didn't subscribe the mcast group on | 518 | * this wouldn't work, since we didn't subscribe the mcast group on |
520 | * other interfaces */ | 519 | * other interfaces */ |
521 | if (c->dev != out) { | 520 | if (c->dev != out) { |
522 | pr_debug("CLUSTERIP: not mangling arp reply on different " | 521 | pr_debug("not mangling arp reply on different " |
523 | "interface: cip'%s'-skb'%s'\n", | 522 | "interface: cip'%s'-skb'%s'\n", |
524 | c->dev->name, out->name); | 523 | c->dev->name, out->name); |
525 | clusterip_config_put(c); | 524 | clusterip_config_put(c); |
@@ -530,7 +529,7 @@ arp_mangle(unsigned int hook, | |||
530 | memcpy(payload->src_hw, c->clustermac, arp->ar_hln); | 529 | memcpy(payload->src_hw, c->clustermac, arp->ar_hln); |
531 | 530 | ||
532 | #ifdef DEBUG | 531 | #ifdef DEBUG |
533 | pr_debug(KERN_DEBUG "CLUSTERIP mangled arp reply: "); | 532 | pr_debug("mangled arp reply: "); |
534 | arp_print(payload); | 533 | arp_print(payload); |
535 | #endif | 534 | #endif |
536 | 535 | ||
@@ -601,7 +600,8 @@ static void *clusterip_seq_next(struct seq_file *s, void *v, loff_t *pos) | |||
601 | 600 | ||
602 | static void clusterip_seq_stop(struct seq_file *s, void *v) | 601 | static void clusterip_seq_stop(struct seq_file *s, void *v) |
603 | { | 602 | { |
604 | kfree(v); | 603 | if (!IS_ERR(v)) |
604 | kfree(v); | ||
605 | } | 605 | } |
606 | 606 | ||
607 | static int clusterip_seq_show(struct seq_file *s, void *v) | 607 | static int clusterip_seq_show(struct seq_file *s, void *v) |
@@ -706,13 +706,13 @@ static int __init clusterip_tg_init(void) | |||
706 | #ifdef CONFIG_PROC_FS | 706 | #ifdef CONFIG_PROC_FS |
707 | clusterip_procdir = proc_mkdir("ipt_CLUSTERIP", init_net.proc_net); | 707 | clusterip_procdir = proc_mkdir("ipt_CLUSTERIP", init_net.proc_net); |
708 | if (!clusterip_procdir) { | 708 | if (!clusterip_procdir) { |
709 | printk(KERN_ERR "CLUSTERIP: Unable to proc dir entry\n"); | 709 | pr_err("Unable to proc dir entry\n"); |
710 | ret = -ENOMEM; | 710 | ret = -ENOMEM; |
711 | goto cleanup_hook; | 711 | goto cleanup_hook; |
712 | } | 712 | } |
713 | #endif /* CONFIG_PROC_FS */ | 713 | #endif /* CONFIG_PROC_FS */ |
714 | 714 | ||
715 | printk(KERN_NOTICE "ClusterIP Version %s loaded successfully\n", | 715 | pr_info("ClusterIP Version %s loaded successfully\n", |
716 | CLUSTERIP_VERSION); | 716 | CLUSTERIP_VERSION); |
717 | return 0; | 717 | return 0; |
718 | 718 | ||
@@ -727,8 +727,7 @@ cleanup_target: | |||
727 | 727 | ||
728 | static void __exit clusterip_tg_exit(void) | 728 | static void __exit clusterip_tg_exit(void) |
729 | { | 729 | { |
730 | printk(KERN_NOTICE "ClusterIP Version %s unloading\n", | 730 | pr_info("ClusterIP Version %s unloading\n", CLUSTERIP_VERSION); |
731 | CLUSTERIP_VERSION); | ||
732 | #ifdef CONFIG_PROC_FS | 731 | #ifdef CONFIG_PROC_FS |
733 | remove_proc_entry(clusterip_procdir->name, clusterip_procdir->parent); | 732 | remove_proc_entry(clusterip_procdir->name, clusterip_procdir->parent); |
734 | #endif | 733 | #endif |
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c index ea5cea2415c1..4bf3dc49ad1e 100644 --- a/net/ipv4/netfilter/ipt_ECN.c +++ b/net/ipv4/netfilter/ipt_ECN.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | 9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
10 | #include <linux/in.h> | 10 | #include <linux/in.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/skbuff.h> | 12 | #include <linux/skbuff.h> |
@@ -77,7 +77,7 @@ set_ect_tcp(struct sk_buff *skb, const struct ipt_ECN_info *einfo) | |||
77 | } | 77 | } |
78 | 78 | ||
79 | static unsigned int | 79 | static unsigned int |
80 | ecn_tg(struct sk_buff *skb, const struct xt_target_param *par) | 80 | ecn_tg(struct sk_buff *skb, const struct xt_action_param *par) |
81 | { | 81 | { |
82 | const struct ipt_ECN_info *einfo = par->targinfo; | 82 | const struct ipt_ECN_info *einfo = par->targinfo; |
83 | 83 | ||
@@ -93,28 +93,25 @@ ecn_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
93 | return XT_CONTINUE; | 93 | return XT_CONTINUE; |
94 | } | 94 | } |
95 | 95 | ||
96 | static bool ecn_tg_check(const struct xt_tgchk_param *par) | 96 | static int ecn_tg_check(const struct xt_tgchk_param *par) |
97 | { | 97 | { |
98 | const struct ipt_ECN_info *einfo = par->targinfo; | 98 | const struct ipt_ECN_info *einfo = par->targinfo; |
99 | const struct ipt_entry *e = par->entryinfo; | 99 | const struct ipt_entry *e = par->entryinfo; |
100 | 100 | ||
101 | if (einfo->operation & IPT_ECN_OP_MASK) { | 101 | if (einfo->operation & IPT_ECN_OP_MASK) { |
102 | printk(KERN_WARNING "ECN: unsupported ECN operation %x\n", | 102 | pr_info("unsupported ECN operation %x\n", einfo->operation); |
103 | einfo->operation); | 103 | return -EINVAL; |
104 | return false; | ||
105 | } | 104 | } |
106 | if (einfo->ip_ect & ~IPT_ECN_IP_MASK) { | 105 | if (einfo->ip_ect & ~IPT_ECN_IP_MASK) { |
107 | printk(KERN_WARNING "ECN: new ECT codepoint %x out of mask\n", | 106 | pr_info("new ECT codepoint %x out of mask\n", einfo->ip_ect); |
108 | einfo->ip_ect); | 107 | return -EINVAL; |
109 | return false; | ||
110 | } | 108 | } |
111 | if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR)) && | 109 | if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR)) && |
112 | (e->ip.proto != IPPROTO_TCP || (e->ip.invflags & XT_INV_PROTO))) { | 110 | (e->ip.proto != IPPROTO_TCP || (e->ip.invflags & XT_INV_PROTO))) { |
113 | printk(KERN_WARNING "ECN: cannot use TCP operations on a " | 111 | pr_info("cannot use TCP operations on a non-tcp rule\n"); |
114 | "non-tcp rule\n"); | 112 | return -EINVAL; |
115 | return false; | ||
116 | } | 113 | } |
117 | return true; | 114 | return 0; |
118 | } | 115 | } |
119 | 116 | ||
120 | static struct xt_target ecn_tg_reg __read_mostly = { | 117 | static struct xt_target ecn_tg_reg __read_mostly = { |
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c index ee128efa1c8d..5234f4f3499a 100644 --- a/net/ipv4/netfilter/ipt_LOG.c +++ b/net/ipv4/netfilter/ipt_LOG.c | |||
@@ -9,7 +9,7 @@ | |||
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
15 | #include <linux/skbuff.h> | 15 | #include <linux/skbuff.h> |
@@ -367,7 +367,7 @@ static struct nf_loginfo default_loginfo = { | |||
367 | .type = NF_LOG_TYPE_LOG, | 367 | .type = NF_LOG_TYPE_LOG, |
368 | .u = { | 368 | .u = { |
369 | .log = { | 369 | .log = { |
370 | .level = 0, | 370 | .level = 5, |
371 | .logflags = NF_LOG_MASK, | 371 | .logflags = NF_LOG_MASK, |
372 | }, | 372 | }, |
373 | }, | 373 | }, |
@@ -425,7 +425,7 @@ ipt_log_packet(u_int8_t pf, | |||
425 | } | 425 | } |
426 | 426 | ||
427 | static unsigned int | 427 | static unsigned int |
428 | log_tg(struct sk_buff *skb, const struct xt_target_param *par) | 428 | log_tg(struct sk_buff *skb, const struct xt_action_param *par) |
429 | { | 429 | { |
430 | const struct ipt_log_info *loginfo = par->targinfo; | 430 | const struct ipt_log_info *loginfo = par->targinfo; |
431 | struct nf_loginfo li; | 431 | struct nf_loginfo li; |
@@ -439,20 +439,19 @@ log_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
439 | return XT_CONTINUE; | 439 | return XT_CONTINUE; |
440 | } | 440 | } |
441 | 441 | ||
442 | static bool log_tg_check(const struct xt_tgchk_param *par) | 442 | static int log_tg_check(const struct xt_tgchk_param *par) |
443 | { | 443 | { |
444 | const struct ipt_log_info *loginfo = par->targinfo; | 444 | const struct ipt_log_info *loginfo = par->targinfo; |
445 | 445 | ||
446 | if (loginfo->level >= 8) { | 446 | if (loginfo->level >= 8) { |
447 | pr_debug("LOG: level %u >= 8\n", loginfo->level); | 447 | pr_debug("level %u >= 8\n", loginfo->level); |
448 | return false; | 448 | return -EINVAL; |
449 | } | 449 | } |
450 | if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') { | 450 | if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') { |
451 | pr_debug("LOG: prefix term %i\n", | 451 | pr_debug("prefix is not null-terminated\n"); |
452 | loginfo->prefix[sizeof(loginfo->prefix)-1]); | 452 | return -EINVAL; |
453 | return false; | ||
454 | } | 453 | } |
455 | return true; | 454 | return 0; |
456 | } | 455 | } |
457 | 456 | ||
458 | static struct xt_target log_tg_reg __read_mostly = { | 457 | static struct xt_target log_tg_reg __read_mostly = { |
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c index 650b54042b01..d2ed9dc74ebc 100644 --- a/net/ipv4/netfilter/ipt_MASQUERADE.c +++ b/net/ipv4/netfilter/ipt_MASQUERADE.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/inetdevice.h> | 13 | #include <linux/inetdevice.h> |
14 | #include <linux/ip.h> | 14 | #include <linux/ip.h> |
@@ -28,23 +28,23 @@ MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); | |||
28 | MODULE_DESCRIPTION("Xtables: automatic-address SNAT"); | 28 | MODULE_DESCRIPTION("Xtables: automatic-address SNAT"); |
29 | 29 | ||
30 | /* FIXME: Multiple targets. --RR */ | 30 | /* FIXME: Multiple targets. --RR */ |
31 | static bool masquerade_tg_check(const struct xt_tgchk_param *par) | 31 | static int masquerade_tg_check(const struct xt_tgchk_param *par) |
32 | { | 32 | { |
33 | const struct nf_nat_multi_range_compat *mr = par->targinfo; | 33 | const struct nf_nat_multi_range_compat *mr = par->targinfo; |
34 | 34 | ||
35 | if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) { | 35 | if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) { |
36 | pr_debug("masquerade_check: bad MAP_IPS.\n"); | 36 | pr_debug("bad MAP_IPS.\n"); |
37 | return false; | 37 | return -EINVAL; |
38 | } | 38 | } |
39 | if (mr->rangesize != 1) { | 39 | if (mr->rangesize != 1) { |
40 | pr_debug("masquerade_check: bad rangesize %u\n", mr->rangesize); | 40 | pr_debug("bad rangesize %u\n", mr->rangesize); |
41 | return false; | 41 | return -EINVAL; |
42 | } | 42 | } |
43 | return true; | 43 | return 0; |
44 | } | 44 | } |
45 | 45 | ||
46 | static unsigned int | 46 | static unsigned int |
47 | masquerade_tg(struct sk_buff *skb, const struct xt_target_param *par) | 47 | masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par) |
48 | { | 48 | { |
49 | struct nf_conn *ct; | 49 | struct nf_conn *ct; |
50 | struct nf_conn_nat *nat; | 50 | struct nf_conn_nat *nat; |
@@ -72,7 +72,7 @@ masquerade_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
72 | rt = skb_rtable(skb); | 72 | rt = skb_rtable(skb); |
73 | newsrc = inet_select_addr(par->out, rt->rt_gateway, RT_SCOPE_UNIVERSE); | 73 | newsrc = inet_select_addr(par->out, rt->rt_gateway, RT_SCOPE_UNIVERSE); |
74 | if (!newsrc) { | 74 | if (!newsrc) { |
75 | printk("MASQUERADE: %s ate my IP address\n", par->out->name); | 75 | pr_info("%s ate my IP address\n", par->out->name); |
76 | return NF_DROP; | 76 | return NF_DROP; |
77 | } | 77 | } |
78 | 78 | ||
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c index 7c29582d4ec8..f43867d1697f 100644 --- a/net/ipv4/netfilter/ipt_NETMAP.c +++ b/net/ipv4/netfilter/ipt_NETMAP.c | |||
@@ -9,7 +9,7 @@ | |||
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
13 | #include <linux/ip.h> | 13 | #include <linux/ip.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/netdevice.h> | 15 | #include <linux/netdevice.h> |
@@ -22,23 +22,23 @@ MODULE_LICENSE("GPL"); | |||
22 | MODULE_AUTHOR("Svenning Soerensen <svenning@post5.tele.dk>"); | 22 | MODULE_AUTHOR("Svenning Soerensen <svenning@post5.tele.dk>"); |
23 | MODULE_DESCRIPTION("Xtables: 1:1 NAT mapping of IPv4 subnets"); | 23 | MODULE_DESCRIPTION("Xtables: 1:1 NAT mapping of IPv4 subnets"); |
24 | 24 | ||
25 | static bool netmap_tg_check(const struct xt_tgchk_param *par) | 25 | static int netmap_tg_check(const struct xt_tgchk_param *par) |
26 | { | 26 | { |
27 | const struct nf_nat_multi_range_compat *mr = par->targinfo; | 27 | const struct nf_nat_multi_range_compat *mr = par->targinfo; |
28 | 28 | ||
29 | if (!(mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)) { | 29 | if (!(mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)) { |
30 | pr_debug("NETMAP:check: bad MAP_IPS.\n"); | 30 | pr_debug("bad MAP_IPS.\n"); |
31 | return false; | 31 | return -EINVAL; |
32 | } | 32 | } |
33 | if (mr->rangesize != 1) { | 33 | if (mr->rangesize != 1) { |
34 | pr_debug("NETMAP:check: bad rangesize %u.\n", mr->rangesize); | 34 | pr_debug("bad rangesize %u.\n", mr->rangesize); |
35 | return false; | 35 | return -EINVAL; |
36 | } | 36 | } |
37 | return true; | 37 | return 0; |
38 | } | 38 | } |
39 | 39 | ||
40 | static unsigned int | 40 | static unsigned int |
41 | netmap_tg(struct sk_buff *skb, const struct xt_target_param *par) | 41 | netmap_tg(struct sk_buff *skb, const struct xt_action_param *par) |
42 | { | 42 | { |
43 | struct nf_conn *ct; | 43 | struct nf_conn *ct; |
44 | enum ip_conntrack_info ctinfo; | 44 | enum ip_conntrack_info ctinfo; |
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c index 698e5e78685b..18a0656505a0 100644 --- a/net/ipv4/netfilter/ipt_REDIRECT.c +++ b/net/ipv4/netfilter/ipt_REDIRECT.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | 9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
11 | #include <linux/ip.h> | 11 | #include <linux/ip.h> |
12 | #include <linux/timer.h> | 12 | #include <linux/timer.h> |
@@ -26,23 +26,23 @@ MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); | |||
26 | MODULE_DESCRIPTION("Xtables: Connection redirection to localhost"); | 26 | MODULE_DESCRIPTION("Xtables: Connection redirection to localhost"); |
27 | 27 | ||
28 | /* FIXME: Take multiple ranges --RR */ | 28 | /* FIXME: Take multiple ranges --RR */ |
29 | static bool redirect_tg_check(const struct xt_tgchk_param *par) | 29 | static int redirect_tg_check(const struct xt_tgchk_param *par) |
30 | { | 30 | { |
31 | const struct nf_nat_multi_range_compat *mr = par->targinfo; | 31 | const struct nf_nat_multi_range_compat *mr = par->targinfo; |
32 | 32 | ||
33 | if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) { | 33 | if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) { |
34 | pr_debug("redirect_check: bad MAP_IPS.\n"); | 34 | pr_debug("bad MAP_IPS.\n"); |
35 | return false; | 35 | return -EINVAL; |
36 | } | 36 | } |
37 | if (mr->rangesize != 1) { | 37 | if (mr->rangesize != 1) { |
38 | pr_debug("redirect_check: bad rangesize %u.\n", mr->rangesize); | 38 | pr_debug("bad rangesize %u.\n", mr->rangesize); |
39 | return false; | 39 | return -EINVAL; |
40 | } | 40 | } |
41 | return true; | 41 | return 0; |
42 | } | 42 | } |
43 | 43 | ||
44 | static unsigned int | 44 | static unsigned int |
45 | redirect_tg(struct sk_buff *skb, const struct xt_target_param *par) | 45 | redirect_tg(struct sk_buff *skb, const struct xt_action_param *par) |
46 | { | 46 | { |
47 | struct nf_conn *ct; | 47 | struct nf_conn *ct; |
48 | enum ip_conntrack_info ctinfo; | 48 | enum ip_conntrack_info ctinfo; |
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c index a0e8bcf04159..f5f4a888e4ec 100644 --- a/net/ipv4/netfilter/ipt_REJECT.c +++ b/net/ipv4/netfilter/ipt_REJECT.c | |||
@@ -9,7 +9,7 @@ | |||
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
@@ -136,13 +136,10 @@ static inline void send_unreach(struct sk_buff *skb_in, int code) | |||
136 | } | 136 | } |
137 | 137 | ||
138 | static unsigned int | 138 | static unsigned int |
139 | reject_tg(struct sk_buff *skb, const struct xt_target_param *par) | 139 | reject_tg(struct sk_buff *skb, const struct xt_action_param *par) |
140 | { | 140 | { |
141 | const struct ipt_reject_info *reject = par->targinfo; | 141 | const struct ipt_reject_info *reject = par->targinfo; |
142 | 142 | ||
143 | /* WARNING: This code causes reentry within iptables. | ||
144 | This means that the iptables jump stack is now crap. We | ||
145 | must return an absolute verdict. --RR */ | ||
146 | switch (reject->with) { | 143 | switch (reject->with) { |
147 | case IPT_ICMP_NET_UNREACHABLE: | 144 | case IPT_ICMP_NET_UNREACHABLE: |
148 | send_unreach(skb, ICMP_NET_UNREACH); | 145 | send_unreach(skb, ICMP_NET_UNREACH); |
@@ -175,23 +172,23 @@ reject_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
175 | return NF_DROP; | 172 | return NF_DROP; |
176 | } | 173 | } |
177 | 174 | ||
178 | static bool reject_tg_check(const struct xt_tgchk_param *par) | 175 | static int reject_tg_check(const struct xt_tgchk_param *par) |
179 | { | 176 | { |
180 | const struct ipt_reject_info *rejinfo = par->targinfo; | 177 | const struct ipt_reject_info *rejinfo = par->targinfo; |
181 | const struct ipt_entry *e = par->entryinfo; | 178 | const struct ipt_entry *e = par->entryinfo; |
182 | 179 | ||
183 | if (rejinfo->with == IPT_ICMP_ECHOREPLY) { | 180 | if (rejinfo->with == IPT_ICMP_ECHOREPLY) { |
184 | printk("ipt_REJECT: ECHOREPLY no longer supported.\n"); | 181 | pr_info("ECHOREPLY no longer supported.\n"); |
185 | return false; | 182 | return -EINVAL; |
186 | } else if (rejinfo->with == IPT_TCP_RESET) { | 183 | } else if (rejinfo->with == IPT_TCP_RESET) { |
187 | /* Must specify that it's a TCP packet */ | 184 | /* Must specify that it's a TCP packet */ |
188 | if (e->ip.proto != IPPROTO_TCP || | 185 | if (e->ip.proto != IPPROTO_TCP || |
189 | (e->ip.invflags & XT_INV_PROTO)) { | 186 | (e->ip.invflags & XT_INV_PROTO)) { |
190 | printk("ipt_REJECT: TCP_RESET invalid for non-tcp\n"); | 187 | pr_info("TCP_RESET invalid for non-tcp\n"); |
191 | return false; | 188 | return -EINVAL; |
192 | } | 189 | } |
193 | } | 190 | } |
194 | return true; | 191 | return 0; |
195 | } | 192 | } |
196 | 193 | ||
197 | static struct xt_target reject_tg_reg __read_mostly = { | 194 | static struct xt_target reject_tg_reg __read_mostly = { |
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c index 0dbe697f164f..446e0f467a17 100644 --- a/net/ipv4/netfilter/ipt_ULOG.c +++ b/net/ipv4/netfilter/ipt_ULOG.c | |||
@@ -29,7 +29,7 @@ | |||
29 | * Specify, after how many hundredths of a second the queue should be | 29 | * Specify, after how many hundredths of a second the queue should be |
30 | * flushed even if it is not full yet. | 30 | * flushed even if it is not full yet. |
31 | */ | 31 | */ |
32 | 32 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
33 | #include <linux/module.h> | 33 | #include <linux/module.h> |
34 | #include <linux/spinlock.h> | 34 | #include <linux/spinlock.h> |
35 | #include <linux/socket.h> | 35 | #include <linux/socket.h> |
@@ -57,8 +57,6 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NFLOG); | |||
57 | #define ULOG_NL_EVENT 111 /* Harald's favorite number */ | 57 | #define ULOG_NL_EVENT 111 /* Harald's favorite number */ |
58 | #define ULOG_MAXNLGROUPS 32 /* numer of nlgroups */ | 58 | #define ULOG_MAXNLGROUPS 32 /* numer of nlgroups */ |
59 | 59 | ||
60 | #define PRINTR(format, args...) do { if (net_ratelimit()) printk(format , ## args); } while (0) | ||
61 | |||
62 | static unsigned int nlbufsiz = NLMSG_GOODSIZE; | 60 | static unsigned int nlbufsiz = NLMSG_GOODSIZE; |
63 | module_param(nlbufsiz, uint, 0400); | 61 | module_param(nlbufsiz, uint, 0400); |
64 | MODULE_PARM_DESC(nlbufsiz, "netlink buffer size"); | 62 | MODULE_PARM_DESC(nlbufsiz, "netlink buffer size"); |
@@ -91,12 +89,12 @@ static void ulog_send(unsigned int nlgroupnum) | |||
91 | ulog_buff_t *ub = &ulog_buffers[nlgroupnum]; | 89 | ulog_buff_t *ub = &ulog_buffers[nlgroupnum]; |
92 | 90 | ||
93 | if (timer_pending(&ub->timer)) { | 91 | if (timer_pending(&ub->timer)) { |
94 | pr_debug("ipt_ULOG: ulog_send: timer was pending, deleting\n"); | 92 | pr_debug("ulog_send: timer was pending, deleting\n"); |
95 | del_timer(&ub->timer); | 93 | del_timer(&ub->timer); |
96 | } | 94 | } |
97 | 95 | ||
98 | if (!ub->skb) { | 96 | if (!ub->skb) { |
99 | pr_debug("ipt_ULOG: ulog_send: nothing to send\n"); | 97 | pr_debug("ulog_send: nothing to send\n"); |
100 | return; | 98 | return; |
101 | } | 99 | } |
102 | 100 | ||
@@ -105,7 +103,7 @@ static void ulog_send(unsigned int nlgroupnum) | |||
105 | ub->lastnlh->nlmsg_type = NLMSG_DONE; | 103 | ub->lastnlh->nlmsg_type = NLMSG_DONE; |
106 | 104 | ||
107 | NETLINK_CB(ub->skb).dst_group = nlgroupnum + 1; | 105 | NETLINK_CB(ub->skb).dst_group = nlgroupnum + 1; |
108 | pr_debug("ipt_ULOG: throwing %d packets to netlink group %u\n", | 106 | pr_debug("throwing %d packets to netlink group %u\n", |
109 | ub->qlen, nlgroupnum + 1); | 107 | ub->qlen, nlgroupnum + 1); |
110 | netlink_broadcast(nflognl, ub->skb, 0, nlgroupnum + 1, GFP_ATOMIC); | 108 | netlink_broadcast(nflognl, ub->skb, 0, nlgroupnum + 1, GFP_ATOMIC); |
111 | 109 | ||
@@ -118,7 +116,7 @@ static void ulog_send(unsigned int nlgroupnum) | |||
118 | /* timer function to flush queue in flushtimeout time */ | 116 | /* timer function to flush queue in flushtimeout time */ |
119 | static void ulog_timer(unsigned long data) | 117 | static void ulog_timer(unsigned long data) |
120 | { | 118 | { |
121 | pr_debug("ipt_ULOG: timer function called, calling ulog_send\n"); | 119 | pr_debug("timer function called, calling ulog_send\n"); |
122 | 120 | ||
123 | /* lock to protect against somebody modifying our structure | 121 | /* lock to protect against somebody modifying our structure |
124 | * from ipt_ulog_target at the same time */ | 122 | * from ipt_ulog_target at the same time */ |
@@ -139,7 +137,7 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size) | |||
139 | n = max(size, nlbufsiz); | 137 | n = max(size, nlbufsiz); |
140 | skb = alloc_skb(n, GFP_ATOMIC); | 138 | skb = alloc_skb(n, GFP_ATOMIC); |
141 | if (!skb) { | 139 | if (!skb) { |
142 | PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n", n); | 140 | pr_debug("cannot alloc whole buffer %ub!\n", n); |
143 | 141 | ||
144 | if (n > size) { | 142 | if (n > size) { |
145 | /* try to allocate only as much as we need for | 143 | /* try to allocate only as much as we need for |
@@ -147,8 +145,7 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size) | |||
147 | 145 | ||
148 | skb = alloc_skb(size, GFP_ATOMIC); | 146 | skb = alloc_skb(size, GFP_ATOMIC); |
149 | if (!skb) | 147 | if (!skb) |
150 | PRINTR("ipt_ULOG: can't even allocate %ub\n", | 148 | pr_debug("cannot even allocate %ub\n", size); |
151 | size); | ||
152 | } | 149 | } |
153 | } | 150 | } |
154 | 151 | ||
@@ -199,8 +196,7 @@ static void ipt_ulog_packet(unsigned int hooknum, | |||
199 | goto alloc_failure; | 196 | goto alloc_failure; |
200 | } | 197 | } |
201 | 198 | ||
202 | pr_debug("ipt_ULOG: qlen %d, qthreshold %Zu\n", ub->qlen, | 199 | pr_debug("qlen %d, qthreshold %Zu\n", ub->qlen, loginfo->qthreshold); |
203 | loginfo->qthreshold); | ||
204 | 200 | ||
205 | /* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */ | 201 | /* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */ |
206 | nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT, | 202 | nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT, |
@@ -273,16 +269,14 @@ static void ipt_ulog_packet(unsigned int hooknum, | |||
273 | return; | 269 | return; |
274 | 270 | ||
275 | nlmsg_failure: | 271 | nlmsg_failure: |
276 | PRINTR("ipt_ULOG: error during NLMSG_PUT\n"); | 272 | pr_debug("error during NLMSG_PUT\n"); |
277 | |||
278 | alloc_failure: | 273 | alloc_failure: |
279 | PRINTR("ipt_ULOG: Error building netlink message\n"); | 274 | pr_debug("Error building netlink message\n"); |
280 | |||
281 | spin_unlock_bh(&ulog_lock); | 275 | spin_unlock_bh(&ulog_lock); |
282 | } | 276 | } |
283 | 277 | ||
284 | static unsigned int | 278 | static unsigned int |
285 | ulog_tg(struct sk_buff *skb, const struct xt_target_param *par) | 279 | ulog_tg(struct sk_buff *skb, const struct xt_action_param *par) |
286 | { | 280 | { |
287 | ipt_ulog_packet(par->hooknum, skb, par->in, par->out, | 281 | ipt_ulog_packet(par->hooknum, skb, par->in, par->out, |
288 | par->targinfo, NULL); | 282 | par->targinfo, NULL); |
@@ -314,21 +308,20 @@ static void ipt_logfn(u_int8_t pf, | |||
314 | ipt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix); | 308 | ipt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix); |
315 | } | 309 | } |
316 | 310 | ||
317 | static bool ulog_tg_check(const struct xt_tgchk_param *par) | 311 | static int ulog_tg_check(const struct xt_tgchk_param *par) |
318 | { | 312 | { |
319 | const struct ipt_ulog_info *loginfo = par->targinfo; | 313 | const struct ipt_ulog_info *loginfo = par->targinfo; |
320 | 314 | ||
321 | if (loginfo->prefix[sizeof(loginfo->prefix) - 1] != '\0') { | 315 | if (loginfo->prefix[sizeof(loginfo->prefix) - 1] != '\0') { |
322 | pr_debug("ipt_ULOG: prefix term %i\n", | 316 | pr_debug("prefix not null-terminated\n"); |
323 | loginfo->prefix[sizeof(loginfo->prefix) - 1]); | 317 | return -EINVAL; |
324 | return false; | ||
325 | } | 318 | } |
326 | if (loginfo->qthreshold > ULOG_MAX_QLEN) { | 319 | if (loginfo->qthreshold > ULOG_MAX_QLEN) { |
327 | pr_debug("ipt_ULOG: queue threshold %Zu > MAX_QLEN\n", | 320 | pr_debug("queue threshold %Zu > MAX_QLEN\n", |
328 | loginfo->qthreshold); | 321 | loginfo->qthreshold); |
329 | return false; | 322 | return -EINVAL; |
330 | } | 323 | } |
331 | return true; | 324 | return 0; |
332 | } | 325 | } |
333 | 326 | ||
334 | #ifdef CONFIG_COMPAT | 327 | #ifdef CONFIG_COMPAT |
@@ -390,10 +383,10 @@ static int __init ulog_tg_init(void) | |||
390 | { | 383 | { |
391 | int ret, i; | 384 | int ret, i; |
392 | 385 | ||
393 | pr_debug("ipt_ULOG: init module\n"); | 386 | pr_debug("init module\n"); |
394 | 387 | ||
395 | if (nlbufsiz > 128*1024) { | 388 | if (nlbufsiz > 128*1024) { |
396 | printk("Netlink buffer has to be <= 128kB\n"); | 389 | pr_warning("Netlink buffer has to be <= 128kB\n"); |
397 | return -EINVAL; | 390 | return -EINVAL; |
398 | } | 391 | } |
399 | 392 | ||
@@ -423,7 +416,7 @@ static void __exit ulog_tg_exit(void) | |||
423 | ulog_buff_t *ub; | 416 | ulog_buff_t *ub; |
424 | int i; | 417 | int i; |
425 | 418 | ||
426 | pr_debug("ipt_ULOG: cleanup_module\n"); | 419 | pr_debug("cleanup_module\n"); |
427 | 420 | ||
428 | if (nflog) | 421 | if (nflog) |
429 | nf_log_unregister(&ipt_ulog_logger); | 422 | nf_log_unregister(&ipt_ulog_logger); |
diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c index 3b216be3bc9f..db8bff0fb86d 100644 --- a/net/ipv4/netfilter/ipt_addrtype.c +++ b/net/ipv4/netfilter/ipt_addrtype.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
@@ -30,7 +30,7 @@ static inline bool match_type(struct net *net, const struct net_device *dev, | |||
30 | } | 30 | } |
31 | 31 | ||
32 | static bool | 32 | static bool |
33 | addrtype_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par) | 33 | addrtype_mt_v0(const struct sk_buff *skb, struct xt_action_param *par) |
34 | { | 34 | { |
35 | struct net *net = dev_net(par->in ? par->in : par->out); | 35 | struct net *net = dev_net(par->in ? par->in : par->out); |
36 | const struct ipt_addrtype_info *info = par->matchinfo; | 36 | const struct ipt_addrtype_info *info = par->matchinfo; |
@@ -48,7 +48,7 @@ addrtype_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par) | |||
48 | } | 48 | } |
49 | 49 | ||
50 | static bool | 50 | static bool |
51 | addrtype_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par) | 51 | addrtype_mt_v1(const struct sk_buff *skb, struct xt_action_param *par) |
52 | { | 52 | { |
53 | struct net *net = dev_net(par->in ? par->in : par->out); | 53 | struct net *net = dev_net(par->in ? par->in : par->out); |
54 | const struct ipt_addrtype_info_v1 *info = par->matchinfo; | 54 | const struct ipt_addrtype_info_v1 *info = par->matchinfo; |
@@ -70,34 +70,34 @@ addrtype_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par) | |||
70 | return ret; | 70 | return ret; |
71 | } | 71 | } |
72 | 72 | ||
73 | static bool addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par) | 73 | static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par) |
74 | { | 74 | { |
75 | struct ipt_addrtype_info_v1 *info = par->matchinfo; | 75 | struct ipt_addrtype_info_v1 *info = par->matchinfo; |
76 | 76 | ||
77 | if (info->flags & IPT_ADDRTYPE_LIMIT_IFACE_IN && | 77 | if (info->flags & IPT_ADDRTYPE_LIMIT_IFACE_IN && |
78 | info->flags & IPT_ADDRTYPE_LIMIT_IFACE_OUT) { | 78 | info->flags & IPT_ADDRTYPE_LIMIT_IFACE_OUT) { |
79 | printk(KERN_ERR "ipt_addrtype: both incoming and outgoing " | 79 | pr_info("both incoming and outgoing " |
80 | "interface limitation cannot be selected\n"); | 80 | "interface limitation cannot be selected\n"); |
81 | return false; | 81 | return -EINVAL; |
82 | } | 82 | } |
83 | 83 | ||
84 | if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) | | 84 | if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) | |
85 | (1 << NF_INET_LOCAL_IN)) && | 85 | (1 << NF_INET_LOCAL_IN)) && |
86 | info->flags & IPT_ADDRTYPE_LIMIT_IFACE_OUT) { | 86 | info->flags & IPT_ADDRTYPE_LIMIT_IFACE_OUT) { |
87 | printk(KERN_ERR "ipt_addrtype: output interface limitation " | 87 | pr_info("output interface limitation " |
88 | "not valid in PRE_ROUTING and INPUT\n"); | 88 | "not valid in PREROUTING and INPUT\n"); |
89 | return false; | 89 | return -EINVAL; |
90 | } | 90 | } |
91 | 91 | ||
92 | if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) | | 92 | if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) | |
93 | (1 << NF_INET_LOCAL_OUT)) && | 93 | (1 << NF_INET_LOCAL_OUT)) && |
94 | info->flags & IPT_ADDRTYPE_LIMIT_IFACE_IN) { | 94 | info->flags & IPT_ADDRTYPE_LIMIT_IFACE_IN) { |
95 | printk(KERN_ERR "ipt_addrtype: input interface limitation " | 95 | pr_info("input interface limitation " |
96 | "not valid in POST_ROUTING and OUTPUT\n"); | 96 | "not valid in POSTROUTING and OUTPUT\n"); |
97 | return false; | 97 | return -EINVAL; |
98 | } | 98 | } |
99 | 99 | ||
100 | return true; | 100 | return 0; |
101 | } | 101 | } |
102 | 102 | ||
103 | static struct xt_match addrtype_mt_reg[] __read_mostly = { | 103 | static struct xt_match addrtype_mt_reg[] __read_mostly = { |
diff --git a/net/ipv4/netfilter/ipt_ah.c b/net/ipv4/netfilter/ipt_ah.c index 0104c0b399de..14a2aa8b8a14 100644 --- a/net/ipv4/netfilter/ipt_ah.c +++ b/net/ipv4/netfilter/ipt_ah.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | */ | 7 | */ |
8 | 8 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
9 | #include <linux/in.h> | 9 | #include <linux/in.h> |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/skbuff.h> | 11 | #include <linux/skbuff.h> |
@@ -18,25 +18,19 @@ MODULE_LICENSE("GPL"); | |||
18 | MODULE_AUTHOR("Yon Uriarte <yon@astaro.de>"); | 18 | MODULE_AUTHOR("Yon Uriarte <yon@astaro.de>"); |
19 | MODULE_DESCRIPTION("Xtables: IPv4 IPsec-AH SPI match"); | 19 | MODULE_DESCRIPTION("Xtables: IPv4 IPsec-AH SPI match"); |
20 | 20 | ||
21 | #ifdef DEBUG_CONNTRACK | ||
22 | #define duprintf(format, args...) printk(format , ## args) | ||
23 | #else | ||
24 | #define duprintf(format, args...) | ||
25 | #endif | ||
26 | |||
27 | /* Returns 1 if the spi is matched by the range, 0 otherwise */ | 21 | /* Returns 1 if the spi is matched by the range, 0 otherwise */ |
28 | static inline bool | 22 | static inline bool |
29 | spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert) | 23 | spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert) |
30 | { | 24 | { |
31 | bool r; | 25 | bool r; |
32 | duprintf("ah spi_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ', | 26 | pr_debug("spi_match:%c 0x%x <= 0x%x <= 0x%x\n", |
33 | min,spi,max); | 27 | invert ? '!' : ' ', min, spi, max); |
34 | r=(spi >= min && spi <= max) ^ invert; | 28 | r=(spi >= min && spi <= max) ^ invert; |
35 | duprintf(" result %s\n",r? "PASS" : "FAILED"); | 29 | pr_debug(" result %s\n", r ? "PASS" : "FAILED"); |
36 | return r; | 30 | return r; |
37 | } | 31 | } |
38 | 32 | ||
39 | static bool ah_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 33 | static bool ah_mt(const struct sk_buff *skb, struct xt_action_param *par) |
40 | { | 34 | { |
41 | struct ip_auth_hdr _ahdr; | 35 | struct ip_auth_hdr _ahdr; |
42 | const struct ip_auth_hdr *ah; | 36 | const struct ip_auth_hdr *ah; |
@@ -51,8 +45,8 @@ static bool ah_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
51 | /* We've been asked to examine this packet, and we | 45 | /* We've been asked to examine this packet, and we |
52 | * can't. Hence, no choice but to drop. | 46 | * can't. Hence, no choice but to drop. |
53 | */ | 47 | */ |
54 | duprintf("Dropping evil AH tinygram.\n"); | 48 | pr_debug("Dropping evil AH tinygram.\n"); |
55 | *par->hotdrop = true; | 49 | par->hotdrop = true; |
56 | return 0; | 50 | return 0; |
57 | } | 51 | } |
58 | 52 | ||
@@ -61,16 +55,16 @@ static bool ah_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
61 | !!(ahinfo->invflags & IPT_AH_INV_SPI)); | 55 | !!(ahinfo->invflags & IPT_AH_INV_SPI)); |
62 | } | 56 | } |
63 | 57 | ||
64 | static bool ah_mt_check(const struct xt_mtchk_param *par) | 58 | static int ah_mt_check(const struct xt_mtchk_param *par) |
65 | { | 59 | { |
66 | const struct ipt_ah *ahinfo = par->matchinfo; | 60 | const struct ipt_ah *ahinfo = par->matchinfo; |
67 | 61 | ||
68 | /* Must specify no unknown invflags */ | 62 | /* Must specify no unknown invflags */ |
69 | if (ahinfo->invflags & ~IPT_AH_INV_MASK) { | 63 | if (ahinfo->invflags & ~IPT_AH_INV_MASK) { |
70 | duprintf("ipt_ah: unknown flags %X\n", ahinfo->invflags); | 64 | pr_debug("unknown flags %X\n", ahinfo->invflags); |
71 | return false; | 65 | return -EINVAL; |
72 | } | 66 | } |
73 | return true; | 67 | return 0; |
74 | } | 68 | } |
75 | 69 | ||
76 | static struct xt_match ah_mt_reg __read_mostly = { | 70 | static struct xt_match ah_mt_reg __read_mostly = { |
diff --git a/net/ipv4/netfilter/ipt_ecn.c b/net/ipv4/netfilter/ipt_ecn.c index 2a1e56b71908..af6e9c778345 100644 --- a/net/ipv4/netfilter/ipt_ecn.c +++ b/net/ipv4/netfilter/ipt_ecn.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | 9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
10 | #include <linux/in.h> | 10 | #include <linux/in.h> |
11 | #include <linux/ip.h> | 11 | #include <linux/ip.h> |
12 | #include <net/ip.h> | 12 | #include <net/ip.h> |
@@ -67,7 +67,7 @@ static inline bool match_tcp(const struct sk_buff *skb, | |||
67 | return true; | 67 | return true; |
68 | } | 68 | } |
69 | 69 | ||
70 | static bool ecn_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 70 | static bool ecn_mt(const struct sk_buff *skb, struct xt_action_param *par) |
71 | { | 71 | { |
72 | const struct ipt_ecn_info *info = par->matchinfo; | 72 | const struct ipt_ecn_info *info = par->matchinfo; |
73 | 73 | ||
@@ -78,32 +78,31 @@ static bool ecn_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
78 | if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) { | 78 | if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) { |
79 | if (ip_hdr(skb)->protocol != IPPROTO_TCP) | 79 | if (ip_hdr(skb)->protocol != IPPROTO_TCP) |
80 | return false; | 80 | return false; |
81 | if (!match_tcp(skb, info, par->hotdrop)) | 81 | if (!match_tcp(skb, info, &par->hotdrop)) |
82 | return false; | 82 | return false; |
83 | } | 83 | } |
84 | 84 | ||
85 | return true; | 85 | return true; |
86 | } | 86 | } |
87 | 87 | ||
88 | static bool ecn_mt_check(const struct xt_mtchk_param *par) | 88 | static int ecn_mt_check(const struct xt_mtchk_param *par) |
89 | { | 89 | { |
90 | const struct ipt_ecn_info *info = par->matchinfo; | 90 | const struct ipt_ecn_info *info = par->matchinfo; |
91 | const struct ipt_ip *ip = par->entryinfo; | 91 | const struct ipt_ip *ip = par->entryinfo; |
92 | 92 | ||
93 | if (info->operation & IPT_ECN_OP_MATCH_MASK) | 93 | if (info->operation & IPT_ECN_OP_MATCH_MASK) |
94 | return false; | 94 | return -EINVAL; |
95 | 95 | ||
96 | if (info->invert & IPT_ECN_OP_MATCH_MASK) | 96 | if (info->invert & IPT_ECN_OP_MATCH_MASK) |
97 | return false; | 97 | return -EINVAL; |
98 | 98 | ||
99 | if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR) && | 99 | if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR) && |
100 | ip->proto != IPPROTO_TCP) { | 100 | ip->proto != IPPROTO_TCP) { |
101 | printk(KERN_WARNING "ipt_ecn: can't match TCP bits in rule for" | 101 | pr_info("cannot match TCP bits in rule for non-tcp packets\n"); |
102 | " non-tcp packets\n"); | 102 | return -EINVAL; |
103 | return false; | ||
104 | } | 103 | } |
105 | 104 | ||
106 | return true; | 105 | return 0; |
107 | } | 106 | } |
108 | 107 | ||
109 | static struct xt_match ecn_mt_reg __read_mostly = { | 108 | static struct xt_match ecn_mt_reg __read_mostly = { |
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c index 55392466daa4..c37641e819f2 100644 --- a/net/ipv4/netfilter/iptable_filter.c +++ b/net/ipv4/netfilter/iptable_filter.c | |||
@@ -89,7 +89,7 @@ static int __init iptable_filter_init(void) | |||
89 | int ret; | 89 | int ret; |
90 | 90 | ||
91 | if (forward < 0 || forward > NF_MAX_VERDICT) { | 91 | if (forward < 0 || forward > NF_MAX_VERDICT) { |
92 | printk("iptables forward must be 0 or 1\n"); | 92 | pr_err("iptables forward must be 0 or 1\n"); |
93 | return -EINVAL; | 93 | return -EINVAL; |
94 | } | 94 | } |
95 | 95 | ||
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 2bb1f87051c4..5a03c02af999 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | |||
@@ -382,32 +382,32 @@ static int __init nf_conntrack_l3proto_ipv4_init(void) | |||
382 | 382 | ||
383 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp4); | 383 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp4); |
384 | if (ret < 0) { | 384 | if (ret < 0) { |
385 | printk("nf_conntrack_ipv4: can't register tcp.\n"); | 385 | pr_err("nf_conntrack_ipv4: can't register tcp.\n"); |
386 | goto cleanup_sockopt; | 386 | goto cleanup_sockopt; |
387 | } | 387 | } |
388 | 388 | ||
389 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp4); | 389 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp4); |
390 | if (ret < 0) { | 390 | if (ret < 0) { |
391 | printk("nf_conntrack_ipv4: can't register udp.\n"); | 391 | pr_err("nf_conntrack_ipv4: can't register udp.\n"); |
392 | goto cleanup_tcp; | 392 | goto cleanup_tcp; |
393 | } | 393 | } |
394 | 394 | ||
395 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmp); | 395 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmp); |
396 | if (ret < 0) { | 396 | if (ret < 0) { |
397 | printk("nf_conntrack_ipv4: can't register icmp.\n"); | 397 | pr_err("nf_conntrack_ipv4: can't register icmp.\n"); |
398 | goto cleanup_udp; | 398 | goto cleanup_udp; |
399 | } | 399 | } |
400 | 400 | ||
401 | ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv4); | 401 | ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv4); |
402 | if (ret < 0) { | 402 | if (ret < 0) { |
403 | printk("nf_conntrack_ipv4: can't register ipv4\n"); | 403 | pr_err("nf_conntrack_ipv4: can't register ipv4\n"); |
404 | goto cleanup_icmp; | 404 | goto cleanup_icmp; |
405 | } | 405 | } |
406 | 406 | ||
407 | ret = nf_register_hooks(ipv4_conntrack_ops, | 407 | ret = nf_register_hooks(ipv4_conntrack_ops, |
408 | ARRAY_SIZE(ipv4_conntrack_ops)); | 408 | ARRAY_SIZE(ipv4_conntrack_ops)); |
409 | if (ret < 0) { | 409 | if (ret < 0) { |
410 | printk("nf_conntrack_ipv4: can't register hooks.\n"); | 410 | pr_err("nf_conntrack_ipv4: can't register hooks.\n"); |
411 | goto cleanup_ipv4; | 411 | goto cleanup_ipv4; |
412 | } | 412 | } |
413 | #if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) | 413 | #if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) |
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c index 2fb7b76da94f..244f7cb08d68 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | |||
@@ -336,12 +336,12 @@ static int ct_cpu_seq_show(struct seq_file *seq, void *v) | |||
336 | const struct ip_conntrack_stat *st = v; | 336 | const struct ip_conntrack_stat *st = v; |
337 | 337 | ||
338 | if (v == SEQ_START_TOKEN) { | 338 | if (v == SEQ_START_TOKEN) { |
339 | seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete\n"); | 339 | seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n"); |
340 | return 0; | 340 | return 0; |
341 | } | 341 | } |
342 | 342 | ||
343 | seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x " | 343 | seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x " |
344 | "%08x %08x %08x %08x %08x %08x %08x %08x \n", | 344 | "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", |
345 | nr_conntracks, | 345 | nr_conntracks, |
346 | st->searched, | 346 | st->searched, |
347 | st->found, | 347 | st->found, |
@@ -358,7 +358,8 @@ static int ct_cpu_seq_show(struct seq_file *seq, void *v) | |||
358 | 358 | ||
359 | st->expect_new, | 359 | st->expect_new, |
360 | st->expect_create, | 360 | st->expect_create, |
361 | st->expect_delete | 361 | st->expect_delete, |
362 | st->search_restart | ||
362 | ); | 363 | ); |
363 | return 0; | 364 | return 0; |
364 | } | 365 | } |
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c index 7e8e6fc75413..5045196d853c 100644 --- a/net/ipv4/netfilter/nf_nat_h323.c +++ b/net/ipv4/netfilter/nf_nat_h323.c | |||
@@ -10,7 +10,6 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/moduleparam.h> | ||
14 | #include <linux/tcp.h> | 13 | #include <linux/tcp.h> |
15 | #include <net/tcp.h> | 14 | #include <net/tcp.h> |
16 | 15 | ||
@@ -44,7 +43,7 @@ static int set_addr(struct sk_buff *skb, | |||
44 | addroff, sizeof(buf), | 43 | addroff, sizeof(buf), |
45 | (char *) &buf, sizeof(buf))) { | 44 | (char *) &buf, sizeof(buf))) { |
46 | if (net_ratelimit()) | 45 | if (net_ratelimit()) |
47 | printk("nf_nat_h323: nf_nat_mangle_tcp_packet" | 46 | pr_notice("nf_nat_h323: nf_nat_mangle_tcp_packet" |
48 | " error\n"); | 47 | " error\n"); |
49 | return -1; | 48 | return -1; |
50 | } | 49 | } |
@@ -60,7 +59,7 @@ static int set_addr(struct sk_buff *skb, | |||
60 | addroff, sizeof(buf), | 59 | addroff, sizeof(buf), |
61 | (char *) &buf, sizeof(buf))) { | 60 | (char *) &buf, sizeof(buf))) { |
62 | if (net_ratelimit()) | 61 | if (net_ratelimit()) |
63 | printk("nf_nat_h323: nf_nat_mangle_udp_packet" | 62 | pr_notice("nf_nat_h323: nf_nat_mangle_udp_packet" |
64 | " error\n"); | 63 | " error\n"); |
65 | return -1; | 64 | return -1; |
66 | } | 65 | } |
@@ -216,7 +215,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, | |||
216 | /* Run out of expectations */ | 215 | /* Run out of expectations */ |
217 | if (i >= H323_RTP_CHANNEL_MAX) { | 216 | if (i >= H323_RTP_CHANNEL_MAX) { |
218 | if (net_ratelimit()) | 217 | if (net_ratelimit()) |
219 | printk("nf_nat_h323: out of expectations\n"); | 218 | pr_notice("nf_nat_h323: out of expectations\n"); |
220 | return 0; | 219 | return 0; |
221 | } | 220 | } |
222 | 221 | ||
@@ -235,7 +234,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, | |||
235 | 234 | ||
236 | if (nated_port == 0) { /* No port available */ | 235 | if (nated_port == 0) { /* No port available */ |
237 | if (net_ratelimit()) | 236 | if (net_ratelimit()) |
238 | printk("nf_nat_h323: out of RTP ports\n"); | 237 | pr_notice("nf_nat_h323: out of RTP ports\n"); |
239 | return 0; | 238 | return 0; |
240 | } | 239 | } |
241 | 240 | ||
@@ -292,7 +291,7 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct, | |||
292 | 291 | ||
293 | if (nated_port == 0) { /* No port available */ | 292 | if (nated_port == 0) { /* No port available */ |
294 | if (net_ratelimit()) | 293 | if (net_ratelimit()) |
295 | printk("nf_nat_h323: out of TCP ports\n"); | 294 | pr_notice("nf_nat_h323: out of TCP ports\n"); |
296 | return 0; | 295 | return 0; |
297 | } | 296 | } |
298 | 297 | ||
@@ -342,7 +341,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct, | |||
342 | 341 | ||
343 | if (nated_port == 0) { /* No port available */ | 342 | if (nated_port == 0) { /* No port available */ |
344 | if (net_ratelimit()) | 343 | if (net_ratelimit()) |
345 | printk("nf_nat_q931: out of TCP ports\n"); | 344 | pr_notice("nf_nat_q931: out of TCP ports\n"); |
346 | return 0; | 345 | return 0; |
347 | } | 346 | } |
348 | 347 | ||
@@ -426,7 +425,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct, | |||
426 | 425 | ||
427 | if (nated_port == 0) { /* No port available */ | 426 | if (nated_port == 0) { /* No port available */ |
428 | if (net_ratelimit()) | 427 | if (net_ratelimit()) |
429 | printk("nf_nat_ras: out of TCP ports\n"); | 428 | pr_notice("nf_nat_ras: out of TCP ports\n"); |
430 | return 0; | 429 | return 0; |
431 | } | 430 | } |
432 | 431 | ||
@@ -508,7 +507,7 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct, | |||
508 | 507 | ||
509 | if (nated_port == 0) { /* No port available */ | 508 | if (nated_port == 0) { /* No port available */ |
510 | if (net_ratelimit()) | 509 | if (net_ratelimit()) |
511 | printk("nf_nat_q931: out of TCP ports\n"); | 510 | pr_notice("nf_nat_q931: out of TCP ports\n"); |
512 | return 0; | 511 | return 0; |
513 | } | 512 | } |
514 | 513 | ||
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c index 26de2c1f7fab..98ed78281aee 100644 --- a/net/ipv4/netfilter/nf_nat_rule.c +++ b/net/ipv4/netfilter/nf_nat_rule.c | |||
@@ -7,6 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | /* Everything about the rules for NAT. */ | 9 | /* Everything about the rules for NAT. */ |
10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
10 | #include <linux/types.h> | 11 | #include <linux/types.h> |
11 | #include <linux/ip.h> | 12 | #include <linux/ip.h> |
12 | #include <linux/netfilter.h> | 13 | #include <linux/netfilter.h> |
@@ -38,7 +39,7 @@ static const struct xt_table nat_table = { | |||
38 | 39 | ||
39 | /* Source NAT */ | 40 | /* Source NAT */ |
40 | static unsigned int | 41 | static unsigned int |
41 | ipt_snat_target(struct sk_buff *skb, const struct xt_target_param *par) | 42 | ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par) |
42 | { | 43 | { |
43 | struct nf_conn *ct; | 44 | struct nf_conn *ct; |
44 | enum ip_conntrack_info ctinfo; | 45 | enum ip_conntrack_info ctinfo; |
@@ -57,7 +58,7 @@ ipt_snat_target(struct sk_buff *skb, const struct xt_target_param *par) | |||
57 | } | 58 | } |
58 | 59 | ||
59 | static unsigned int | 60 | static unsigned int |
60 | ipt_dnat_target(struct sk_buff *skb, const struct xt_target_param *par) | 61 | ipt_dnat_target(struct sk_buff *skb, const struct xt_action_param *par) |
61 | { | 62 | { |
62 | struct nf_conn *ct; | 63 | struct nf_conn *ct; |
63 | enum ip_conntrack_info ctinfo; | 64 | enum ip_conntrack_info ctinfo; |
@@ -74,28 +75,28 @@ ipt_dnat_target(struct sk_buff *skb, const struct xt_target_param *par) | |||
74 | return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_DST); | 75 | return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_DST); |
75 | } | 76 | } |
76 | 77 | ||
77 | static bool ipt_snat_checkentry(const struct xt_tgchk_param *par) | 78 | static int ipt_snat_checkentry(const struct xt_tgchk_param *par) |
78 | { | 79 | { |
79 | const struct nf_nat_multi_range_compat *mr = par->targinfo; | 80 | const struct nf_nat_multi_range_compat *mr = par->targinfo; |
80 | 81 | ||
81 | /* Must be a valid range */ | 82 | /* Must be a valid range */ |
82 | if (mr->rangesize != 1) { | 83 | if (mr->rangesize != 1) { |
83 | printk("SNAT: multiple ranges no longer supported\n"); | 84 | pr_info("SNAT: multiple ranges no longer supported\n"); |
84 | return false; | 85 | return -EINVAL; |
85 | } | 86 | } |
86 | return true; | 87 | return 0; |
87 | } | 88 | } |
88 | 89 | ||
89 | static bool ipt_dnat_checkentry(const struct xt_tgchk_param *par) | 90 | static int ipt_dnat_checkentry(const struct xt_tgchk_param *par) |
90 | { | 91 | { |
91 | const struct nf_nat_multi_range_compat *mr = par->targinfo; | 92 | const struct nf_nat_multi_range_compat *mr = par->targinfo; |
92 | 93 | ||
93 | /* Must be a valid range */ | 94 | /* Must be a valid range */ |
94 | if (mr->rangesize != 1) { | 95 | if (mr->rangesize != 1) { |
95 | printk("DNAT: multiple ranges no longer supported\n"); | 96 | pr_info("DNAT: multiple ranges no longer supported\n"); |
96 | return false; | 97 | return -EINVAL; |
97 | } | 98 | } |
98 | return true; | 99 | return 0; |
99 | } | 100 | } |
100 | 101 | ||
101 | unsigned int | 102 | unsigned int |
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index 4d85b6e55f29..1679e2c0963d 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c | |||
@@ -401,7 +401,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx, | |||
401 | *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC); | 401 | *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC); |
402 | if (*octets == NULL) { | 402 | if (*octets == NULL) { |
403 | if (net_ratelimit()) | 403 | if (net_ratelimit()) |
404 | printk("OOM in bsalg (%d)\n", __LINE__); | 404 | pr_notice("OOM in bsalg (%d)\n", __LINE__); |
405 | return 0; | 405 | return 0; |
406 | } | 406 | } |
407 | 407 | ||
@@ -452,7 +452,7 @@ static unsigned char asn1_oid_decode(struct asn1_ctx *ctx, | |||
452 | *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC); | 452 | *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC); |
453 | if (*oid == NULL) { | 453 | if (*oid == NULL) { |
454 | if (net_ratelimit()) | 454 | if (net_ratelimit()) |
455 | printk("OOM in bsalg (%d)\n", __LINE__); | 455 | pr_notice("OOM in bsalg (%d)\n", __LINE__); |
456 | return 0; | 456 | return 0; |
457 | } | 457 | } |
458 | 458 | ||
@@ -729,7 +729,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, | |||
729 | if (*obj == NULL) { | 729 | if (*obj == NULL) { |
730 | kfree(id); | 730 | kfree(id); |
731 | if (net_ratelimit()) | 731 | if (net_ratelimit()) |
732 | printk("OOM in bsalg (%d)\n", __LINE__); | 732 | pr_notice("OOM in bsalg (%d)\n", __LINE__); |
733 | return 0; | 733 | return 0; |
734 | } | 734 | } |
735 | (*obj)->syntax.l[0] = l; | 735 | (*obj)->syntax.l[0] = l; |
@@ -746,7 +746,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, | |||
746 | kfree(p); | 746 | kfree(p); |
747 | kfree(id); | 747 | kfree(id); |
748 | if (net_ratelimit()) | 748 | if (net_ratelimit()) |
749 | printk("OOM in bsalg (%d)\n", __LINE__); | 749 | pr_notice("OOM in bsalg (%d)\n", __LINE__); |
750 | return 0; | 750 | return 0; |
751 | } | 751 | } |
752 | memcpy((*obj)->syntax.c, p, len); | 752 | memcpy((*obj)->syntax.c, p, len); |
@@ -761,7 +761,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, | |||
761 | if (*obj == NULL) { | 761 | if (*obj == NULL) { |
762 | kfree(id); | 762 | kfree(id); |
763 | if (net_ratelimit()) | 763 | if (net_ratelimit()) |
764 | printk("OOM in bsalg (%d)\n", __LINE__); | 764 | pr_notice("OOM in bsalg (%d)\n", __LINE__); |
765 | return 0; | 765 | return 0; |
766 | } | 766 | } |
767 | if (!asn1_null_decode(ctx, end)) { | 767 | if (!asn1_null_decode(ctx, end)) { |
@@ -782,7 +782,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, | |||
782 | kfree(lp); | 782 | kfree(lp); |
783 | kfree(id); | 783 | kfree(id); |
784 | if (net_ratelimit()) | 784 | if (net_ratelimit()) |
785 | printk("OOM in bsalg (%d)\n", __LINE__); | 785 | pr_notice("OOM in bsalg (%d)\n", __LINE__); |
786 | return 0; | 786 | return 0; |
787 | } | 787 | } |
788 | memcpy((*obj)->syntax.ul, lp, len); | 788 | memcpy((*obj)->syntax.ul, lp, len); |
@@ -803,7 +803,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, | |||
803 | kfree(p); | 803 | kfree(p); |
804 | kfree(id); | 804 | kfree(id); |
805 | if (net_ratelimit()) | 805 | if (net_ratelimit()) |
806 | printk("OOM in bsalg (%d)\n", __LINE__); | 806 | pr_notice("OOM in bsalg (%d)\n", __LINE__); |
807 | return 0; | 807 | return 0; |
808 | } | 808 | } |
809 | memcpy((*obj)->syntax.uc, p, len); | 809 | memcpy((*obj)->syntax.uc, p, len); |
@@ -821,7 +821,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, | |||
821 | if (*obj == NULL) { | 821 | if (*obj == NULL) { |
822 | kfree(id); | 822 | kfree(id); |
823 | if (net_ratelimit()) | 823 | if (net_ratelimit()) |
824 | printk("OOM in bsalg (%d)\n", __LINE__); | 824 | pr_notice("OOM in bsalg (%d)\n", __LINE__); |
825 | return 0; | 825 | return 0; |
826 | } | 826 | } |
827 | (*obj)->syntax.ul[0] = ul; | 827 | (*obj)->syntax.ul[0] = ul; |
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c index c39c9cf6bee6..beb25819c9c9 100644 --- a/net/ipv4/netfilter/nf_nat_standalone.c +++ b/net/ipv4/netfilter/nf_nat_standalone.c | |||
@@ -138,9 +138,8 @@ nf_nat_fn(unsigned int hooknum, | |||
138 | ret = nf_nat_rule_find(skb, hooknum, in, out, | 138 | ret = nf_nat_rule_find(skb, hooknum, in, out, |
139 | ct); | 139 | ct); |
140 | 140 | ||
141 | if (ret != NF_ACCEPT) { | 141 | if (ret != NF_ACCEPT) |
142 | return ret; | 142 | return ret; |
143 | } | ||
144 | } else | 143 | } else |
145 | pr_debug("Already setup manip %s for ct %p\n", | 144 | pr_debug("Already setup manip %s for ct %p\n", |
146 | maniptype == IP_NAT_MANIP_SRC ? "SRC" : "DST", | 145 | maniptype == IP_NAT_MANIP_SRC ? "SRC" : "DST", |
@@ -294,12 +293,12 @@ static int __init nf_nat_standalone_init(void) | |||
294 | #endif | 293 | #endif |
295 | ret = nf_nat_rule_init(); | 294 | ret = nf_nat_rule_init(); |
296 | if (ret < 0) { | 295 | if (ret < 0) { |
297 | printk("nf_nat_init: can't setup rules.\n"); | 296 | pr_err("nf_nat_init: can't setup rules.\n"); |
298 | goto cleanup_decode_session; | 297 | goto cleanup_decode_session; |
299 | } | 298 | } |
300 | ret = nf_register_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops)); | 299 | ret = nf_register_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops)); |
301 | if (ret < 0) { | 300 | if (ret < 0) { |
302 | printk("nf_nat_init: can't register hooks.\n"); | 301 | pr_err("nf_nat_init: can't register hooks.\n"); |
303 | goto cleanup_rule_init; | 302 | goto cleanup_rule_init; |
304 | } | 303 | } |
305 | return ret; | 304 | return ret; |
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c index b096e81500ae..7274a43c7a12 100644 --- a/net/ipv4/netfilter/nf_nat_tftp.c +++ b/net/ipv4/netfilter/nf_nat_tftp.c | |||
@@ -6,7 +6,6 @@ | |||
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | #include <linux/moduleparam.h> | ||
10 | #include <linux/udp.h> | 9 | #include <linux/udp.h> |
11 | 10 | ||
12 | #include <net/netfilter/nf_nat_helper.h> | 11 | #include <net/netfilter/nf_nat_helper.h> |
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 52ef5af78a45..2c7a1639388a 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -381,8 +381,8 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, | |||
381 | icmp_out_count(net, ((struct icmphdr *) | 381 | icmp_out_count(net, ((struct icmphdr *) |
382 | skb_transport_header(skb))->type); | 382 | skb_transport_header(skb))->type); |
383 | 383 | ||
384 | err = NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev, | 384 | err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL, |
385 | dst_output); | 385 | rt->u.dst.dev, dst_output); |
386 | if (err > 0) | 386 | if (err > 0) |
387 | err = net_xmit_errno(err); | 387 | err = net_xmit_errno(err); |
388 | if (err) | 388 | if (err) |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index dea3f9264250..560acc677ce4 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -2277,8 +2277,8 @@ martian_source: | |||
2277 | goto e_inval; | 2277 | goto e_inval; |
2278 | } | 2278 | } |
2279 | 2279 | ||
2280 | int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, | 2280 | int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, |
2281 | u8 tos, struct net_device *dev) | 2281 | u8 tos, struct net_device *dev, bool noref) |
2282 | { | 2282 | { |
2283 | struct rtable * rth; | 2283 | struct rtable * rth; |
2284 | unsigned hash; | 2284 | unsigned hash; |
@@ -2304,10 +2304,15 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2304 | rth->fl.mark == skb->mark && | 2304 | rth->fl.mark == skb->mark && |
2305 | net_eq(dev_net(rth->u.dst.dev), net) && | 2305 | net_eq(dev_net(rth->u.dst.dev), net) && |
2306 | !rt_is_expired(rth)) { | 2306 | !rt_is_expired(rth)) { |
2307 | dst_use(&rth->u.dst, jiffies); | 2307 | if (noref) { |
2308 | dst_use_noref(&rth->u.dst, jiffies); | ||
2309 | skb_dst_set_noref(skb, &rth->u.dst); | ||
2310 | } else { | ||
2311 | dst_use(&rth->u.dst, jiffies); | ||
2312 | skb_dst_set(skb, &rth->u.dst); | ||
2313 | } | ||
2308 | RT_CACHE_STAT_INC(in_hit); | 2314 | RT_CACHE_STAT_INC(in_hit); |
2309 | rcu_read_unlock(); | 2315 | rcu_read_unlock(); |
2310 | skb_dst_set(skb, &rth->u.dst); | ||
2311 | return 0; | 2316 | return 0; |
2312 | } | 2317 | } |
2313 | RT_CACHE_STAT_INC(in_hlist_search); | 2318 | RT_CACHE_STAT_INC(in_hlist_search); |
@@ -2350,6 +2355,7 @@ skip_cache: | |||
2350 | } | 2355 | } |
2351 | return ip_route_input_slow(skb, daddr, saddr, tos, dev); | 2356 | return ip_route_input_slow(skb, daddr, saddr, tos, dev); |
2352 | } | 2357 | } |
2358 | EXPORT_SYMBOL(ip_route_input_common); | ||
2353 | 2359 | ||
2354 | static int __mkroute_output(struct rtable **result, | 2360 | static int __mkroute_output(struct rtable **result, |
2355 | struct fib_result *res, | 2361 | struct fib_result *res, |
@@ -3033,7 +3039,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
3033 | continue; | 3039 | continue; |
3034 | if (rt_is_expired(rt)) | 3040 | if (rt_is_expired(rt)) |
3035 | continue; | 3041 | continue; |
3036 | skb_dst_set(skb, dst_clone(&rt->u.dst)); | 3042 | skb_dst_set_noref(skb, &rt->u.dst); |
3037 | if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid, | 3043 | if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid, |
3038 | cb->nlh->nlmsg_seq, RTM_NEWROUTE, | 3044 | cb->nlh->nlmsg_seq, RTM_NEWROUTE, |
3039 | 1, NLM_F_MULTI) <= 0) { | 3045 | 1, NLM_F_MULTI) <= 0) { |
@@ -3361,5 +3367,4 @@ void __init ip_static_sysctl_init(void) | |||
3361 | #endif | 3367 | #endif |
3362 | 3368 | ||
3363 | EXPORT_SYMBOL(__ip_select_ident); | 3369 | EXPORT_SYMBOL(__ip_select_ident); |
3364 | EXPORT_SYMBOL(ip_route_input); | ||
3365 | EXPORT_SYMBOL(ip_route_output_key); | 3370 | EXPORT_SYMBOL(ip_route_output_key); |
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 5c24db4a3c91..9f6b22206c52 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c | |||
@@ -347,7 +347,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
347 | { .sport = th->dest, | 347 | { .sport = th->dest, |
348 | .dport = th->source } } }; | 348 | .dport = th->source } } }; |
349 | security_req_classify_flow(req, &fl); | 349 | security_req_classify_flow(req, &fl); |
350 | if (ip_route_output_key(&init_net, &rt, &fl)) { | 350 | if (ip_route_output_key(sock_net(sk), &rt, &fl)) { |
351 | reqsk_free(req); | 351 | reqsk_free(req); |
352 | goto out; | 352 | goto out; |
353 | } | 353 | } |
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 1cd5c15174b8..d96c1da4b17c 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
@@ -299,6 +299,13 @@ static struct ctl_table ipv4_table[] = { | |||
299 | .mode = 0644, | 299 | .mode = 0644, |
300 | .proc_handler = ipv4_local_port_range, | 300 | .proc_handler = ipv4_local_port_range, |
301 | }, | 301 | }, |
302 | { | ||
303 | .procname = "ip_local_reserved_ports", | ||
304 | .data = NULL, /* initialized in sysctl_ipv4_init */ | ||
305 | .maxlen = 65536, | ||
306 | .mode = 0644, | ||
307 | .proc_handler = proc_do_large_bitmap, | ||
308 | }, | ||
302 | #ifdef CONFIG_IP_MULTICAST | 309 | #ifdef CONFIG_IP_MULTICAST |
303 | { | 310 | { |
304 | .procname = "igmp_max_memberships", | 311 | .procname = "igmp_max_memberships", |
@@ -736,6 +743,16 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = { | |||
736 | static __init int sysctl_ipv4_init(void) | 743 | static __init int sysctl_ipv4_init(void) |
737 | { | 744 | { |
738 | struct ctl_table_header *hdr; | 745 | struct ctl_table_header *hdr; |
746 | struct ctl_table *i; | ||
747 | |||
748 | for (i = ipv4_table; i->procname; i++) { | ||
749 | if (strcmp(i->procname, "ip_local_reserved_ports") == 0) { | ||
750 | i->data = sysctl_local_reserved_ports; | ||
751 | break; | ||
752 | } | ||
753 | } | ||
754 | if (!i->procname) | ||
755 | return -EINVAL; | ||
739 | 756 | ||
740 | hdr = register_sysctl_paths(net_ipv4_ctl_path, ipv4_table); | 757 | hdr = register_sysctl_paths(net_ipv4_ctl_path, ipv4_table); |
741 | if (hdr == NULL) | 758 | if (hdr == NULL) |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 8ce29747ad9b..6596b4feeddc 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -2215,7 +2215,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, | |||
2215 | default: | 2215 | default: |
2216 | /* fallthru */ | 2216 | /* fallthru */ |
2217 | break; | 2217 | break; |
2218 | }; | 2218 | } |
2219 | 2219 | ||
2220 | if (optlen < sizeof(int)) | 2220 | if (optlen < sizeof(int)) |
2221 | return -EINVAL; | 2221 | return -EINVAL; |
@@ -2840,7 +2840,6 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool) | |||
2840 | if (p->md5_desc.tfm) | 2840 | if (p->md5_desc.tfm) |
2841 | crypto_free_hash(p->md5_desc.tfm); | 2841 | crypto_free_hash(p->md5_desc.tfm); |
2842 | kfree(p); | 2842 | kfree(p); |
2843 | p = NULL; | ||
2844 | } | 2843 | } |
2845 | } | 2844 | } |
2846 | free_percpu(pool); | 2845 | free_percpu(pool); |
@@ -2938,25 +2937,40 @@ retry: | |||
2938 | 2937 | ||
2939 | EXPORT_SYMBOL(tcp_alloc_md5sig_pool); | 2938 | EXPORT_SYMBOL(tcp_alloc_md5sig_pool); |
2940 | 2939 | ||
2941 | struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu) | 2940 | |
2941 | /** | ||
2942 | * tcp_get_md5sig_pool - get md5sig_pool for this user | ||
2943 | * | ||
2944 | * We use percpu structure, so if we succeed, we exit with preemption | ||
2945 | * and BH disabled, to make sure another thread or softirq handling | ||
2946 | * wont try to get same context. | ||
2947 | */ | ||
2948 | struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) | ||
2942 | { | 2949 | { |
2943 | struct tcp_md5sig_pool * __percpu *p; | 2950 | struct tcp_md5sig_pool * __percpu *p; |
2944 | spin_lock_bh(&tcp_md5sig_pool_lock); | 2951 | |
2952 | local_bh_disable(); | ||
2953 | |||
2954 | spin_lock(&tcp_md5sig_pool_lock); | ||
2945 | p = tcp_md5sig_pool; | 2955 | p = tcp_md5sig_pool; |
2946 | if (p) | 2956 | if (p) |
2947 | tcp_md5sig_users++; | 2957 | tcp_md5sig_users++; |
2948 | spin_unlock_bh(&tcp_md5sig_pool_lock); | 2958 | spin_unlock(&tcp_md5sig_pool_lock); |
2949 | return (p ? *per_cpu_ptr(p, cpu) : NULL); | ||
2950 | } | ||
2951 | 2959 | ||
2952 | EXPORT_SYMBOL(__tcp_get_md5sig_pool); | 2960 | if (p) |
2961 | return *per_cpu_ptr(p, smp_processor_id()); | ||
2953 | 2962 | ||
2954 | void __tcp_put_md5sig_pool(void) | 2963 | local_bh_enable(); |
2964 | return NULL; | ||
2965 | } | ||
2966 | EXPORT_SYMBOL(tcp_get_md5sig_pool); | ||
2967 | |||
2968 | void tcp_put_md5sig_pool(void) | ||
2955 | { | 2969 | { |
2970 | local_bh_enable(); | ||
2956 | tcp_free_md5sig_pool(); | 2971 | tcp_free_md5sig_pool(); |
2957 | } | 2972 | } |
2958 | 2973 | EXPORT_SYMBOL(tcp_put_md5sig_pool); | |
2959 | EXPORT_SYMBOL(__tcp_put_md5sig_pool); | ||
2960 | 2974 | ||
2961 | int tcp_md5_hash_header(struct tcp_md5sig_pool *hp, | 2975 | int tcp_md5_hash_header(struct tcp_md5sig_pool *hp, |
2962 | struct tcphdr *th) | 2976 | struct tcphdr *th) |
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c index c209e054a634..377bc9349371 100644 --- a/net/ipv4/tcp_hybla.c +++ b/net/ipv4/tcp_hybla.c | |||
@@ -126,8 +126,8 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) | |||
126 | * calculate 2^fract in a <<7 value. | 126 | * calculate 2^fract in a <<7 value. |
127 | */ | 127 | */ |
128 | is_slowstart = 1; | 128 | is_slowstart = 1; |
129 | increment = ((1 << ca->rho) * hybla_fraction(rho_fractions)) | 129 | increment = ((1 << min(ca->rho, 16U)) * |
130 | - 128; | 130 | hybla_fraction(rho_fractions)) - 128; |
131 | } else { | 131 | } else { |
132 | /* | 132 | /* |
133 | * congestion avoidance | 133 | * congestion avoidance |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index e82162c211bf..548d575e6cc6 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -2639,7 +2639,7 @@ static void DBGUNDO(struct sock *sk, const char *msg) | |||
2639 | if (sk->sk_family == AF_INET) { | 2639 | if (sk->sk_family == AF_INET) { |
2640 | printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", | 2640 | printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", |
2641 | msg, | 2641 | msg, |
2642 | &inet->daddr, ntohs(inet->dport), | 2642 | &inet->inet_daddr, ntohs(inet->inet_dport), |
2643 | tp->snd_cwnd, tcp_left_out(tp), | 2643 | tp->snd_cwnd, tcp_left_out(tp), |
2644 | tp->snd_ssthresh, tp->prior_ssthresh, | 2644 | tp->snd_ssthresh, tp->prior_ssthresh, |
2645 | tp->packets_out); | 2645 | tp->packets_out); |
@@ -2649,7 +2649,7 @@ static void DBGUNDO(struct sock *sk, const char *msg) | |||
2649 | struct ipv6_pinfo *np = inet6_sk(sk); | 2649 | struct ipv6_pinfo *np = inet6_sk(sk); |
2650 | printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", | 2650 | printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", |
2651 | msg, | 2651 | msg, |
2652 | &np->daddr, ntohs(inet->dport), | 2652 | &np->daddr, ntohs(inet->inet_dport), |
2653 | tp->snd_cwnd, tcp_left_out(tp), | 2653 | tp->snd_cwnd, tcp_left_out(tp), |
2654 | tp->snd_ssthresh, tp->prior_ssthresh, | 2654 | tp->snd_ssthresh, tp->prior_ssthresh, |
2655 | tp->packets_out); | 2655 | tp->packets_out); |
@@ -3845,12 +3845,13 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, | |||
3845 | /* 16-bit multiple */ | 3845 | /* 16-bit multiple */ |
3846 | opt_rx->cookie_plus = opsize; | 3846 | opt_rx->cookie_plus = opsize; |
3847 | *hvpp = ptr; | 3847 | *hvpp = ptr; |
3848 | break; | ||
3848 | default: | 3849 | default: |
3849 | /* ignore option */ | 3850 | /* ignore option */ |
3850 | break; | 3851 | break; |
3851 | }; | 3852 | } |
3852 | break; | 3853 | break; |
3853 | }; | 3854 | } |
3854 | 3855 | ||
3855 | ptr += opsize-2; | 3856 | ptr += opsize-2; |
3856 | length -= opsize; | 3857 | length -= opsize; |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 771f8146a2e5..fe193e53af44 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -891,7 +891,7 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, | |||
891 | kfree(newkey); | 891 | kfree(newkey); |
892 | return -ENOMEM; | 892 | return -ENOMEM; |
893 | } | 893 | } |
894 | sk->sk_route_caps &= ~NETIF_F_GSO_MASK; | 894 | sk_nocaps_add(sk, NETIF_F_GSO_MASK); |
895 | } | 895 | } |
896 | if (tcp_alloc_md5sig_pool(sk) == NULL) { | 896 | if (tcp_alloc_md5sig_pool(sk) == NULL) { |
897 | kfree(newkey); | 897 | kfree(newkey); |
@@ -1021,7 +1021,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval, | |||
1021 | return -EINVAL; | 1021 | return -EINVAL; |
1022 | 1022 | ||
1023 | tp->md5sig_info = p; | 1023 | tp->md5sig_info = p; |
1024 | sk->sk_route_caps &= ~NETIF_F_GSO_MASK; | 1024 | sk_nocaps_add(sk, NETIF_F_GSO_MASK); |
1025 | } | 1025 | } |
1026 | 1026 | ||
1027 | newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation); | 1027 | newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation); |
@@ -1462,7 +1462,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1462 | if (newkey != NULL) | 1462 | if (newkey != NULL) |
1463 | tcp_v4_md5_do_add(newsk, newinet->inet_daddr, | 1463 | tcp_v4_md5_do_add(newsk, newinet->inet_daddr, |
1464 | newkey, key->keylen); | 1464 | newkey, key->keylen); |
1465 | newsk->sk_route_caps &= ~NETIF_F_GSO_MASK; | 1465 | sk_nocaps_add(newsk, NETIF_F_GSO_MASK); |
1466 | } | 1466 | } |
1467 | #endif | 1467 | #endif |
1468 | 1468 | ||
@@ -1555,6 +1555,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
1555 | #endif | 1555 | #endif |
1556 | 1556 | ||
1557 | if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ | 1557 | if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ |
1558 | sock_rps_save_rxhash(sk, skb->rxhash); | ||
1558 | TCP_CHECK_TIMER(sk); | 1559 | TCP_CHECK_TIMER(sk); |
1559 | if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) { | 1560 | if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) { |
1560 | rsk = sk; | 1561 | rsk = sk; |
@@ -1579,7 +1580,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
1579 | } | 1580 | } |
1580 | return 0; | 1581 | return 0; |
1581 | } | 1582 | } |
1582 | } | 1583 | } else |
1584 | sock_rps_save_rxhash(sk, skb->rxhash); | ||
1585 | |||
1583 | 1586 | ||
1584 | TCP_CHECK_TIMER(sk); | 1587 | TCP_CHECK_TIMER(sk); |
1585 | if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) { | 1588 | if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) { |
@@ -1672,8 +1675,6 @@ process: | |||
1672 | 1675 | ||
1673 | skb->dev = NULL; | 1676 | skb->dev = NULL; |
1674 | 1677 | ||
1675 | sock_rps_save_rxhash(sk, skb->rxhash); | ||
1676 | |||
1677 | bh_lock_sock_nested(sk); | 1678 | bh_lock_sock_nested(sk); |
1678 | ret = 0; | 1679 | ret = 0; |
1679 | if (!sock_owned_by_user(sk)) { | 1680 | if (!sock_owned_by_user(sk)) { |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 5db3a2c6cb33..b4ed957f201a 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -668,7 +668,6 @@ static unsigned tcp_synack_options(struct sock *sk, | |||
668 | u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ? | 668 | u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ? |
669 | xvp->cookie_plus : | 669 | xvp->cookie_plus : |
670 | 0; | 670 | 0; |
671 | bool doing_ts = ireq->tstamp_ok; | ||
672 | 671 | ||
673 | #ifdef CONFIG_TCP_MD5SIG | 672 | #ifdef CONFIG_TCP_MD5SIG |
674 | *md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req); | 673 | *md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req); |
@@ -681,7 +680,7 @@ static unsigned tcp_synack_options(struct sock *sk, | |||
681 | * rather than TS in order to fit in better with old, | 680 | * rather than TS in order to fit in better with old, |
682 | * buggy kernels, but that was deemed to be unnecessary. | 681 | * buggy kernels, but that was deemed to be unnecessary. |
683 | */ | 682 | */ |
684 | doing_ts &= !ireq->sack_ok; | 683 | ireq->tstamp_ok &= !ireq->sack_ok; |
685 | } | 684 | } |
686 | #else | 685 | #else |
687 | *md5 = NULL; | 686 | *md5 = NULL; |
@@ -696,7 +695,7 @@ static unsigned tcp_synack_options(struct sock *sk, | |||
696 | opts->options |= OPTION_WSCALE; | 695 | opts->options |= OPTION_WSCALE; |
697 | remaining -= TCPOLEN_WSCALE_ALIGNED; | 696 | remaining -= TCPOLEN_WSCALE_ALIGNED; |
698 | } | 697 | } |
699 | if (likely(doing_ts)) { | 698 | if (likely(ireq->tstamp_ok)) { |
700 | opts->options |= OPTION_TS; | 699 | opts->options |= OPTION_TS; |
701 | opts->tsval = TCP_SKB_CB(skb)->when; | 700 | opts->tsval = TCP_SKB_CB(skb)->when; |
702 | opts->tsecr = req->ts_recent; | 701 | opts->tsecr = req->ts_recent; |
@@ -704,7 +703,7 @@ static unsigned tcp_synack_options(struct sock *sk, | |||
704 | } | 703 | } |
705 | if (likely(ireq->sack_ok)) { | 704 | if (likely(ireq->sack_ok)) { |
706 | opts->options |= OPTION_SACK_ADVERTISE; | 705 | opts->options |= OPTION_SACK_ADVERTISE; |
707 | if (unlikely(!doing_ts)) | 706 | if (unlikely(!ireq->tstamp_ok)) |
708 | remaining -= TCPOLEN_SACKPERM_ALIGNED; | 707 | remaining -= TCPOLEN_SACKPERM_ALIGNED; |
709 | } | 708 | } |
710 | 709 | ||
@@ -712,7 +711,7 @@ static unsigned tcp_synack_options(struct sock *sk, | |||
712 | * If the <SYN> options fit, the same options should fit now! | 711 | * If the <SYN> options fit, the same options should fit now! |
713 | */ | 712 | */ |
714 | if (*md5 == NULL && | 713 | if (*md5 == NULL && |
715 | doing_ts && | 714 | ireq->tstamp_ok && |
716 | cookie_plus > TCPOLEN_COOKIE_BASE) { | 715 | cookie_plus > TCPOLEN_COOKIE_BASE) { |
717 | int need = cookie_plus; /* has TCPOLEN_COOKIE_BASE */ | 716 | int need = cookie_plus; /* has TCPOLEN_COOKIE_BASE */ |
718 | 717 | ||
@@ -873,7 +872,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
873 | #ifdef CONFIG_TCP_MD5SIG | 872 | #ifdef CONFIG_TCP_MD5SIG |
874 | /* Calculate the MD5 hash, as we have all we need now */ | 873 | /* Calculate the MD5 hash, as we have all we need now */ |
875 | if (md5) { | 874 | if (md5) { |
876 | sk->sk_route_caps &= ~NETIF_F_GSO_MASK; | 875 | sk_nocaps_add(sk, NETIF_F_GSO_MASK); |
877 | tp->af_specific->calc_md5_hash(opts.hash_location, | 876 | tp->af_specific->calc_md5_hash(opts.hash_location, |
878 | md5, sk, NULL, skb); | 877 | md5, sk, NULL, skb); |
879 | } | 878 | } |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 4560b291180b..eec4ff456e33 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -233,7 +233,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, | |||
233 | */ | 233 | */ |
234 | do { | 234 | do { |
235 | if (low <= snum && snum <= high && | 235 | if (low <= snum && snum <= high && |
236 | !test_bit(snum >> udptable->log, bitmap)) | 236 | !test_bit(snum >> udptable->log, bitmap) && |
237 | !inet_is_reserved_local_port(snum)) | ||
237 | goto found; | 238 | goto found; |
238 | snum += rand; | 239 | snum += rand; |
239 | } while (snum != first); | 240 | } while (snum != first); |
@@ -632,9 +633,9 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) | |||
632 | if (!inet->recverr) { | 633 | if (!inet->recverr) { |
633 | if (!harderr || sk->sk_state != TCP_ESTABLISHED) | 634 | if (!harderr || sk->sk_state != TCP_ESTABLISHED) |
634 | goto out; | 635 | goto out; |
635 | } else { | 636 | } else |
636 | ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1)); | 637 | ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1)); |
637 | } | 638 | |
638 | sk->sk_err = err; | 639 | sk->sk_err = err; |
639 | sk->sk_error_report(sk); | 640 | sk->sk_error_report(sk); |
640 | out: | 641 | out: |
@@ -1062,10 +1063,11 @@ static unsigned int first_packet_length(struct sock *sk) | |||
1062 | spin_unlock_bh(&rcvq->lock); | 1063 | spin_unlock_bh(&rcvq->lock); |
1063 | 1064 | ||
1064 | if (!skb_queue_empty(&list_kill)) { | 1065 | if (!skb_queue_empty(&list_kill)) { |
1065 | lock_sock_bh(sk); | 1066 | bool slow = lock_sock_fast(sk); |
1067 | |||
1066 | __skb_queue_purge(&list_kill); | 1068 | __skb_queue_purge(&list_kill); |
1067 | sk_mem_reclaim_partial(sk); | 1069 | sk_mem_reclaim_partial(sk); |
1068 | unlock_sock_bh(sk); | 1070 | unlock_sock_fast(sk, slow); |
1069 | } | 1071 | } |
1070 | return res; | 1072 | return res; |
1071 | } | 1073 | } |
@@ -1122,6 +1124,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1122 | int peeked; | 1124 | int peeked; |
1123 | int err; | 1125 | int err; |
1124 | int is_udplite = IS_UDPLITE(sk); | 1126 | int is_udplite = IS_UDPLITE(sk); |
1127 | bool slow; | ||
1125 | 1128 | ||
1126 | /* | 1129 | /* |
1127 | * Check any passed addresses | 1130 | * Check any passed addresses |
@@ -1196,10 +1199,10 @@ out: | |||
1196 | return err; | 1199 | return err; |
1197 | 1200 | ||
1198 | csum_copy_err: | 1201 | csum_copy_err: |
1199 | lock_sock_bh(sk); | 1202 | slow = lock_sock_fast(sk); |
1200 | if (!skb_kill_datagram(sk, skb, flags)) | 1203 | if (!skb_kill_datagram(sk, skb, flags)) |
1201 | UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); | 1204 | UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); |
1202 | unlock_sock_bh(sk); | 1205 | unlock_sock_fast(sk, slow); |
1203 | 1206 | ||
1204 | if (noblock) | 1207 | if (noblock) |
1205 | return -EAGAIN; | 1208 | return -EAGAIN; |
@@ -1536,6 +1539,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | |||
1536 | 1539 | ||
1537 | uh = udp_hdr(skb); | 1540 | uh = udp_hdr(skb); |
1538 | ulen = ntohs(uh->len); | 1541 | ulen = ntohs(uh->len); |
1542 | saddr = ip_hdr(skb)->saddr; | ||
1543 | daddr = ip_hdr(skb)->daddr; | ||
1544 | |||
1539 | if (ulen > skb->len) | 1545 | if (ulen > skb->len) |
1540 | goto short_packet; | 1546 | goto short_packet; |
1541 | 1547 | ||
@@ -1549,9 +1555,6 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | |||
1549 | if (udp4_csum_init(skb, uh, proto)) | 1555 | if (udp4_csum_init(skb, uh, proto)) |
1550 | goto csum_error; | 1556 | goto csum_error; |
1551 | 1557 | ||
1552 | saddr = ip_hdr(skb)->saddr; | ||
1553 | daddr = ip_hdr(skb)->daddr; | ||
1554 | |||
1555 | if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) | 1558 | if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) |
1556 | return __udp4_lib_mcast_deliver(net, skb, uh, | 1559 | return __udp4_lib_mcast_deliver(net, skb, uh, |
1557 | saddr, daddr, udptable); | 1560 | saddr, daddr, udptable); |
@@ -1624,9 +1627,9 @@ int udp_rcv(struct sk_buff *skb) | |||
1624 | 1627 | ||
1625 | void udp_destroy_sock(struct sock *sk) | 1628 | void udp_destroy_sock(struct sock *sk) |
1626 | { | 1629 | { |
1627 | lock_sock_bh(sk); | 1630 | bool slow = lock_sock_fast(sk); |
1628 | udp_flush_pending_frames(sk); | 1631 | udp_flush_pending_frames(sk); |
1629 | unlock_sock_bh(sk); | 1632 | unlock_sock_fast(sk, slow); |
1630 | } | 1633 | } |
1631 | 1634 | ||
1632 | /* | 1635 | /* |
@@ -1685,8 +1688,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, | |||
1685 | return -ENOPROTOOPT; | 1688 | return -ENOPROTOOPT; |
1686 | if (val != 0 && val < 8) /* Illegal coverage: use default (8) */ | 1689 | if (val != 0 && val < 8) /* Illegal coverage: use default (8) */ |
1687 | val = 8; | 1690 | val = 8; |
1688 | else if (val > USHORT_MAX) | 1691 | else if (val > USHRT_MAX) |
1689 | val = USHORT_MAX; | 1692 | val = USHRT_MAX; |
1690 | up->pcslen = val; | 1693 | up->pcslen = val; |
1691 | up->pcflag |= UDPLITE_SEND_CC; | 1694 | up->pcflag |= UDPLITE_SEND_CC; |
1692 | break; | 1695 | break; |
@@ -1699,8 +1702,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, | |||
1699 | return -ENOPROTOOPT; | 1702 | return -ENOPROTOOPT; |
1700 | if (val != 0 && val < 8) /* Avoid silly minimal values. */ | 1703 | if (val != 0 && val < 8) /* Avoid silly minimal values. */ |
1701 | val = 8; | 1704 | val = 8; |
1702 | else if (val > USHORT_MAX) | 1705 | else if (val > USHRT_MAX) |
1703 | val = USHORT_MAX; | 1706 | val = USHRT_MAX; |
1704 | up->pcrlen = val; | 1707 | up->pcrlen = val; |
1705 | up->pcflag |= UDPLITE_RECV_CC; | 1708 | up->pcflag |= UDPLITE_RECV_CC; |
1706 | break; | 1709 | break; |
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index c791bb63203f..ad8fbb871aa0 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c | |||
@@ -27,8 +27,8 @@ static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb) | |||
27 | if (skb_dst(skb) == NULL) { | 27 | if (skb_dst(skb) == NULL) { |
28 | const struct iphdr *iph = ip_hdr(skb); | 28 | const struct iphdr *iph = ip_hdr(skb); |
29 | 29 | ||
30 | if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, | 30 | if (ip_route_input_noref(skb, iph->daddr, iph->saddr, |
31 | skb->dev)) | 31 | iph->tos, skb->dev)) |
32 | goto drop; | 32 | goto drop; |
33 | } | 33 | } |
34 | return dst_input(skb); | 34 | return dst_input(skb); |
@@ -61,7 +61,7 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async) | |||
61 | iph->tot_len = htons(skb->len); | 61 | iph->tot_len = htons(skb->len); |
62 | ip_send_check(iph); | 62 | ip_send_check(iph); |
63 | 63 | ||
64 | NF_HOOK(PF_INET, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, | 64 | NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, |
65 | xfrm4_rcv_encap_finish); | 65 | xfrm4_rcv_encap_finish); |
66 | return 0; | 66 | return 0; |
67 | } | 67 | } |
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index c908bd99bcba..571aa96a175c 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c | |||
@@ -86,7 +86,7 @@ static int xfrm4_output_finish(struct sk_buff *skb) | |||
86 | 86 | ||
87 | int xfrm4_output(struct sk_buff *skb) | 87 | int xfrm4_output(struct sk_buff *skb) |
88 | { | 88 | { |
89 | return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb, | 89 | return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, |
90 | NULL, skb_dst(skb)->dev, xfrm4_output_finish, | 90 | NULL, skb_dst(skb)->dev, xfrm4_output_finish, |
91 | !(IPCB(skb)->flags & IPSKB_REROUTED)); | 91 | !(IPCB(skb)->flags & IPSKB_REROUTED)); |
92 | } | 92 | } |
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index a578096152ab..36d7437ac054 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig | |||
@@ -229,6 +229,20 @@ config IPV6_MROUTE | |||
229 | Experimental support for IPv6 multicast forwarding. | 229 | Experimental support for IPv6 multicast forwarding. |
230 | If unsure, say N. | 230 | If unsure, say N. |
231 | 231 | ||
232 | config IPV6_MROUTE_MULTIPLE_TABLES | ||
233 | bool "IPv6: multicast policy routing" | ||
234 | depends on IPV6_MROUTE | ||
235 | select FIB_RULES | ||
236 | help | ||
237 | Normally, a multicast router runs a userspace daemon and decides | ||
238 | what to do with a multicast packet based on the source and | ||
239 | destination addresses. If you say Y here, the multicast router | ||
240 | will also be able to take interfaces and packet marks into | ||
241 | account and run multiple instances of userspace daemons | ||
242 | simultaneously, each one handling a single table. | ||
243 | |||
244 | If unsure, say N. | ||
245 | |||
232 | config IPV6_PIMSM_V2 | 246 | config IPV6_PIMSM_V2 |
233 | bool "IPv6: PIM-SM version 2 support (EXPERIMENTAL)" | 247 | bool "IPv6: PIM-SM version 2 support (EXPERIMENTAL)" |
234 | depends on IPV6_MROUTE | 248 | depends on IPV6_MROUTE |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 3984f52181f4..e1a698df5706 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -553,7 +553,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) | |||
553 | if (del_timer(&ifp->timer)) | 553 | if (del_timer(&ifp->timer)) |
554 | pr_notice("Timer is still running, when freeing ifa=%p\n", ifp); | 554 | pr_notice("Timer is still running, when freeing ifa=%p\n", ifp); |
555 | 555 | ||
556 | if (!ifp->dead) { | 556 | if (ifp->state != INET6_IFADDR_STATE_DEAD) { |
557 | pr_warning("Freeing alive inet6 address %p\n", ifp); | 557 | pr_warning("Freeing alive inet6 address %p\n", ifp); |
558 | return; | 558 | return; |
559 | } | 559 | } |
@@ -648,6 +648,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, | |||
648 | ipv6_addr_copy(&ifa->addr, addr); | 648 | ipv6_addr_copy(&ifa->addr, addr); |
649 | 649 | ||
650 | spin_lock_init(&ifa->lock); | 650 | spin_lock_init(&ifa->lock); |
651 | spin_lock_init(&ifa->state_lock); | ||
651 | init_timer(&ifa->timer); | 652 | init_timer(&ifa->timer); |
652 | INIT_HLIST_NODE(&ifa->addr_lst); | 653 | INIT_HLIST_NODE(&ifa->addr_lst); |
653 | ifa->timer.data = (unsigned long) ifa; | 654 | ifa->timer.data = (unsigned long) ifa; |
@@ -714,13 +715,20 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) | |||
714 | { | 715 | { |
715 | struct inet6_ifaddr *ifa, *ifn; | 716 | struct inet6_ifaddr *ifa, *ifn; |
716 | struct inet6_dev *idev = ifp->idev; | 717 | struct inet6_dev *idev = ifp->idev; |
718 | int state; | ||
717 | int hash; | 719 | int hash; |
718 | int deleted = 0, onlink = 0; | 720 | int deleted = 0, onlink = 0; |
719 | unsigned long expires = jiffies; | 721 | unsigned long expires = jiffies; |
720 | 722 | ||
721 | hash = ipv6_addr_hash(&ifp->addr); | 723 | hash = ipv6_addr_hash(&ifp->addr); |
722 | 724 | ||
723 | ifp->dead = 1; | 725 | spin_lock_bh(&ifp->state_lock); |
726 | state = ifp->state; | ||
727 | ifp->state = INET6_IFADDR_STATE_DEAD; | ||
728 | spin_unlock_bh(&ifp->state_lock); | ||
729 | |||
730 | if (state == INET6_IFADDR_STATE_DEAD) | ||
731 | goto out; | ||
724 | 732 | ||
725 | spin_lock_bh(&addrconf_hash_lock); | 733 | spin_lock_bh(&addrconf_hash_lock); |
726 | hlist_del_init_rcu(&ifp->addr_lst); | 734 | hlist_del_init_rcu(&ifp->addr_lst); |
@@ -818,6 +826,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) | |||
818 | dst_release(&rt->u.dst); | 826 | dst_release(&rt->u.dst); |
819 | } | 827 | } |
820 | 828 | ||
829 | out: | ||
821 | in6_ifa_put(ifp); | 830 | in6_ifa_put(ifp); |
822 | } | 831 | } |
823 | 832 | ||
@@ -1274,7 +1283,7 @@ static int ipv6_count_addresses(struct inet6_dev *idev) | |||
1274 | int ipv6_chk_addr(struct net *net, struct in6_addr *addr, | 1283 | int ipv6_chk_addr(struct net *net, struct in6_addr *addr, |
1275 | struct net_device *dev, int strict) | 1284 | struct net_device *dev, int strict) |
1276 | { | 1285 | { |
1277 | struct inet6_ifaddr *ifp = NULL; | 1286 | struct inet6_ifaddr *ifp; |
1278 | struct hlist_node *node; | 1287 | struct hlist_node *node; |
1279 | unsigned int hash = ipv6_addr_hash(addr); | 1288 | unsigned int hash = ipv6_addr_hash(addr); |
1280 | 1289 | ||
@@ -1283,15 +1292,16 @@ int ipv6_chk_addr(struct net *net, struct in6_addr *addr, | |||
1283 | if (!net_eq(dev_net(ifp->idev->dev), net)) | 1292 | if (!net_eq(dev_net(ifp->idev->dev), net)) |
1284 | continue; | 1293 | continue; |
1285 | if (ipv6_addr_equal(&ifp->addr, addr) && | 1294 | if (ipv6_addr_equal(&ifp->addr, addr) && |
1286 | !(ifp->flags&IFA_F_TENTATIVE)) { | 1295 | !(ifp->flags&IFA_F_TENTATIVE) && |
1287 | if (dev == NULL || ifp->idev->dev == dev || | 1296 | (dev == NULL || ifp->idev->dev == dev || |
1288 | !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) | 1297 | !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) { |
1289 | break; | 1298 | rcu_read_unlock_bh(); |
1299 | return 1; | ||
1290 | } | 1300 | } |
1291 | } | 1301 | } |
1292 | rcu_read_unlock_bh(); | ||
1293 | 1302 | ||
1294 | return ifp != NULL; | 1303 | rcu_read_unlock_bh(); |
1304 | return 0; | ||
1295 | } | 1305 | } |
1296 | EXPORT_SYMBOL(ipv6_chk_addr); | 1306 | EXPORT_SYMBOL(ipv6_chk_addr); |
1297 | 1307 | ||
@@ -1396,10 +1406,27 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) | |||
1396 | ipv6_del_addr(ifp); | 1406 | ipv6_del_addr(ifp); |
1397 | } | 1407 | } |
1398 | 1408 | ||
1409 | static int addrconf_dad_end(struct inet6_ifaddr *ifp) | ||
1410 | { | ||
1411 | int err = -ENOENT; | ||
1412 | |||
1413 | spin_lock(&ifp->state_lock); | ||
1414 | if (ifp->state == INET6_IFADDR_STATE_DAD) { | ||
1415 | ifp->state = INET6_IFADDR_STATE_POSTDAD; | ||
1416 | err = 0; | ||
1417 | } | ||
1418 | spin_unlock(&ifp->state_lock); | ||
1419 | |||
1420 | return err; | ||
1421 | } | ||
1422 | |||
1399 | void addrconf_dad_failure(struct inet6_ifaddr *ifp) | 1423 | void addrconf_dad_failure(struct inet6_ifaddr *ifp) |
1400 | { | 1424 | { |
1401 | struct inet6_dev *idev = ifp->idev; | 1425 | struct inet6_dev *idev = ifp->idev; |
1402 | 1426 | ||
1427 | if (addrconf_dad_end(ifp)) | ||
1428 | return; | ||
1429 | |||
1403 | if (net_ratelimit()) | 1430 | if (net_ratelimit()) |
1404 | printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n", | 1431 | printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n", |
1405 | ifp->idev->dev->name, &ifp->addr); | 1432 | ifp->idev->dev->name, &ifp->addr); |
@@ -2624,6 +2651,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2624 | struct inet6_dev *idev; | 2651 | struct inet6_dev *idev; |
2625 | struct inet6_ifaddr *ifa; | 2652 | struct inet6_ifaddr *ifa; |
2626 | LIST_HEAD(keep_list); | 2653 | LIST_HEAD(keep_list); |
2654 | int state; | ||
2627 | 2655 | ||
2628 | ASSERT_RTNL(); | 2656 | ASSERT_RTNL(); |
2629 | 2657 | ||
@@ -2664,7 +2692,6 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2664 | ifa = list_first_entry(&idev->tempaddr_list, | 2692 | ifa = list_first_entry(&idev->tempaddr_list, |
2665 | struct inet6_ifaddr, tmp_list); | 2693 | struct inet6_ifaddr, tmp_list); |
2666 | list_del(&ifa->tmp_list); | 2694 | list_del(&ifa->tmp_list); |
2667 | ifa->dead = 1; | ||
2668 | write_unlock_bh(&idev->lock); | 2695 | write_unlock_bh(&idev->lock); |
2669 | spin_lock_bh(&ifa->lock); | 2696 | spin_lock_bh(&ifa->lock); |
2670 | 2697 | ||
@@ -2702,23 +2729,35 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2702 | 2729 | ||
2703 | /* Flag it for later restoration when link comes up */ | 2730 | /* Flag it for later restoration when link comes up */ |
2704 | ifa->flags |= IFA_F_TENTATIVE; | 2731 | ifa->flags |= IFA_F_TENTATIVE; |
2705 | in6_ifa_hold(ifa); | 2732 | ifa->state = INET6_IFADDR_STATE_DAD; |
2733 | |||
2706 | write_unlock_bh(&idev->lock); | 2734 | write_unlock_bh(&idev->lock); |
2735 | |||
2736 | in6_ifa_hold(ifa); | ||
2707 | } else { | 2737 | } else { |
2708 | list_del(&ifa->if_list); | 2738 | list_del(&ifa->if_list); |
2709 | ifa->dead = 1; | ||
2710 | write_unlock_bh(&idev->lock); | ||
2711 | 2739 | ||
2712 | /* clear hash table */ | 2740 | /* clear hash table */ |
2713 | spin_lock_bh(&addrconf_hash_lock); | 2741 | spin_lock_bh(&addrconf_hash_lock); |
2714 | hlist_del_init_rcu(&ifa->addr_lst); | 2742 | hlist_del_init_rcu(&ifa->addr_lst); |
2715 | spin_unlock_bh(&addrconf_hash_lock); | 2743 | spin_unlock_bh(&addrconf_hash_lock); |
2744 | |||
2745 | write_unlock_bh(&idev->lock); | ||
2746 | spin_lock_bh(&ifa->state_lock); | ||
2747 | state = ifa->state; | ||
2748 | ifa->state = INET6_IFADDR_STATE_DEAD; | ||
2749 | spin_unlock_bh(&ifa->state_lock); | ||
2750 | |||
2751 | if (state == INET6_IFADDR_STATE_DEAD) | ||
2752 | goto put_ifa; | ||
2716 | } | 2753 | } |
2717 | 2754 | ||
2718 | __ipv6_ifa_notify(RTM_DELADDR, ifa); | 2755 | __ipv6_ifa_notify(RTM_DELADDR, ifa); |
2719 | if (ifa->dead) | 2756 | if (ifa->state == INET6_IFADDR_STATE_DEAD) |
2720 | atomic_notifier_call_chain(&inet6addr_chain, | 2757 | atomic_notifier_call_chain(&inet6addr_chain, |
2721 | NETDEV_DOWN, ifa); | 2758 | NETDEV_DOWN, ifa); |
2759 | |||
2760 | put_ifa: | ||
2722 | in6_ifa_put(ifa); | 2761 | in6_ifa_put(ifa); |
2723 | 2762 | ||
2724 | write_lock_bh(&idev->lock); | 2763 | write_lock_bh(&idev->lock); |
@@ -2814,10 +2853,10 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags) | |||
2814 | net_srandom(ifp->addr.s6_addr32[3]); | 2853 | net_srandom(ifp->addr.s6_addr32[3]); |
2815 | 2854 | ||
2816 | read_lock_bh(&idev->lock); | 2855 | read_lock_bh(&idev->lock); |
2817 | if (ifp->dead) | 2856 | spin_lock(&ifp->lock); |
2857 | if (ifp->state == INET6_IFADDR_STATE_DEAD) | ||
2818 | goto out; | 2858 | goto out; |
2819 | 2859 | ||
2820 | spin_lock(&ifp->lock); | ||
2821 | if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || | 2860 | if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || |
2822 | idev->cnf.accept_dad < 1 || | 2861 | idev->cnf.accept_dad < 1 || |
2823 | !(ifp->flags&IFA_F_TENTATIVE) || | 2862 | !(ifp->flags&IFA_F_TENTATIVE) || |
@@ -2851,8 +2890,8 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags) | |||
2851 | ip6_ins_rt(ifp->rt); | 2890 | ip6_ins_rt(ifp->rt); |
2852 | 2891 | ||
2853 | addrconf_dad_kick(ifp); | 2892 | addrconf_dad_kick(ifp); |
2854 | spin_unlock(&ifp->lock); | ||
2855 | out: | 2893 | out: |
2894 | spin_unlock(&ifp->lock); | ||
2856 | read_unlock_bh(&idev->lock); | 2895 | read_unlock_bh(&idev->lock); |
2857 | } | 2896 | } |
2858 | 2897 | ||
@@ -2862,6 +2901,9 @@ static void addrconf_dad_timer(unsigned long data) | |||
2862 | struct inet6_dev *idev = ifp->idev; | 2901 | struct inet6_dev *idev = ifp->idev; |
2863 | struct in6_addr mcaddr; | 2902 | struct in6_addr mcaddr; |
2864 | 2903 | ||
2904 | if (!ifp->probes && addrconf_dad_end(ifp)) | ||
2905 | goto out; | ||
2906 | |||
2865 | read_lock(&idev->lock); | 2907 | read_lock(&idev->lock); |
2866 | if (idev->dead || !(idev->if_flags & IF_READY)) { | 2908 | if (idev->dead || !(idev->if_flags & IF_READY)) { |
2867 | read_unlock(&idev->lock); | 2909 | read_unlock(&idev->lock); |
@@ -2869,6 +2911,12 @@ static void addrconf_dad_timer(unsigned long data) | |||
2869 | } | 2911 | } |
2870 | 2912 | ||
2871 | spin_lock(&ifp->lock); | 2913 | spin_lock(&ifp->lock); |
2914 | if (ifp->state == INET6_IFADDR_STATE_DEAD) { | ||
2915 | spin_unlock(&ifp->lock); | ||
2916 | read_unlock(&idev->lock); | ||
2917 | goto out; | ||
2918 | } | ||
2919 | |||
2872 | if (ifp->probes == 0) { | 2920 | if (ifp->probes == 0) { |
2873 | /* | 2921 | /* |
2874 | * DAD was successful | 2922 | * DAD was successful |
@@ -2935,12 +2983,10 @@ static void addrconf_dad_run(struct inet6_dev *idev) | |||
2935 | read_lock_bh(&idev->lock); | 2983 | read_lock_bh(&idev->lock); |
2936 | list_for_each_entry(ifp, &idev->addr_list, if_list) { | 2984 | list_for_each_entry(ifp, &idev->addr_list, if_list) { |
2937 | spin_lock(&ifp->lock); | 2985 | spin_lock(&ifp->lock); |
2938 | if (!(ifp->flags & IFA_F_TENTATIVE)) { | 2986 | if (ifp->flags & IFA_F_TENTATIVE && |
2939 | spin_unlock(&ifp->lock); | 2987 | ifp->state == INET6_IFADDR_STATE_DAD) |
2940 | continue; | 2988 | addrconf_dad_kick(ifp); |
2941 | } | ||
2942 | spin_unlock(&ifp->lock); | 2989 | spin_unlock(&ifp->lock); |
2943 | addrconf_dad_kick(ifp); | ||
2944 | } | 2990 | } |
2945 | read_unlock_bh(&idev->lock); | 2991 | read_unlock_bh(&idev->lock); |
2946 | } | 2992 | } |
@@ -4049,7 +4095,8 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) | |||
4049 | addrconf_leave_solict(ifp->idev, &ifp->addr); | 4095 | addrconf_leave_solict(ifp->idev, &ifp->addr); |
4050 | dst_hold(&ifp->rt->u.dst); | 4096 | dst_hold(&ifp->rt->u.dst); |
4051 | 4097 | ||
4052 | if (ifp->dead && ip6_del_rt(ifp->rt)) | 4098 | if (ifp->state == INET6_IFADDR_STATE_DEAD && |
4099 | ip6_del_rt(ifp->rt)) | ||
4053 | dst_free(&ifp->rt->u.dst); | 4100 | dst_free(&ifp->rt->u.dst); |
4054 | break; | 4101 | break; |
4055 | } | 4102 | } |
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index ae404c9a746c..8c4348cb1950 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c | |||
@@ -422,10 +422,6 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
422 | ifal->ifal_prefixlen > 128) | 422 | ifal->ifal_prefixlen > 128) |
423 | return -EINVAL; | 423 | return -EINVAL; |
424 | 424 | ||
425 | if (ifal->ifal_index && | ||
426 | !__dev_get_by_index(net, ifal->ifal_index)) | ||
427 | return -EINVAL; | ||
428 | |||
429 | if (!tb[IFAL_ADDRESS]) | 425 | if (!tb[IFAL_ADDRESS]) |
430 | return -EINVAL; | 426 | return -EINVAL; |
431 | 427 | ||
@@ -441,6 +437,10 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
441 | 437 | ||
442 | switch(nlh->nlmsg_type) { | 438 | switch(nlh->nlmsg_type) { |
443 | case RTM_NEWADDRLABEL: | 439 | case RTM_NEWADDRLABEL: |
440 | if (ifal->ifal_index && | ||
441 | !__dev_get_by_index(net, ifal->ifal_index)) | ||
442 | return -EINVAL; | ||
443 | |||
444 | err = ip6addrlbl_add(net, pfx, ifal->ifal_prefixlen, | 444 | err = ip6addrlbl_add(net, pfx, ifal->ifal_prefixlen, |
445 | ifal->ifal_index, label, | 445 | ifal->ifal_index, label, |
446 | nlh->nlmsg_flags & NLM_F_REPLACE); | 446 | nlh->nlmsg_flags & NLM_F_REPLACE); |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index d2df3144429b..e733942dafe1 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -200,7 +200,7 @@ lookup_protocol: | |||
200 | 200 | ||
201 | inet_sk(sk)->pinet6 = np = inet6_sk_generic(sk); | 201 | inet_sk(sk)->pinet6 = np = inet6_sk_generic(sk); |
202 | np->hop_limit = -1; | 202 | np->hop_limit = -1; |
203 | np->mcast_hops = -1; | 203 | np->mcast_hops = IPV6_DEFAULT_MCASTHOPS; |
204 | np->mc_loop = 1; | 204 | np->mc_loop = 1; |
205 | np->pmtudisc = IPV6_PMTUDISC_WANT; | 205 | np->pmtudisc = IPV6_PMTUDISC_WANT; |
206 | np->ipv6only = net->ipv6.sysctl.bindv6only; | 206 | np->ipv6only = net->ipv6.sysctl.bindv6only; |
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 5959230bc6c1..712684687c9a 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -222,6 +222,8 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, | |||
222 | if (!skb) | 222 | if (!skb) |
223 | return; | 223 | return; |
224 | 224 | ||
225 | skb->protocol = htons(ETH_P_IPV6); | ||
226 | |||
225 | serr = SKB_EXT_ERR(skb); | 227 | serr = SKB_EXT_ERR(skb); |
226 | serr->ee.ee_errno = err; | 228 | serr->ee.ee_errno = err; |
227 | serr->ee.ee_origin = SO_EE_ORIGIN_ICMP6; | 229 | serr->ee.ee_origin = SO_EE_ORIGIN_ICMP6; |
@@ -255,6 +257,8 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info) | |||
255 | if (!skb) | 257 | if (!skb) |
256 | return; | 258 | return; |
257 | 259 | ||
260 | skb->protocol = htons(ETH_P_IPV6); | ||
261 | |||
258 | skb_put(skb, sizeof(struct ipv6hdr)); | 262 | skb_put(skb, sizeof(struct ipv6hdr)); |
259 | skb_reset_network_header(skb); | 263 | skb_reset_network_header(skb); |
260 | iph = ipv6_hdr(skb); | 264 | iph = ipv6_hdr(skb); |
@@ -358,7 +362,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) | |||
358 | sin->sin6_flowinfo = 0; | 362 | sin->sin6_flowinfo = 0; |
359 | sin->sin6_port = serr->port; | 363 | sin->sin6_port = serr->port; |
360 | sin->sin6_scope_id = 0; | 364 | sin->sin6_scope_id = 0; |
361 | if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) { | 365 | if (skb->protocol == htons(ETH_P_IPV6)) { |
362 | ipv6_addr_copy(&sin->sin6_addr, | 366 | ipv6_addr_copy(&sin->sin6_addr, |
363 | (struct in6_addr *)(nh + serr->addr_offset)); | 367 | (struct in6_addr *)(nh + serr->addr_offset)); |
364 | if (np->sndflow) | 368 | if (np->sndflow) |
@@ -380,7 +384,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) | |||
380 | sin->sin6_family = AF_INET6; | 384 | sin->sin6_family = AF_INET6; |
381 | sin->sin6_flowinfo = 0; | 385 | sin->sin6_flowinfo = 0; |
382 | sin->sin6_scope_id = 0; | 386 | sin->sin6_scope_id = 0; |
383 | if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) { | 387 | if (skb->protocol == htons(ETH_P_IPV6)) { |
384 | ipv6_addr_copy(&sin->sin6_addr, &ipv6_hdr(skb)->saddr); | 388 | ipv6_addr_copy(&sin->sin6_addr, &ipv6_hdr(skb)->saddr); |
385 | if (np->rxopt.all) | 389 | if (np->rxopt.all) |
386 | datagram_recv_ctl(sk, msg, skb); | 390 | datagram_recv_ctl(sk, msg, skb); |
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index ce7992982557..03e62f94ff8e 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -483,7 +483,7 @@ route_done: | |||
483 | np->tclass, NULL, &fl, (struct rt6_info*)dst, | 483 | np->tclass, NULL, &fl, (struct rt6_info*)dst, |
484 | MSG_DONTWAIT, np->dontfrag); | 484 | MSG_DONTWAIT, np->dontfrag); |
485 | if (err) { | 485 | if (err) { |
486 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); | 486 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); |
487 | ip6_flush_pending_frames(sk); | 487 | ip6_flush_pending_frames(sk); |
488 | goto out_put; | 488 | goto out_put; |
489 | } | 489 | } |
@@ -565,7 +565,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) | |||
565 | np->dontfrag); | 565 | np->dontfrag); |
566 | 566 | ||
567 | if (err) { | 567 | if (err) { |
568 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); | 568 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); |
569 | ip6_flush_pending_frames(sk); | 569 | ip6_flush_pending_frames(sk); |
570 | goto out_put; | 570 | goto out_put; |
571 | } | 571 | } |
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 6aa7ee1295c2..a83e9209cecc 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c | |||
@@ -143,7 +143,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
143 | /* Must drop socket now because of tproxy. */ | 143 | /* Must drop socket now because of tproxy. */ |
144 | skb_orphan(skb); | 144 | skb_orphan(skb); |
145 | 145 | ||
146 | return NF_HOOK(PF_INET6, NF_INET_PRE_ROUTING, skb, dev, NULL, | 146 | return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, dev, NULL, |
147 | ip6_rcv_finish); | 147 | ip6_rcv_finish); |
148 | err: | 148 | err: |
149 | IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS); | 149 | IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS); |
@@ -236,7 +236,7 @@ discard: | |||
236 | 236 | ||
237 | int ip6_input(struct sk_buff *skb) | 237 | int ip6_input(struct sk_buff *skb) |
238 | { | 238 | { |
239 | return NF_HOOK(PF_INET6, NF_INET_LOCAL_IN, skb, skb->dev, NULL, | 239 | return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN, skb, skb->dev, NULL, |
240 | ip6_input_finish); | 240 | ip6_input_finish); |
241 | } | 241 | } |
242 | 242 | ||
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index e7a5f17d5e95..89425af0684c 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -67,8 +67,8 @@ int __ip6_local_out(struct sk_buff *skb) | |||
67 | len = 0; | 67 | len = 0; |
68 | ipv6_hdr(skb)->payload_len = htons(len); | 68 | ipv6_hdr(skb)->payload_len = htons(len); |
69 | 69 | ||
70 | return nf_hook(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev, | 70 | return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, |
71 | dst_output); | 71 | skb_dst(skb)->dev, dst_output); |
72 | } | 72 | } |
73 | 73 | ||
74 | int ip6_local_out(struct sk_buff *skb) | 74 | int ip6_local_out(struct sk_buff *skb) |
@@ -83,22 +83,6 @@ int ip6_local_out(struct sk_buff *skb) | |||
83 | } | 83 | } |
84 | EXPORT_SYMBOL_GPL(ip6_local_out); | 84 | EXPORT_SYMBOL_GPL(ip6_local_out); |
85 | 85 | ||
86 | static int ip6_output_finish(struct sk_buff *skb) | ||
87 | { | ||
88 | struct dst_entry *dst = skb_dst(skb); | ||
89 | |||
90 | if (dst->hh) | ||
91 | return neigh_hh_output(dst->hh, skb); | ||
92 | else if (dst->neighbour) | ||
93 | return dst->neighbour->output(skb); | ||
94 | |||
95 | IP6_INC_STATS_BH(dev_net(dst->dev), | ||
96 | ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); | ||
97 | kfree_skb(skb); | ||
98 | return -EINVAL; | ||
99 | |||
100 | } | ||
101 | |||
102 | /* dev_loopback_xmit for use with netfilter. */ | 86 | /* dev_loopback_xmit for use with netfilter. */ |
103 | static int ip6_dev_loopback_xmit(struct sk_buff *newskb) | 87 | static int ip6_dev_loopback_xmit(struct sk_buff *newskb) |
104 | { | 88 | { |
@@ -112,8 +96,7 @@ static int ip6_dev_loopback_xmit(struct sk_buff *newskb) | |||
112 | return 0; | 96 | return 0; |
113 | } | 97 | } |
114 | 98 | ||
115 | 99 | static int ip6_finish_output2(struct sk_buff *skb) | |
116 | static int ip6_output2(struct sk_buff *skb) | ||
117 | { | 100 | { |
118 | struct dst_entry *dst = skb_dst(skb); | 101 | struct dst_entry *dst = skb_dst(skb); |
119 | struct net_device *dev = dst->dev; | 102 | struct net_device *dev = dst->dev; |
@@ -125,7 +108,7 @@ static int ip6_output2(struct sk_buff *skb) | |||
125 | struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); | 108 | struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); |
126 | 109 | ||
127 | if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) && | 110 | if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) && |
128 | ((mroute6_socket(dev_net(dev)) && | 111 | ((mroute6_socket(dev_net(dev), skb) && |
129 | !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || | 112 | !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || |
130 | ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, | 113 | ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, |
131 | &ipv6_hdr(skb)->saddr))) { | 114 | &ipv6_hdr(skb)->saddr))) { |
@@ -135,8 +118,8 @@ static int ip6_output2(struct sk_buff *skb) | |||
135 | is not supported in any case. | 118 | is not supported in any case. |
136 | */ | 119 | */ |
137 | if (newskb) | 120 | if (newskb) |
138 | NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, newskb, | 121 | NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, |
139 | NULL, newskb->dev, | 122 | newskb, NULL, newskb->dev, |
140 | ip6_dev_loopback_xmit); | 123 | ip6_dev_loopback_xmit); |
141 | 124 | ||
142 | if (ipv6_hdr(skb)->hop_limit == 0) { | 125 | if (ipv6_hdr(skb)->hop_limit == 0) { |
@@ -151,8 +134,15 @@ static int ip6_output2(struct sk_buff *skb) | |||
151 | skb->len); | 134 | skb->len); |
152 | } | 135 | } |
153 | 136 | ||
154 | return NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, skb, NULL, skb->dev, | 137 | if (dst->hh) |
155 | ip6_output_finish); | 138 | return neigh_hh_output(dst->hh, skb); |
139 | else if (dst->neighbour) | ||
140 | return dst->neighbour->output(skb); | ||
141 | |||
142 | IP6_INC_STATS_BH(dev_net(dst->dev), | ||
143 | ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); | ||
144 | kfree_skb(skb); | ||
145 | return -EINVAL; | ||
156 | } | 146 | } |
157 | 147 | ||
158 | static inline int ip6_skb_dst_mtu(struct sk_buff *skb) | 148 | static inline int ip6_skb_dst_mtu(struct sk_buff *skb) |
@@ -163,21 +153,29 @@ static inline int ip6_skb_dst_mtu(struct sk_buff *skb) | |||
163 | skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); | 153 | skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); |
164 | } | 154 | } |
165 | 155 | ||
156 | static int ip6_finish_output(struct sk_buff *skb) | ||
157 | { | ||
158 | if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || | ||
159 | dst_allfrag(skb_dst(skb))) | ||
160 | return ip6_fragment(skb, ip6_finish_output2); | ||
161 | else | ||
162 | return ip6_finish_output2(skb); | ||
163 | } | ||
164 | |||
166 | int ip6_output(struct sk_buff *skb) | 165 | int ip6_output(struct sk_buff *skb) |
167 | { | 166 | { |
167 | struct net_device *dev = skb_dst(skb)->dev; | ||
168 | struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); | 168 | struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); |
169 | if (unlikely(idev->cnf.disable_ipv6)) { | 169 | if (unlikely(idev->cnf.disable_ipv6)) { |
170 | IP6_INC_STATS(dev_net(skb_dst(skb)->dev), idev, | 170 | IP6_INC_STATS(dev_net(dev), idev, |
171 | IPSTATS_MIB_OUTDISCARDS); | 171 | IPSTATS_MIB_OUTDISCARDS); |
172 | kfree_skb(skb); | 172 | kfree_skb(skb); |
173 | return 0; | 173 | return 0; |
174 | } | 174 | } |
175 | 175 | ||
176 | if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || | 176 | return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev, |
177 | dst_allfrag(skb_dst(skb))) | 177 | ip6_finish_output, |
178 | return ip6_fragment(skb, ip6_output2); | 178 | !(IP6CB(skb)->flags & IP6SKB_REROUTED)); |
179 | else | ||
180 | return ip6_output2(skb); | ||
181 | } | 179 | } |
182 | 180 | ||
183 | /* | 181 | /* |
@@ -256,8 +254,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, | |||
256 | if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) { | 254 | if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) { |
257 | IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)), | 255 | IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)), |
258 | IPSTATS_MIB_OUT, skb->len); | 256 | IPSTATS_MIB_OUT, skb->len); |
259 | return NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev, | 257 | return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, |
260 | dst_output); | 258 | dst->dev, dst_output); |
261 | } | 259 | } |
262 | 260 | ||
263 | if (net_ratelimit()) | 261 | if (net_ratelimit()) |
@@ -509,7 +507,7 @@ int ip6_forward(struct sk_buff *skb) | |||
509 | if (mtu < IPV6_MIN_MTU) | 507 | if (mtu < IPV6_MIN_MTU) |
510 | mtu = IPV6_MIN_MTU; | 508 | mtu = IPV6_MIN_MTU; |
511 | 509 | ||
512 | if (skb->len > mtu) { | 510 | if (skb->len > mtu && !skb_is_gso(skb)) { |
513 | /* Again, force OUTPUT device used as source address */ | 511 | /* Again, force OUTPUT device used as source address */ |
514 | skb->dev = dst->dev; | 512 | skb->dev = dst->dev; |
515 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 513 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); |
@@ -533,7 +531,7 @@ int ip6_forward(struct sk_buff *skb) | |||
533 | hdr->hop_limit--; | 531 | hdr->hop_limit--; |
534 | 532 | ||
535 | IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); | 533 | IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); |
536 | return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dst->dev, | 534 | return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev, |
537 | ip6_forward_finish); | 535 | ip6_forward_finish); |
538 | 536 | ||
539 | error: | 537 | error: |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 2599870747ec..8f39893d8081 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -723,14 +723,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, | |||
723 | skb->protocol = htons(protocol); | 723 | skb->protocol = htons(protocol); |
724 | skb->pkt_type = PACKET_HOST; | 724 | skb->pkt_type = PACKET_HOST; |
725 | memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); | 725 | memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); |
726 | skb->dev = t->dev; | ||
727 | skb_dst_drop(skb); | ||
728 | nf_reset(skb); | ||
729 | 726 | ||
730 | dscp_ecn_decapsulate(t, ipv6h, skb); | 727 | skb_tunnel_rx(skb, t->dev); |
731 | 728 | ||
732 | t->dev->stats.rx_packets++; | 729 | dscp_ecn_decapsulate(t, ipv6h, skb); |
733 | t->dev->stats.rx_bytes += skb->len; | ||
734 | netif_rx(skb); | 730 | netif_rx(skb); |
735 | rcu_read_unlock(); | 731 | rcu_read_unlock(); |
736 | return 0; | 732 | return 0; |
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 3e333268db89..66078dad7fe8 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/if_arp.h> | 42 | #include <linux/if_arp.h> |
43 | #include <net/checksum.h> | 43 | #include <net/checksum.h> |
44 | #include <net/netlink.h> | 44 | #include <net/netlink.h> |
45 | #include <net/fib_rules.h> | ||
45 | 46 | ||
46 | #include <net/ipv6.h> | 47 | #include <net/ipv6.h> |
47 | #include <net/ip6_route.h> | 48 | #include <net/ip6_route.h> |
@@ -51,6 +52,34 @@ | |||
51 | #include <linux/netfilter_ipv6.h> | 52 | #include <linux/netfilter_ipv6.h> |
52 | #include <net/ip6_checksum.h> | 53 | #include <net/ip6_checksum.h> |
53 | 54 | ||
55 | struct mr6_table { | ||
56 | struct list_head list; | ||
57 | #ifdef CONFIG_NET_NS | ||
58 | struct net *net; | ||
59 | #endif | ||
60 | u32 id; | ||
61 | struct sock *mroute6_sk; | ||
62 | struct timer_list ipmr_expire_timer; | ||
63 | struct list_head mfc6_unres_queue; | ||
64 | struct list_head mfc6_cache_array[MFC6_LINES]; | ||
65 | struct mif_device vif6_table[MAXMIFS]; | ||
66 | int maxvif; | ||
67 | atomic_t cache_resolve_queue_len; | ||
68 | int mroute_do_assert; | ||
69 | int mroute_do_pim; | ||
70 | #ifdef CONFIG_IPV6_PIMSM_V2 | ||
71 | int mroute_reg_vif_num; | ||
72 | #endif | ||
73 | }; | ||
74 | |||
75 | struct ip6mr_rule { | ||
76 | struct fib_rule common; | ||
77 | }; | ||
78 | |||
79 | struct ip6mr_result { | ||
80 | struct mr6_table *mrt; | ||
81 | }; | ||
82 | |||
54 | /* Big lock, protecting vif table, mrt cache and mroute socket state. | 83 | /* Big lock, protecting vif table, mrt cache and mroute socket state. |
55 | Note that the changes are semaphored via rtnl_lock. | 84 | Note that the changes are semaphored via rtnl_lock. |
56 | */ | 85 | */ |
@@ -61,9 +90,7 @@ static DEFINE_RWLOCK(mrt_lock); | |||
61 | * Multicast router control variables | 90 | * Multicast router control variables |
62 | */ | 91 | */ |
63 | 92 | ||
64 | #define MIF_EXISTS(_net, _idx) ((_net)->ipv6.vif6_table[_idx].dev != NULL) | 93 | #define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL) |
65 | |||
66 | static struct mfc6_cache *mfc_unres_queue; /* Queue of unresolved entries */ | ||
67 | 94 | ||
68 | /* Special spinlock for queue of unresolved entries */ | 95 | /* Special spinlock for queue of unresolved entries */ |
69 | static DEFINE_SPINLOCK(mfc_unres_lock); | 96 | static DEFINE_SPINLOCK(mfc_unres_lock); |
@@ -78,20 +105,235 @@ static DEFINE_SPINLOCK(mfc_unres_lock); | |||
78 | 105 | ||
79 | static struct kmem_cache *mrt_cachep __read_mostly; | 106 | static struct kmem_cache *mrt_cachep __read_mostly; |
80 | 107 | ||
81 | static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache); | 108 | static struct mr6_table *ip6mr_new_table(struct net *net, u32 id); |
82 | static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, | 109 | static void ip6mr_free_table(struct mr6_table *mrt); |
110 | |||
111 | static int ip6_mr_forward(struct net *net, struct mr6_table *mrt, | ||
112 | struct sk_buff *skb, struct mfc6_cache *cache); | ||
113 | static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt, | ||
83 | mifi_t mifi, int assert); | 114 | mifi_t mifi, int assert); |
84 | static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm); | 115 | static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, |
85 | static void mroute_clean_tables(struct net *net); | 116 | struct mfc6_cache *c, struct rtmsg *rtm); |
117 | static int ip6mr_rtm_dumproute(struct sk_buff *skb, | ||
118 | struct netlink_callback *cb); | ||
119 | static void mroute_clean_tables(struct mr6_table *mrt); | ||
120 | static void ipmr_expire_process(unsigned long arg); | ||
121 | |||
122 | #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES | ||
123 | #define ip6mr_for_each_table(mrt, net) \ | ||
124 | list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list) | ||
125 | |||
126 | static struct mr6_table *ip6mr_get_table(struct net *net, u32 id) | ||
127 | { | ||
128 | struct mr6_table *mrt; | ||
129 | |||
130 | ip6mr_for_each_table(mrt, net) { | ||
131 | if (mrt->id == id) | ||
132 | return mrt; | ||
133 | } | ||
134 | return NULL; | ||
135 | } | ||
136 | |||
137 | static int ip6mr_fib_lookup(struct net *net, struct flowi *flp, | ||
138 | struct mr6_table **mrt) | ||
139 | { | ||
140 | struct ip6mr_result res; | ||
141 | struct fib_lookup_arg arg = { .result = &res, }; | ||
142 | int err; | ||
143 | |||
144 | err = fib_rules_lookup(net->ipv6.mr6_rules_ops, flp, 0, &arg); | ||
145 | if (err < 0) | ||
146 | return err; | ||
147 | *mrt = res.mrt; | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp, | ||
152 | int flags, struct fib_lookup_arg *arg) | ||
153 | { | ||
154 | struct ip6mr_result *res = arg->result; | ||
155 | struct mr6_table *mrt; | ||
156 | |||
157 | switch (rule->action) { | ||
158 | case FR_ACT_TO_TBL: | ||
159 | break; | ||
160 | case FR_ACT_UNREACHABLE: | ||
161 | return -ENETUNREACH; | ||
162 | case FR_ACT_PROHIBIT: | ||
163 | return -EACCES; | ||
164 | case FR_ACT_BLACKHOLE: | ||
165 | default: | ||
166 | return -EINVAL; | ||
167 | } | ||
168 | |||
169 | mrt = ip6mr_get_table(rule->fr_net, rule->table); | ||
170 | if (mrt == NULL) | ||
171 | return -EAGAIN; | ||
172 | res->mrt = mrt; | ||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags) | ||
177 | { | ||
178 | return 1; | ||
179 | } | ||
180 | |||
181 | static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = { | ||
182 | FRA_GENERIC_POLICY, | ||
183 | }; | ||
184 | |||
185 | static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb, | ||
186 | struct fib_rule_hdr *frh, struct nlattr **tb) | ||
187 | { | ||
188 | return 0; | ||
189 | } | ||
190 | |||
191 | static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, | ||
192 | struct nlattr **tb) | ||
193 | { | ||
194 | return 1; | ||
195 | } | ||
196 | |||
197 | static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb, | ||
198 | struct fib_rule_hdr *frh) | ||
199 | { | ||
200 | frh->dst_len = 0; | ||
201 | frh->src_len = 0; | ||
202 | frh->tos = 0; | ||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | static const struct fib_rules_ops __net_initdata ip6mr_rules_ops_template = { | ||
207 | .family = RTNL_FAMILY_IP6MR, | ||
208 | .rule_size = sizeof(struct ip6mr_rule), | ||
209 | .addr_size = sizeof(struct in6_addr), | ||
210 | .action = ip6mr_rule_action, | ||
211 | .match = ip6mr_rule_match, | ||
212 | .configure = ip6mr_rule_configure, | ||
213 | .compare = ip6mr_rule_compare, | ||
214 | .default_pref = fib_default_rule_pref, | ||
215 | .fill = ip6mr_rule_fill, | ||
216 | .nlgroup = RTNLGRP_IPV6_RULE, | ||
217 | .policy = ip6mr_rule_policy, | ||
218 | .owner = THIS_MODULE, | ||
219 | }; | ||
220 | |||
221 | static int __net_init ip6mr_rules_init(struct net *net) | ||
222 | { | ||
223 | struct fib_rules_ops *ops; | ||
224 | struct mr6_table *mrt; | ||
225 | int err; | ||
226 | |||
227 | ops = fib_rules_register(&ip6mr_rules_ops_template, net); | ||
228 | if (IS_ERR(ops)) | ||
229 | return PTR_ERR(ops); | ||
230 | |||
231 | INIT_LIST_HEAD(&net->ipv6.mr6_tables); | ||
232 | |||
233 | mrt = ip6mr_new_table(net, RT6_TABLE_DFLT); | ||
234 | if (mrt == NULL) { | ||
235 | err = -ENOMEM; | ||
236 | goto err1; | ||
237 | } | ||
238 | |||
239 | err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0); | ||
240 | if (err < 0) | ||
241 | goto err2; | ||
242 | |||
243 | net->ipv6.mr6_rules_ops = ops; | ||
244 | return 0; | ||
245 | |||
246 | err2: | ||
247 | kfree(mrt); | ||
248 | err1: | ||
249 | fib_rules_unregister(ops); | ||
250 | return err; | ||
251 | } | ||
252 | |||
253 | static void __net_exit ip6mr_rules_exit(struct net *net) | ||
254 | { | ||
255 | struct mr6_table *mrt, *next; | ||
256 | |||
257 | list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) { | ||
258 | list_del(&mrt->list); | ||
259 | ip6mr_free_table(mrt); | ||
260 | } | ||
261 | fib_rules_unregister(net->ipv6.mr6_rules_ops); | ||
262 | } | ||
263 | #else | ||
264 | #define ip6mr_for_each_table(mrt, net) \ | ||
265 | for (mrt = net->ipv6.mrt6; mrt; mrt = NULL) | ||
266 | |||
267 | static struct mr6_table *ip6mr_get_table(struct net *net, u32 id) | ||
268 | { | ||
269 | return net->ipv6.mrt6; | ||
270 | } | ||
271 | |||
272 | static int ip6mr_fib_lookup(struct net *net, struct flowi *flp, | ||
273 | struct mr6_table **mrt) | ||
274 | { | ||
275 | *mrt = net->ipv6.mrt6; | ||
276 | return 0; | ||
277 | } | ||
278 | |||
279 | static int __net_init ip6mr_rules_init(struct net *net) | ||
280 | { | ||
281 | net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT); | ||
282 | return net->ipv6.mrt6 ? 0 : -ENOMEM; | ||
283 | } | ||
284 | |||
285 | static void __net_exit ip6mr_rules_exit(struct net *net) | ||
286 | { | ||
287 | ip6mr_free_table(net->ipv6.mrt6); | ||
288 | } | ||
289 | #endif | ||
290 | |||
291 | static struct mr6_table *ip6mr_new_table(struct net *net, u32 id) | ||
292 | { | ||
293 | struct mr6_table *mrt; | ||
294 | unsigned int i; | ||
86 | 295 | ||
87 | static struct timer_list ipmr_expire_timer; | 296 | mrt = ip6mr_get_table(net, id); |
297 | if (mrt != NULL) | ||
298 | return mrt; | ||
88 | 299 | ||
300 | mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); | ||
301 | if (mrt == NULL) | ||
302 | return NULL; | ||
303 | mrt->id = id; | ||
304 | write_pnet(&mrt->net, net); | ||
305 | |||
306 | /* Forwarding cache */ | ||
307 | for (i = 0; i < MFC6_LINES; i++) | ||
308 | INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]); | ||
309 | |||
310 | INIT_LIST_HEAD(&mrt->mfc6_unres_queue); | ||
311 | |||
312 | setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process, | ||
313 | (unsigned long)mrt); | ||
314 | |||
315 | #ifdef CONFIG_IPV6_PIMSM_V2 | ||
316 | mrt->mroute_reg_vif_num = -1; | ||
317 | #endif | ||
318 | #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES | ||
319 | list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables); | ||
320 | #endif | ||
321 | return mrt; | ||
322 | } | ||
323 | |||
324 | static void ip6mr_free_table(struct mr6_table *mrt) | ||
325 | { | ||
326 | del_timer(&mrt->ipmr_expire_timer); | ||
327 | mroute_clean_tables(mrt); | ||
328 | kfree(mrt); | ||
329 | } | ||
89 | 330 | ||
90 | #ifdef CONFIG_PROC_FS | 331 | #ifdef CONFIG_PROC_FS |
91 | 332 | ||
92 | struct ipmr_mfc_iter { | 333 | struct ipmr_mfc_iter { |
93 | struct seq_net_private p; | 334 | struct seq_net_private p; |
94 | struct mfc6_cache **cache; | 335 | struct mr6_table *mrt; |
336 | struct list_head *cache; | ||
95 | int ct; | 337 | int ct; |
96 | }; | 338 | }; |
97 | 339 | ||
@@ -99,22 +341,22 @@ struct ipmr_mfc_iter { | |||
99 | static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net, | 341 | static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net, |
100 | struct ipmr_mfc_iter *it, loff_t pos) | 342 | struct ipmr_mfc_iter *it, loff_t pos) |
101 | { | 343 | { |
344 | struct mr6_table *mrt = it->mrt; | ||
102 | struct mfc6_cache *mfc; | 345 | struct mfc6_cache *mfc; |
103 | 346 | ||
104 | it->cache = net->ipv6.mfc6_cache_array; | ||
105 | read_lock(&mrt_lock); | 347 | read_lock(&mrt_lock); |
106 | for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) | 348 | for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) { |
107 | for (mfc = net->ipv6.mfc6_cache_array[it->ct]; | 349 | it->cache = &mrt->mfc6_cache_array[it->ct]; |
108 | mfc; mfc = mfc->next) | 350 | list_for_each_entry(mfc, it->cache, list) |
109 | if (pos-- == 0) | 351 | if (pos-- == 0) |
110 | return mfc; | 352 | return mfc; |
353 | } | ||
111 | read_unlock(&mrt_lock); | 354 | read_unlock(&mrt_lock); |
112 | 355 | ||
113 | it->cache = &mfc_unres_queue; | ||
114 | spin_lock_bh(&mfc_unres_lock); | 356 | spin_lock_bh(&mfc_unres_lock); |
115 | for (mfc = mfc_unres_queue; mfc; mfc = mfc->next) | 357 | it->cache = &mrt->mfc6_unres_queue; |
116 | if (net_eq(mfc6_net(mfc), net) && | 358 | list_for_each_entry(mfc, it->cache, list) |
117 | pos-- == 0) | 359 | if (pos-- == 0) |
118 | return mfc; | 360 | return mfc; |
119 | spin_unlock_bh(&mfc_unres_lock); | 361 | spin_unlock_bh(&mfc_unres_lock); |
120 | 362 | ||
@@ -122,15 +364,13 @@ static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net, | |||
122 | return NULL; | 364 | return NULL; |
123 | } | 365 | } |
124 | 366 | ||
125 | |||
126 | |||
127 | |||
128 | /* | 367 | /* |
129 | * The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif | 368 | * The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif |
130 | */ | 369 | */ |
131 | 370 | ||
132 | struct ipmr_vif_iter { | 371 | struct ipmr_vif_iter { |
133 | struct seq_net_private p; | 372 | struct seq_net_private p; |
373 | struct mr6_table *mrt; | ||
134 | int ct; | 374 | int ct; |
135 | }; | 375 | }; |
136 | 376 | ||
@@ -138,11 +378,13 @@ static struct mif_device *ip6mr_vif_seq_idx(struct net *net, | |||
138 | struct ipmr_vif_iter *iter, | 378 | struct ipmr_vif_iter *iter, |
139 | loff_t pos) | 379 | loff_t pos) |
140 | { | 380 | { |
141 | for (iter->ct = 0; iter->ct < net->ipv6.maxvif; ++iter->ct) { | 381 | struct mr6_table *mrt = iter->mrt; |
142 | if (!MIF_EXISTS(net, iter->ct)) | 382 | |
383 | for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) { | ||
384 | if (!MIF_EXISTS(mrt, iter->ct)) | ||
143 | continue; | 385 | continue; |
144 | if (pos-- == 0) | 386 | if (pos-- == 0) |
145 | return &net->ipv6.vif6_table[iter->ct]; | 387 | return &mrt->vif6_table[iter->ct]; |
146 | } | 388 | } |
147 | return NULL; | 389 | return NULL; |
148 | } | 390 | } |
@@ -150,7 +392,15 @@ static struct mif_device *ip6mr_vif_seq_idx(struct net *net, | |||
150 | static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos) | 392 | static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos) |
151 | __acquires(mrt_lock) | 393 | __acquires(mrt_lock) |
152 | { | 394 | { |
395 | struct ipmr_vif_iter *iter = seq->private; | ||
153 | struct net *net = seq_file_net(seq); | 396 | struct net *net = seq_file_net(seq); |
397 | struct mr6_table *mrt; | ||
398 | |||
399 | mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); | ||
400 | if (mrt == NULL) | ||
401 | return ERR_PTR(-ENOENT); | ||
402 | |||
403 | iter->mrt = mrt; | ||
154 | 404 | ||
155 | read_lock(&mrt_lock); | 405 | read_lock(&mrt_lock); |
156 | return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1) | 406 | return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1) |
@@ -161,15 +411,16 @@ static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
161 | { | 411 | { |
162 | struct ipmr_vif_iter *iter = seq->private; | 412 | struct ipmr_vif_iter *iter = seq->private; |
163 | struct net *net = seq_file_net(seq); | 413 | struct net *net = seq_file_net(seq); |
414 | struct mr6_table *mrt = iter->mrt; | ||
164 | 415 | ||
165 | ++*pos; | 416 | ++*pos; |
166 | if (v == SEQ_START_TOKEN) | 417 | if (v == SEQ_START_TOKEN) |
167 | return ip6mr_vif_seq_idx(net, iter, 0); | 418 | return ip6mr_vif_seq_idx(net, iter, 0); |
168 | 419 | ||
169 | while (++iter->ct < net->ipv6.maxvif) { | 420 | while (++iter->ct < mrt->maxvif) { |
170 | if (!MIF_EXISTS(net, iter->ct)) | 421 | if (!MIF_EXISTS(mrt, iter->ct)) |
171 | continue; | 422 | continue; |
172 | return &net->ipv6.vif6_table[iter->ct]; | 423 | return &mrt->vif6_table[iter->ct]; |
173 | } | 424 | } |
174 | return NULL; | 425 | return NULL; |
175 | } | 426 | } |
@@ -182,7 +433,8 @@ static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v) | |||
182 | 433 | ||
183 | static int ip6mr_vif_seq_show(struct seq_file *seq, void *v) | 434 | static int ip6mr_vif_seq_show(struct seq_file *seq, void *v) |
184 | { | 435 | { |
185 | struct net *net = seq_file_net(seq); | 436 | struct ipmr_vif_iter *iter = seq->private; |
437 | struct mr6_table *mrt = iter->mrt; | ||
186 | 438 | ||
187 | if (v == SEQ_START_TOKEN) { | 439 | if (v == SEQ_START_TOKEN) { |
188 | seq_puts(seq, | 440 | seq_puts(seq, |
@@ -193,7 +445,7 @@ static int ip6mr_vif_seq_show(struct seq_file *seq, void *v) | |||
193 | 445 | ||
194 | seq_printf(seq, | 446 | seq_printf(seq, |
195 | "%2td %-10s %8ld %7ld %8ld %7ld %05X\n", | 447 | "%2td %-10s %8ld %7ld %8ld %7ld %05X\n", |
196 | vif - net->ipv6.vif6_table, | 448 | vif - mrt->vif6_table, |
197 | name, vif->bytes_in, vif->pkt_in, | 449 | name, vif->bytes_in, vif->pkt_in, |
198 | vif->bytes_out, vif->pkt_out, | 450 | vif->bytes_out, vif->pkt_out, |
199 | vif->flags); | 451 | vif->flags); |
@@ -224,8 +476,15 @@ static const struct file_operations ip6mr_vif_fops = { | |||
224 | 476 | ||
225 | static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) | 477 | static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) |
226 | { | 478 | { |
479 | struct ipmr_mfc_iter *it = seq->private; | ||
227 | struct net *net = seq_file_net(seq); | 480 | struct net *net = seq_file_net(seq); |
481 | struct mr6_table *mrt; | ||
482 | |||
483 | mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); | ||
484 | if (mrt == NULL) | ||
485 | return ERR_PTR(-ENOENT); | ||
228 | 486 | ||
487 | it->mrt = mrt; | ||
229 | return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1) | 488 | return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1) |
230 | : SEQ_START_TOKEN; | 489 | : SEQ_START_TOKEN; |
231 | } | 490 | } |
@@ -235,35 +494,36 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
235 | struct mfc6_cache *mfc = v; | 494 | struct mfc6_cache *mfc = v; |
236 | struct ipmr_mfc_iter *it = seq->private; | 495 | struct ipmr_mfc_iter *it = seq->private; |
237 | struct net *net = seq_file_net(seq); | 496 | struct net *net = seq_file_net(seq); |
497 | struct mr6_table *mrt = it->mrt; | ||
238 | 498 | ||
239 | ++*pos; | 499 | ++*pos; |
240 | 500 | ||
241 | if (v == SEQ_START_TOKEN) | 501 | if (v == SEQ_START_TOKEN) |
242 | return ipmr_mfc_seq_idx(net, seq->private, 0); | 502 | return ipmr_mfc_seq_idx(net, seq->private, 0); |
243 | 503 | ||
244 | if (mfc->next) | 504 | if (mfc->list.next != it->cache) |
245 | return mfc->next; | 505 | return list_entry(mfc->list.next, struct mfc6_cache, list); |
246 | 506 | ||
247 | if (it->cache == &mfc_unres_queue) | 507 | if (it->cache == &mrt->mfc6_unres_queue) |
248 | goto end_of_list; | 508 | goto end_of_list; |
249 | 509 | ||
250 | BUG_ON(it->cache != net->ipv6.mfc6_cache_array); | 510 | BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]); |
251 | 511 | ||
252 | while (++it->ct < MFC6_LINES) { | 512 | while (++it->ct < MFC6_LINES) { |
253 | mfc = net->ipv6.mfc6_cache_array[it->ct]; | 513 | it->cache = &mrt->mfc6_cache_array[it->ct]; |
254 | if (mfc) | 514 | if (list_empty(it->cache)) |
255 | return mfc; | 515 | continue; |
516 | return list_first_entry(it->cache, struct mfc6_cache, list); | ||
256 | } | 517 | } |
257 | 518 | ||
258 | /* exhausted cache_array, show unresolved */ | 519 | /* exhausted cache_array, show unresolved */ |
259 | read_unlock(&mrt_lock); | 520 | read_unlock(&mrt_lock); |
260 | it->cache = &mfc_unres_queue; | 521 | it->cache = &mrt->mfc6_unres_queue; |
261 | it->ct = 0; | 522 | it->ct = 0; |
262 | 523 | ||
263 | spin_lock_bh(&mfc_unres_lock); | 524 | spin_lock_bh(&mfc_unres_lock); |
264 | mfc = mfc_unres_queue; | 525 | if (!list_empty(it->cache)) |
265 | if (mfc) | 526 | return list_first_entry(it->cache, struct mfc6_cache, list); |
266 | return mfc; | ||
267 | 527 | ||
268 | end_of_list: | 528 | end_of_list: |
269 | spin_unlock_bh(&mfc_unres_lock); | 529 | spin_unlock_bh(&mfc_unres_lock); |
@@ -275,18 +535,17 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
275 | static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) | 535 | static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) |
276 | { | 536 | { |
277 | struct ipmr_mfc_iter *it = seq->private; | 537 | struct ipmr_mfc_iter *it = seq->private; |
278 | struct net *net = seq_file_net(seq); | 538 | struct mr6_table *mrt = it->mrt; |
279 | 539 | ||
280 | if (it->cache == &mfc_unres_queue) | 540 | if (it->cache == &mrt->mfc6_unres_queue) |
281 | spin_unlock_bh(&mfc_unres_lock); | 541 | spin_unlock_bh(&mfc_unres_lock); |
282 | else if (it->cache == net->ipv6.mfc6_cache_array) | 542 | else if (it->cache == mrt->mfc6_cache_array) |
283 | read_unlock(&mrt_lock); | 543 | read_unlock(&mrt_lock); |
284 | } | 544 | } |
285 | 545 | ||
286 | static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) | 546 | static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) |
287 | { | 547 | { |
288 | int n; | 548 | int n; |
289 | struct net *net = seq_file_net(seq); | ||
290 | 549 | ||
291 | if (v == SEQ_START_TOKEN) { | 550 | if (v == SEQ_START_TOKEN) { |
292 | seq_puts(seq, | 551 | seq_puts(seq, |
@@ -296,19 +555,20 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) | |||
296 | } else { | 555 | } else { |
297 | const struct mfc6_cache *mfc = v; | 556 | const struct mfc6_cache *mfc = v; |
298 | const struct ipmr_mfc_iter *it = seq->private; | 557 | const struct ipmr_mfc_iter *it = seq->private; |
558 | struct mr6_table *mrt = it->mrt; | ||
299 | 559 | ||
300 | seq_printf(seq, "%pI6 %pI6 %-3hd", | 560 | seq_printf(seq, "%pI6 %pI6 %-3hd", |
301 | &mfc->mf6c_mcastgrp, &mfc->mf6c_origin, | 561 | &mfc->mf6c_mcastgrp, &mfc->mf6c_origin, |
302 | mfc->mf6c_parent); | 562 | mfc->mf6c_parent); |
303 | 563 | ||
304 | if (it->cache != &mfc_unres_queue) { | 564 | if (it->cache != &mrt->mfc6_unres_queue) { |
305 | seq_printf(seq, " %8lu %8lu %8lu", | 565 | seq_printf(seq, " %8lu %8lu %8lu", |
306 | mfc->mfc_un.res.pkt, | 566 | mfc->mfc_un.res.pkt, |
307 | mfc->mfc_un.res.bytes, | 567 | mfc->mfc_un.res.bytes, |
308 | mfc->mfc_un.res.wrong_if); | 568 | mfc->mfc_un.res.wrong_if); |
309 | for (n = mfc->mfc_un.res.minvif; | 569 | for (n = mfc->mfc_un.res.minvif; |
310 | n < mfc->mfc_un.res.maxvif; n++) { | 570 | n < mfc->mfc_un.res.maxvif; n++) { |
311 | if (MIF_EXISTS(net, n) && | 571 | if (MIF_EXISTS(mrt, n) && |
312 | mfc->mfc_un.res.ttls[n] < 255) | 572 | mfc->mfc_un.res.ttls[n] < 255) |
313 | seq_printf(seq, | 573 | seq_printf(seq, |
314 | " %2d:%-3d", | 574 | " %2d:%-3d", |
@@ -355,7 +615,12 @@ static int pim6_rcv(struct sk_buff *skb) | |||
355 | struct ipv6hdr *encap; | 615 | struct ipv6hdr *encap; |
356 | struct net_device *reg_dev = NULL; | 616 | struct net_device *reg_dev = NULL; |
357 | struct net *net = dev_net(skb->dev); | 617 | struct net *net = dev_net(skb->dev); |
358 | int reg_vif_num = net->ipv6.mroute_reg_vif_num; | 618 | struct mr6_table *mrt; |
619 | struct flowi fl = { | ||
620 | .iif = skb->dev->ifindex, | ||
621 | .mark = skb->mark, | ||
622 | }; | ||
623 | int reg_vif_num; | ||
359 | 624 | ||
360 | if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap))) | 625 | if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap))) |
361 | goto drop; | 626 | goto drop; |
@@ -378,9 +643,13 @@ static int pim6_rcv(struct sk_buff *skb) | |||
378 | ntohs(encap->payload_len) + sizeof(*pim) > skb->len) | 643 | ntohs(encap->payload_len) + sizeof(*pim) > skb->len) |
379 | goto drop; | 644 | goto drop; |
380 | 645 | ||
646 | if (ip6mr_fib_lookup(net, &fl, &mrt) < 0) | ||
647 | goto drop; | ||
648 | reg_vif_num = mrt->mroute_reg_vif_num; | ||
649 | |||
381 | read_lock(&mrt_lock); | 650 | read_lock(&mrt_lock); |
382 | if (reg_vif_num >= 0) | 651 | if (reg_vif_num >= 0) |
383 | reg_dev = net->ipv6.vif6_table[reg_vif_num].dev; | 652 | reg_dev = mrt->vif6_table[reg_vif_num].dev; |
384 | if (reg_dev) | 653 | if (reg_dev) |
385 | dev_hold(reg_dev); | 654 | dev_hold(reg_dev); |
386 | read_unlock(&mrt_lock); | 655 | read_unlock(&mrt_lock); |
@@ -391,14 +660,12 @@ static int pim6_rcv(struct sk_buff *skb) | |||
391 | skb->mac_header = skb->network_header; | 660 | skb->mac_header = skb->network_header; |
392 | skb_pull(skb, (u8 *)encap - skb->data); | 661 | skb_pull(skb, (u8 *)encap - skb->data); |
393 | skb_reset_network_header(skb); | 662 | skb_reset_network_header(skb); |
394 | skb->dev = reg_dev; | ||
395 | skb->protocol = htons(ETH_P_IPV6); | 663 | skb->protocol = htons(ETH_P_IPV6); |
396 | skb->ip_summed = 0; | 664 | skb->ip_summed = 0; |
397 | skb->pkt_type = PACKET_HOST; | 665 | skb->pkt_type = PACKET_HOST; |
398 | skb_dst_drop(skb); | 666 | |
399 | reg_dev->stats.rx_bytes += skb->len; | 667 | skb_tunnel_rx(skb, reg_dev); |
400 | reg_dev->stats.rx_packets++; | 668 | |
401 | nf_reset(skb); | ||
402 | netif_rx(skb); | 669 | netif_rx(skb); |
403 | dev_put(reg_dev); | 670 | dev_put(reg_dev); |
404 | return 0; | 671 | return 0; |
@@ -417,12 +684,22 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, | |||
417 | struct net_device *dev) | 684 | struct net_device *dev) |
418 | { | 685 | { |
419 | struct net *net = dev_net(dev); | 686 | struct net *net = dev_net(dev); |
687 | struct mr6_table *mrt; | ||
688 | struct flowi fl = { | ||
689 | .oif = dev->ifindex, | ||
690 | .iif = skb->skb_iif, | ||
691 | .mark = skb->mark, | ||
692 | }; | ||
693 | int err; | ||
694 | |||
695 | err = ip6mr_fib_lookup(net, &fl, &mrt); | ||
696 | if (err < 0) | ||
697 | return err; | ||
420 | 698 | ||
421 | read_lock(&mrt_lock); | 699 | read_lock(&mrt_lock); |
422 | dev->stats.tx_bytes += skb->len; | 700 | dev->stats.tx_bytes += skb->len; |
423 | dev->stats.tx_packets++; | 701 | dev->stats.tx_packets++; |
424 | ip6mr_cache_report(net, skb, net->ipv6.mroute_reg_vif_num, | 702 | ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT); |
425 | MRT6MSG_WHOLEPKT); | ||
426 | read_unlock(&mrt_lock); | 703 | read_unlock(&mrt_lock); |
427 | kfree_skb(skb); | 704 | kfree_skb(skb); |
428 | return NETDEV_TX_OK; | 705 | return NETDEV_TX_OK; |
@@ -442,11 +719,17 @@ static void reg_vif_setup(struct net_device *dev) | |||
442 | dev->features |= NETIF_F_NETNS_LOCAL; | 719 | dev->features |= NETIF_F_NETNS_LOCAL; |
443 | } | 720 | } |
444 | 721 | ||
445 | static struct net_device *ip6mr_reg_vif(struct net *net) | 722 | static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt) |
446 | { | 723 | { |
447 | struct net_device *dev; | 724 | struct net_device *dev; |
725 | char name[IFNAMSIZ]; | ||
448 | 726 | ||
449 | dev = alloc_netdev(0, "pim6reg", reg_vif_setup); | 727 | if (mrt->id == RT6_TABLE_DFLT) |
728 | sprintf(name, "pim6reg"); | ||
729 | else | ||
730 | sprintf(name, "pim6reg%u", mrt->id); | ||
731 | |||
732 | dev = alloc_netdev(0, name, reg_vif_setup); | ||
450 | if (dev == NULL) | 733 | if (dev == NULL) |
451 | return NULL; | 734 | return NULL; |
452 | 735 | ||
@@ -478,15 +761,16 @@ failure: | |||
478 | * Delete a VIF entry | 761 | * Delete a VIF entry |
479 | */ | 762 | */ |
480 | 763 | ||
481 | static int mif6_delete(struct net *net, int vifi, struct list_head *head) | 764 | static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head) |
482 | { | 765 | { |
483 | struct mif_device *v; | 766 | struct mif_device *v; |
484 | struct net_device *dev; | 767 | struct net_device *dev; |
485 | struct inet6_dev *in6_dev; | 768 | struct inet6_dev *in6_dev; |
486 | if (vifi < 0 || vifi >= net->ipv6.maxvif) | 769 | |
770 | if (vifi < 0 || vifi >= mrt->maxvif) | ||
487 | return -EADDRNOTAVAIL; | 771 | return -EADDRNOTAVAIL; |
488 | 772 | ||
489 | v = &net->ipv6.vif6_table[vifi]; | 773 | v = &mrt->vif6_table[vifi]; |
490 | 774 | ||
491 | write_lock_bh(&mrt_lock); | 775 | write_lock_bh(&mrt_lock); |
492 | dev = v->dev; | 776 | dev = v->dev; |
@@ -498,17 +782,17 @@ static int mif6_delete(struct net *net, int vifi, struct list_head *head) | |||
498 | } | 782 | } |
499 | 783 | ||
500 | #ifdef CONFIG_IPV6_PIMSM_V2 | 784 | #ifdef CONFIG_IPV6_PIMSM_V2 |
501 | if (vifi == net->ipv6.mroute_reg_vif_num) | 785 | if (vifi == mrt->mroute_reg_vif_num) |
502 | net->ipv6.mroute_reg_vif_num = -1; | 786 | mrt->mroute_reg_vif_num = -1; |
503 | #endif | 787 | #endif |
504 | 788 | ||
505 | if (vifi + 1 == net->ipv6.maxvif) { | 789 | if (vifi + 1 == mrt->maxvif) { |
506 | int tmp; | 790 | int tmp; |
507 | for (tmp = vifi - 1; tmp >= 0; tmp--) { | 791 | for (tmp = vifi - 1; tmp >= 0; tmp--) { |
508 | if (MIF_EXISTS(net, tmp)) | 792 | if (MIF_EXISTS(mrt, tmp)) |
509 | break; | 793 | break; |
510 | } | 794 | } |
511 | net->ipv6.maxvif = tmp + 1; | 795 | mrt->maxvif = tmp + 1; |
512 | } | 796 | } |
513 | 797 | ||
514 | write_unlock_bh(&mrt_lock); | 798 | write_unlock_bh(&mrt_lock); |
@@ -528,7 +812,6 @@ static int mif6_delete(struct net *net, int vifi, struct list_head *head) | |||
528 | 812 | ||
529 | static inline void ip6mr_cache_free(struct mfc6_cache *c) | 813 | static inline void ip6mr_cache_free(struct mfc6_cache *c) |
530 | { | 814 | { |
531 | release_net(mfc6_net(c)); | ||
532 | kmem_cache_free(mrt_cachep, c); | 815 | kmem_cache_free(mrt_cachep, c); |
533 | } | 816 | } |
534 | 817 | ||
@@ -536,12 +819,12 @@ static inline void ip6mr_cache_free(struct mfc6_cache *c) | |||
536 | and reporting error to netlink readers. | 819 | and reporting error to netlink readers. |
537 | */ | 820 | */ |
538 | 821 | ||
539 | static void ip6mr_destroy_unres(struct mfc6_cache *c) | 822 | static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c) |
540 | { | 823 | { |
824 | struct net *net = read_pnet(&mrt->net); | ||
541 | struct sk_buff *skb; | 825 | struct sk_buff *skb; |
542 | struct net *net = mfc6_net(c); | ||
543 | 826 | ||
544 | atomic_dec(&net->ipv6.cache_resolve_queue_len); | 827 | atomic_dec(&mrt->cache_resolve_queue_len); |
545 | 828 | ||
546 | while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) { | 829 | while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) { |
547 | if (ipv6_hdr(skb)->version == 0) { | 830 | if (ipv6_hdr(skb)->version == 0) { |
@@ -559,60 +842,59 @@ static void ip6mr_destroy_unres(struct mfc6_cache *c) | |||
559 | } | 842 | } |
560 | 843 | ||
561 | 844 | ||
562 | /* Single timer process for all the unresolved queue. */ | 845 | /* Timer process for all the unresolved queue. */ |
563 | 846 | ||
564 | static void ipmr_do_expire_process(unsigned long dummy) | 847 | static void ipmr_do_expire_process(struct mr6_table *mrt) |
565 | { | 848 | { |
566 | unsigned long now = jiffies; | 849 | unsigned long now = jiffies; |
567 | unsigned long expires = 10 * HZ; | 850 | unsigned long expires = 10 * HZ; |
568 | struct mfc6_cache *c, **cp; | 851 | struct mfc6_cache *c, *next; |
569 | |||
570 | cp = &mfc_unres_queue; | ||
571 | 852 | ||
572 | while ((c = *cp) != NULL) { | 853 | list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) { |
573 | if (time_after(c->mfc_un.unres.expires, now)) { | 854 | if (time_after(c->mfc_un.unres.expires, now)) { |
574 | /* not yet... */ | 855 | /* not yet... */ |
575 | unsigned long interval = c->mfc_un.unres.expires - now; | 856 | unsigned long interval = c->mfc_un.unres.expires - now; |
576 | if (interval < expires) | 857 | if (interval < expires) |
577 | expires = interval; | 858 | expires = interval; |
578 | cp = &c->next; | ||
579 | continue; | 859 | continue; |
580 | } | 860 | } |
581 | 861 | ||
582 | *cp = c->next; | 862 | list_del(&c->list); |
583 | ip6mr_destroy_unres(c); | 863 | ip6mr_destroy_unres(mrt, c); |
584 | } | 864 | } |
585 | 865 | ||
586 | if (mfc_unres_queue != NULL) | 866 | if (!list_empty(&mrt->mfc6_unres_queue)) |
587 | mod_timer(&ipmr_expire_timer, jiffies + expires); | 867 | mod_timer(&mrt->ipmr_expire_timer, jiffies + expires); |
588 | } | 868 | } |
589 | 869 | ||
590 | static void ipmr_expire_process(unsigned long dummy) | 870 | static void ipmr_expire_process(unsigned long arg) |
591 | { | 871 | { |
872 | struct mr6_table *mrt = (struct mr6_table *)arg; | ||
873 | |||
592 | if (!spin_trylock(&mfc_unres_lock)) { | 874 | if (!spin_trylock(&mfc_unres_lock)) { |
593 | mod_timer(&ipmr_expire_timer, jiffies + 1); | 875 | mod_timer(&mrt->ipmr_expire_timer, jiffies + 1); |
594 | return; | 876 | return; |
595 | } | 877 | } |
596 | 878 | ||
597 | if (mfc_unres_queue != NULL) | 879 | if (!list_empty(&mrt->mfc6_unres_queue)) |
598 | ipmr_do_expire_process(dummy); | 880 | ipmr_do_expire_process(mrt); |
599 | 881 | ||
600 | spin_unlock(&mfc_unres_lock); | 882 | spin_unlock(&mfc_unres_lock); |
601 | } | 883 | } |
602 | 884 | ||
603 | /* Fill oifs list. It is called under write locked mrt_lock. */ | 885 | /* Fill oifs list. It is called under write locked mrt_lock. */ |
604 | 886 | ||
605 | static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttls) | 887 | static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache, |
888 | unsigned char *ttls) | ||
606 | { | 889 | { |
607 | int vifi; | 890 | int vifi; |
608 | struct net *net = mfc6_net(cache); | ||
609 | 891 | ||
610 | cache->mfc_un.res.minvif = MAXMIFS; | 892 | cache->mfc_un.res.minvif = MAXMIFS; |
611 | cache->mfc_un.res.maxvif = 0; | 893 | cache->mfc_un.res.maxvif = 0; |
612 | memset(cache->mfc_un.res.ttls, 255, MAXMIFS); | 894 | memset(cache->mfc_un.res.ttls, 255, MAXMIFS); |
613 | 895 | ||
614 | for (vifi = 0; vifi < net->ipv6.maxvif; vifi++) { | 896 | for (vifi = 0; vifi < mrt->maxvif; vifi++) { |
615 | if (MIF_EXISTS(net, vifi) && | 897 | if (MIF_EXISTS(mrt, vifi) && |
616 | ttls[vifi] && ttls[vifi] < 255) { | 898 | ttls[vifi] && ttls[vifi] < 255) { |
617 | cache->mfc_un.res.ttls[vifi] = ttls[vifi]; | 899 | cache->mfc_un.res.ttls[vifi] = ttls[vifi]; |
618 | if (cache->mfc_un.res.minvif > vifi) | 900 | if (cache->mfc_un.res.minvif > vifi) |
@@ -623,16 +905,17 @@ static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttl | |||
623 | } | 905 | } |
624 | } | 906 | } |
625 | 907 | ||
626 | static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock) | 908 | static int mif6_add(struct net *net, struct mr6_table *mrt, |
909 | struct mif6ctl *vifc, int mrtsock) | ||
627 | { | 910 | { |
628 | int vifi = vifc->mif6c_mifi; | 911 | int vifi = vifc->mif6c_mifi; |
629 | struct mif_device *v = &net->ipv6.vif6_table[vifi]; | 912 | struct mif_device *v = &mrt->vif6_table[vifi]; |
630 | struct net_device *dev; | 913 | struct net_device *dev; |
631 | struct inet6_dev *in6_dev; | 914 | struct inet6_dev *in6_dev; |
632 | int err; | 915 | int err; |
633 | 916 | ||
634 | /* Is vif busy ? */ | 917 | /* Is vif busy ? */ |
635 | if (MIF_EXISTS(net, vifi)) | 918 | if (MIF_EXISTS(mrt, vifi)) |
636 | return -EADDRINUSE; | 919 | return -EADDRINUSE; |
637 | 920 | ||
638 | switch (vifc->mif6c_flags) { | 921 | switch (vifc->mif6c_flags) { |
@@ -642,9 +925,9 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock) | |||
642 | * Special Purpose VIF in PIM | 925 | * Special Purpose VIF in PIM |
643 | * All the packets will be sent to the daemon | 926 | * All the packets will be sent to the daemon |
644 | */ | 927 | */ |
645 | if (net->ipv6.mroute_reg_vif_num >= 0) | 928 | if (mrt->mroute_reg_vif_num >= 0) |
646 | return -EADDRINUSE; | 929 | return -EADDRINUSE; |
647 | dev = ip6mr_reg_vif(net); | 930 | dev = ip6mr_reg_vif(net, mrt); |
648 | if (!dev) | 931 | if (!dev) |
649 | return -ENOBUFS; | 932 | return -ENOBUFS; |
650 | err = dev_set_allmulti(dev, 1); | 933 | err = dev_set_allmulti(dev, 1); |
@@ -694,50 +977,48 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock) | |||
694 | v->dev = dev; | 977 | v->dev = dev; |
695 | #ifdef CONFIG_IPV6_PIMSM_V2 | 978 | #ifdef CONFIG_IPV6_PIMSM_V2 |
696 | if (v->flags & MIFF_REGISTER) | 979 | if (v->flags & MIFF_REGISTER) |
697 | net->ipv6.mroute_reg_vif_num = vifi; | 980 | mrt->mroute_reg_vif_num = vifi; |
698 | #endif | 981 | #endif |
699 | if (vifi + 1 > net->ipv6.maxvif) | 982 | if (vifi + 1 > mrt->maxvif) |
700 | net->ipv6.maxvif = vifi + 1; | 983 | mrt->maxvif = vifi + 1; |
701 | write_unlock_bh(&mrt_lock); | 984 | write_unlock_bh(&mrt_lock); |
702 | return 0; | 985 | return 0; |
703 | } | 986 | } |
704 | 987 | ||
705 | static struct mfc6_cache *ip6mr_cache_find(struct net *net, | 988 | static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt, |
706 | struct in6_addr *origin, | 989 | struct in6_addr *origin, |
707 | struct in6_addr *mcastgrp) | 990 | struct in6_addr *mcastgrp) |
708 | { | 991 | { |
709 | int line = MFC6_HASH(mcastgrp, origin); | 992 | int line = MFC6_HASH(mcastgrp, origin); |
710 | struct mfc6_cache *c; | 993 | struct mfc6_cache *c; |
711 | 994 | ||
712 | for (c = net->ipv6.mfc6_cache_array[line]; c; c = c->next) { | 995 | list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) { |
713 | if (ipv6_addr_equal(&c->mf6c_origin, origin) && | 996 | if (ipv6_addr_equal(&c->mf6c_origin, origin) && |
714 | ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) | 997 | ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) |
715 | break; | 998 | return c; |
716 | } | 999 | } |
717 | return c; | 1000 | return NULL; |
718 | } | 1001 | } |
719 | 1002 | ||
720 | /* | 1003 | /* |
721 | * Allocate a multicast cache entry | 1004 | * Allocate a multicast cache entry |
722 | */ | 1005 | */ |
723 | static struct mfc6_cache *ip6mr_cache_alloc(struct net *net) | 1006 | static struct mfc6_cache *ip6mr_cache_alloc(void) |
724 | { | 1007 | { |
725 | struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); | 1008 | struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); |
726 | if (c == NULL) | 1009 | if (c == NULL) |
727 | return NULL; | 1010 | return NULL; |
728 | c->mfc_un.res.minvif = MAXMIFS; | 1011 | c->mfc_un.res.minvif = MAXMIFS; |
729 | mfc6_net_set(c, net); | ||
730 | return c; | 1012 | return c; |
731 | } | 1013 | } |
732 | 1014 | ||
733 | static struct mfc6_cache *ip6mr_cache_alloc_unres(struct net *net) | 1015 | static struct mfc6_cache *ip6mr_cache_alloc_unres(void) |
734 | { | 1016 | { |
735 | struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); | 1017 | struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); |
736 | if (c == NULL) | 1018 | if (c == NULL) |
737 | return NULL; | 1019 | return NULL; |
738 | skb_queue_head_init(&c->mfc_un.unres.unresolved); | 1020 | skb_queue_head_init(&c->mfc_un.unres.unresolved); |
739 | c->mfc_un.unres.expires = jiffies + 10 * HZ; | 1021 | c->mfc_un.unres.expires = jiffies + 10 * HZ; |
740 | mfc6_net_set(c, net); | ||
741 | return c; | 1022 | return c; |
742 | } | 1023 | } |
743 | 1024 | ||
@@ -745,7 +1026,8 @@ static struct mfc6_cache *ip6mr_cache_alloc_unres(struct net *net) | |||
745 | * A cache entry has gone into a resolved state from queued | 1026 | * A cache entry has gone into a resolved state from queued |
746 | */ | 1027 | */ |
747 | 1028 | ||
748 | static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c) | 1029 | static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt, |
1030 | struct mfc6_cache *uc, struct mfc6_cache *c) | ||
749 | { | 1031 | { |
750 | struct sk_buff *skb; | 1032 | struct sk_buff *skb; |
751 | 1033 | ||
@@ -758,7 +1040,7 @@ static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c) | |||
758 | int err; | 1040 | int err; |
759 | struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr)); | 1041 | struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr)); |
760 | 1042 | ||
761 | if (ip6mr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) { | 1043 | if (__ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) { |
762 | nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh; | 1044 | nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh; |
763 | } else { | 1045 | } else { |
764 | nlh->nlmsg_type = NLMSG_ERROR; | 1046 | nlh->nlmsg_type = NLMSG_ERROR; |
@@ -766,9 +1048,9 @@ static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c) | |||
766 | skb_trim(skb, nlh->nlmsg_len); | 1048 | skb_trim(skb, nlh->nlmsg_len); |
767 | ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE; | 1049 | ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE; |
768 | } | 1050 | } |
769 | err = rtnl_unicast(skb, mfc6_net(uc), NETLINK_CB(skb).pid); | 1051 | err = rtnl_unicast(skb, net, NETLINK_CB(skb).pid); |
770 | } else | 1052 | } else |
771 | ip6_mr_forward(skb, c); | 1053 | ip6_mr_forward(net, mrt, skb, c); |
772 | } | 1054 | } |
773 | } | 1055 | } |
774 | 1056 | ||
@@ -779,8 +1061,8 @@ static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c) | |||
779 | * Called under mrt_lock. | 1061 | * Called under mrt_lock. |
780 | */ | 1062 | */ |
781 | 1063 | ||
782 | static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, | 1064 | static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt, |
783 | int assert) | 1065 | mifi_t mifi, int assert) |
784 | { | 1066 | { |
785 | struct sk_buff *skb; | 1067 | struct sk_buff *skb; |
786 | struct mrt6msg *msg; | 1068 | struct mrt6msg *msg; |
@@ -816,7 +1098,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, | |||
816 | msg = (struct mrt6msg *)skb_transport_header(skb); | 1098 | msg = (struct mrt6msg *)skb_transport_header(skb); |
817 | msg->im6_mbz = 0; | 1099 | msg->im6_mbz = 0; |
818 | msg->im6_msgtype = MRT6MSG_WHOLEPKT; | 1100 | msg->im6_msgtype = MRT6MSG_WHOLEPKT; |
819 | msg->im6_mif = net->ipv6.mroute_reg_vif_num; | 1101 | msg->im6_mif = mrt->mroute_reg_vif_num; |
820 | msg->im6_pad = 0; | 1102 | msg->im6_pad = 0; |
821 | ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr); | 1103 | ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr); |
822 | ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr); | 1104 | ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr); |
@@ -851,7 +1133,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, | |||
851 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1133 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
852 | } | 1134 | } |
853 | 1135 | ||
854 | if (net->ipv6.mroute6_sk == NULL) { | 1136 | if (mrt->mroute6_sk == NULL) { |
855 | kfree_skb(skb); | 1137 | kfree_skb(skb); |
856 | return -EINVAL; | 1138 | return -EINVAL; |
857 | } | 1139 | } |
@@ -859,7 +1141,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, | |||
859 | /* | 1141 | /* |
860 | * Deliver to user space multicast routing algorithms | 1142 | * Deliver to user space multicast routing algorithms |
861 | */ | 1143 | */ |
862 | ret = sock_queue_rcv_skb(net->ipv6.mroute6_sk, skb); | 1144 | ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb); |
863 | if (ret < 0) { | 1145 | if (ret < 0) { |
864 | if (net_ratelimit()) | 1146 | if (net_ratelimit()) |
865 | printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n"); | 1147 | printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n"); |
@@ -874,26 +1156,28 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, | |||
874 | */ | 1156 | */ |
875 | 1157 | ||
876 | static int | 1158 | static int |
877 | ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) | 1159 | ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb) |
878 | { | 1160 | { |
1161 | bool found = false; | ||
879 | int err; | 1162 | int err; |
880 | struct mfc6_cache *c; | 1163 | struct mfc6_cache *c; |
881 | 1164 | ||
882 | spin_lock_bh(&mfc_unres_lock); | 1165 | spin_lock_bh(&mfc_unres_lock); |
883 | for (c = mfc_unres_queue; c; c = c->next) { | 1166 | list_for_each_entry(c, &mrt->mfc6_unres_queue, list) { |
884 | if (net_eq(mfc6_net(c), net) && | 1167 | if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) && |
885 | ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) && | 1168 | ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) { |
886 | ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) | 1169 | found = true; |
887 | break; | 1170 | break; |
1171 | } | ||
888 | } | 1172 | } |
889 | 1173 | ||
890 | if (c == NULL) { | 1174 | if (!found) { |
891 | /* | 1175 | /* |
892 | * Create a new entry if allowable | 1176 | * Create a new entry if allowable |
893 | */ | 1177 | */ |
894 | 1178 | ||
895 | if (atomic_read(&net->ipv6.cache_resolve_queue_len) >= 10 || | 1179 | if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 || |
896 | (c = ip6mr_cache_alloc_unres(net)) == NULL) { | 1180 | (c = ip6mr_cache_alloc_unres()) == NULL) { |
897 | spin_unlock_bh(&mfc_unres_lock); | 1181 | spin_unlock_bh(&mfc_unres_lock); |
898 | 1182 | ||
899 | kfree_skb(skb); | 1183 | kfree_skb(skb); |
@@ -910,7 +1194,7 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) | |||
910 | /* | 1194 | /* |
911 | * Reflect first query at pim6sd | 1195 | * Reflect first query at pim6sd |
912 | */ | 1196 | */ |
913 | err = ip6mr_cache_report(net, skb, mifi, MRT6MSG_NOCACHE); | 1197 | err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE); |
914 | if (err < 0) { | 1198 | if (err < 0) { |
915 | /* If the report failed throw the cache entry | 1199 | /* If the report failed throw the cache entry |
916 | out - Brad Parker | 1200 | out - Brad Parker |
@@ -922,11 +1206,10 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) | |||
922 | return err; | 1206 | return err; |
923 | } | 1207 | } |
924 | 1208 | ||
925 | atomic_inc(&net->ipv6.cache_resolve_queue_len); | 1209 | atomic_inc(&mrt->cache_resolve_queue_len); |
926 | c->next = mfc_unres_queue; | 1210 | list_add(&c->list, &mrt->mfc6_unres_queue); |
927 | mfc_unres_queue = c; | ||
928 | 1211 | ||
929 | ipmr_do_expire_process(1); | 1212 | ipmr_do_expire_process(mrt); |
930 | } | 1213 | } |
931 | 1214 | ||
932 | /* | 1215 | /* |
@@ -948,19 +1231,18 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) | |||
948 | * MFC6 cache manipulation by user space | 1231 | * MFC6 cache manipulation by user space |
949 | */ | 1232 | */ |
950 | 1233 | ||
951 | static int ip6mr_mfc_delete(struct net *net, struct mf6cctl *mfc) | 1234 | static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc) |
952 | { | 1235 | { |
953 | int line; | 1236 | int line; |
954 | struct mfc6_cache *c, **cp; | 1237 | struct mfc6_cache *c, *next; |
955 | 1238 | ||
956 | line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr); | 1239 | line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr); |
957 | 1240 | ||
958 | for (cp = &net->ipv6.mfc6_cache_array[line]; | 1241 | list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) { |
959 | (c = *cp) != NULL; cp = &c->next) { | ||
960 | if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) && | 1242 | if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) && |
961 | ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) { | 1243 | ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) { |
962 | write_lock_bh(&mrt_lock); | 1244 | write_lock_bh(&mrt_lock); |
963 | *cp = c->next; | 1245 | list_del(&c->list); |
964 | write_unlock_bh(&mrt_lock); | 1246 | write_unlock_bh(&mrt_lock); |
965 | 1247 | ||
966 | ip6mr_cache_free(c); | 1248 | ip6mr_cache_free(c); |
@@ -975,6 +1257,7 @@ static int ip6mr_device_event(struct notifier_block *this, | |||
975 | { | 1257 | { |
976 | struct net_device *dev = ptr; | 1258 | struct net_device *dev = ptr; |
977 | struct net *net = dev_net(dev); | 1259 | struct net *net = dev_net(dev); |
1260 | struct mr6_table *mrt; | ||
978 | struct mif_device *v; | 1261 | struct mif_device *v; |
979 | int ct; | 1262 | int ct; |
980 | LIST_HEAD(list); | 1263 | LIST_HEAD(list); |
@@ -982,10 +1265,12 @@ static int ip6mr_device_event(struct notifier_block *this, | |||
982 | if (event != NETDEV_UNREGISTER) | 1265 | if (event != NETDEV_UNREGISTER) |
983 | return NOTIFY_DONE; | 1266 | return NOTIFY_DONE; |
984 | 1267 | ||
985 | v = &net->ipv6.vif6_table[0]; | 1268 | ip6mr_for_each_table(mrt, net) { |
986 | for (ct = 0; ct < net->ipv6.maxvif; ct++, v++) { | 1269 | v = &mrt->vif6_table[0]; |
987 | if (v->dev == dev) | 1270 | for (ct = 0; ct < mrt->maxvif; ct++, v++) { |
988 | mif6_delete(net, ct, &list); | 1271 | if (v->dev == dev) |
1272 | mif6_delete(mrt, ct, &list); | ||
1273 | } | ||
989 | } | 1274 | } |
990 | unregister_netdevice_many(&list); | 1275 | unregister_netdevice_many(&list); |
991 | 1276 | ||
@@ -1002,26 +1287,11 @@ static struct notifier_block ip6_mr_notifier = { | |||
1002 | 1287 | ||
1003 | static int __net_init ip6mr_net_init(struct net *net) | 1288 | static int __net_init ip6mr_net_init(struct net *net) |
1004 | { | 1289 | { |
1005 | int err = 0; | 1290 | int err; |
1006 | net->ipv6.vif6_table = kcalloc(MAXMIFS, sizeof(struct mif_device), | ||
1007 | GFP_KERNEL); | ||
1008 | if (!net->ipv6.vif6_table) { | ||
1009 | err = -ENOMEM; | ||
1010 | goto fail; | ||
1011 | } | ||
1012 | |||
1013 | /* Forwarding cache */ | ||
1014 | net->ipv6.mfc6_cache_array = kcalloc(MFC6_LINES, | ||
1015 | sizeof(struct mfc6_cache *), | ||
1016 | GFP_KERNEL); | ||
1017 | if (!net->ipv6.mfc6_cache_array) { | ||
1018 | err = -ENOMEM; | ||
1019 | goto fail_mfc6_cache; | ||
1020 | } | ||
1021 | 1291 | ||
1022 | #ifdef CONFIG_IPV6_PIMSM_V2 | 1292 | err = ip6mr_rules_init(net); |
1023 | net->ipv6.mroute_reg_vif_num = -1; | 1293 | if (err < 0) |
1024 | #endif | 1294 | goto fail; |
1025 | 1295 | ||
1026 | #ifdef CONFIG_PROC_FS | 1296 | #ifdef CONFIG_PROC_FS |
1027 | err = -ENOMEM; | 1297 | err = -ENOMEM; |
@@ -1030,16 +1300,15 @@ static int __net_init ip6mr_net_init(struct net *net) | |||
1030 | if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops)) | 1300 | if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops)) |
1031 | goto proc_cache_fail; | 1301 | goto proc_cache_fail; |
1032 | #endif | 1302 | #endif |
1303 | |||
1033 | return 0; | 1304 | return 0; |
1034 | 1305 | ||
1035 | #ifdef CONFIG_PROC_FS | 1306 | #ifdef CONFIG_PROC_FS |
1036 | proc_cache_fail: | 1307 | proc_cache_fail: |
1037 | proc_net_remove(net, "ip6_mr_vif"); | 1308 | proc_net_remove(net, "ip6_mr_vif"); |
1038 | proc_vif_fail: | 1309 | proc_vif_fail: |
1039 | kfree(net->ipv6.mfc6_cache_array); | 1310 | ip6mr_rules_exit(net); |
1040 | #endif | 1311 | #endif |
1041 | fail_mfc6_cache: | ||
1042 | kfree(net->ipv6.vif6_table); | ||
1043 | fail: | 1312 | fail: |
1044 | return err; | 1313 | return err; |
1045 | } | 1314 | } |
@@ -1050,9 +1319,7 @@ static void __net_exit ip6mr_net_exit(struct net *net) | |||
1050 | proc_net_remove(net, "ip6_mr_cache"); | 1319 | proc_net_remove(net, "ip6_mr_cache"); |
1051 | proc_net_remove(net, "ip6_mr_vif"); | 1320 | proc_net_remove(net, "ip6_mr_vif"); |
1052 | #endif | 1321 | #endif |
1053 | mroute_clean_tables(net); | 1322 | ip6mr_rules_exit(net); |
1054 | kfree(net->ipv6.mfc6_cache_array); | ||
1055 | kfree(net->ipv6.vif6_table); | ||
1056 | } | 1323 | } |
1057 | 1324 | ||
1058 | static struct pernet_operations ip6mr_net_ops = { | 1325 | static struct pernet_operations ip6mr_net_ops = { |
@@ -1075,7 +1342,6 @@ int __init ip6_mr_init(void) | |||
1075 | if (err) | 1342 | if (err) |
1076 | goto reg_pernet_fail; | 1343 | goto reg_pernet_fail; |
1077 | 1344 | ||
1078 | setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); | ||
1079 | err = register_netdevice_notifier(&ip6_mr_notifier); | 1345 | err = register_netdevice_notifier(&ip6_mr_notifier); |
1080 | if (err) | 1346 | if (err) |
1081 | goto reg_notif_fail; | 1347 | goto reg_notif_fail; |
@@ -1086,13 +1352,13 @@ int __init ip6_mr_init(void) | |||
1086 | goto add_proto_fail; | 1352 | goto add_proto_fail; |
1087 | } | 1353 | } |
1088 | #endif | 1354 | #endif |
1355 | rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL, ip6mr_rtm_dumproute); | ||
1089 | return 0; | 1356 | return 0; |
1090 | #ifdef CONFIG_IPV6_PIMSM_V2 | 1357 | #ifdef CONFIG_IPV6_PIMSM_V2 |
1091 | add_proto_fail: | 1358 | add_proto_fail: |
1092 | unregister_netdevice_notifier(&ip6_mr_notifier); | 1359 | unregister_netdevice_notifier(&ip6_mr_notifier); |
1093 | #endif | 1360 | #endif |
1094 | reg_notif_fail: | 1361 | reg_notif_fail: |
1095 | del_timer(&ipmr_expire_timer); | ||
1096 | unregister_pernet_subsys(&ip6mr_net_ops); | 1362 | unregister_pernet_subsys(&ip6mr_net_ops); |
1097 | reg_pernet_fail: | 1363 | reg_pernet_fail: |
1098 | kmem_cache_destroy(mrt_cachep); | 1364 | kmem_cache_destroy(mrt_cachep); |
@@ -1102,15 +1368,16 @@ reg_pernet_fail: | |||
1102 | void ip6_mr_cleanup(void) | 1368 | void ip6_mr_cleanup(void) |
1103 | { | 1369 | { |
1104 | unregister_netdevice_notifier(&ip6_mr_notifier); | 1370 | unregister_netdevice_notifier(&ip6_mr_notifier); |
1105 | del_timer(&ipmr_expire_timer); | ||
1106 | unregister_pernet_subsys(&ip6mr_net_ops); | 1371 | unregister_pernet_subsys(&ip6mr_net_ops); |
1107 | kmem_cache_destroy(mrt_cachep); | 1372 | kmem_cache_destroy(mrt_cachep); |
1108 | } | 1373 | } |
1109 | 1374 | ||
1110 | static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) | 1375 | static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt, |
1376 | struct mf6cctl *mfc, int mrtsock) | ||
1111 | { | 1377 | { |
1378 | bool found = false; | ||
1112 | int line; | 1379 | int line; |
1113 | struct mfc6_cache *uc, *c, **cp; | 1380 | struct mfc6_cache *uc, *c; |
1114 | unsigned char ttls[MAXMIFS]; | 1381 | unsigned char ttls[MAXMIFS]; |
1115 | int i; | 1382 | int i; |
1116 | 1383 | ||
@@ -1126,17 +1393,18 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) | |||
1126 | 1393 | ||
1127 | line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr); | 1394 | line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr); |
1128 | 1395 | ||
1129 | for (cp = &net->ipv6.mfc6_cache_array[line]; | 1396 | list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) { |
1130 | (c = *cp) != NULL; cp = &c->next) { | ||
1131 | if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) && | 1397 | if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) && |
1132 | ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) | 1398 | ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) { |
1399 | found = true; | ||
1133 | break; | 1400 | break; |
1401 | } | ||
1134 | } | 1402 | } |
1135 | 1403 | ||
1136 | if (c != NULL) { | 1404 | if (found) { |
1137 | write_lock_bh(&mrt_lock); | 1405 | write_lock_bh(&mrt_lock); |
1138 | c->mf6c_parent = mfc->mf6cc_parent; | 1406 | c->mf6c_parent = mfc->mf6cc_parent; |
1139 | ip6mr_update_thresholds(c, ttls); | 1407 | ip6mr_update_thresholds(mrt, c, ttls); |
1140 | if (!mrtsock) | 1408 | if (!mrtsock) |
1141 | c->mfc_flags |= MFC_STATIC; | 1409 | c->mfc_flags |= MFC_STATIC; |
1142 | write_unlock_bh(&mrt_lock); | 1410 | write_unlock_bh(&mrt_lock); |
@@ -1146,43 +1414,42 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) | |||
1146 | if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr)) | 1414 | if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr)) |
1147 | return -EINVAL; | 1415 | return -EINVAL; |
1148 | 1416 | ||
1149 | c = ip6mr_cache_alloc(net); | 1417 | c = ip6mr_cache_alloc(); |
1150 | if (c == NULL) | 1418 | if (c == NULL) |
1151 | return -ENOMEM; | 1419 | return -ENOMEM; |
1152 | 1420 | ||
1153 | c->mf6c_origin = mfc->mf6cc_origin.sin6_addr; | 1421 | c->mf6c_origin = mfc->mf6cc_origin.sin6_addr; |
1154 | c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr; | 1422 | c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr; |
1155 | c->mf6c_parent = mfc->mf6cc_parent; | 1423 | c->mf6c_parent = mfc->mf6cc_parent; |
1156 | ip6mr_update_thresholds(c, ttls); | 1424 | ip6mr_update_thresholds(mrt, c, ttls); |
1157 | if (!mrtsock) | 1425 | if (!mrtsock) |
1158 | c->mfc_flags |= MFC_STATIC; | 1426 | c->mfc_flags |= MFC_STATIC; |
1159 | 1427 | ||
1160 | write_lock_bh(&mrt_lock); | 1428 | write_lock_bh(&mrt_lock); |
1161 | c->next = net->ipv6.mfc6_cache_array[line]; | 1429 | list_add(&c->list, &mrt->mfc6_cache_array[line]); |
1162 | net->ipv6.mfc6_cache_array[line] = c; | ||
1163 | write_unlock_bh(&mrt_lock); | 1430 | write_unlock_bh(&mrt_lock); |
1164 | 1431 | ||
1165 | /* | 1432 | /* |
1166 | * Check to see if we resolved a queued list. If so we | 1433 | * Check to see if we resolved a queued list. If so we |
1167 | * need to send on the frames and tidy up. | 1434 | * need to send on the frames and tidy up. |
1168 | */ | 1435 | */ |
1436 | found = false; | ||
1169 | spin_lock_bh(&mfc_unres_lock); | 1437 | spin_lock_bh(&mfc_unres_lock); |
1170 | for (cp = &mfc_unres_queue; (uc = *cp) != NULL; | 1438 | list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) { |
1171 | cp = &uc->next) { | 1439 | if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) && |
1172 | if (net_eq(mfc6_net(uc), net) && | ||
1173 | ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) && | ||
1174 | ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) { | 1440 | ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) { |
1175 | *cp = uc->next; | 1441 | list_del(&uc->list); |
1176 | atomic_dec(&net->ipv6.cache_resolve_queue_len); | 1442 | atomic_dec(&mrt->cache_resolve_queue_len); |
1443 | found = true; | ||
1177 | break; | 1444 | break; |
1178 | } | 1445 | } |
1179 | } | 1446 | } |
1180 | if (mfc_unres_queue == NULL) | 1447 | if (list_empty(&mrt->mfc6_unres_queue)) |
1181 | del_timer(&ipmr_expire_timer); | 1448 | del_timer(&mrt->ipmr_expire_timer); |
1182 | spin_unlock_bh(&mfc_unres_lock); | 1449 | spin_unlock_bh(&mfc_unres_lock); |
1183 | 1450 | ||
1184 | if (uc) { | 1451 | if (found) { |
1185 | ip6mr_cache_resolve(uc, c); | 1452 | ip6mr_cache_resolve(net, mrt, uc, c); |
1186 | ip6mr_cache_free(uc); | 1453 | ip6mr_cache_free(uc); |
1187 | } | 1454 | } |
1188 | return 0; | 1455 | return 0; |
@@ -1192,17 +1459,18 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) | |||
1192 | * Close the multicast socket, and clear the vif tables etc | 1459 | * Close the multicast socket, and clear the vif tables etc |
1193 | */ | 1460 | */ |
1194 | 1461 | ||
1195 | static void mroute_clean_tables(struct net *net) | 1462 | static void mroute_clean_tables(struct mr6_table *mrt) |
1196 | { | 1463 | { |
1197 | int i; | 1464 | int i; |
1198 | LIST_HEAD(list); | 1465 | LIST_HEAD(list); |
1466 | struct mfc6_cache *c, *next; | ||
1199 | 1467 | ||
1200 | /* | 1468 | /* |
1201 | * Shut down all active vif entries | 1469 | * Shut down all active vif entries |
1202 | */ | 1470 | */ |
1203 | for (i = 0; i < net->ipv6.maxvif; i++) { | 1471 | for (i = 0; i < mrt->maxvif; i++) { |
1204 | if (!(net->ipv6.vif6_table[i].flags & VIFF_STATIC)) | 1472 | if (!(mrt->vif6_table[i].flags & VIFF_STATIC)) |
1205 | mif6_delete(net, i, &list); | 1473 | mif6_delete(mrt, i, &list); |
1206 | } | 1474 | } |
1207 | unregister_netdevice_many(&list); | 1475 | unregister_netdevice_many(&list); |
1208 | 1476 | ||
@@ -1210,48 +1478,36 @@ static void mroute_clean_tables(struct net *net) | |||
1210 | * Wipe the cache | 1478 | * Wipe the cache |
1211 | */ | 1479 | */ |
1212 | for (i = 0; i < MFC6_LINES; i++) { | 1480 | for (i = 0; i < MFC6_LINES; i++) { |
1213 | struct mfc6_cache *c, **cp; | 1481 | list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) { |
1214 | 1482 | if (c->mfc_flags & MFC_STATIC) | |
1215 | cp = &net->ipv6.mfc6_cache_array[i]; | ||
1216 | while ((c = *cp) != NULL) { | ||
1217 | if (c->mfc_flags & MFC_STATIC) { | ||
1218 | cp = &c->next; | ||
1219 | continue; | 1483 | continue; |
1220 | } | ||
1221 | write_lock_bh(&mrt_lock); | 1484 | write_lock_bh(&mrt_lock); |
1222 | *cp = c->next; | 1485 | list_del(&c->list); |
1223 | write_unlock_bh(&mrt_lock); | 1486 | write_unlock_bh(&mrt_lock); |
1224 | 1487 | ||
1225 | ip6mr_cache_free(c); | 1488 | ip6mr_cache_free(c); |
1226 | } | 1489 | } |
1227 | } | 1490 | } |
1228 | 1491 | ||
1229 | if (atomic_read(&net->ipv6.cache_resolve_queue_len) != 0) { | 1492 | if (atomic_read(&mrt->cache_resolve_queue_len) != 0) { |
1230 | struct mfc6_cache *c, **cp; | ||
1231 | |||
1232 | spin_lock_bh(&mfc_unres_lock); | 1493 | spin_lock_bh(&mfc_unres_lock); |
1233 | cp = &mfc_unres_queue; | 1494 | list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) { |
1234 | while ((c = *cp) != NULL) { | 1495 | list_del(&c->list); |
1235 | if (!net_eq(mfc6_net(c), net)) { | 1496 | ip6mr_destroy_unres(mrt, c); |
1236 | cp = &c->next; | ||
1237 | continue; | ||
1238 | } | ||
1239 | *cp = c->next; | ||
1240 | ip6mr_destroy_unres(c); | ||
1241 | } | 1497 | } |
1242 | spin_unlock_bh(&mfc_unres_lock); | 1498 | spin_unlock_bh(&mfc_unres_lock); |
1243 | } | 1499 | } |
1244 | } | 1500 | } |
1245 | 1501 | ||
1246 | static int ip6mr_sk_init(struct sock *sk) | 1502 | static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk) |
1247 | { | 1503 | { |
1248 | int err = 0; | 1504 | int err = 0; |
1249 | struct net *net = sock_net(sk); | 1505 | struct net *net = sock_net(sk); |
1250 | 1506 | ||
1251 | rtnl_lock(); | 1507 | rtnl_lock(); |
1252 | write_lock_bh(&mrt_lock); | 1508 | write_lock_bh(&mrt_lock); |
1253 | if (likely(net->ipv6.mroute6_sk == NULL)) { | 1509 | if (likely(mrt->mroute6_sk == NULL)) { |
1254 | net->ipv6.mroute6_sk = sk; | 1510 | mrt->mroute6_sk = sk; |
1255 | net->ipv6.devconf_all->mc_forwarding++; | 1511 | net->ipv6.devconf_all->mc_forwarding++; |
1256 | } | 1512 | } |
1257 | else | 1513 | else |
@@ -1265,24 +1521,43 @@ static int ip6mr_sk_init(struct sock *sk) | |||
1265 | 1521 | ||
1266 | int ip6mr_sk_done(struct sock *sk) | 1522 | int ip6mr_sk_done(struct sock *sk) |
1267 | { | 1523 | { |
1268 | int err = 0; | 1524 | int err = -EACCES; |
1269 | struct net *net = sock_net(sk); | 1525 | struct net *net = sock_net(sk); |
1526 | struct mr6_table *mrt; | ||
1270 | 1527 | ||
1271 | rtnl_lock(); | 1528 | rtnl_lock(); |
1272 | if (sk == net->ipv6.mroute6_sk) { | 1529 | ip6mr_for_each_table(mrt, net) { |
1273 | write_lock_bh(&mrt_lock); | 1530 | if (sk == mrt->mroute6_sk) { |
1274 | net->ipv6.mroute6_sk = NULL; | 1531 | write_lock_bh(&mrt_lock); |
1275 | net->ipv6.devconf_all->mc_forwarding--; | 1532 | mrt->mroute6_sk = NULL; |
1276 | write_unlock_bh(&mrt_lock); | 1533 | net->ipv6.devconf_all->mc_forwarding--; |
1534 | write_unlock_bh(&mrt_lock); | ||
1277 | 1535 | ||
1278 | mroute_clean_tables(net); | 1536 | mroute_clean_tables(mrt); |
1279 | } else | 1537 | err = 0; |
1280 | err = -EACCES; | 1538 | break; |
1539 | } | ||
1540 | } | ||
1281 | rtnl_unlock(); | 1541 | rtnl_unlock(); |
1282 | 1542 | ||
1283 | return err; | 1543 | return err; |
1284 | } | 1544 | } |
1285 | 1545 | ||
1546 | struct sock *mroute6_socket(struct net *net, struct sk_buff *skb) | ||
1547 | { | ||
1548 | struct mr6_table *mrt; | ||
1549 | struct flowi fl = { | ||
1550 | .iif = skb->skb_iif, | ||
1551 | .oif = skb->dev->ifindex, | ||
1552 | .mark = skb->mark, | ||
1553 | }; | ||
1554 | |||
1555 | if (ip6mr_fib_lookup(net, &fl, &mrt) < 0) | ||
1556 | return NULL; | ||
1557 | |||
1558 | return mrt->mroute6_sk; | ||
1559 | } | ||
1560 | |||
1286 | /* | 1561 | /* |
1287 | * Socket options and virtual interface manipulation. The whole | 1562 | * Socket options and virtual interface manipulation. The whole |
1288 | * virtual interface system is a complete heap, but unfortunately | 1563 | * virtual interface system is a complete heap, but unfortunately |
@@ -1297,9 +1572,14 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns | |||
1297 | struct mf6cctl mfc; | 1572 | struct mf6cctl mfc; |
1298 | mifi_t mifi; | 1573 | mifi_t mifi; |
1299 | struct net *net = sock_net(sk); | 1574 | struct net *net = sock_net(sk); |
1575 | struct mr6_table *mrt; | ||
1576 | |||
1577 | mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT); | ||
1578 | if (mrt == NULL) | ||
1579 | return -ENOENT; | ||
1300 | 1580 | ||
1301 | if (optname != MRT6_INIT) { | 1581 | if (optname != MRT6_INIT) { |
1302 | if (sk != net->ipv6.mroute6_sk && !capable(CAP_NET_ADMIN)) | 1582 | if (sk != mrt->mroute6_sk && !capable(CAP_NET_ADMIN)) |
1303 | return -EACCES; | 1583 | return -EACCES; |
1304 | } | 1584 | } |
1305 | 1585 | ||
@@ -1311,7 +1591,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns | |||
1311 | if (optlen < sizeof(int)) | 1591 | if (optlen < sizeof(int)) |
1312 | return -EINVAL; | 1592 | return -EINVAL; |
1313 | 1593 | ||
1314 | return ip6mr_sk_init(sk); | 1594 | return ip6mr_sk_init(mrt, sk); |
1315 | 1595 | ||
1316 | case MRT6_DONE: | 1596 | case MRT6_DONE: |
1317 | return ip6mr_sk_done(sk); | 1597 | return ip6mr_sk_done(sk); |
@@ -1324,7 +1604,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns | |||
1324 | if (vif.mif6c_mifi >= MAXMIFS) | 1604 | if (vif.mif6c_mifi >= MAXMIFS) |
1325 | return -ENFILE; | 1605 | return -ENFILE; |
1326 | rtnl_lock(); | 1606 | rtnl_lock(); |
1327 | ret = mif6_add(net, &vif, sk == net->ipv6.mroute6_sk); | 1607 | ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk); |
1328 | rtnl_unlock(); | 1608 | rtnl_unlock(); |
1329 | return ret; | 1609 | return ret; |
1330 | 1610 | ||
@@ -1334,7 +1614,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns | |||
1334 | if (copy_from_user(&mifi, optval, sizeof(mifi_t))) | 1614 | if (copy_from_user(&mifi, optval, sizeof(mifi_t))) |
1335 | return -EFAULT; | 1615 | return -EFAULT; |
1336 | rtnl_lock(); | 1616 | rtnl_lock(); |
1337 | ret = mif6_delete(net, mifi, NULL); | 1617 | ret = mif6_delete(mrt, mifi, NULL); |
1338 | rtnl_unlock(); | 1618 | rtnl_unlock(); |
1339 | return ret; | 1619 | return ret; |
1340 | 1620 | ||
@@ -1350,10 +1630,9 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns | |||
1350 | return -EFAULT; | 1630 | return -EFAULT; |
1351 | rtnl_lock(); | 1631 | rtnl_lock(); |
1352 | if (optname == MRT6_DEL_MFC) | 1632 | if (optname == MRT6_DEL_MFC) |
1353 | ret = ip6mr_mfc_delete(net, &mfc); | 1633 | ret = ip6mr_mfc_delete(mrt, &mfc); |
1354 | else | 1634 | else |
1355 | ret = ip6mr_mfc_add(net, &mfc, | 1635 | ret = ip6mr_mfc_add(net, mrt, &mfc, sk == mrt->mroute6_sk); |
1356 | sk == net->ipv6.mroute6_sk); | ||
1357 | rtnl_unlock(); | 1636 | rtnl_unlock(); |
1358 | return ret; | 1637 | return ret; |
1359 | 1638 | ||
@@ -1365,7 +1644,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns | |||
1365 | int v; | 1644 | int v; |
1366 | if (get_user(v, (int __user *)optval)) | 1645 | if (get_user(v, (int __user *)optval)) |
1367 | return -EFAULT; | 1646 | return -EFAULT; |
1368 | net->ipv6.mroute_do_assert = !!v; | 1647 | mrt->mroute_do_assert = !!v; |
1369 | return 0; | 1648 | return 0; |
1370 | } | 1649 | } |
1371 | 1650 | ||
@@ -1378,15 +1657,36 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns | |||
1378 | v = !!v; | 1657 | v = !!v; |
1379 | rtnl_lock(); | 1658 | rtnl_lock(); |
1380 | ret = 0; | 1659 | ret = 0; |
1381 | if (v != net->ipv6.mroute_do_pim) { | 1660 | if (v != mrt->mroute_do_pim) { |
1382 | net->ipv6.mroute_do_pim = v; | 1661 | mrt->mroute_do_pim = v; |
1383 | net->ipv6.mroute_do_assert = v; | 1662 | mrt->mroute_do_assert = v; |
1384 | } | 1663 | } |
1385 | rtnl_unlock(); | 1664 | rtnl_unlock(); |
1386 | return ret; | 1665 | return ret; |
1387 | } | 1666 | } |
1388 | 1667 | ||
1389 | #endif | 1668 | #endif |
1669 | #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES | ||
1670 | case MRT6_TABLE: | ||
1671 | { | ||
1672 | u32 v; | ||
1673 | |||
1674 | if (optlen != sizeof(u32)) | ||
1675 | return -EINVAL; | ||
1676 | if (get_user(v, (u32 __user *)optval)) | ||
1677 | return -EFAULT; | ||
1678 | if (sk == mrt->mroute6_sk) | ||
1679 | return -EBUSY; | ||
1680 | |||
1681 | rtnl_lock(); | ||
1682 | ret = 0; | ||
1683 | if (!ip6mr_new_table(net, v)) | ||
1684 | ret = -ENOMEM; | ||
1685 | raw6_sk(sk)->ip6mr_table = v; | ||
1686 | rtnl_unlock(); | ||
1687 | return ret; | ||
1688 | } | ||
1689 | #endif | ||
1390 | /* | 1690 | /* |
1391 | * Spurious command, or MRT6_VERSION which you cannot | 1691 | * Spurious command, or MRT6_VERSION which you cannot |
1392 | * set. | 1692 | * set. |
@@ -1406,6 +1706,11 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, | |||
1406 | int olr; | 1706 | int olr; |
1407 | int val; | 1707 | int val; |
1408 | struct net *net = sock_net(sk); | 1708 | struct net *net = sock_net(sk); |
1709 | struct mr6_table *mrt; | ||
1710 | |||
1711 | mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT); | ||
1712 | if (mrt == NULL) | ||
1713 | return -ENOENT; | ||
1409 | 1714 | ||
1410 | switch (optname) { | 1715 | switch (optname) { |
1411 | case MRT6_VERSION: | 1716 | case MRT6_VERSION: |
@@ -1413,11 +1718,11 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, | |||
1413 | break; | 1718 | break; |
1414 | #ifdef CONFIG_IPV6_PIMSM_V2 | 1719 | #ifdef CONFIG_IPV6_PIMSM_V2 |
1415 | case MRT6_PIM: | 1720 | case MRT6_PIM: |
1416 | val = net->ipv6.mroute_do_pim; | 1721 | val = mrt->mroute_do_pim; |
1417 | break; | 1722 | break; |
1418 | #endif | 1723 | #endif |
1419 | case MRT6_ASSERT: | 1724 | case MRT6_ASSERT: |
1420 | val = net->ipv6.mroute_do_assert; | 1725 | val = mrt->mroute_do_assert; |
1421 | break; | 1726 | break; |
1422 | default: | 1727 | default: |
1423 | return -ENOPROTOOPT; | 1728 | return -ENOPROTOOPT; |
@@ -1448,16 +1753,21 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg) | |||
1448 | struct mif_device *vif; | 1753 | struct mif_device *vif; |
1449 | struct mfc6_cache *c; | 1754 | struct mfc6_cache *c; |
1450 | struct net *net = sock_net(sk); | 1755 | struct net *net = sock_net(sk); |
1756 | struct mr6_table *mrt; | ||
1757 | |||
1758 | mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT); | ||
1759 | if (mrt == NULL) | ||
1760 | return -ENOENT; | ||
1451 | 1761 | ||
1452 | switch (cmd) { | 1762 | switch (cmd) { |
1453 | case SIOCGETMIFCNT_IN6: | 1763 | case SIOCGETMIFCNT_IN6: |
1454 | if (copy_from_user(&vr, arg, sizeof(vr))) | 1764 | if (copy_from_user(&vr, arg, sizeof(vr))) |
1455 | return -EFAULT; | 1765 | return -EFAULT; |
1456 | if (vr.mifi >= net->ipv6.maxvif) | 1766 | if (vr.mifi >= mrt->maxvif) |
1457 | return -EINVAL; | 1767 | return -EINVAL; |
1458 | read_lock(&mrt_lock); | 1768 | read_lock(&mrt_lock); |
1459 | vif = &net->ipv6.vif6_table[vr.mifi]; | 1769 | vif = &mrt->vif6_table[vr.mifi]; |
1460 | if (MIF_EXISTS(net, vr.mifi)) { | 1770 | if (MIF_EXISTS(mrt, vr.mifi)) { |
1461 | vr.icount = vif->pkt_in; | 1771 | vr.icount = vif->pkt_in; |
1462 | vr.ocount = vif->pkt_out; | 1772 | vr.ocount = vif->pkt_out; |
1463 | vr.ibytes = vif->bytes_in; | 1773 | vr.ibytes = vif->bytes_in; |
@@ -1475,7 +1785,7 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg) | |||
1475 | return -EFAULT; | 1785 | return -EFAULT; |
1476 | 1786 | ||
1477 | read_lock(&mrt_lock); | 1787 | read_lock(&mrt_lock); |
1478 | c = ip6mr_cache_find(net, &sr.src.sin6_addr, &sr.grp.sin6_addr); | 1788 | c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr); |
1479 | if (c) { | 1789 | if (c) { |
1480 | sr.pktcnt = c->mfc_un.res.pkt; | 1790 | sr.pktcnt = c->mfc_un.res.pkt; |
1481 | sr.bytecnt = c->mfc_un.res.bytes; | 1791 | sr.bytecnt = c->mfc_un.res.bytes; |
@@ -1505,11 +1815,11 @@ static inline int ip6mr_forward2_finish(struct sk_buff *skb) | |||
1505 | * Processing handlers for ip6mr_forward | 1815 | * Processing handlers for ip6mr_forward |
1506 | */ | 1816 | */ |
1507 | 1817 | ||
1508 | static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi) | 1818 | static int ip6mr_forward2(struct net *net, struct mr6_table *mrt, |
1819 | struct sk_buff *skb, struct mfc6_cache *c, int vifi) | ||
1509 | { | 1820 | { |
1510 | struct ipv6hdr *ipv6h; | 1821 | struct ipv6hdr *ipv6h; |
1511 | struct net *net = mfc6_net(c); | 1822 | struct mif_device *vif = &mrt->vif6_table[vifi]; |
1512 | struct mif_device *vif = &net->ipv6.vif6_table[vifi]; | ||
1513 | struct net_device *dev; | 1823 | struct net_device *dev; |
1514 | struct dst_entry *dst; | 1824 | struct dst_entry *dst; |
1515 | struct flowi fl; | 1825 | struct flowi fl; |
@@ -1523,7 +1833,7 @@ static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi) | |||
1523 | vif->bytes_out += skb->len; | 1833 | vif->bytes_out += skb->len; |
1524 | vif->dev->stats.tx_bytes += skb->len; | 1834 | vif->dev->stats.tx_bytes += skb->len; |
1525 | vif->dev->stats.tx_packets++; | 1835 | vif->dev->stats.tx_packets++; |
1526 | ip6mr_cache_report(net, skb, vifi, MRT6MSG_WHOLEPKT); | 1836 | ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT); |
1527 | goto out_free; | 1837 | goto out_free; |
1528 | } | 1838 | } |
1529 | #endif | 1839 | #endif |
@@ -1570,7 +1880,7 @@ static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi) | |||
1570 | 1880 | ||
1571 | IP6CB(skb)->flags |= IP6SKB_FORWARDED; | 1881 | IP6CB(skb)->flags |= IP6SKB_FORWARDED; |
1572 | 1882 | ||
1573 | return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dev, | 1883 | return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev, |
1574 | ip6mr_forward2_finish); | 1884 | ip6mr_forward2_finish); |
1575 | 1885 | ||
1576 | out_free: | 1886 | out_free: |
@@ -1578,22 +1888,22 @@ out_free: | |||
1578 | return 0; | 1888 | return 0; |
1579 | } | 1889 | } |
1580 | 1890 | ||
1581 | static int ip6mr_find_vif(struct net_device *dev) | 1891 | static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev) |
1582 | { | 1892 | { |
1583 | struct net *net = dev_net(dev); | ||
1584 | int ct; | 1893 | int ct; |
1585 | for (ct = net->ipv6.maxvif - 1; ct >= 0; ct--) { | 1894 | |
1586 | if (net->ipv6.vif6_table[ct].dev == dev) | 1895 | for (ct = mrt->maxvif - 1; ct >= 0; ct--) { |
1896 | if (mrt->vif6_table[ct].dev == dev) | ||
1587 | break; | 1897 | break; |
1588 | } | 1898 | } |
1589 | return ct; | 1899 | return ct; |
1590 | } | 1900 | } |
1591 | 1901 | ||
1592 | static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache) | 1902 | static int ip6_mr_forward(struct net *net, struct mr6_table *mrt, |
1903 | struct sk_buff *skb, struct mfc6_cache *cache) | ||
1593 | { | 1904 | { |
1594 | int psend = -1; | 1905 | int psend = -1; |
1595 | int vif, ct; | 1906 | int vif, ct; |
1596 | struct net *net = mfc6_net(cache); | ||
1597 | 1907 | ||
1598 | vif = cache->mf6c_parent; | 1908 | vif = cache->mf6c_parent; |
1599 | cache->mfc_un.res.pkt++; | 1909 | cache->mfc_un.res.pkt++; |
@@ -1602,30 +1912,30 @@ static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache) | |||
1602 | /* | 1912 | /* |
1603 | * Wrong interface: drop packet and (maybe) send PIM assert. | 1913 | * Wrong interface: drop packet and (maybe) send PIM assert. |
1604 | */ | 1914 | */ |
1605 | if (net->ipv6.vif6_table[vif].dev != skb->dev) { | 1915 | if (mrt->vif6_table[vif].dev != skb->dev) { |
1606 | int true_vifi; | 1916 | int true_vifi; |
1607 | 1917 | ||
1608 | cache->mfc_un.res.wrong_if++; | 1918 | cache->mfc_un.res.wrong_if++; |
1609 | true_vifi = ip6mr_find_vif(skb->dev); | 1919 | true_vifi = ip6mr_find_vif(mrt, skb->dev); |
1610 | 1920 | ||
1611 | if (true_vifi >= 0 && net->ipv6.mroute_do_assert && | 1921 | if (true_vifi >= 0 && mrt->mroute_do_assert && |
1612 | /* pimsm uses asserts, when switching from RPT to SPT, | 1922 | /* pimsm uses asserts, when switching from RPT to SPT, |
1613 | so that we cannot check that packet arrived on an oif. | 1923 | so that we cannot check that packet arrived on an oif. |
1614 | It is bad, but otherwise we would need to move pretty | 1924 | It is bad, but otherwise we would need to move pretty |
1615 | large chunk of pimd to kernel. Ough... --ANK | 1925 | large chunk of pimd to kernel. Ough... --ANK |
1616 | */ | 1926 | */ |
1617 | (net->ipv6.mroute_do_pim || | 1927 | (mrt->mroute_do_pim || |
1618 | cache->mfc_un.res.ttls[true_vifi] < 255) && | 1928 | cache->mfc_un.res.ttls[true_vifi] < 255) && |
1619 | time_after(jiffies, | 1929 | time_after(jiffies, |
1620 | cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { | 1930 | cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { |
1621 | cache->mfc_un.res.last_assert = jiffies; | 1931 | cache->mfc_un.res.last_assert = jiffies; |
1622 | ip6mr_cache_report(net, skb, true_vifi, MRT6MSG_WRONGMIF); | 1932 | ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF); |
1623 | } | 1933 | } |
1624 | goto dont_forward; | 1934 | goto dont_forward; |
1625 | } | 1935 | } |
1626 | 1936 | ||
1627 | net->ipv6.vif6_table[vif].pkt_in++; | 1937 | mrt->vif6_table[vif].pkt_in++; |
1628 | net->ipv6.vif6_table[vif].bytes_in += skb->len; | 1938 | mrt->vif6_table[vif].bytes_in += skb->len; |
1629 | 1939 | ||
1630 | /* | 1940 | /* |
1631 | * Forward the frame | 1941 | * Forward the frame |
@@ -1635,13 +1945,13 @@ static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache) | |||
1635 | if (psend != -1) { | 1945 | if (psend != -1) { |
1636 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); | 1946 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); |
1637 | if (skb2) | 1947 | if (skb2) |
1638 | ip6mr_forward2(skb2, cache, psend); | 1948 | ip6mr_forward2(net, mrt, skb2, cache, psend); |
1639 | } | 1949 | } |
1640 | psend = ct; | 1950 | psend = ct; |
1641 | } | 1951 | } |
1642 | } | 1952 | } |
1643 | if (psend != -1) { | 1953 | if (psend != -1) { |
1644 | ip6mr_forward2(skb, cache, psend); | 1954 | ip6mr_forward2(net, mrt, skb, cache, psend); |
1645 | return 0; | 1955 | return 0; |
1646 | } | 1956 | } |
1647 | 1957 | ||
@@ -1659,9 +1969,19 @@ int ip6_mr_input(struct sk_buff *skb) | |||
1659 | { | 1969 | { |
1660 | struct mfc6_cache *cache; | 1970 | struct mfc6_cache *cache; |
1661 | struct net *net = dev_net(skb->dev); | 1971 | struct net *net = dev_net(skb->dev); |
1972 | struct mr6_table *mrt; | ||
1973 | struct flowi fl = { | ||
1974 | .iif = skb->dev->ifindex, | ||
1975 | .mark = skb->mark, | ||
1976 | }; | ||
1977 | int err; | ||
1978 | |||
1979 | err = ip6mr_fib_lookup(net, &fl, &mrt); | ||
1980 | if (err < 0) | ||
1981 | return err; | ||
1662 | 1982 | ||
1663 | read_lock(&mrt_lock); | 1983 | read_lock(&mrt_lock); |
1664 | cache = ip6mr_cache_find(net, | 1984 | cache = ip6mr_cache_find(mrt, |
1665 | &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr); | 1985 | &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr); |
1666 | 1986 | ||
1667 | /* | 1987 | /* |
@@ -1670,9 +1990,9 @@ int ip6_mr_input(struct sk_buff *skb) | |||
1670 | if (cache == NULL) { | 1990 | if (cache == NULL) { |
1671 | int vif; | 1991 | int vif; |
1672 | 1992 | ||
1673 | vif = ip6mr_find_vif(skb->dev); | 1993 | vif = ip6mr_find_vif(mrt, skb->dev); |
1674 | if (vif >= 0) { | 1994 | if (vif >= 0) { |
1675 | int err = ip6mr_cache_unresolved(net, vif, skb); | 1995 | int err = ip6mr_cache_unresolved(mrt, vif, skb); |
1676 | read_unlock(&mrt_lock); | 1996 | read_unlock(&mrt_lock); |
1677 | 1997 | ||
1678 | return err; | 1998 | return err; |
@@ -1682,7 +2002,7 @@ int ip6_mr_input(struct sk_buff *skb) | |||
1682 | return -ENODEV; | 2002 | return -ENODEV; |
1683 | } | 2003 | } |
1684 | 2004 | ||
1685 | ip6_mr_forward(skb, cache); | 2005 | ip6_mr_forward(net, mrt, skb, cache); |
1686 | 2006 | ||
1687 | read_unlock(&mrt_lock); | 2007 | read_unlock(&mrt_lock); |
1688 | 2008 | ||
@@ -1690,32 +2010,31 @@ int ip6_mr_input(struct sk_buff *skb) | |||
1690 | } | 2010 | } |
1691 | 2011 | ||
1692 | 2012 | ||
1693 | static int | 2013 | static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, |
1694 | ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm) | 2014 | struct mfc6_cache *c, struct rtmsg *rtm) |
1695 | { | 2015 | { |
1696 | int ct; | 2016 | int ct; |
1697 | struct rtnexthop *nhp; | 2017 | struct rtnexthop *nhp; |
1698 | struct net *net = mfc6_net(c); | ||
1699 | u8 *b = skb_tail_pointer(skb); | 2018 | u8 *b = skb_tail_pointer(skb); |
1700 | struct rtattr *mp_head; | 2019 | struct rtattr *mp_head; |
1701 | 2020 | ||
1702 | /* If cache is unresolved, don't try to parse IIF and OIF */ | 2021 | /* If cache is unresolved, don't try to parse IIF and OIF */ |
1703 | if (c->mf6c_parent > MAXMIFS) | 2022 | if (c->mf6c_parent >= MAXMIFS) |
1704 | return -ENOENT; | 2023 | return -ENOENT; |
1705 | 2024 | ||
1706 | if (MIF_EXISTS(net, c->mf6c_parent)) | 2025 | if (MIF_EXISTS(mrt, c->mf6c_parent)) |
1707 | RTA_PUT(skb, RTA_IIF, 4, &net->ipv6.vif6_table[c->mf6c_parent].dev->ifindex); | 2026 | RTA_PUT(skb, RTA_IIF, 4, &mrt->vif6_table[c->mf6c_parent].dev->ifindex); |
1708 | 2027 | ||
1709 | mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); | 2028 | mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); |
1710 | 2029 | ||
1711 | for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { | 2030 | for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { |
1712 | if (MIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) { | 2031 | if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) { |
1713 | if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) | 2032 | if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) |
1714 | goto rtattr_failure; | 2033 | goto rtattr_failure; |
1715 | nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); | 2034 | nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); |
1716 | nhp->rtnh_flags = 0; | 2035 | nhp->rtnh_flags = 0; |
1717 | nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; | 2036 | nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; |
1718 | nhp->rtnh_ifindex = net->ipv6.vif6_table[ct].dev->ifindex; | 2037 | nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex; |
1719 | nhp->rtnh_len = sizeof(*nhp); | 2038 | nhp->rtnh_len = sizeof(*nhp); |
1720 | } | 2039 | } |
1721 | } | 2040 | } |
@@ -1733,11 +2052,16 @@ int ip6mr_get_route(struct net *net, | |||
1733 | struct sk_buff *skb, struct rtmsg *rtm, int nowait) | 2052 | struct sk_buff *skb, struct rtmsg *rtm, int nowait) |
1734 | { | 2053 | { |
1735 | int err; | 2054 | int err; |
2055 | struct mr6_table *mrt; | ||
1736 | struct mfc6_cache *cache; | 2056 | struct mfc6_cache *cache; |
1737 | struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); | 2057 | struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); |
1738 | 2058 | ||
2059 | mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); | ||
2060 | if (mrt == NULL) | ||
2061 | return -ENOENT; | ||
2062 | |||
1739 | read_lock(&mrt_lock); | 2063 | read_lock(&mrt_lock); |
1740 | cache = ip6mr_cache_find(net, &rt->rt6i_src.addr, &rt->rt6i_dst.addr); | 2064 | cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr); |
1741 | 2065 | ||
1742 | if (!cache) { | 2066 | if (!cache) { |
1743 | struct sk_buff *skb2; | 2067 | struct sk_buff *skb2; |
@@ -1751,7 +2075,7 @@ int ip6mr_get_route(struct net *net, | |||
1751 | } | 2075 | } |
1752 | 2076 | ||
1753 | dev = skb->dev; | 2077 | dev = skb->dev; |
1754 | if (dev == NULL || (vif = ip6mr_find_vif(dev)) < 0) { | 2078 | if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) { |
1755 | read_unlock(&mrt_lock); | 2079 | read_unlock(&mrt_lock); |
1756 | return -ENODEV; | 2080 | return -ENODEV; |
1757 | } | 2081 | } |
@@ -1780,7 +2104,7 @@ int ip6mr_get_route(struct net *net, | |||
1780 | ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr); | 2104 | ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr); |
1781 | ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr); | 2105 | ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr); |
1782 | 2106 | ||
1783 | err = ip6mr_cache_unresolved(net, vif, skb2); | 2107 | err = ip6mr_cache_unresolved(mrt, vif, skb2); |
1784 | read_unlock(&mrt_lock); | 2108 | read_unlock(&mrt_lock); |
1785 | 2109 | ||
1786 | return err; | 2110 | return err; |
@@ -1789,8 +2113,88 @@ int ip6mr_get_route(struct net *net, | |||
1789 | if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY)) | 2113 | if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY)) |
1790 | cache->mfc_flags |= MFC_NOTIFY; | 2114 | cache->mfc_flags |= MFC_NOTIFY; |
1791 | 2115 | ||
1792 | err = ip6mr_fill_mroute(skb, cache, rtm); | 2116 | err = __ip6mr_fill_mroute(mrt, skb, cache, rtm); |
1793 | read_unlock(&mrt_lock); | 2117 | read_unlock(&mrt_lock); |
1794 | return err; | 2118 | return err; |
1795 | } | 2119 | } |
1796 | 2120 | ||
2121 | static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, | ||
2122 | u32 pid, u32 seq, struct mfc6_cache *c) | ||
2123 | { | ||
2124 | struct nlmsghdr *nlh; | ||
2125 | struct rtmsg *rtm; | ||
2126 | |||
2127 | nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI); | ||
2128 | if (nlh == NULL) | ||
2129 | return -EMSGSIZE; | ||
2130 | |||
2131 | rtm = nlmsg_data(nlh); | ||
2132 | rtm->rtm_family = RTNL_FAMILY_IPMR; | ||
2133 | rtm->rtm_dst_len = 128; | ||
2134 | rtm->rtm_src_len = 128; | ||
2135 | rtm->rtm_tos = 0; | ||
2136 | rtm->rtm_table = mrt->id; | ||
2137 | NLA_PUT_U32(skb, RTA_TABLE, mrt->id); | ||
2138 | rtm->rtm_scope = RT_SCOPE_UNIVERSE; | ||
2139 | rtm->rtm_protocol = RTPROT_UNSPEC; | ||
2140 | rtm->rtm_flags = 0; | ||
2141 | |||
2142 | NLA_PUT(skb, RTA_SRC, 16, &c->mf6c_origin); | ||
2143 | NLA_PUT(skb, RTA_DST, 16, &c->mf6c_mcastgrp); | ||
2144 | |||
2145 | if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0) | ||
2146 | goto nla_put_failure; | ||
2147 | |||
2148 | return nlmsg_end(skb, nlh); | ||
2149 | |||
2150 | nla_put_failure: | ||
2151 | nlmsg_cancel(skb, nlh); | ||
2152 | return -EMSGSIZE; | ||
2153 | } | ||
2154 | |||
2155 | static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) | ||
2156 | { | ||
2157 | struct net *net = sock_net(skb->sk); | ||
2158 | struct mr6_table *mrt; | ||
2159 | struct mfc6_cache *mfc; | ||
2160 | unsigned int t = 0, s_t; | ||
2161 | unsigned int h = 0, s_h; | ||
2162 | unsigned int e = 0, s_e; | ||
2163 | |||
2164 | s_t = cb->args[0]; | ||
2165 | s_h = cb->args[1]; | ||
2166 | s_e = cb->args[2]; | ||
2167 | |||
2168 | read_lock(&mrt_lock); | ||
2169 | ip6mr_for_each_table(mrt, net) { | ||
2170 | if (t < s_t) | ||
2171 | goto next_table; | ||
2172 | if (t > s_t) | ||
2173 | s_h = 0; | ||
2174 | for (h = s_h; h < MFC6_LINES; h++) { | ||
2175 | list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) { | ||
2176 | if (e < s_e) | ||
2177 | goto next_entry; | ||
2178 | if (ip6mr_fill_mroute(mrt, skb, | ||
2179 | NETLINK_CB(cb->skb).pid, | ||
2180 | cb->nlh->nlmsg_seq, | ||
2181 | mfc) < 0) | ||
2182 | goto done; | ||
2183 | next_entry: | ||
2184 | e++; | ||
2185 | } | ||
2186 | e = s_e = 0; | ||
2187 | } | ||
2188 | s_h = 0; | ||
2189 | next_table: | ||
2190 | t++; | ||
2191 | } | ||
2192 | done: | ||
2193 | read_unlock(&mrt_lock); | ||
2194 | |||
2195 | cb->args[2] = e; | ||
2196 | cb->args[1] = h; | ||
2197 | cb->args[0] = t; | ||
2198 | |||
2199 | return skb->len; | ||
2200 | } | ||
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 006aee683a0f..ab1622d7d409 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -1356,7 +1356,10 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size) | |||
1356 | IPV6_TLV_PADN, 0 }; | 1356 | IPV6_TLV_PADN, 0 }; |
1357 | 1357 | ||
1358 | /* we assume size > sizeof(ra) here */ | 1358 | /* we assume size > sizeof(ra) here */ |
1359 | skb = sock_alloc_send_skb(sk, size + LL_ALLOCATED_SPACE(dev), 1, &err); | 1359 | size += LL_ALLOCATED_SPACE(dev); |
1360 | /* limit our allocations to order-0 page */ | ||
1361 | size = min_t(int, size, SKB_MAX_ORDER(0, 0)); | ||
1362 | skb = sock_alloc_send_skb(sk, size, 1, &err); | ||
1360 | 1363 | ||
1361 | if (!skb) | 1364 | if (!skb) |
1362 | return NULL; | 1365 | return NULL; |
@@ -1428,7 +1431,7 @@ static void mld_sendpack(struct sk_buff *skb) | |||
1428 | 1431 | ||
1429 | payload_len = skb->len; | 1432 | payload_len = skb->len; |
1430 | 1433 | ||
1431 | err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, | 1434 | err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, |
1432 | dst_output); | 1435 | dst_output); |
1433 | out: | 1436 | out: |
1434 | if (!err) { | 1437 | if (!err) { |
@@ -1793,7 +1796,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) | |||
1793 | goto err_out; | 1796 | goto err_out; |
1794 | 1797 | ||
1795 | skb_dst_set(skb, dst); | 1798 | skb_dst_set(skb, dst); |
1796 | err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, | 1799 | err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, |
1797 | dst_output); | 1800 | dst_output); |
1798 | out: | 1801 | out: |
1799 | if (!err) { | 1802 | if (!err) { |
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index da0a4d2adc69..0abdc242ddb7 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -536,7 +536,7 @@ void ndisc_send_skb(struct sk_buff *skb, | |||
536 | idev = in6_dev_get(dst->dev); | 536 | idev = in6_dev_get(dst->dev); |
537 | IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); | 537 | IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); |
538 | 538 | ||
539 | err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev, | 539 | err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev, |
540 | dst_output); | 540 | dst_output); |
541 | if (!err) { | 541 | if (!err) { |
542 | ICMP6MSGOUT_INC_STATS(net, idev, type); | 542 | ICMP6MSGOUT_INC_STATS(net, idev, type); |
@@ -890,8 +890,6 @@ out: | |||
890 | in6_ifa_put(ifp); | 890 | in6_ifa_put(ifp); |
891 | else | 891 | else |
892 | in6_dev_put(idev); | 892 | in6_dev_put(idev); |
893 | |||
894 | return; | ||
895 | } | 893 | } |
896 | 894 | ||
897 | static void ndisc_recv_na(struct sk_buff *skb) | 895 | static void ndisc_recv_na(struct sk_buff *skb) |
@@ -1618,7 +1616,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, | |||
1618 | skb_dst_set(buff, dst); | 1616 | skb_dst_set(buff, dst); |
1619 | idev = in6_dev_get(dst->dev); | 1617 | idev = in6_dev_get(dst->dev); |
1620 | IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); | 1618 | IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); |
1621 | err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev, | 1619 | err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev, |
1622 | dst_output); | 1620 | dst_output); |
1623 | if (!err) { | 1621 | if (!err) { |
1624 | ICMP6MSGOUT_INC_STATS(net, idev, NDISC_REDIRECT); | 1622 | ICMP6MSGOUT_INC_STATS(net, idev, NDISC_REDIRECT); |
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index d5ed92b14346..a74951c039b6 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c | |||
@@ -25,20 +25,6 @@ int ip6_route_me_harder(struct sk_buff *skb) | |||
25 | }; | 25 | }; |
26 | 26 | ||
27 | dst = ip6_route_output(net, skb->sk, &fl); | 27 | dst = ip6_route_output(net, skb->sk, &fl); |
28 | |||
29 | #ifdef CONFIG_XFRM | ||
30 | if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && | ||
31 | xfrm_decode_session(skb, &fl, AF_INET6) == 0) { | ||
32 | struct dst_entry *dst2 = skb_dst(skb); | ||
33 | |||
34 | if (xfrm_lookup(net, &dst2, &fl, skb->sk, 0)) { | ||
35 | skb_dst_set(skb, NULL); | ||
36 | return -1; | ||
37 | } | ||
38 | skb_dst_set(skb, dst2); | ||
39 | } | ||
40 | #endif | ||
41 | |||
42 | if (dst->error) { | 28 | if (dst->error) { |
43 | IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); | 29 | IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); |
44 | LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n"); | 30 | LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n"); |
@@ -50,6 +36,17 @@ int ip6_route_me_harder(struct sk_buff *skb) | |||
50 | skb_dst_drop(skb); | 36 | skb_dst_drop(skb); |
51 | 37 | ||
52 | skb_dst_set(skb, dst); | 38 | skb_dst_set(skb, dst); |
39 | |||
40 | #ifdef CONFIG_XFRM | ||
41 | if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && | ||
42 | xfrm_decode_session(skb, &fl, AF_INET6) == 0) { | ||
43 | skb_dst_set(skb, NULL); | ||
44 | if (xfrm_lookup(net, &dst, &fl, skb->sk, 0)) | ||
45 | return -1; | ||
46 | skb_dst_set(skb, dst); | ||
47 | } | ||
48 | #endif | ||
49 | |||
53 | return 0; | 50 | return 0; |
54 | } | 51 | } |
55 | EXPORT_SYMBOL(ip6_route_me_harder); | 52 | EXPORT_SYMBOL(ip6_route_me_harder); |
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index 6a68a74d14a3..8c201743d96d 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c | |||
@@ -162,8 +162,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp) | |||
162 | break; | 162 | break; |
163 | 163 | ||
164 | case IPQ_COPY_PACKET: | 164 | case IPQ_COPY_PACKET: |
165 | if ((entry->skb->ip_summed == CHECKSUM_PARTIAL || | 165 | if (entry->skb->ip_summed == CHECKSUM_PARTIAL && |
166 | entry->skb->ip_summed == CHECKSUM_COMPLETE) && | ||
167 | (*errp = skb_checksum_help(entry->skb))) { | 166 | (*errp = skb_checksum_help(entry->skb))) { |
168 | read_unlock_bh(&queue_lock); | 167 | read_unlock_bh(&queue_lock); |
169 | return NULL; | 168 | return NULL; |
@@ -463,7 +462,6 @@ __ipq_rcv_skb(struct sk_buff *skb) | |||
463 | 462 | ||
464 | if (flags & NLM_F_ACK) | 463 | if (flags & NLM_F_ACK) |
465 | netlink_ack(skb, nlh, 0); | 464 | netlink_ack(skb, nlh, 0); |
466 | return; | ||
467 | } | 465 | } |
468 | 466 | ||
469 | static void | 467 | static void |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 9210e312edf1..9d2d68f0e605 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -40,24 +40,19 @@ MODULE_DESCRIPTION("IPv6 packet filter"); | |||
40 | /*#define DEBUG_IP_FIREWALL_USER*/ | 40 | /*#define DEBUG_IP_FIREWALL_USER*/ |
41 | 41 | ||
42 | #ifdef DEBUG_IP_FIREWALL | 42 | #ifdef DEBUG_IP_FIREWALL |
43 | #define dprintf(format, args...) printk(format , ## args) | 43 | #define dprintf(format, args...) pr_info(format , ## args) |
44 | #else | 44 | #else |
45 | #define dprintf(format, args...) | 45 | #define dprintf(format, args...) |
46 | #endif | 46 | #endif |
47 | 47 | ||
48 | #ifdef DEBUG_IP_FIREWALL_USER | 48 | #ifdef DEBUG_IP_FIREWALL_USER |
49 | #define duprintf(format, args...) printk(format , ## args) | 49 | #define duprintf(format, args...) pr_info(format , ## args) |
50 | #else | 50 | #else |
51 | #define duprintf(format, args...) | 51 | #define duprintf(format, args...) |
52 | #endif | 52 | #endif |
53 | 53 | ||
54 | #ifdef CONFIG_NETFILTER_DEBUG | 54 | #ifdef CONFIG_NETFILTER_DEBUG |
55 | #define IP_NF_ASSERT(x) \ | 55 | #define IP_NF_ASSERT(x) WARN_ON(!(x)) |
56 | do { \ | ||
57 | if (!(x)) \ | ||
58 | printk("IP_NF_ASSERT: %s:%s:%u\n", \ | ||
59 | __func__, __FILE__, __LINE__); \ | ||
60 | } while(0) | ||
61 | #else | 56 | #else |
62 | #define IP_NF_ASSERT(x) | 57 | #define IP_NF_ASSERT(x) |
63 | #endif | 58 | #endif |
@@ -197,30 +192,14 @@ ip6_checkentry(const struct ip6t_ip6 *ipv6) | |||
197 | } | 192 | } |
198 | 193 | ||
199 | static unsigned int | 194 | static unsigned int |
200 | ip6t_error(struct sk_buff *skb, const struct xt_target_param *par) | 195 | ip6t_error(struct sk_buff *skb, const struct xt_action_param *par) |
201 | { | 196 | { |
202 | if (net_ratelimit()) | 197 | if (net_ratelimit()) |
203 | printk("ip6_tables: error: `%s'\n", | 198 | pr_info("error: `%s'\n", (const char *)par->targinfo); |
204 | (const char *)par->targinfo); | ||
205 | 199 | ||
206 | return NF_DROP; | 200 | return NF_DROP; |
207 | } | 201 | } |
208 | 202 | ||
209 | /* Performance critical - called for every packet */ | ||
210 | static inline bool | ||
211 | do_match(const struct ip6t_entry_match *m, const struct sk_buff *skb, | ||
212 | struct xt_match_param *par) | ||
213 | { | ||
214 | par->match = m->u.kernel.match; | ||
215 | par->matchinfo = m->data; | ||
216 | |||
217 | /* Stop iteration if it doesn't match */ | ||
218 | if (!m->u.kernel.match->match(skb, par)) | ||
219 | return true; | ||
220 | else | ||
221 | return false; | ||
222 | } | ||
223 | |||
224 | static inline struct ip6t_entry * | 203 | static inline struct ip6t_entry * |
225 | get_entry(const void *base, unsigned int offset) | 204 | get_entry(const void *base, unsigned int offset) |
226 | { | 205 | { |
@@ -352,18 +331,15 @@ ip6t_do_table(struct sk_buff *skb, | |||
352 | const struct net_device *out, | 331 | const struct net_device *out, |
353 | struct xt_table *table) | 332 | struct xt_table *table) |
354 | { | 333 | { |
355 | #define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom | ||
356 | |||
357 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); | 334 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); |
358 | bool hotdrop = false; | ||
359 | /* Initializing verdict to NF_DROP keeps gcc happy. */ | 335 | /* Initializing verdict to NF_DROP keeps gcc happy. */ |
360 | unsigned int verdict = NF_DROP; | 336 | unsigned int verdict = NF_DROP; |
361 | const char *indev, *outdev; | 337 | const char *indev, *outdev; |
362 | const void *table_base; | 338 | const void *table_base; |
363 | struct ip6t_entry *e, *back; | 339 | struct ip6t_entry *e, **jumpstack; |
340 | unsigned int *stackptr, origptr, cpu; | ||
364 | const struct xt_table_info *private; | 341 | const struct xt_table_info *private; |
365 | struct xt_match_param mtpar; | 342 | struct xt_action_param acpar; |
366 | struct xt_target_param tgpar; | ||
367 | 343 | ||
368 | /* Initialization */ | 344 | /* Initialization */ |
369 | indev = in ? in->name : nulldevname; | 345 | indev = in ? in->name : nulldevname; |
@@ -374,39 +350,42 @@ ip6t_do_table(struct sk_buff *skb, | |||
374 | * things we don't know, ie. tcp syn flag or ports). If the | 350 | * things we don't know, ie. tcp syn flag or ports). If the |
375 | * rule is also a fragment-specific rule, non-fragments won't | 351 | * rule is also a fragment-specific rule, non-fragments won't |
376 | * match it. */ | 352 | * match it. */ |
377 | mtpar.hotdrop = &hotdrop; | 353 | acpar.hotdrop = false; |
378 | mtpar.in = tgpar.in = in; | 354 | acpar.in = in; |
379 | mtpar.out = tgpar.out = out; | 355 | acpar.out = out; |
380 | mtpar.family = tgpar.family = NFPROTO_IPV6; | 356 | acpar.family = NFPROTO_IPV6; |
381 | mtpar.hooknum = tgpar.hooknum = hook; | 357 | acpar.hooknum = hook; |
382 | 358 | ||
383 | IP_NF_ASSERT(table->valid_hooks & (1 << hook)); | 359 | IP_NF_ASSERT(table->valid_hooks & (1 << hook)); |
384 | 360 | ||
385 | xt_info_rdlock_bh(); | 361 | xt_info_rdlock_bh(); |
386 | private = table->private; | 362 | private = table->private; |
387 | table_base = private->entries[smp_processor_id()]; | 363 | cpu = smp_processor_id(); |
364 | table_base = private->entries[cpu]; | ||
365 | jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; | ||
366 | stackptr = per_cpu_ptr(private->stackptr, cpu); | ||
367 | origptr = *stackptr; | ||
388 | 368 | ||
389 | e = get_entry(table_base, private->hook_entry[hook]); | 369 | e = get_entry(table_base, private->hook_entry[hook]); |
390 | 370 | ||
391 | /* For return from builtin chain */ | ||
392 | back = get_entry(table_base, private->underflow[hook]); | ||
393 | |||
394 | do { | 371 | do { |
395 | const struct ip6t_entry_target *t; | 372 | const struct ip6t_entry_target *t; |
396 | const struct xt_entry_match *ematch; | 373 | const struct xt_entry_match *ematch; |
397 | 374 | ||
398 | IP_NF_ASSERT(e); | 375 | IP_NF_ASSERT(e); |
399 | IP_NF_ASSERT(back); | ||
400 | if (!ip6_packet_match(skb, indev, outdev, &e->ipv6, | 376 | if (!ip6_packet_match(skb, indev, outdev, &e->ipv6, |
401 | &mtpar.thoff, &mtpar.fragoff, &hotdrop)) { | 377 | &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) { |
402 | no_match: | 378 | no_match: |
403 | e = ip6t_next_entry(e); | 379 | e = ip6t_next_entry(e); |
404 | continue; | 380 | continue; |
405 | } | 381 | } |
406 | 382 | ||
407 | xt_ematch_foreach(ematch, e) | 383 | xt_ematch_foreach(ematch, e) { |
408 | if (do_match(ematch, skb, &mtpar) != 0) | 384 | acpar.match = ematch->u.kernel.match; |
385 | acpar.matchinfo = ematch->data; | ||
386 | if (!acpar.match->match(skb, &acpar)) | ||
409 | goto no_match; | 387 | goto no_match; |
388 | } | ||
410 | 389 | ||
411 | ADD_COUNTER(e->counters, | 390 | ADD_COUNTER(e->counters, |
412 | ntohs(ipv6_hdr(skb)->payload_len) + | 391 | ntohs(ipv6_hdr(skb)->payload_len) + |
@@ -433,62 +412,47 @@ ip6t_do_table(struct sk_buff *skb, | |||
433 | verdict = (unsigned)(-v) - 1; | 412 | verdict = (unsigned)(-v) - 1; |
434 | break; | 413 | break; |
435 | } | 414 | } |
436 | e = back; | 415 | if (*stackptr == 0) |
437 | back = get_entry(table_base, back->comefrom); | 416 | e = get_entry(table_base, |
417 | private->underflow[hook]); | ||
418 | else | ||
419 | e = ip6t_next_entry(jumpstack[--*stackptr]); | ||
438 | continue; | 420 | continue; |
439 | } | 421 | } |
440 | if (table_base + v != ip6t_next_entry(e) && | 422 | if (table_base + v != ip6t_next_entry(e) && |
441 | !(e->ipv6.flags & IP6T_F_GOTO)) { | 423 | !(e->ipv6.flags & IP6T_F_GOTO)) { |
442 | /* Save old back ptr in next entry */ | 424 | if (*stackptr >= private->stacksize) { |
443 | struct ip6t_entry *next = ip6t_next_entry(e); | 425 | verdict = NF_DROP; |
444 | next->comefrom = (void *)back - table_base; | 426 | break; |
445 | /* set back pointer to next entry */ | 427 | } |
446 | back = next; | 428 | jumpstack[(*stackptr)++] = e; |
447 | } | 429 | } |
448 | 430 | ||
449 | e = get_entry(table_base, v); | 431 | e = get_entry(table_base, v); |
450 | continue; | 432 | continue; |
451 | } | 433 | } |
452 | 434 | ||
453 | /* Targets which reenter must return | 435 | acpar.target = t->u.kernel.target; |
454 | abs. verdicts */ | 436 | acpar.targinfo = t->data; |
455 | tgpar.target = t->u.kernel.target; | ||
456 | tgpar.targinfo = t->data; | ||
457 | |||
458 | #ifdef CONFIG_NETFILTER_DEBUG | ||
459 | tb_comefrom = 0xeeeeeeec; | ||
460 | #endif | ||
461 | verdict = t->u.kernel.target->target(skb, &tgpar); | ||
462 | 437 | ||
463 | #ifdef CONFIG_NETFILTER_DEBUG | 438 | verdict = t->u.kernel.target->target(skb, &acpar); |
464 | if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) { | ||
465 | printk("Target %s reentered!\n", | ||
466 | t->u.kernel.target->name); | ||
467 | verdict = NF_DROP; | ||
468 | } | ||
469 | tb_comefrom = 0x57acc001; | ||
470 | #endif | ||
471 | if (verdict == IP6T_CONTINUE) | 439 | if (verdict == IP6T_CONTINUE) |
472 | e = ip6t_next_entry(e); | 440 | e = ip6t_next_entry(e); |
473 | else | 441 | else |
474 | /* Verdict */ | 442 | /* Verdict */ |
475 | break; | 443 | break; |
476 | } while (!hotdrop); | 444 | } while (!acpar.hotdrop); |
477 | 445 | ||
478 | #ifdef CONFIG_NETFILTER_DEBUG | ||
479 | tb_comefrom = NETFILTER_LINK_POISON; | ||
480 | #endif | ||
481 | xt_info_rdunlock_bh(); | 446 | xt_info_rdunlock_bh(); |
447 | *stackptr = origptr; | ||
482 | 448 | ||
483 | #ifdef DEBUG_ALLOW_ALL | 449 | #ifdef DEBUG_ALLOW_ALL |
484 | return NF_ACCEPT; | 450 | return NF_ACCEPT; |
485 | #else | 451 | #else |
486 | if (hotdrop) | 452 | if (acpar.hotdrop) |
487 | return NF_DROP; | 453 | return NF_DROP; |
488 | else return verdict; | 454 | else return verdict; |
489 | #endif | 455 | #endif |
490 | |||
491 | #undef tb_comefrom | ||
492 | } | 456 | } |
493 | 457 | ||
494 | /* Figures out from what hook each rule can be called: returns 0 if | 458 | /* Figures out from what hook each rule can be called: returns 0 if |
@@ -517,7 +481,7 @@ mark_source_chains(const struct xt_table_info *newinfo, | |||
517 | int visited = e->comefrom & (1 << hook); | 481 | int visited = e->comefrom & (1 << hook); |
518 | 482 | ||
519 | if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { | 483 | if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { |
520 | printk("iptables: loop hook %u pos %u %08X.\n", | 484 | pr_err("iptables: loop hook %u pos %u %08X.\n", |
521 | hook, pos, e->comefrom); | 485 | hook, pos, e->comefrom); |
522 | return 0; | 486 | return 0; |
523 | } | 487 | } |
@@ -661,12 +625,11 @@ find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par) | |||
661 | struct xt_match *match; | 625 | struct xt_match *match; |
662 | int ret; | 626 | int ret; |
663 | 627 | ||
664 | match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name, | 628 | match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, |
665 | m->u.user.revision), | 629 | m->u.user.revision); |
666 | "ip6t_%s", m->u.user.name); | 630 | if (IS_ERR(match)) { |
667 | if (IS_ERR(match) || !match) { | ||
668 | duprintf("find_check_match: `%s' not found\n", m->u.user.name); | 631 | duprintf("find_check_match: `%s' not found\n", m->u.user.name); |
669 | return match ? PTR_ERR(match) : -ENOENT; | 632 | return PTR_ERR(match); |
670 | } | 633 | } |
671 | m->u.kernel.match = match; | 634 | m->u.kernel.match = match; |
672 | 635 | ||
@@ -734,13 +697,11 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, | |||
734 | } | 697 | } |
735 | 698 | ||
736 | t = ip6t_get_target(e); | 699 | t = ip6t_get_target(e); |
737 | target = try_then_request_module(xt_find_target(AF_INET6, | 700 | target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name, |
738 | t->u.user.name, | 701 | t->u.user.revision); |
739 | t->u.user.revision), | 702 | if (IS_ERR(target)) { |
740 | "ip6t_%s", t->u.user.name); | ||
741 | if (IS_ERR(target) || !target) { | ||
742 | duprintf("find_check_entry: `%s' not found\n", t->u.user.name); | 703 | duprintf("find_check_entry: `%s' not found\n", t->u.user.name); |
743 | ret = target ? PTR_ERR(target) : -ENOENT; | 704 | ret = PTR_ERR(target); |
744 | goto cleanup_matches; | 705 | goto cleanup_matches; |
745 | } | 706 | } |
746 | t->u.kernel.target = target; | 707 | t->u.kernel.target = target; |
@@ -873,6 +834,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, | |||
873 | if (ret != 0) | 834 | if (ret != 0) |
874 | return ret; | 835 | return ret; |
875 | ++i; | 836 | ++i; |
837 | if (strcmp(ip6t_get_target(iter)->u.user.name, | ||
838 | XT_ERROR_TARGET) == 0) | ||
839 | ++newinfo->stacksize; | ||
876 | } | 840 | } |
877 | 841 | ||
878 | if (i != repl->num_entries) { | 842 | if (i != repl->num_entries) { |
@@ -1509,13 +1473,12 @@ compat_find_calc_match(struct ip6t_entry_match *m, | |||
1509 | { | 1473 | { |
1510 | struct xt_match *match; | 1474 | struct xt_match *match; |
1511 | 1475 | ||
1512 | match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name, | 1476 | match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, |
1513 | m->u.user.revision), | 1477 | m->u.user.revision); |
1514 | "ip6t_%s", m->u.user.name); | 1478 | if (IS_ERR(match)) { |
1515 | if (IS_ERR(match) || !match) { | ||
1516 | duprintf("compat_check_calc_match: `%s' not found\n", | 1479 | duprintf("compat_check_calc_match: `%s' not found\n", |
1517 | m->u.user.name); | 1480 | m->u.user.name); |
1518 | return match ? PTR_ERR(match) : -ENOENT; | 1481 | return PTR_ERR(match); |
1519 | } | 1482 | } |
1520 | m->u.kernel.match = match; | 1483 | m->u.kernel.match = match; |
1521 | *size += xt_compat_match_offset(match); | 1484 | *size += xt_compat_match_offset(match); |
@@ -1582,14 +1545,12 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, | |||
1582 | } | 1545 | } |
1583 | 1546 | ||
1584 | t = compat_ip6t_get_target(e); | 1547 | t = compat_ip6t_get_target(e); |
1585 | target = try_then_request_module(xt_find_target(AF_INET6, | 1548 | target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name, |
1586 | t->u.user.name, | 1549 | t->u.user.revision); |
1587 | t->u.user.revision), | 1550 | if (IS_ERR(target)) { |
1588 | "ip6t_%s", t->u.user.name); | ||
1589 | if (IS_ERR(target) || !target) { | ||
1590 | duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", | 1551 | duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", |
1591 | t->u.user.name); | 1552 | t->u.user.name); |
1592 | ret = target ? PTR_ERR(target) : -ENOENT; | 1553 | ret = PTR_ERR(target); |
1593 | goto release_matches; | 1554 | goto release_matches; |
1594 | } | 1555 | } |
1595 | t->u.kernel.target = target; | 1556 | t->u.kernel.target = target; |
@@ -2127,8 +2088,7 @@ struct xt_table *ip6t_register_table(struct net *net, | |||
2127 | { | 2088 | { |
2128 | int ret; | 2089 | int ret; |
2129 | struct xt_table_info *newinfo; | 2090 | struct xt_table_info *newinfo; |
2130 | struct xt_table_info bootstrap | 2091 | struct xt_table_info bootstrap = {0}; |
2131 | = { 0, 0, 0, { 0 }, { 0 }, { } }; | ||
2132 | void *loc_cpu_entry; | 2092 | void *loc_cpu_entry; |
2133 | struct xt_table *new_table; | 2093 | struct xt_table *new_table; |
2134 | 2094 | ||
@@ -2188,7 +2148,7 @@ icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code, | |||
2188 | } | 2148 | } |
2189 | 2149 | ||
2190 | static bool | 2150 | static bool |
2191 | icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par) | 2151 | icmp6_match(const struct sk_buff *skb, struct xt_action_param *par) |
2192 | { | 2152 | { |
2193 | const struct icmp6hdr *ic; | 2153 | const struct icmp6hdr *ic; |
2194 | struct icmp6hdr _icmph; | 2154 | struct icmp6hdr _icmph; |
@@ -2204,7 +2164,7 @@ icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par) | |||
2204 | * can't. Hence, no choice but to drop. | 2164 | * can't. Hence, no choice but to drop. |
2205 | */ | 2165 | */ |
2206 | duprintf("Dropping evil ICMP tinygram.\n"); | 2166 | duprintf("Dropping evil ICMP tinygram.\n"); |
2207 | *par->hotdrop = true; | 2167 | par->hotdrop = true; |
2208 | return false; | 2168 | return false; |
2209 | } | 2169 | } |
2210 | 2170 | ||
@@ -2216,31 +2176,32 @@ icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par) | |||
2216 | } | 2176 | } |
2217 | 2177 | ||
2218 | /* Called when user tries to insert an entry of this type. */ | 2178 | /* Called when user tries to insert an entry of this type. */ |
2219 | static bool icmp6_checkentry(const struct xt_mtchk_param *par) | 2179 | static int icmp6_checkentry(const struct xt_mtchk_param *par) |
2220 | { | 2180 | { |
2221 | const struct ip6t_icmp *icmpinfo = par->matchinfo; | 2181 | const struct ip6t_icmp *icmpinfo = par->matchinfo; |
2222 | 2182 | ||
2223 | /* Must specify no unknown invflags */ | 2183 | /* Must specify no unknown invflags */ |
2224 | return !(icmpinfo->invflags & ~IP6T_ICMP_INV); | 2184 | return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0; |
2225 | } | 2185 | } |
2226 | 2186 | ||
2227 | /* The built-in targets: standard (NULL) and error. */ | 2187 | /* The built-in targets: standard (NULL) and error. */ |
2228 | static struct xt_target ip6t_standard_target __read_mostly = { | 2188 | static struct xt_target ip6t_builtin_tg[] __read_mostly = { |
2229 | .name = IP6T_STANDARD_TARGET, | 2189 | { |
2230 | .targetsize = sizeof(int), | 2190 | .name = IP6T_STANDARD_TARGET, |
2231 | .family = NFPROTO_IPV6, | 2191 | .targetsize = sizeof(int), |
2192 | .family = NFPROTO_IPV6, | ||
2232 | #ifdef CONFIG_COMPAT | 2193 | #ifdef CONFIG_COMPAT |
2233 | .compatsize = sizeof(compat_int_t), | 2194 | .compatsize = sizeof(compat_int_t), |
2234 | .compat_from_user = compat_standard_from_user, | 2195 | .compat_from_user = compat_standard_from_user, |
2235 | .compat_to_user = compat_standard_to_user, | 2196 | .compat_to_user = compat_standard_to_user, |
2236 | #endif | 2197 | #endif |
2237 | }; | 2198 | }, |
2238 | 2199 | { | |
2239 | static struct xt_target ip6t_error_target __read_mostly = { | 2200 | .name = IP6T_ERROR_TARGET, |
2240 | .name = IP6T_ERROR_TARGET, | 2201 | .target = ip6t_error, |
2241 | .target = ip6t_error, | 2202 | .targetsize = IP6T_FUNCTION_MAXNAMELEN, |
2242 | .targetsize = IP6T_FUNCTION_MAXNAMELEN, | 2203 | .family = NFPROTO_IPV6, |
2243 | .family = NFPROTO_IPV6, | 2204 | }, |
2244 | }; | 2205 | }; |
2245 | 2206 | ||
2246 | static struct nf_sockopt_ops ip6t_sockopts = { | 2207 | static struct nf_sockopt_ops ip6t_sockopts = { |
@@ -2260,13 +2221,15 @@ static struct nf_sockopt_ops ip6t_sockopts = { | |||
2260 | .owner = THIS_MODULE, | 2221 | .owner = THIS_MODULE, |
2261 | }; | 2222 | }; |
2262 | 2223 | ||
2263 | static struct xt_match icmp6_matchstruct __read_mostly = { | 2224 | static struct xt_match ip6t_builtin_mt[] __read_mostly = { |
2264 | .name = "icmp6", | 2225 | { |
2265 | .match = icmp6_match, | 2226 | .name = "icmp6", |
2266 | .matchsize = sizeof(struct ip6t_icmp), | 2227 | .match = icmp6_match, |
2267 | .checkentry = icmp6_checkentry, | 2228 | .matchsize = sizeof(struct ip6t_icmp), |
2268 | .proto = IPPROTO_ICMPV6, | 2229 | .checkentry = icmp6_checkentry, |
2269 | .family = NFPROTO_IPV6, | 2230 | .proto = IPPROTO_ICMPV6, |
2231 | .family = NFPROTO_IPV6, | ||
2232 | }, | ||
2270 | }; | 2233 | }; |
2271 | 2234 | ||
2272 | static int __net_init ip6_tables_net_init(struct net *net) | 2235 | static int __net_init ip6_tables_net_init(struct net *net) |
@@ -2293,13 +2256,10 @@ static int __init ip6_tables_init(void) | |||
2293 | goto err1; | 2256 | goto err1; |
2294 | 2257 | ||
2295 | /* Noone else will be downing sem now, so we won't sleep */ | 2258 | /* Noone else will be downing sem now, so we won't sleep */ |
2296 | ret = xt_register_target(&ip6t_standard_target); | 2259 | ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); |
2297 | if (ret < 0) | 2260 | if (ret < 0) |
2298 | goto err2; | 2261 | goto err2; |
2299 | ret = xt_register_target(&ip6t_error_target); | 2262 | ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); |
2300 | if (ret < 0) | ||
2301 | goto err3; | ||
2302 | ret = xt_register_match(&icmp6_matchstruct); | ||
2303 | if (ret < 0) | 2263 | if (ret < 0) |
2304 | goto err4; | 2264 | goto err4; |
2305 | 2265 | ||
@@ -2308,15 +2268,13 @@ static int __init ip6_tables_init(void) | |||
2308 | if (ret < 0) | 2268 | if (ret < 0) |
2309 | goto err5; | 2269 | goto err5; |
2310 | 2270 | ||
2311 | printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n"); | 2271 | pr_info("(C) 2000-2006 Netfilter Core Team\n"); |
2312 | return 0; | 2272 | return 0; |
2313 | 2273 | ||
2314 | err5: | 2274 | err5: |
2315 | xt_unregister_match(&icmp6_matchstruct); | 2275 | xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); |
2316 | err4: | 2276 | err4: |
2317 | xt_unregister_target(&ip6t_error_target); | 2277 | xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); |
2318 | err3: | ||
2319 | xt_unregister_target(&ip6t_standard_target); | ||
2320 | err2: | 2278 | err2: |
2321 | unregister_pernet_subsys(&ip6_tables_net_ops); | 2279 | unregister_pernet_subsys(&ip6_tables_net_ops); |
2322 | err1: | 2280 | err1: |
@@ -2327,10 +2285,8 @@ static void __exit ip6_tables_fini(void) | |||
2327 | { | 2285 | { |
2328 | nf_unregister_sockopt(&ip6t_sockopts); | 2286 | nf_unregister_sockopt(&ip6t_sockopts); |
2329 | 2287 | ||
2330 | xt_unregister_match(&icmp6_matchstruct); | 2288 | xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); |
2331 | xt_unregister_target(&ip6t_error_target); | 2289 | xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); |
2332 | xt_unregister_target(&ip6t_standard_target); | ||
2333 | |||
2334 | unregister_pernet_subsys(&ip6_tables_net_ops); | 2290 | unregister_pernet_subsys(&ip6_tables_net_ops); |
2335 | } | 2291 | } |
2336 | 2292 | ||
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c index b285fdf19050..af4ee11f2066 100644 --- a/net/ipv6/netfilter/ip6t_LOG.c +++ b/net/ipv6/netfilter/ip6t_LOG.c | |||
@@ -9,9 +9,8 @@ | |||
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/moduleparam.h> | ||
15 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
16 | #include <linux/if_arp.h> | 15 | #include <linux/if_arp.h> |
17 | #include <linux/ip.h> | 16 | #include <linux/ip.h> |
@@ -378,7 +377,7 @@ static struct nf_loginfo default_loginfo = { | |||
378 | .type = NF_LOG_TYPE_LOG, | 377 | .type = NF_LOG_TYPE_LOG, |
379 | .u = { | 378 | .u = { |
380 | .log = { | 379 | .log = { |
381 | .level = 0, | 380 | .level = 5, |
382 | .logflags = NF_LOG_MASK, | 381 | .logflags = NF_LOG_MASK, |
383 | }, | 382 | }, |
384 | }, | 383 | }, |
@@ -437,7 +436,7 @@ ip6t_log_packet(u_int8_t pf, | |||
437 | } | 436 | } |
438 | 437 | ||
439 | static unsigned int | 438 | static unsigned int |
440 | log_tg6(struct sk_buff *skb, const struct xt_target_param *par) | 439 | log_tg6(struct sk_buff *skb, const struct xt_action_param *par) |
441 | { | 440 | { |
442 | const struct ip6t_log_info *loginfo = par->targinfo; | 441 | const struct ip6t_log_info *loginfo = par->targinfo; |
443 | struct nf_loginfo li; | 442 | struct nf_loginfo li; |
@@ -452,20 +451,19 @@ log_tg6(struct sk_buff *skb, const struct xt_target_param *par) | |||
452 | } | 451 | } |
453 | 452 | ||
454 | 453 | ||
455 | static bool log_tg6_check(const struct xt_tgchk_param *par) | 454 | static int log_tg6_check(const struct xt_tgchk_param *par) |
456 | { | 455 | { |
457 | const struct ip6t_log_info *loginfo = par->targinfo; | 456 | const struct ip6t_log_info *loginfo = par->targinfo; |
458 | 457 | ||
459 | if (loginfo->level >= 8) { | 458 | if (loginfo->level >= 8) { |
460 | pr_debug("LOG: level %u >= 8\n", loginfo->level); | 459 | pr_debug("level %u >= 8\n", loginfo->level); |
461 | return false; | 460 | return -EINVAL; |
462 | } | 461 | } |
463 | if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') { | 462 | if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') { |
464 | pr_debug("LOG: prefix term %i\n", | 463 | pr_debug("prefix not null-terminated\n"); |
465 | loginfo->prefix[sizeof(loginfo->prefix)-1]); | 464 | return -EINVAL; |
466 | return false; | ||
467 | } | 465 | } |
468 | return true; | 466 | return 0; |
469 | } | 467 | } |
470 | 468 | ||
471 | static struct xt_target log_tg6_reg __read_mostly = { | 469 | static struct xt_target log_tg6_reg __read_mostly = { |
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c index 39b50c3768e8..47d227713758 100644 --- a/net/ipv6/netfilter/ip6t_REJECT.c +++ b/net/ipv6/netfilter/ip6t_REJECT.c | |||
@@ -14,6 +14,7 @@ | |||
14 | * as published by the Free Software Foundation; either version | 14 | * as published by the Free Software Foundation; either version |
15 | * 2 of the License, or (at your option) any later version. | 15 | * 2 of the License, or (at your option) any later version. |
16 | */ | 16 | */ |
17 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
17 | 18 | ||
18 | #include <linux/gfp.h> | 19 | #include <linux/gfp.h> |
19 | #include <linux/module.h> | 20 | #include <linux/module.h> |
@@ -50,7 +51,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) | |||
50 | 51 | ||
51 | if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) || | 52 | if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) || |
52 | (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) { | 53 | (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) { |
53 | pr_debug("ip6t_REJECT: addr is not unicast.\n"); | 54 | pr_debug("addr is not unicast.\n"); |
54 | return; | 55 | return; |
55 | } | 56 | } |
56 | 57 | ||
@@ -58,7 +59,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) | |||
58 | tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto); | 59 | tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto); |
59 | 60 | ||
60 | if ((tcphoff < 0) || (tcphoff > oldskb->len)) { | 61 | if ((tcphoff < 0) || (tcphoff > oldskb->len)) { |
61 | pr_debug("ip6t_REJECT: Can't get TCP header.\n"); | 62 | pr_debug("Cannot get TCP header.\n"); |
62 | return; | 63 | return; |
63 | } | 64 | } |
64 | 65 | ||
@@ -66,7 +67,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) | |||
66 | 67 | ||
67 | /* IP header checks: fragment, too short. */ | 68 | /* IP header checks: fragment, too short. */ |
68 | if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) { | 69 | if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) { |
69 | pr_debug("ip6t_REJECT: proto(%d) != IPPROTO_TCP, " | 70 | pr_debug("proto(%d) != IPPROTO_TCP, " |
70 | "or too short. otcplen = %d\n", | 71 | "or too short. otcplen = %d\n", |
71 | proto, otcplen); | 72 | proto, otcplen); |
72 | return; | 73 | return; |
@@ -77,14 +78,14 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) | |||
77 | 78 | ||
78 | /* No RST for RST. */ | 79 | /* No RST for RST. */ |
79 | if (otcph.rst) { | 80 | if (otcph.rst) { |
80 | pr_debug("ip6t_REJECT: RST is set\n"); | 81 | pr_debug("RST is set\n"); |
81 | return; | 82 | return; |
82 | } | 83 | } |
83 | 84 | ||
84 | /* Check checksum. */ | 85 | /* Check checksum. */ |
85 | if (csum_ipv6_magic(&oip6h->saddr, &oip6h->daddr, otcplen, IPPROTO_TCP, | 86 | if (csum_ipv6_magic(&oip6h->saddr, &oip6h->daddr, otcplen, IPPROTO_TCP, |
86 | skb_checksum(oldskb, tcphoff, otcplen, 0))) { | 87 | skb_checksum(oldskb, tcphoff, otcplen, 0))) { |
87 | pr_debug("ip6t_REJECT: TCP checksum is invalid\n"); | 88 | pr_debug("TCP checksum is invalid\n"); |
88 | return; | 89 | return; |
89 | } | 90 | } |
90 | 91 | ||
@@ -108,7 +109,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) | |||
108 | 109 | ||
109 | if (!nskb) { | 110 | if (!nskb) { |
110 | if (net_ratelimit()) | 111 | if (net_ratelimit()) |
111 | printk("ip6t_REJECT: Can't alloc skb\n"); | 112 | pr_debug("cannot alloc skb\n"); |
112 | dst_release(dst); | 113 | dst_release(dst); |
113 | return; | 114 | return; |
114 | } | 115 | } |
@@ -174,15 +175,12 @@ send_unreach(struct net *net, struct sk_buff *skb_in, unsigned char code, | |||
174 | } | 175 | } |
175 | 176 | ||
176 | static unsigned int | 177 | static unsigned int |
177 | reject_tg6(struct sk_buff *skb, const struct xt_target_param *par) | 178 | reject_tg6(struct sk_buff *skb, const struct xt_action_param *par) |
178 | { | 179 | { |
179 | const struct ip6t_reject_info *reject = par->targinfo; | 180 | const struct ip6t_reject_info *reject = par->targinfo; |
180 | struct net *net = dev_net((par->in != NULL) ? par->in : par->out); | 181 | struct net *net = dev_net((par->in != NULL) ? par->in : par->out); |
181 | 182 | ||
182 | pr_debug("%s: medium point\n", __func__); | 183 | pr_debug("%s: medium point\n", __func__); |
183 | /* WARNING: This code causes reentry within ip6tables. | ||
184 | This means that the ip6tables jump stack is now crap. We | ||
185 | must return an absolute verdict. --RR */ | ||
186 | switch (reject->with) { | 184 | switch (reject->with) { |
187 | case IP6T_ICMP6_NO_ROUTE: | 185 | case IP6T_ICMP6_NO_ROUTE: |
188 | send_unreach(net, skb, ICMPV6_NOROUTE, par->hooknum); | 186 | send_unreach(net, skb, ICMPV6_NOROUTE, par->hooknum); |
@@ -207,30 +205,30 @@ reject_tg6(struct sk_buff *skb, const struct xt_target_param *par) | |||
207 | break; | 205 | break; |
208 | default: | 206 | default: |
209 | if (net_ratelimit()) | 207 | if (net_ratelimit()) |
210 | printk(KERN_WARNING "ip6t_REJECT: case %u not handled yet\n", reject->with); | 208 | pr_info("case %u not handled yet\n", reject->with); |
211 | break; | 209 | break; |
212 | } | 210 | } |
213 | 211 | ||
214 | return NF_DROP; | 212 | return NF_DROP; |
215 | } | 213 | } |
216 | 214 | ||
217 | static bool reject_tg6_check(const struct xt_tgchk_param *par) | 215 | static int reject_tg6_check(const struct xt_tgchk_param *par) |
218 | { | 216 | { |
219 | const struct ip6t_reject_info *rejinfo = par->targinfo; | 217 | const struct ip6t_reject_info *rejinfo = par->targinfo; |
220 | const struct ip6t_entry *e = par->entryinfo; | 218 | const struct ip6t_entry *e = par->entryinfo; |
221 | 219 | ||
222 | if (rejinfo->with == IP6T_ICMP6_ECHOREPLY) { | 220 | if (rejinfo->with == IP6T_ICMP6_ECHOREPLY) { |
223 | printk("ip6t_REJECT: ECHOREPLY is not supported.\n"); | 221 | pr_info("ECHOREPLY is not supported.\n"); |
224 | return false; | 222 | return -EINVAL; |
225 | } else if (rejinfo->with == IP6T_TCP_RESET) { | 223 | } else if (rejinfo->with == IP6T_TCP_RESET) { |
226 | /* Must specify that it's a TCP packet */ | 224 | /* Must specify that it's a TCP packet */ |
227 | if (e->ipv6.proto != IPPROTO_TCP || | 225 | if (e->ipv6.proto != IPPROTO_TCP || |
228 | (e->ipv6.invflags & XT_INV_PROTO)) { | 226 | (e->ipv6.invflags & XT_INV_PROTO)) { |
229 | printk("ip6t_REJECT: TCP_RESET illegal for non-tcp\n"); | 227 | pr_info("TCP_RESET illegal for non-tcp\n"); |
230 | return false; | 228 | return -EINVAL; |
231 | } | 229 | } |
232 | } | 230 | } |
233 | return true; | 231 | return 0; |
234 | } | 232 | } |
235 | 233 | ||
236 | static struct xt_target reject_tg6_reg __read_mostly = { | 234 | static struct xt_target reject_tg6_reg __read_mostly = { |
diff --git a/net/ipv6/netfilter/ip6t_ah.c b/net/ipv6/netfilter/ip6t_ah.c index ac0b7c629d78..89cccc5a9c92 100644 --- a/net/ipv6/netfilter/ip6t_ah.c +++ b/net/ipv6/netfilter/ip6t_ah.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | 9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/skbuff.h> | 11 | #include <linux/skbuff.h> |
12 | #include <linux/ip.h> | 12 | #include <linux/ip.h> |
@@ -29,14 +29,14 @@ spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert) | |||
29 | { | 29 | { |
30 | bool r; | 30 | bool r; |
31 | 31 | ||
32 | pr_debug("ah spi_match:%c 0x%x <= 0x%x <= 0x%x", | 32 | pr_debug("spi_match:%c 0x%x <= 0x%x <= 0x%x\n", |
33 | invert ? '!' : ' ', min, spi, max); | 33 | invert ? '!' : ' ', min, spi, max); |
34 | r = (spi >= min && spi <= max) ^ invert; | 34 | r = (spi >= min && spi <= max) ^ invert; |
35 | pr_debug(" result %s\n", r ? "PASS" : "FAILED"); | 35 | pr_debug(" result %s\n", r ? "PASS" : "FAILED"); |
36 | return r; | 36 | return r; |
37 | } | 37 | } |
38 | 38 | ||
39 | static bool ah_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | 39 | static bool ah_mt6(const struct sk_buff *skb, struct xt_action_param *par) |
40 | { | 40 | { |
41 | struct ip_auth_hdr _ah; | 41 | struct ip_auth_hdr _ah; |
42 | const struct ip_auth_hdr *ah; | 42 | const struct ip_auth_hdr *ah; |
@@ -48,13 +48,13 @@ static bool ah_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | |||
48 | err = ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH, NULL); | 48 | err = ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH, NULL); |
49 | if (err < 0) { | 49 | if (err < 0) { |
50 | if (err != -ENOENT) | 50 | if (err != -ENOENT) |
51 | *par->hotdrop = true; | 51 | par->hotdrop = true; |
52 | return false; | 52 | return false; |
53 | } | 53 | } |
54 | 54 | ||
55 | ah = skb_header_pointer(skb, ptr, sizeof(_ah), &_ah); | 55 | ah = skb_header_pointer(skb, ptr, sizeof(_ah), &_ah); |
56 | if (ah == NULL) { | 56 | if (ah == NULL) { |
57 | *par->hotdrop = true; | 57 | par->hotdrop = true; |
58 | return false; | 58 | return false; |
59 | } | 59 | } |
60 | 60 | ||
@@ -87,15 +87,15 @@ static bool ah_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | |||
87 | !(ahinfo->hdrres && ah->reserved); | 87 | !(ahinfo->hdrres && ah->reserved); |
88 | } | 88 | } |
89 | 89 | ||
90 | static bool ah_mt6_check(const struct xt_mtchk_param *par) | 90 | static int ah_mt6_check(const struct xt_mtchk_param *par) |
91 | { | 91 | { |
92 | const struct ip6t_ah *ahinfo = par->matchinfo; | 92 | const struct ip6t_ah *ahinfo = par->matchinfo; |
93 | 93 | ||
94 | if (ahinfo->invflags & ~IP6T_AH_INV_MASK) { | 94 | if (ahinfo->invflags & ~IP6T_AH_INV_MASK) { |
95 | pr_debug("ip6t_ah: unknown flags %X\n", ahinfo->invflags); | 95 | pr_debug("unknown flags %X\n", ahinfo->invflags); |
96 | return false; | 96 | return -EINVAL; |
97 | } | 97 | } |
98 | return true; | 98 | return 0; |
99 | } | 99 | } |
100 | 100 | ||
101 | static struct xt_match ah_mt6_reg __read_mostly = { | 101 | static struct xt_match ah_mt6_reg __read_mostly = { |
diff --git a/net/ipv6/netfilter/ip6t_eui64.c b/net/ipv6/netfilter/ip6t_eui64.c index ca287f6d2bce..aab0706908c5 100644 --- a/net/ipv6/netfilter/ip6t_eui64.c +++ b/net/ipv6/netfilter/ip6t_eui64.c | |||
@@ -20,14 +20,14 @@ MODULE_LICENSE("GPL"); | |||
20 | MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>"); | 20 | MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>"); |
21 | 21 | ||
22 | static bool | 22 | static bool |
23 | eui64_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | 23 | eui64_mt6(const struct sk_buff *skb, struct xt_action_param *par) |
24 | { | 24 | { |
25 | unsigned char eui64[8]; | 25 | unsigned char eui64[8]; |
26 | 26 | ||
27 | if (!(skb_mac_header(skb) >= skb->head && | 27 | if (!(skb_mac_header(skb) >= skb->head && |
28 | skb_mac_header(skb) + ETH_HLEN <= skb->data) && | 28 | skb_mac_header(skb) + ETH_HLEN <= skb->data) && |
29 | par->fragoff != 0) { | 29 | par->fragoff != 0) { |
30 | *par->hotdrop = true; | 30 | par->hotdrop = true; |
31 | return false; | 31 | return false; |
32 | } | 32 | } |
33 | 33 | ||
diff --git a/net/ipv6/netfilter/ip6t_frag.c b/net/ipv6/netfilter/ip6t_frag.c index 7b91c2598ed5..eda898fda6ca 100644 --- a/net/ipv6/netfilter/ip6t_frag.c +++ b/net/ipv6/netfilter/ip6t_frag.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | 9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/skbuff.h> | 11 | #include <linux/skbuff.h> |
12 | #include <linux/ipv6.h> | 12 | #include <linux/ipv6.h> |
@@ -27,7 +27,7 @@ static inline bool | |||
27 | id_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert) | 27 | id_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert) |
28 | { | 28 | { |
29 | bool r; | 29 | bool r; |
30 | pr_debug("frag id_match:%c 0x%x <= 0x%x <= 0x%x", invert ? '!' : ' ', | 30 | pr_debug("id_match:%c 0x%x <= 0x%x <= 0x%x\n", invert ? '!' : ' ', |
31 | min, id, max); | 31 | min, id, max); |
32 | r = (id >= min && id <= max) ^ invert; | 32 | r = (id >= min && id <= max) ^ invert; |
33 | pr_debug(" result %s\n", r ? "PASS" : "FAILED"); | 33 | pr_debug(" result %s\n", r ? "PASS" : "FAILED"); |
@@ -35,7 +35,7 @@ id_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert) | |||
35 | } | 35 | } |
36 | 36 | ||
37 | static bool | 37 | static bool |
38 | frag_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | 38 | frag_mt6(const struct sk_buff *skb, struct xt_action_param *par) |
39 | { | 39 | { |
40 | struct frag_hdr _frag; | 40 | struct frag_hdr _frag; |
41 | const struct frag_hdr *fh; | 41 | const struct frag_hdr *fh; |
@@ -46,13 +46,13 @@ frag_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | |||
46 | err = ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL); | 46 | err = ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL); |
47 | if (err < 0) { | 47 | if (err < 0) { |
48 | if (err != -ENOENT) | 48 | if (err != -ENOENT) |
49 | *par->hotdrop = true; | 49 | par->hotdrop = true; |
50 | return false; | 50 | return false; |
51 | } | 51 | } |
52 | 52 | ||
53 | fh = skb_header_pointer(skb, ptr, sizeof(_frag), &_frag); | 53 | fh = skb_header_pointer(skb, ptr, sizeof(_frag), &_frag); |
54 | if (fh == NULL) { | 54 | if (fh == NULL) { |
55 | *par->hotdrop = true; | 55 | par->hotdrop = true; |
56 | return false; | 56 | return false; |
57 | } | 57 | } |
58 | 58 | ||
@@ -102,15 +102,15 @@ frag_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | |||
102 | (ntohs(fh->frag_off) & IP6_MF)); | 102 | (ntohs(fh->frag_off) & IP6_MF)); |
103 | } | 103 | } |
104 | 104 | ||
105 | static bool frag_mt6_check(const struct xt_mtchk_param *par) | 105 | static int frag_mt6_check(const struct xt_mtchk_param *par) |
106 | { | 106 | { |
107 | const struct ip6t_frag *fraginfo = par->matchinfo; | 107 | const struct ip6t_frag *fraginfo = par->matchinfo; |
108 | 108 | ||
109 | if (fraginfo->invflags & ~IP6T_FRAG_INV_MASK) { | 109 | if (fraginfo->invflags & ~IP6T_FRAG_INV_MASK) { |
110 | pr_debug("ip6t_frag: unknown flags %X\n", fraginfo->invflags); | 110 | pr_debug("unknown flags %X\n", fraginfo->invflags); |
111 | return false; | 111 | return -EINVAL; |
112 | } | 112 | } |
113 | return true; | 113 | return 0; |
114 | } | 114 | } |
115 | 115 | ||
116 | static struct xt_match frag_mt6_reg __read_mostly = { | 116 | static struct xt_match frag_mt6_reg __read_mostly = { |
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c index e60677519e40..59df051eaef6 100644 --- a/net/ipv6/netfilter/ip6t_hbh.c +++ b/net/ipv6/netfilter/ip6t_hbh.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | 9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/skbuff.h> | 11 | #include <linux/skbuff.h> |
12 | #include <linux/ipv6.h> | 12 | #include <linux/ipv6.h> |
@@ -41,8 +41,10 @@ MODULE_ALIAS("ip6t_dst"); | |||
41 | * 5 -> RTALERT 2 x x | 41 | * 5 -> RTALERT 2 x x |
42 | */ | 42 | */ |
43 | 43 | ||
44 | static struct xt_match hbh_mt6_reg[] __read_mostly; | ||
45 | |||
44 | static bool | 46 | static bool |
45 | hbh_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | 47 | hbh_mt6(const struct sk_buff *skb, struct xt_action_param *par) |
46 | { | 48 | { |
47 | struct ipv6_opt_hdr _optsh; | 49 | struct ipv6_opt_hdr _optsh; |
48 | const struct ipv6_opt_hdr *oh; | 50 | const struct ipv6_opt_hdr *oh; |
@@ -58,16 +60,18 @@ hbh_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | |||
58 | unsigned int optlen; | 60 | unsigned int optlen; |
59 | int err; | 61 | int err; |
60 | 62 | ||
61 | err = ipv6_find_hdr(skb, &ptr, par->match->data, NULL); | 63 | err = ipv6_find_hdr(skb, &ptr, |
64 | (par->match == &hbh_mt6_reg[0]) ? | ||
65 | NEXTHDR_HOP : NEXTHDR_DEST, NULL); | ||
62 | if (err < 0) { | 66 | if (err < 0) { |
63 | if (err != -ENOENT) | 67 | if (err != -ENOENT) |
64 | *par->hotdrop = true; | 68 | par->hotdrop = true; |
65 | return false; | 69 | return false; |
66 | } | 70 | } |
67 | 71 | ||
68 | oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh); | 72 | oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh); |
69 | if (oh == NULL) { | 73 | if (oh == NULL) { |
70 | *par->hotdrop = true; | 74 | par->hotdrop = true; |
71 | return false; | 75 | return false; |
72 | } | 76 | } |
73 | 77 | ||
@@ -160,32 +164,32 @@ hbh_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | |||
160 | return false; | 164 | return false; |
161 | } | 165 | } |
162 | 166 | ||
163 | static bool hbh_mt6_check(const struct xt_mtchk_param *par) | 167 | static int hbh_mt6_check(const struct xt_mtchk_param *par) |
164 | { | 168 | { |
165 | const struct ip6t_opts *optsinfo = par->matchinfo; | 169 | const struct ip6t_opts *optsinfo = par->matchinfo; |
166 | 170 | ||
167 | if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) { | 171 | if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) { |
168 | pr_debug("ip6t_opts: unknown flags %X\n", optsinfo->invflags); | 172 | pr_debug("unknown flags %X\n", optsinfo->invflags); |
169 | return false; | 173 | return -EINVAL; |
170 | } | 174 | } |
171 | 175 | ||
172 | if (optsinfo->flags & IP6T_OPTS_NSTRICT) { | 176 | if (optsinfo->flags & IP6T_OPTS_NSTRICT) { |
173 | pr_debug("ip6t_opts: Not strict - not implemented"); | 177 | pr_debug("Not strict - not implemented"); |
174 | return false; | 178 | return -EINVAL; |
175 | } | 179 | } |
176 | 180 | ||
177 | return true; | 181 | return 0; |
178 | } | 182 | } |
179 | 183 | ||
180 | static struct xt_match hbh_mt6_reg[] __read_mostly = { | 184 | static struct xt_match hbh_mt6_reg[] __read_mostly = { |
181 | { | 185 | { |
186 | /* Note, hbh_mt6 relies on the order of hbh_mt6_reg */ | ||
182 | .name = "hbh", | 187 | .name = "hbh", |
183 | .family = NFPROTO_IPV6, | 188 | .family = NFPROTO_IPV6, |
184 | .match = hbh_mt6, | 189 | .match = hbh_mt6, |
185 | .matchsize = sizeof(struct ip6t_opts), | 190 | .matchsize = sizeof(struct ip6t_opts), |
186 | .checkentry = hbh_mt6_check, | 191 | .checkentry = hbh_mt6_check, |
187 | .me = THIS_MODULE, | 192 | .me = THIS_MODULE, |
188 | .data = NEXTHDR_HOP, | ||
189 | }, | 193 | }, |
190 | { | 194 | { |
191 | .name = "dst", | 195 | .name = "dst", |
@@ -194,7 +198,6 @@ static struct xt_match hbh_mt6_reg[] __read_mostly = { | |||
194 | .matchsize = sizeof(struct ip6t_opts), | 198 | .matchsize = sizeof(struct ip6t_opts), |
195 | .checkentry = hbh_mt6_check, | 199 | .checkentry = hbh_mt6_check, |
196 | .me = THIS_MODULE, | 200 | .me = THIS_MODULE, |
197 | .data = NEXTHDR_DEST, | ||
198 | }, | 201 | }, |
199 | }; | 202 | }; |
200 | 203 | ||
diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c index 91490ad9302c..54bd9790603f 100644 --- a/net/ipv6/netfilter/ip6t_ipv6header.c +++ b/net/ipv6/netfilter/ip6t_ipv6header.c | |||
@@ -27,7 +27,7 @@ MODULE_DESCRIPTION("Xtables: IPv6 header types match"); | |||
27 | MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>"); | 27 | MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>"); |
28 | 28 | ||
29 | static bool | 29 | static bool |
30 | ipv6header_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | 30 | ipv6header_mt6(const struct sk_buff *skb, struct xt_action_param *par) |
31 | { | 31 | { |
32 | const struct ip6t_ipv6header_info *info = par->matchinfo; | 32 | const struct ip6t_ipv6header_info *info = par->matchinfo; |
33 | unsigned int temp; | 33 | unsigned int temp; |
@@ -118,16 +118,16 @@ ipv6header_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | |||
118 | } | 118 | } |
119 | } | 119 | } |
120 | 120 | ||
121 | static bool ipv6header_mt6_check(const struct xt_mtchk_param *par) | 121 | static int ipv6header_mt6_check(const struct xt_mtchk_param *par) |
122 | { | 122 | { |
123 | const struct ip6t_ipv6header_info *info = par->matchinfo; | 123 | const struct ip6t_ipv6header_info *info = par->matchinfo; |
124 | 124 | ||
125 | /* invflags is 0 or 0xff in hard mode */ | 125 | /* invflags is 0 or 0xff in hard mode */ |
126 | if ((!info->modeflag) && info->invflags != 0x00 && | 126 | if ((!info->modeflag) && info->invflags != 0x00 && |
127 | info->invflags != 0xFF) | 127 | info->invflags != 0xFF) |
128 | return false; | 128 | return -EINVAL; |
129 | 129 | ||
130 | return true; | 130 | return 0; |
131 | } | 131 | } |
132 | 132 | ||
133 | static struct xt_match ipv6header_mt6_reg __read_mostly = { | 133 | static struct xt_match ipv6header_mt6_reg __read_mostly = { |
diff --git a/net/ipv6/netfilter/ip6t_mh.c b/net/ipv6/netfilter/ip6t_mh.c index aafe4e66577b..0c90c66b1992 100644 --- a/net/ipv6/netfilter/ip6t_mh.c +++ b/net/ipv6/netfilter/ip6t_mh.c | |||
@@ -11,6 +11,7 @@ | |||
11 | * Based on net/netfilter/xt_tcpudp.c | 11 | * Based on net/netfilter/xt_tcpudp.c |
12 | * | 12 | * |
13 | */ | 13 | */ |
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
14 | #include <linux/types.h> | 15 | #include <linux/types.h> |
15 | #include <linux/module.h> | 16 | #include <linux/module.h> |
16 | #include <net/ip.h> | 17 | #include <net/ip.h> |
@@ -24,12 +25,6 @@ | |||
24 | MODULE_DESCRIPTION("Xtables: IPv6 Mobility Header match"); | 25 | MODULE_DESCRIPTION("Xtables: IPv6 Mobility Header match"); |
25 | MODULE_LICENSE("GPL"); | 26 | MODULE_LICENSE("GPL"); |
26 | 27 | ||
27 | #ifdef DEBUG_IP_FIREWALL_USER | ||
28 | #define duprintf(format, args...) printk(format , ## args) | ||
29 | #else | ||
30 | #define duprintf(format, args...) | ||
31 | #endif | ||
32 | |||
33 | /* Returns 1 if the type is matched by the range, 0 otherwise */ | 28 | /* Returns 1 if the type is matched by the range, 0 otherwise */ |
34 | static inline bool | 29 | static inline bool |
35 | type_match(u_int8_t min, u_int8_t max, u_int8_t type, bool invert) | 30 | type_match(u_int8_t min, u_int8_t max, u_int8_t type, bool invert) |
@@ -37,7 +32,7 @@ type_match(u_int8_t min, u_int8_t max, u_int8_t type, bool invert) | |||
37 | return (type >= min && type <= max) ^ invert; | 32 | return (type >= min && type <= max) ^ invert; |
38 | } | 33 | } |
39 | 34 | ||
40 | static bool mh_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | 35 | static bool mh_mt6(const struct sk_buff *skb, struct xt_action_param *par) |
41 | { | 36 | { |
42 | struct ip6_mh _mh; | 37 | struct ip6_mh _mh; |
43 | const struct ip6_mh *mh; | 38 | const struct ip6_mh *mh; |
@@ -51,15 +46,15 @@ static bool mh_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | |||
51 | if (mh == NULL) { | 46 | if (mh == NULL) { |
52 | /* We've been asked to examine this packet, and we | 47 | /* We've been asked to examine this packet, and we |
53 | can't. Hence, no choice but to drop. */ | 48 | can't. Hence, no choice but to drop. */ |
54 | duprintf("Dropping evil MH tinygram.\n"); | 49 | pr_debug("Dropping evil MH tinygram.\n"); |
55 | *par->hotdrop = true; | 50 | par->hotdrop = true; |
56 | return false; | 51 | return false; |
57 | } | 52 | } |
58 | 53 | ||
59 | if (mh->ip6mh_proto != IPPROTO_NONE) { | 54 | if (mh->ip6mh_proto != IPPROTO_NONE) { |
60 | duprintf("Dropping invalid MH Payload Proto: %u\n", | 55 | pr_debug("Dropping invalid MH Payload Proto: %u\n", |
61 | mh->ip6mh_proto); | 56 | mh->ip6mh_proto); |
62 | *par->hotdrop = true; | 57 | par->hotdrop = true; |
63 | return false; | 58 | return false; |
64 | } | 59 | } |
65 | 60 | ||
@@ -67,12 +62,12 @@ static bool mh_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | |||
67 | !!(mhinfo->invflags & IP6T_MH_INV_TYPE)); | 62 | !!(mhinfo->invflags & IP6T_MH_INV_TYPE)); |
68 | } | 63 | } |
69 | 64 | ||
70 | static bool mh_mt6_check(const struct xt_mtchk_param *par) | 65 | static int mh_mt6_check(const struct xt_mtchk_param *par) |
71 | { | 66 | { |
72 | const struct ip6t_mh *mhinfo = par->matchinfo; | 67 | const struct ip6t_mh *mhinfo = par->matchinfo; |
73 | 68 | ||
74 | /* Must specify no unknown invflags */ | 69 | /* Must specify no unknown invflags */ |
75 | return !(mhinfo->invflags & ~IP6T_MH_INV_MASK); | 70 | return (mhinfo->invflags & ~IP6T_MH_INV_MASK) ? -EINVAL : 0; |
76 | } | 71 | } |
77 | 72 | ||
78 | static struct xt_match mh_mt6_reg __read_mostly = { | 73 | static struct xt_match mh_mt6_reg __read_mostly = { |
diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c index b77307fc8743..d8488c50a8e0 100644 --- a/net/ipv6/netfilter/ip6t_rt.c +++ b/net/ipv6/netfilter/ip6t_rt.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | 9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/skbuff.h> | 11 | #include <linux/skbuff.h> |
12 | #include <linux/ipv6.h> | 12 | #include <linux/ipv6.h> |
@@ -29,14 +29,14 @@ static inline bool | |||
29 | segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert) | 29 | segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert) |
30 | { | 30 | { |
31 | bool r; | 31 | bool r; |
32 | pr_debug("rt segsleft_match:%c 0x%x <= 0x%x <= 0x%x", | 32 | pr_debug("segsleft_match:%c 0x%x <= 0x%x <= 0x%x\n", |
33 | invert ? '!' : ' ', min, id, max); | 33 | invert ? '!' : ' ', min, id, max); |
34 | r = (id >= min && id <= max) ^ invert; | 34 | r = (id >= min && id <= max) ^ invert; |
35 | pr_debug(" result %s\n", r ? "PASS" : "FAILED"); | 35 | pr_debug(" result %s\n", r ? "PASS" : "FAILED"); |
36 | return r; | 36 | return r; |
37 | } | 37 | } |
38 | 38 | ||
39 | static bool rt_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | 39 | static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par) |
40 | { | 40 | { |
41 | struct ipv6_rt_hdr _route; | 41 | struct ipv6_rt_hdr _route; |
42 | const struct ipv6_rt_hdr *rh; | 42 | const struct ipv6_rt_hdr *rh; |
@@ -52,13 +52,13 @@ static bool rt_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | |||
52 | err = ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL); | 52 | err = ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL); |
53 | if (err < 0) { | 53 | if (err < 0) { |
54 | if (err != -ENOENT) | 54 | if (err != -ENOENT) |
55 | *par->hotdrop = true; | 55 | par->hotdrop = true; |
56 | return false; | 56 | return false; |
57 | } | 57 | } |
58 | 58 | ||
59 | rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route); | 59 | rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route); |
60 | if (rh == NULL) { | 60 | if (rh == NULL) { |
61 | *par->hotdrop = true; | 61 | par->hotdrop = true; |
62 | return false; | 62 | return false; |
63 | } | 63 | } |
64 | 64 | ||
@@ -183,23 +183,23 @@ static bool rt_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | |||
183 | return false; | 183 | return false; |
184 | } | 184 | } |
185 | 185 | ||
186 | static bool rt_mt6_check(const struct xt_mtchk_param *par) | 186 | static int rt_mt6_check(const struct xt_mtchk_param *par) |
187 | { | 187 | { |
188 | const struct ip6t_rt *rtinfo = par->matchinfo; | 188 | const struct ip6t_rt *rtinfo = par->matchinfo; |
189 | 189 | ||
190 | if (rtinfo->invflags & ~IP6T_RT_INV_MASK) { | 190 | if (rtinfo->invflags & ~IP6T_RT_INV_MASK) { |
191 | pr_debug("ip6t_rt: unknown flags %X\n", rtinfo->invflags); | 191 | pr_debug("unknown flags %X\n", rtinfo->invflags); |
192 | return false; | 192 | return -EINVAL; |
193 | } | 193 | } |
194 | if ((rtinfo->flags & (IP6T_RT_RES | IP6T_RT_FST_MASK)) && | 194 | if ((rtinfo->flags & (IP6T_RT_RES | IP6T_RT_FST_MASK)) && |
195 | (!(rtinfo->flags & IP6T_RT_TYP) || | 195 | (!(rtinfo->flags & IP6T_RT_TYP) || |
196 | (rtinfo->rt_type != 0) || | 196 | (rtinfo->rt_type != 0) || |
197 | (rtinfo->invflags & IP6T_RT_INV_TYP))) { | 197 | (rtinfo->invflags & IP6T_RT_INV_TYP))) { |
198 | pr_debug("`--rt-type 0' required before `--rt-0-*'"); | 198 | pr_debug("`--rt-type 0' required before `--rt-0-*'"); |
199 | return false; | 199 | return -EINVAL; |
200 | } | 200 | } |
201 | 201 | ||
202 | return true; | 202 | return 0; |
203 | } | 203 | } |
204 | 204 | ||
205 | static struct xt_match rt_mt6_reg __read_mostly = { | 205 | static struct xt_match rt_mt6_reg __read_mostly = { |
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c index d6fc9aff3163..c9e37c8fd62c 100644 --- a/net/ipv6/netfilter/ip6table_filter.c +++ b/net/ipv6/netfilter/ip6table_filter.c | |||
@@ -81,7 +81,7 @@ static int __init ip6table_filter_init(void) | |||
81 | int ret; | 81 | int ret; |
82 | 82 | ||
83 | if (forward < 0 || forward > NF_MAX_VERDICT) { | 83 | if (forward < 0 || forward > NF_MAX_VERDICT) { |
84 | printk("iptables forward must be 0 or 1\n"); | 84 | pr_err("iptables forward must be 0 or 1\n"); |
85 | return -EINVAL; | 85 | return -EINVAL; |
86 | } | 86 | } |
87 | 87 | ||
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c index 6a102b57f356..679a0a3b7b3c 100644 --- a/net/ipv6/netfilter/ip6table_mangle.c +++ b/net/ipv6/netfilter/ip6table_mangle.c | |||
@@ -43,7 +43,7 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out) | |||
43 | if (skb->len < sizeof(struct iphdr) || | 43 | if (skb->len < sizeof(struct iphdr) || |
44 | ip_hdrlen(skb) < sizeof(struct iphdr)) { | 44 | ip_hdrlen(skb) < sizeof(struct iphdr)) { |
45 | if (net_ratelimit()) | 45 | if (net_ratelimit()) |
46 | printk("ip6t_hook: happy cracking.\n"); | 46 | pr_warning("ip6t_hook: happy cracking.\n"); |
47 | return NF_ACCEPT; | 47 | return NF_ACCEPT; |
48 | } | 48 | } |
49 | #endif | 49 | #endif |
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index 996c3f41fecd..ff43461704be 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | |||
@@ -280,7 +280,7 @@ static unsigned int ipv6_conntrack_local(unsigned int hooknum, | |||
280 | /* root is playing with raw sockets. */ | 280 | /* root is playing with raw sockets. */ |
281 | if (skb->len < sizeof(struct ipv6hdr)) { | 281 | if (skb->len < sizeof(struct ipv6hdr)) { |
282 | if (net_ratelimit()) | 282 | if (net_ratelimit()) |
283 | printk("ipv6_conntrack_local: packet too short\n"); | 283 | pr_notice("ipv6_conntrack_local: packet too short\n"); |
284 | return NF_ACCEPT; | 284 | return NF_ACCEPT; |
285 | } | 285 | } |
286 | return __ipv6_conntrack_in(dev_net(out), hooknum, skb, okfn); | 286 | return __ipv6_conntrack_in(dev_net(out), hooknum, skb, okfn); |
@@ -406,37 +406,37 @@ static int __init nf_conntrack_l3proto_ipv6_init(void) | |||
406 | 406 | ||
407 | ret = nf_ct_frag6_init(); | 407 | ret = nf_ct_frag6_init(); |
408 | if (ret < 0) { | 408 | if (ret < 0) { |
409 | printk("nf_conntrack_ipv6: can't initialize frag6.\n"); | 409 | pr_err("nf_conntrack_ipv6: can't initialize frag6.\n"); |
410 | return ret; | 410 | return ret; |
411 | } | 411 | } |
412 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp6); | 412 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp6); |
413 | if (ret < 0) { | 413 | if (ret < 0) { |
414 | printk("nf_conntrack_ipv6: can't register tcp.\n"); | 414 | pr_err("nf_conntrack_ipv6: can't register tcp.\n"); |
415 | goto cleanup_frag6; | 415 | goto cleanup_frag6; |
416 | } | 416 | } |
417 | 417 | ||
418 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp6); | 418 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp6); |
419 | if (ret < 0) { | 419 | if (ret < 0) { |
420 | printk("nf_conntrack_ipv6: can't register udp.\n"); | 420 | pr_err("nf_conntrack_ipv6: can't register udp.\n"); |
421 | goto cleanup_tcp; | 421 | goto cleanup_tcp; |
422 | } | 422 | } |
423 | 423 | ||
424 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmpv6); | 424 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmpv6); |
425 | if (ret < 0) { | 425 | if (ret < 0) { |
426 | printk("nf_conntrack_ipv6: can't register icmpv6.\n"); | 426 | pr_err("nf_conntrack_ipv6: can't register icmpv6.\n"); |
427 | goto cleanup_udp; | 427 | goto cleanup_udp; |
428 | } | 428 | } |
429 | 429 | ||
430 | ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv6); | 430 | ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv6); |
431 | if (ret < 0) { | 431 | if (ret < 0) { |
432 | printk("nf_conntrack_ipv6: can't register ipv6\n"); | 432 | pr_err("nf_conntrack_ipv6: can't register ipv6\n"); |
433 | goto cleanup_icmpv6; | 433 | goto cleanup_icmpv6; |
434 | } | 434 | } |
435 | 435 | ||
436 | ret = nf_register_hooks(ipv6_conntrack_ops, | 436 | ret = nf_register_hooks(ipv6_conntrack_ops, |
437 | ARRAY_SIZE(ipv6_conntrack_ops)); | 437 | ARRAY_SIZE(ipv6_conntrack_ops)); |
438 | if (ret < 0) { | 438 | if (ret < 0) { |
439 | printk("nf_conntrack_ipv6: can't register pre-routing defrag " | 439 | pr_err("nf_conntrack_ipv6: can't register pre-routing defrag " |
440 | "hook.\n"); | 440 | "hook.\n"); |
441 | goto cleanup_ipv6; | 441 | goto cleanup_ipv6; |
442 | } | 442 | } |
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index dd5b9bd61c62..6fb890187de0 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
@@ -644,7 +644,7 @@ void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb, | |||
644 | s2 = s->next; | 644 | s2 = s->next; |
645 | s->next = NULL; | 645 | s->next = NULL; |
646 | 646 | ||
647 | NF_HOOK_THRESH(PF_INET6, hooknum, s, in, out, okfn, | 647 | NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, s, in, out, okfn, |
648 | NF_IP6_PRI_CONNTRACK_DEFRAG + 1); | 648 | NF_IP6_PRI_CONNTRACK_DEFRAG + 1); |
649 | s = s2; | 649 | s = s2; |
650 | } | 650 | } |
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 458eabfbe130..566798d69f37 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c | |||
@@ -168,7 +168,6 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **mib) | |||
168 | i & 0x100 ? "Out" : "In", i & 0xff); | 168 | i & 0x100 ? "Out" : "In", i & 0xff); |
169 | seq_printf(seq, "%-32s\t%lu\n", name, val); | 169 | seq_printf(seq, "%-32s\t%lu\n", name, val); |
170 | } | 170 | } |
171 | return; | ||
172 | } | 171 | } |
173 | 172 | ||
174 | static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **mib, | 173 | static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **mib, |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 0e3d2dd92078..4a4dcbe4f8b2 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -640,8 +640,8 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, | |||
640 | goto error_fault; | 640 | goto error_fault; |
641 | 641 | ||
642 | IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); | 642 | IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); |
643 | err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev, | 643 | err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, |
644 | dst_output); | 644 | rt->u.dst.dev, dst_output); |
645 | if (err > 0) | 645 | if (err > 0) |
646 | err = net_xmit_errno(err); | 646 | err = net_xmit_errno(err); |
647 | if (err) | 647 | if (err) |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 05ebd7833043..252d76199c41 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -316,7 +316,6 @@ static void rt6_probe(struct rt6_info *rt) | |||
316 | #else | 316 | #else |
317 | static inline void rt6_probe(struct rt6_info *rt) | 317 | static inline void rt6_probe(struct rt6_info *rt) |
318 | { | 318 | { |
319 | return; | ||
320 | } | 319 | } |
321 | #endif | 320 | #endif |
322 | 321 | ||
@@ -815,7 +814,7 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk, | |||
815 | { | 814 | { |
816 | int flags = 0; | 815 | int flags = 0; |
817 | 816 | ||
818 | if (fl->oif || rt6_need_strict(&fl->fl6_dst)) | 817 | if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl->fl6_dst)) |
819 | flags |= RT6_LOOKUP_F_IFACE; | 818 | flags |= RT6_LOOKUP_F_IFACE; |
820 | 819 | ||
821 | if (!ipv6_addr_any(&fl->fl6_src)) | 820 | if (!ipv6_addr_any(&fl->fl6_src)) |
@@ -1553,7 +1552,6 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src, | |||
1553 | 1552 | ||
1554 | out: | 1553 | out: |
1555 | dst_release(&rt->u.dst); | 1554 | dst_release(&rt->u.dst); |
1556 | return; | ||
1557 | } | 1555 | } |
1558 | 1556 | ||
1559 | /* | 1557 | /* |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 5abae10cd884..e51e650ea80b 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -566,11 +566,9 @@ static int ipip6_rcv(struct sk_buff *skb) | |||
566 | kfree_skb(skb); | 566 | kfree_skb(skb); |
567 | return 0; | 567 | return 0; |
568 | } | 568 | } |
569 | tunnel->dev->stats.rx_packets++; | 569 | |
570 | tunnel->dev->stats.rx_bytes += skb->len; | 570 | skb_tunnel_rx(skb, tunnel->dev); |
571 | skb->dev = tunnel->dev; | 571 | |
572 | skb_dst_drop(skb); | ||
573 | nf_reset(skb); | ||
574 | ipip6_ecn_decapsulate(iph, skb); | 572 | ipip6_ecn_decapsulate(iph, skb); |
575 | netif_rx(skb); | 573 | netif_rx(skb); |
576 | rcu_read_unlock(); | 574 | rcu_read_unlock(); |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 6603511e3673..2b7c3a100e2c 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -604,7 +604,7 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer, | |||
604 | kfree(newkey); | 604 | kfree(newkey); |
605 | return -ENOMEM; | 605 | return -ENOMEM; |
606 | } | 606 | } |
607 | sk->sk_route_caps &= ~NETIF_F_GSO_MASK; | 607 | sk_nocaps_add(sk, NETIF_F_GSO_MASK); |
608 | } | 608 | } |
609 | if (tcp_alloc_md5sig_pool(sk) == NULL) { | 609 | if (tcp_alloc_md5sig_pool(sk) == NULL) { |
610 | kfree(newkey); | 610 | kfree(newkey); |
@@ -741,7 +741,7 @@ static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval, | |||
741 | return -ENOMEM; | 741 | return -ENOMEM; |
742 | 742 | ||
743 | tp->md5sig_info = p; | 743 | tp->md5sig_info = p; |
744 | sk->sk_route_caps &= ~NETIF_F_GSO_MASK; | 744 | sk_nocaps_add(sk, NETIF_F_GSO_MASK); |
745 | } | 745 | } |
746 | 746 | ||
747 | newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); | 747 | newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 3d7a2c0b836a..87be58673b55 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -328,6 +328,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
328 | int err; | 328 | int err; |
329 | int is_udplite = IS_UDPLITE(sk); | 329 | int is_udplite = IS_UDPLITE(sk); |
330 | int is_udp4; | 330 | int is_udp4; |
331 | bool slow; | ||
331 | 332 | ||
332 | if (addr_len) | 333 | if (addr_len) |
333 | *addr_len=sizeof(struct sockaddr_in6); | 334 | *addr_len=sizeof(struct sockaddr_in6); |
@@ -424,7 +425,7 @@ out: | |||
424 | return err; | 425 | return err; |
425 | 426 | ||
426 | csum_copy_err: | 427 | csum_copy_err: |
427 | lock_sock_bh(sk); | 428 | slow = lock_sock_fast(sk); |
428 | if (!skb_kill_datagram(sk, skb, flags)) { | 429 | if (!skb_kill_datagram(sk, skb, flags)) { |
429 | if (is_udp4) | 430 | if (is_udp4) |
430 | UDP_INC_STATS_USER(sock_net(sk), | 431 | UDP_INC_STATS_USER(sock_net(sk), |
@@ -433,7 +434,7 @@ csum_copy_err: | |||
433 | UDP6_INC_STATS_USER(sock_net(sk), | 434 | UDP6_INC_STATS_USER(sock_net(sk), |
434 | UDP_MIB_INERRORS, is_udplite); | 435 | UDP_MIB_INERRORS, is_udplite); |
435 | } | 436 | } |
436 | unlock_sock_bh(sk); | 437 | unlock_sock_fast(sk, slow); |
437 | 438 | ||
438 | if (flags & MSG_DONTWAIT) | 439 | if (flags & MSG_DONTWAIT) |
439 | return -EAGAIN; | 440 | return -EAGAIN; |
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c index 2bc98ede1235..f8c3cf842f53 100644 --- a/net/ipv6/xfrm6_input.c +++ b/net/ipv6/xfrm6_input.c | |||
@@ -42,7 +42,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async) | |||
42 | ipv6_hdr(skb)->payload_len = htons(skb->len); | 42 | ipv6_hdr(skb)->payload_len = htons(skb->len); |
43 | __skb_push(skb, skb->data - skb_network_header(skb)); | 43 | __skb_push(skb, skb->data - skb_network_header(skb)); |
44 | 44 | ||
45 | NF_HOOK(PF_INET6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, | 45 | NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, |
46 | ip6_rcv_finish); | 46 | ip6_rcv_finish); |
47 | return -1; | 47 | return -1; |
48 | } | 48 | } |
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 0c92112dcba3..6434bd5ce088 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c | |||
@@ -90,6 +90,6 @@ static int xfrm6_output_finish(struct sk_buff *skb) | |||
90 | 90 | ||
91 | int xfrm6_output(struct sk_buff *skb) | 91 | int xfrm6_output(struct sk_buff *skb) |
92 | { | 92 | { |
93 | return NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, skb, NULL, skb_dst(skb)->dev, | 93 | return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, |
94 | xfrm6_output_finish); | 94 | skb_dst(skb)->dev, xfrm6_output_finish); |
95 | } | 95 | } |
diff --git a/net/irda/iriap.c b/net/irda/iriap.c index 79a1e5a23e10..fce364c6c71a 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c | |||
@@ -685,8 +685,6 @@ static void iriap_getvaluebyclass_indication(struct iriap_cb *self, | |||
685 | /* We have a match; send the value. */ | 685 | /* We have a match; send the value. */ |
686 | iriap_getvaluebyclass_response(self, obj->id, IAS_SUCCESS, | 686 | iriap_getvaluebyclass_response(self, obj->id, IAS_SUCCESS, |
687 | attrib->value); | 687 | attrib->value); |
688 | |||
689 | return; | ||
690 | } | 688 | } |
691 | 689 | ||
692 | /* | 690 | /* |
diff --git a/net/irda/irnet/irnet_irda.c b/net/irda/irnet/irnet_irda.c index df18ab4b6c5e..e98e40d76f4f 100644 --- a/net/irda/irnet/irnet_irda.c +++ b/net/irda/irnet/irnet_irda.c | |||
@@ -678,7 +678,6 @@ irda_irnet_destroy(irnet_socket * self) | |||
678 | self->stsap_sel = 0; | 678 | self->stsap_sel = 0; |
679 | 679 | ||
680 | DEXIT(IRDA_SOCK_TRACE, "\n"); | 680 | DEXIT(IRDA_SOCK_TRACE, "\n"); |
681 | return; | ||
682 | } | 681 | } |
683 | 682 | ||
684 | 683 | ||
@@ -928,7 +927,6 @@ irnet_disconnect_server(irnet_socket * self, | |||
928 | irttp_listen(self->tsap); | 927 | irttp_listen(self->tsap); |
929 | 928 | ||
930 | DEXIT(IRDA_SERV_TRACE, "\n"); | 929 | DEXIT(IRDA_SERV_TRACE, "\n"); |
931 | return; | ||
932 | } | 930 | } |
933 | 931 | ||
934 | /*------------------------------------------------------------------*/ | 932 | /*------------------------------------------------------------------*/ |
@@ -1013,7 +1011,6 @@ irnet_destroy_server(void) | |||
1013 | irda_irnet_destroy(&irnet_server.s); | 1011 | irda_irnet_destroy(&irnet_server.s); |
1014 | 1012 | ||
1015 | DEXIT(IRDA_SERV_TRACE, "\n"); | 1013 | DEXIT(IRDA_SERV_TRACE, "\n"); |
1016 | return; | ||
1017 | } | 1014 | } |
1018 | 1015 | ||
1019 | 1016 | ||
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 8be324fe08b9..9637e45744fa 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -136,7 +136,6 @@ static void afiucv_pm_complete(struct device *dev) | |||
136 | #ifdef CONFIG_PM_DEBUG | 136 | #ifdef CONFIG_PM_DEBUG |
137 | printk(KERN_WARNING "afiucv_pm_complete\n"); | 137 | printk(KERN_WARNING "afiucv_pm_complete\n"); |
138 | #endif | 138 | #endif |
139 | return; | ||
140 | } | 139 | } |
141 | 140 | ||
142 | /** | 141 | /** |
@@ -1620,7 +1619,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) | |||
1620 | save_message: | 1619 | save_message: |
1621 | save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA); | 1620 | save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA); |
1622 | if (!save_msg) | 1621 | if (!save_msg) |
1623 | return; | 1622 | goto out_unlock; |
1624 | save_msg->path = path; | 1623 | save_msg->path = path; |
1625 | save_msg->msg = *msg; | 1624 | save_msg->msg = *msg; |
1626 | 1625 | ||
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index fd8b28361a64..f28ad2cc8428 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c | |||
@@ -632,13 +632,14 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self, | |||
632 | iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), | 632 | iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), |
633 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 633 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); |
634 | if (!iucv_irq_data[cpu]) | 634 | if (!iucv_irq_data[cpu]) |
635 | return NOTIFY_BAD; | 635 | return notifier_from_errno(-ENOMEM); |
636 | |||
636 | iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), | 637 | iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), |
637 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 638 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); |
638 | if (!iucv_param[cpu]) { | 639 | if (!iucv_param[cpu]) { |
639 | kfree(iucv_irq_data[cpu]); | 640 | kfree(iucv_irq_data[cpu]); |
640 | iucv_irq_data[cpu] = NULL; | 641 | iucv_irq_data[cpu] = NULL; |
641 | return NOTIFY_BAD; | 642 | return notifier_from_errno(-ENOMEM); |
642 | } | 643 | } |
643 | iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param), | 644 | iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param), |
644 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 645 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); |
@@ -647,7 +648,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self, | |||
647 | iucv_param[cpu] = NULL; | 648 | iucv_param[cpu] = NULL; |
648 | kfree(iucv_irq_data[cpu]); | 649 | kfree(iucv_irq_data[cpu]); |
649 | iucv_irq_data[cpu] = NULL; | 650 | iucv_irq_data[cpu] = NULL; |
650 | return NOTIFY_BAD; | 651 | return notifier_from_errno(-ENOMEM); |
651 | } | 652 | } |
652 | break; | 653 | break; |
653 | case CPU_UP_CANCELED: | 654 | case CPU_UP_CANCELED: |
@@ -677,7 +678,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self, | |||
677 | cpu_clear(cpu, cpumask); | 678 | cpu_clear(cpu, cpumask); |
678 | if (cpus_empty(cpumask)) | 679 | if (cpus_empty(cpumask)) |
679 | /* Can't offline last IUCV enabled cpu. */ | 680 | /* Can't offline last IUCV enabled cpu. */ |
680 | return NOTIFY_BAD; | 681 | return notifier_from_errno(-EINVAL); |
681 | smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1); | 682 | smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1); |
682 | if (cpus_empty(iucv_irq_cpumask)) | 683 | if (cpus_empty(iucv_irq_cpumask)) |
683 | smp_call_function_single(first_cpu(iucv_buffer_cpumask), | 684 | smp_call_function_single(first_cpu(iucv_buffer_cpumask), |
diff --git a/net/key/af_key.c b/net/key/af_key.c index ba9a3fcc2fed..43040e97c474 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -99,7 +99,7 @@ static void pfkey_sock_destruct(struct sock *sk) | |||
99 | skb_queue_purge(&sk->sk_receive_queue); | 99 | skb_queue_purge(&sk->sk_receive_queue); |
100 | 100 | ||
101 | if (!sock_flag(sk, SOCK_DEAD)) { | 101 | if (!sock_flag(sk, SOCK_DEAD)) { |
102 | printk("Attempt to release alive pfkey socket: %p\n", sk); | 102 | pr_err("Attempt to release alive pfkey socket: %p\n", sk); |
103 | return; | 103 | return; |
104 | } | 104 | } |
105 | 105 | ||
@@ -1402,7 +1402,7 @@ static inline int event2poltype(int event) | |||
1402 | case XFRM_MSG_POLEXPIRE: | 1402 | case XFRM_MSG_POLEXPIRE: |
1403 | // return SADB_X_SPDEXPIRE; | 1403 | // return SADB_X_SPDEXPIRE; |
1404 | default: | 1404 | default: |
1405 | printk("pfkey: Unknown policy event %d\n", event); | 1405 | pr_err("pfkey: Unknown policy event %d\n", event); |
1406 | break; | 1406 | break; |
1407 | } | 1407 | } |
1408 | 1408 | ||
@@ -1421,7 +1421,7 @@ static inline int event2keytype(int event) | |||
1421 | case XFRM_MSG_EXPIRE: | 1421 | case XFRM_MSG_EXPIRE: |
1422 | return SADB_EXPIRE; | 1422 | return SADB_EXPIRE; |
1423 | default: | 1423 | default: |
1424 | printk("pfkey: Unknown SA event %d\n", event); | 1424 | pr_err("pfkey: Unknown SA event %d\n", event); |
1425 | break; | 1425 | break; |
1426 | } | 1426 | } |
1427 | 1427 | ||
@@ -2969,7 +2969,7 @@ static int pfkey_send_notify(struct xfrm_state *x, struct km_event *c) | |||
2969 | case XFRM_MSG_NEWAE: /* not yet supported */ | 2969 | case XFRM_MSG_NEWAE: /* not yet supported */ |
2970 | break; | 2970 | break; |
2971 | default: | 2971 | default: |
2972 | printk("pfkey: Unknown SA event %d\n", c->event); | 2972 | pr_err("pfkey: Unknown SA event %d\n", c->event); |
2973 | break; | 2973 | break; |
2974 | } | 2974 | } |
2975 | 2975 | ||
@@ -2993,7 +2993,7 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_e | |||
2993 | break; | 2993 | break; |
2994 | return key_notify_policy_flush(c); | 2994 | return key_notify_policy_flush(c); |
2995 | default: | 2995 | default: |
2996 | printk("pfkey: Unknown policy event %d\n", c->event); | 2996 | pr_err("pfkey: Unknown policy event %d\n", c->event); |
2997 | break; | 2997 | break; |
2998 | } | 2998 | } |
2999 | 2999 | ||
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c index a432f0ec051c..94e7fca75b85 100644 --- a/net/llc/llc_sap.c +++ b/net/llc/llc_sap.c | |||
@@ -31,7 +31,7 @@ static int llc_mac_header_len(unsigned short devtype) | |||
31 | case ARPHRD_ETHER: | 31 | case ARPHRD_ETHER: |
32 | case ARPHRD_LOOPBACK: | 32 | case ARPHRD_LOOPBACK: |
33 | return sizeof(struct ethhdr); | 33 | return sizeof(struct ethhdr); |
34 | #ifdef CONFIG_TR | 34 | #if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE) |
35 | case ARPHRD_IEEE802_TR: | 35 | case ARPHRD_IEEE802_TR: |
36 | return sizeof(struct trh_hdr); | 36 | return sizeof(struct trh_hdr); |
37 | #endif | 37 | #endif |
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile index 04420291e7ad..84b48ba8a77e 100644 --- a/net/mac80211/Makefile +++ b/net/mac80211/Makefile | |||
@@ -23,7 +23,8 @@ mac80211-y := \ | |||
23 | key.o \ | 23 | key.o \ |
24 | util.o \ | 24 | util.o \ |
25 | wme.o \ | 25 | wme.o \ |
26 | event.o | 26 | event.o \ |
27 | chan.o | ||
27 | 28 | ||
28 | mac80211-$(CONFIG_MAC80211_LEDS) += led.o | 29 | mac80211-$(CONFIG_MAC80211_LEDS) += led.o |
29 | mac80211-$(CONFIG_MAC80211_DEBUGFS) += \ | 30 | mac80211-$(CONFIG_MAC80211_DEBUGFS) += \ |
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index c163d0a149f4..98258b7341e3 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c | |||
@@ -332,14 +332,16 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) | |||
332 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION); | 332 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION); |
333 | 333 | ||
334 | spin_unlock(&local->ampdu_lock); | 334 | spin_unlock(&local->ampdu_lock); |
335 | spin_unlock_bh(&sta->lock); | ||
336 | 335 | ||
337 | /* send an addBA request */ | 336 | /* prepare tid data */ |
338 | sta->ampdu_mlme.dialog_token_allocator++; | 337 | sta->ampdu_mlme.dialog_token_allocator++; |
339 | sta->ampdu_mlme.tid_tx[tid]->dialog_token = | 338 | sta->ampdu_mlme.tid_tx[tid]->dialog_token = |
340 | sta->ampdu_mlme.dialog_token_allocator; | 339 | sta->ampdu_mlme.dialog_token_allocator; |
341 | sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; | 340 | sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; |
342 | 341 | ||
342 | spin_unlock_bh(&sta->lock); | ||
343 | |||
344 | /* send AddBA request */ | ||
343 | ieee80211_send_addba_request(sdata, pubsta->addr, tid, | 345 | ieee80211_send_addba_request(sdata, pubsta->addr, tid, |
344 | sta->ampdu_mlme.tid_tx[tid]->dialog_token, | 346 | sta->ampdu_mlme.tid_tx[tid]->dialog_token, |
345 | sta->ampdu_mlme.tid_tx[tid]->ssn, | 347 | sta->ampdu_mlme.tid_tx[tid]->ssn, |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index ae37270a0633..c7000a6ca379 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -1162,15 +1162,39 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy, | |||
1162 | } | 1162 | } |
1163 | 1163 | ||
1164 | static int ieee80211_set_channel(struct wiphy *wiphy, | 1164 | static int ieee80211_set_channel(struct wiphy *wiphy, |
1165 | struct net_device *netdev, | ||
1165 | struct ieee80211_channel *chan, | 1166 | struct ieee80211_channel *chan, |
1166 | enum nl80211_channel_type channel_type) | 1167 | enum nl80211_channel_type channel_type) |
1167 | { | 1168 | { |
1168 | struct ieee80211_local *local = wiphy_priv(wiphy); | 1169 | struct ieee80211_local *local = wiphy_priv(wiphy); |
1170 | struct ieee80211_sub_if_data *sdata = NULL; | ||
1171 | |||
1172 | if (netdev) | ||
1173 | sdata = IEEE80211_DEV_TO_SUB_IF(netdev); | ||
1174 | |||
1175 | switch (ieee80211_get_channel_mode(local, NULL)) { | ||
1176 | case CHAN_MODE_HOPPING: | ||
1177 | return -EBUSY; | ||
1178 | case CHAN_MODE_FIXED: | ||
1179 | if (local->oper_channel != chan) | ||
1180 | return -EBUSY; | ||
1181 | if (!sdata && local->_oper_channel_type == channel_type) | ||
1182 | return 0; | ||
1183 | break; | ||
1184 | case CHAN_MODE_UNDEFINED: | ||
1185 | break; | ||
1186 | } | ||
1169 | 1187 | ||
1170 | local->oper_channel = chan; | 1188 | local->oper_channel = chan; |
1171 | local->oper_channel_type = channel_type; | ||
1172 | 1189 | ||
1173 | return ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); | 1190 | if (!ieee80211_set_channel_type(local, sdata, channel_type)) |
1191 | return -EBUSY; | ||
1192 | |||
1193 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); | ||
1194 | if (sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR) | ||
1195 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT); | ||
1196 | |||
1197 | return 0; | ||
1174 | } | 1198 | } |
1175 | 1199 | ||
1176 | #ifdef CONFIG_PM | 1200 | #ifdef CONFIG_PM |
@@ -1214,6 +1238,20 @@ static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev, | |||
1214 | static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev, | 1238 | static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev, |
1215 | struct cfg80211_assoc_request *req) | 1239 | struct cfg80211_assoc_request *req) |
1216 | { | 1240 | { |
1241 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
1242 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1243 | |||
1244 | switch (ieee80211_get_channel_mode(local, sdata)) { | ||
1245 | case CHAN_MODE_HOPPING: | ||
1246 | return -EBUSY; | ||
1247 | case CHAN_MODE_FIXED: | ||
1248 | if (local->oper_channel == req->bss->channel) | ||
1249 | break; | ||
1250 | return -EBUSY; | ||
1251 | case CHAN_MODE_UNDEFINED: | ||
1252 | break; | ||
1253 | } | ||
1254 | |||
1217 | return ieee80211_mgd_assoc(IEEE80211_DEV_TO_SUB_IF(dev), req); | 1255 | return ieee80211_mgd_assoc(IEEE80211_DEV_TO_SUB_IF(dev), req); |
1218 | } | 1256 | } |
1219 | 1257 | ||
@@ -1236,8 +1274,22 @@ static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev, | |||
1236 | static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, | 1274 | static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, |
1237 | struct cfg80211_ibss_params *params) | 1275 | struct cfg80211_ibss_params *params) |
1238 | { | 1276 | { |
1277 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
1239 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1278 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
1240 | 1279 | ||
1280 | switch (ieee80211_get_channel_mode(local, sdata)) { | ||
1281 | case CHAN_MODE_HOPPING: | ||
1282 | return -EBUSY; | ||
1283 | case CHAN_MODE_FIXED: | ||
1284 | if (!params->channel_fixed) | ||
1285 | return -EBUSY; | ||
1286 | if (local->oper_channel == params->channel) | ||
1287 | break; | ||
1288 | return -EBUSY; | ||
1289 | case CHAN_MODE_UNDEFINED: | ||
1290 | break; | ||
1291 | } | ||
1292 | |||
1241 | return ieee80211_ibss_join(sdata, params); | 1293 | return ieee80211_ibss_join(sdata, params); |
1242 | } | 1294 | } |
1243 | 1295 | ||
@@ -1366,7 +1418,7 @@ int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata, | |||
1366 | * association, there's no need to send an action frame. | 1418 | * association, there's no need to send an action frame. |
1367 | */ | 1419 | */ |
1368 | if (!sdata->u.mgd.associated || | 1420 | if (!sdata->u.mgd.associated || |
1369 | sdata->local->oper_channel_type == NL80211_CHAN_NO_HT) { | 1421 | sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT) { |
1370 | mutex_lock(&sdata->local->iflist_mtx); | 1422 | mutex_lock(&sdata->local->iflist_mtx); |
1371 | ieee80211_recalc_smps(sdata->local, sdata); | 1423 | ieee80211_recalc_smps(sdata->local, sdata); |
1372 | mutex_unlock(&sdata->local->iflist_mtx); | 1424 | mutex_unlock(&sdata->local->iflist_mtx); |
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c new file mode 100644 index 000000000000..32be11e4c4d9 --- /dev/null +++ b/net/mac80211/chan.c | |||
@@ -0,0 +1,127 @@ | |||
1 | /* | ||
2 | * mac80211 - channel management | ||
3 | */ | ||
4 | |||
5 | #include <linux/nl80211.h> | ||
6 | #include "ieee80211_i.h" | ||
7 | |||
8 | static enum ieee80211_chan_mode | ||
9 | __ieee80211_get_channel_mode(struct ieee80211_local *local, | ||
10 | struct ieee80211_sub_if_data *ignore) | ||
11 | { | ||
12 | struct ieee80211_sub_if_data *sdata; | ||
13 | |||
14 | WARN_ON(!mutex_is_locked(&local->iflist_mtx)); | ||
15 | |||
16 | list_for_each_entry(sdata, &local->interfaces, list) { | ||
17 | if (sdata == ignore) | ||
18 | continue; | ||
19 | |||
20 | if (!ieee80211_sdata_running(sdata)) | ||
21 | continue; | ||
22 | |||
23 | if (sdata->vif.type == NL80211_IFTYPE_MONITOR) | ||
24 | continue; | ||
25 | |||
26 | if (sdata->vif.type == NL80211_IFTYPE_STATION && | ||
27 | !sdata->u.mgd.associated) | ||
28 | continue; | ||
29 | |||
30 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { | ||
31 | if (!sdata->u.ibss.ssid_len) | ||
32 | continue; | ||
33 | if (!sdata->u.ibss.fixed_channel) | ||
34 | return CHAN_MODE_HOPPING; | ||
35 | } | ||
36 | |||
37 | if (sdata->vif.type == NL80211_IFTYPE_AP && | ||
38 | !sdata->u.ap.beacon) | ||
39 | continue; | ||
40 | |||
41 | return CHAN_MODE_FIXED; | ||
42 | } | ||
43 | |||
44 | return CHAN_MODE_UNDEFINED; | ||
45 | } | ||
46 | |||
47 | enum ieee80211_chan_mode | ||
48 | ieee80211_get_channel_mode(struct ieee80211_local *local, | ||
49 | struct ieee80211_sub_if_data *ignore) | ||
50 | { | ||
51 | enum ieee80211_chan_mode mode; | ||
52 | |||
53 | mutex_lock(&local->iflist_mtx); | ||
54 | mode = __ieee80211_get_channel_mode(local, ignore); | ||
55 | mutex_unlock(&local->iflist_mtx); | ||
56 | |||
57 | return mode; | ||
58 | } | ||
59 | |||
60 | bool ieee80211_set_channel_type(struct ieee80211_local *local, | ||
61 | struct ieee80211_sub_if_data *sdata, | ||
62 | enum nl80211_channel_type chantype) | ||
63 | { | ||
64 | struct ieee80211_sub_if_data *tmp; | ||
65 | enum nl80211_channel_type superchan = NL80211_CHAN_NO_HT; | ||
66 | bool result; | ||
67 | |||
68 | mutex_lock(&local->iflist_mtx); | ||
69 | |||
70 | list_for_each_entry(tmp, &local->interfaces, list) { | ||
71 | if (tmp == sdata) | ||
72 | continue; | ||
73 | |||
74 | if (!ieee80211_sdata_running(tmp)) | ||
75 | continue; | ||
76 | |||
77 | switch (tmp->vif.bss_conf.channel_type) { | ||
78 | case NL80211_CHAN_NO_HT: | ||
79 | case NL80211_CHAN_HT20: | ||
80 | superchan = tmp->vif.bss_conf.channel_type; | ||
81 | break; | ||
82 | case NL80211_CHAN_HT40PLUS: | ||
83 | WARN_ON(superchan == NL80211_CHAN_HT40MINUS); | ||
84 | superchan = NL80211_CHAN_HT40PLUS; | ||
85 | break; | ||
86 | case NL80211_CHAN_HT40MINUS: | ||
87 | WARN_ON(superchan == NL80211_CHAN_HT40PLUS); | ||
88 | superchan = NL80211_CHAN_HT40MINUS; | ||
89 | break; | ||
90 | } | ||
91 | } | ||
92 | |||
93 | switch (superchan) { | ||
94 | case NL80211_CHAN_NO_HT: | ||
95 | case NL80211_CHAN_HT20: | ||
96 | /* | ||
97 | * allow any change that doesn't go to no-HT | ||
98 | * (if it already is no-HT no change is needed) | ||
99 | */ | ||
100 | if (chantype == NL80211_CHAN_NO_HT) | ||
101 | break; | ||
102 | superchan = chantype; | ||
103 | break; | ||
104 | case NL80211_CHAN_HT40PLUS: | ||
105 | case NL80211_CHAN_HT40MINUS: | ||
106 | /* allow smaller bandwidth and same */ | ||
107 | if (chantype == NL80211_CHAN_NO_HT) | ||
108 | break; | ||
109 | if (chantype == NL80211_CHAN_HT20) | ||
110 | break; | ||
111 | if (superchan == chantype) | ||
112 | break; | ||
113 | result = false; | ||
114 | goto out; | ||
115 | } | ||
116 | |||
117 | local->_oper_channel_type = superchan; | ||
118 | |||
119 | if (sdata) | ||
120 | sdata->vif.bss_conf.channel_type = chantype; | ||
121 | |||
122 | result = true; | ||
123 | out: | ||
124 | mutex_unlock(&local->iflist_mtx); | ||
125 | |||
126 | return result; | ||
127 | } | ||
diff --git a/net/mac80211/debugfs.h b/net/mac80211/debugfs.h index 68e6a2050f9a..09cc9be34796 100644 --- a/net/mac80211/debugfs.h +++ b/net/mac80211/debugfs.h | |||
@@ -7,7 +7,6 @@ extern int mac80211_open_file_generic(struct inode *inode, struct file *file); | |||
7 | #else | 7 | #else |
8 | static inline void debugfs_hw_add(struct ieee80211_local *local) | 8 | static inline void debugfs_hw_add(struct ieee80211_local *local) |
9 | { | 9 | { |
10 | return; | ||
11 | } | 10 | } |
12 | #endif | 11 | #endif |
13 | 12 | ||
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index ee8b63f92f71..9c1da0809160 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h | |||
@@ -349,7 +349,7 @@ static inline int drv_get_survey(struct ieee80211_local *local, int idx, | |||
349 | struct survey_info *survey) | 349 | struct survey_info *survey) |
350 | { | 350 | { |
351 | int ret = -EOPNOTSUPP; | 351 | int ret = -EOPNOTSUPP; |
352 | if (local->ops->conf_tx) | 352 | if (local->ops->get_survey) |
353 | ret = local->ops->get_survey(&local->hw, idx, survey); | 353 | ret = local->ops->get_survey(&local->hw, idx, survey); |
354 | /* trace_drv_get_survey(local, idx, survey, ret); */ | 354 | /* trace_drv_get_survey(local, idx, survey, ret); */ |
355 | return ret; | 355 | return ret; |
@@ -371,4 +371,15 @@ static inline void drv_flush(struct ieee80211_local *local, bool drop) | |||
371 | if (local->ops->flush) | 371 | if (local->ops->flush) |
372 | local->ops->flush(&local->hw, drop); | 372 | local->ops->flush(&local->hw, drop); |
373 | } | 373 | } |
374 | |||
375 | static inline void drv_channel_switch(struct ieee80211_local *local, | ||
376 | struct ieee80211_channel_switch *ch_switch) | ||
377 | { | ||
378 | might_sleep(); | ||
379 | |||
380 | local->ops->channel_switch(&local->hw, ch_switch); | ||
381 | |||
382 | trace_drv_channel_switch(local, ch_switch); | ||
383 | } | ||
384 | |||
374 | #endif /* __MAC80211_DRIVER_OPS */ | 385 | #endif /* __MAC80211_DRIVER_OPS */ |
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h index ce734b58d07a..6a9b2342a9c2 100644 --- a/net/mac80211/driver-trace.h +++ b/net/mac80211/driver-trace.h | |||
@@ -774,6 +774,34 @@ TRACE_EVENT(drv_flush, | |||
774 | ) | 774 | ) |
775 | ); | 775 | ); |
776 | 776 | ||
777 | TRACE_EVENT(drv_channel_switch, | ||
778 | TP_PROTO(struct ieee80211_local *local, | ||
779 | struct ieee80211_channel_switch *ch_switch), | ||
780 | |||
781 | TP_ARGS(local, ch_switch), | ||
782 | |||
783 | TP_STRUCT__entry( | ||
784 | LOCAL_ENTRY | ||
785 | __field(u64, timestamp) | ||
786 | __field(bool, block_tx) | ||
787 | __field(u16, freq) | ||
788 | __field(u8, count) | ||
789 | ), | ||
790 | |||
791 | TP_fast_assign( | ||
792 | LOCAL_ASSIGN; | ||
793 | __entry->timestamp = ch_switch->timestamp; | ||
794 | __entry->block_tx = ch_switch->block_tx; | ||
795 | __entry->freq = ch_switch->channel->center_freq; | ||
796 | __entry->count = ch_switch->count; | ||
797 | ), | ||
798 | |||
799 | TP_printk( | ||
800 | LOCAL_PR_FMT " new freq:%u count:%d", | ||
801 | LOCAL_PR_ARG, __entry->freq, __entry->count | ||
802 | ) | ||
803 | ); | ||
804 | |||
777 | /* | 805 | /* |
778 | * Tracing for API calls that drivers call. | 806 | * Tracing for API calls that drivers call. |
779 | */ | 807 | */ |
@@ -992,6 +1020,27 @@ TRACE_EVENT(api_sta_block_awake, | |||
992 | ) | 1020 | ) |
993 | ); | 1021 | ); |
994 | 1022 | ||
1023 | TRACE_EVENT(api_chswitch_done, | ||
1024 | TP_PROTO(struct ieee80211_sub_if_data *sdata, bool success), | ||
1025 | |||
1026 | TP_ARGS(sdata, success), | ||
1027 | |||
1028 | TP_STRUCT__entry( | ||
1029 | VIF_ENTRY | ||
1030 | __field(bool, success) | ||
1031 | ), | ||
1032 | |||
1033 | TP_fast_assign( | ||
1034 | VIF_ASSIGN; | ||
1035 | __entry->success = success; | ||
1036 | ), | ||
1037 | |||
1038 | TP_printk( | ||
1039 | VIF_PR_FMT " success=%d", | ||
1040 | VIF_PR_ARG, __entry->success | ||
1041 | ) | ||
1042 | ); | ||
1043 | |||
995 | /* | 1044 | /* |
996 | * Tracing for internal functions | 1045 | * Tracing for internal functions |
997 | * (which may also be called in response to driver calls) | 1046 | * (which may also be called in response to driver calls) |
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index b72ee6435fa3..b2cc1fda6cfd 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
@@ -103,7 +103,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | |||
103 | sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0; | 103 | sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0; |
104 | 104 | ||
105 | local->oper_channel = chan; | 105 | local->oper_channel = chan; |
106 | local->oper_channel_type = NL80211_CHAN_NO_HT; | 106 | WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT)); |
107 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); | 107 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); |
108 | 108 | ||
109 | sband = local->hw.wiphy->bands[chan->band]; | 109 | sband = local->hw.wiphy->bands[chan->band]; |
@@ -911,7 +911,8 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, | |||
911 | /* fix ourselves to that channel now already */ | 911 | /* fix ourselves to that channel now already */ |
912 | if (params->channel_fixed) { | 912 | if (params->channel_fixed) { |
913 | sdata->local->oper_channel = params->channel; | 913 | sdata->local->oper_channel = params->channel; |
914 | sdata->local->oper_channel_type = NL80211_CHAN_NO_HT; | 914 | WARN_ON(!ieee80211_set_channel_type(sdata->local, sdata, |
915 | NL80211_CHAN_NO_HT)); | ||
915 | } | 916 | } |
916 | 917 | ||
917 | if (params->ie) { | 918 | if (params->ie) { |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index cbaf4981e110..1a9e2da37a93 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -767,7 +767,7 @@ struct ieee80211_local { | |||
767 | enum mac80211_scan_state next_scan_state; | 767 | enum mac80211_scan_state next_scan_state; |
768 | struct delayed_work scan_work; | 768 | struct delayed_work scan_work; |
769 | struct ieee80211_sub_if_data *scan_sdata; | 769 | struct ieee80211_sub_if_data *scan_sdata; |
770 | enum nl80211_channel_type oper_channel_type; | 770 | enum nl80211_channel_type _oper_channel_type; |
771 | struct ieee80211_channel *oper_channel, *csa_channel; | 771 | struct ieee80211_channel *oper_channel, *csa_channel; |
772 | 772 | ||
773 | /* Temporary remain-on-channel for off-channel operations */ | 773 | /* Temporary remain-on-channel for off-channel operations */ |
@@ -998,7 +998,8 @@ int ieee80211_max_network_latency(struct notifier_block *nb, | |||
998 | unsigned long data, void *dummy); | 998 | unsigned long data, void *dummy); |
999 | void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | 999 | void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, |
1000 | struct ieee80211_channel_sw_ie *sw_elem, | 1000 | struct ieee80211_channel_sw_ie *sw_elem, |
1001 | struct ieee80211_bss *bss); | 1001 | struct ieee80211_bss *bss, |
1002 | u64 timestamp); | ||
1002 | void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata); | 1003 | void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata); |
1003 | void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata); | 1004 | void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata); |
1004 | 1005 | ||
@@ -1228,6 +1229,20 @@ int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata, | |||
1228 | int ieee80211_wk_cancel_remain_on_channel( | 1229 | int ieee80211_wk_cancel_remain_on_channel( |
1229 | struct ieee80211_sub_if_data *sdata, u64 cookie); | 1230 | struct ieee80211_sub_if_data *sdata, u64 cookie); |
1230 | 1231 | ||
1232 | /* channel management */ | ||
1233 | enum ieee80211_chan_mode { | ||
1234 | CHAN_MODE_UNDEFINED, | ||
1235 | CHAN_MODE_HOPPING, | ||
1236 | CHAN_MODE_FIXED, | ||
1237 | }; | ||
1238 | |||
1239 | enum ieee80211_chan_mode | ||
1240 | ieee80211_get_channel_mode(struct ieee80211_local *local, | ||
1241 | struct ieee80211_sub_if_data *ignore); | ||
1242 | bool ieee80211_set_channel_type(struct ieee80211_local *local, | ||
1243 | struct ieee80211_sub_if_data *sdata, | ||
1244 | enum nl80211_channel_type chantype); | ||
1245 | |||
1231 | #ifdef CONFIG_MAC80211_NOINLINE | 1246 | #ifdef CONFIG_MAC80211_NOINLINE |
1232 | #define debug_noinline noinline | 1247 | #define debug_noinline noinline |
1233 | #else | 1248 | #else |
diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 8d4b41787dcf..e8f6e3b252d8 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c | |||
@@ -140,7 +140,6 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key) | |||
140 | struct ieee80211_sub_if_data, | 140 | struct ieee80211_sub_if_data, |
141 | u.ap); | 141 | u.ap); |
142 | 142 | ||
143 | key->conf.ap_addr = sdata->dev->dev_addr; | ||
144 | ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf); | 143 | ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf); |
145 | 144 | ||
146 | if (!ret) { | 145 | if (!ret) { |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index bd632e1ee2c5..22a384dfab65 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -111,7 +111,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) | |||
111 | channel_type = local->tmp_channel_type; | 111 | channel_type = local->tmp_channel_type; |
112 | } else { | 112 | } else { |
113 | chan = local->oper_channel; | 113 | chan = local->oper_channel; |
114 | channel_type = local->oper_channel_type; | 114 | channel_type = local->_oper_channel_type; |
115 | } | 115 | } |
116 | 116 | ||
117 | if (chan != local->hw.conf.channel || | 117 | if (chan != local->hw.conf.channel || |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 7e93524459fc..bde81031727a 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -287,8 +287,6 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata) | |||
287 | *pos++ |= sdata->u.mesh.accepting_plinks ? | 287 | *pos++ |= sdata->u.mesh.accepting_plinks ? |
288 | MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; | 288 | MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; |
289 | *pos++ = 0x00; | 289 | *pos++ = 0x00; |
290 | |||
291 | return; | ||
292 | } | 290 | } |
293 | 291 | ||
294 | u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl) | 292 | u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl) |
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index d89ed7f2592b..0705018d8d1e 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c | |||
@@ -624,7 +624,6 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, | |||
624 | fail: | 624 | fail: |
625 | rcu_read_unlock(); | 625 | rcu_read_unlock(); |
626 | sdata->u.mesh.mshstats.dropped_frames_no_route++; | 626 | sdata->u.mesh.mshstats.dropped_frames_no_route++; |
627 | return; | ||
628 | } | 627 | } |
629 | 628 | ||
630 | static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata, | 629 | static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata, |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 358226f63b81..f803f8b72a93 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -137,11 +137,14 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, | |||
137 | struct sta_info *sta; | 137 | struct sta_info *sta; |
138 | u32 changed = 0; | 138 | u32 changed = 0; |
139 | u16 ht_opmode; | 139 | u16 ht_opmode; |
140 | bool enable_ht = true, ht_changed; | 140 | bool enable_ht = true; |
141 | enum nl80211_channel_type prev_chantype; | ||
141 | enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; | 142 | enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; |
142 | 143 | ||
143 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | 144 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; |
144 | 145 | ||
146 | prev_chantype = sdata->vif.bss_conf.channel_type; | ||
147 | |||
145 | /* HT is not supported */ | 148 | /* HT is not supported */ |
146 | if (!sband->ht_cap.ht_supported) | 149 | if (!sband->ht_cap.ht_supported) |
147 | enable_ht = false; | 150 | enable_ht = false; |
@@ -172,38 +175,37 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, | |||
172 | } | 175 | } |
173 | } | 176 | } |
174 | 177 | ||
175 | ht_changed = conf_is_ht(&local->hw.conf) != enable_ht || | ||
176 | channel_type != local->hw.conf.channel_type; | ||
177 | |||
178 | if (local->tmp_channel) | 178 | if (local->tmp_channel) |
179 | local->tmp_channel_type = channel_type; | 179 | local->tmp_channel_type = channel_type; |
180 | local->oper_channel_type = channel_type; | ||
181 | 180 | ||
182 | if (ht_changed) { | 181 | if (!ieee80211_set_channel_type(local, sdata, channel_type)) { |
183 | /* channel_type change automatically detected */ | 182 | /* can only fail due to HT40+/- mismatch */ |
184 | ieee80211_hw_config(local, 0); | 183 | channel_type = NL80211_CHAN_HT20; |
184 | WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type)); | ||
185 | } | ||
185 | 186 | ||
187 | /* channel_type change automatically detected */ | ||
188 | ieee80211_hw_config(local, 0); | ||
189 | |||
190 | if (prev_chantype != channel_type) { | ||
186 | rcu_read_lock(); | 191 | rcu_read_lock(); |
187 | sta = sta_info_get(sdata, bssid); | 192 | sta = sta_info_get(sdata, bssid); |
188 | if (sta) | 193 | if (sta) |
189 | rate_control_rate_update(local, sband, sta, | 194 | rate_control_rate_update(local, sband, sta, |
190 | IEEE80211_RC_HT_CHANGED, | 195 | IEEE80211_RC_HT_CHANGED, |
191 | local->oper_channel_type); | 196 | channel_type); |
192 | rcu_read_unlock(); | 197 | rcu_read_unlock(); |
193 | } | 198 | } |
194 | |||
195 | /* disable HT */ | ||
196 | if (!enable_ht) | ||
197 | return 0; | ||
198 | 199 | ||
199 | ht_opmode = le16_to_cpu(hti->operation_mode); | 200 | ht_opmode = le16_to_cpu(hti->operation_mode); |
200 | 201 | ||
201 | /* if bss configuration changed store the new one */ | 202 | /* if bss configuration changed store the new one */ |
202 | if (!sdata->ht_opmode_valid || | 203 | if (sdata->ht_opmode_valid != enable_ht || |
203 | sdata->vif.bss_conf.ht_operation_mode != ht_opmode) { | 204 | sdata->vif.bss_conf.ht_operation_mode != ht_opmode || |
205 | prev_chantype != channel_type) { | ||
204 | changed |= BSS_CHANGED_HT; | 206 | changed |= BSS_CHANGED_HT; |
205 | sdata->vif.bss_conf.ht_operation_mode = ht_opmode; | 207 | sdata->vif.bss_conf.ht_operation_mode = ht_opmode; |
206 | sdata->ht_opmode_valid = true; | 208 | sdata->ht_opmode_valid = enable_ht; |
207 | } | 209 | } |
208 | 210 | ||
209 | return changed; | 211 | return changed; |
@@ -340,7 +342,11 @@ static void ieee80211_chswitch_work(struct work_struct *work) | |||
340 | goto out; | 342 | goto out; |
341 | 343 | ||
342 | sdata->local->oper_channel = sdata->local->csa_channel; | 344 | sdata->local->oper_channel = sdata->local->csa_channel; |
343 | ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL); | 345 | if (!sdata->local->ops->channel_switch) { |
346 | /* call "hw_config" only if doing sw channel switch */ | ||
347 | ieee80211_hw_config(sdata->local, | ||
348 | IEEE80211_CONF_CHANGE_CHANNEL); | ||
349 | } | ||
344 | 350 | ||
345 | /* XXX: shouldn't really modify cfg80211-owned data! */ | 351 | /* XXX: shouldn't really modify cfg80211-owned data! */ |
346 | ifmgd->associated->channel = sdata->local->oper_channel; | 352 | ifmgd->associated->channel = sdata->local->oper_channel; |
@@ -352,6 +358,29 @@ static void ieee80211_chswitch_work(struct work_struct *work) | |||
352 | mutex_unlock(&ifmgd->mtx); | 358 | mutex_unlock(&ifmgd->mtx); |
353 | } | 359 | } |
354 | 360 | ||
361 | void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success) | ||
362 | { | ||
363 | struct ieee80211_sub_if_data *sdata; | ||
364 | struct ieee80211_if_managed *ifmgd; | ||
365 | |||
366 | sdata = vif_to_sdata(vif); | ||
367 | ifmgd = &sdata->u.mgd; | ||
368 | |||
369 | trace_api_chswitch_done(sdata, success); | ||
370 | if (!success) { | ||
371 | /* | ||
372 | * If the channel switch was not successful, stay | ||
373 | * around on the old channel. We currently lack | ||
374 | * good handling of this situation, possibly we | ||
375 | * should just drop the association. | ||
376 | */ | ||
377 | sdata->local->csa_channel = sdata->local->oper_channel; | ||
378 | } | ||
379 | |||
380 | ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work); | ||
381 | } | ||
382 | EXPORT_SYMBOL(ieee80211_chswitch_done); | ||
383 | |||
355 | static void ieee80211_chswitch_timer(unsigned long data) | 384 | static void ieee80211_chswitch_timer(unsigned long data) |
356 | { | 385 | { |
357 | struct ieee80211_sub_if_data *sdata = | 386 | struct ieee80211_sub_if_data *sdata = |
@@ -368,7 +397,8 @@ static void ieee80211_chswitch_timer(unsigned long data) | |||
368 | 397 | ||
369 | void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | 398 | void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, |
370 | struct ieee80211_channel_sw_ie *sw_elem, | 399 | struct ieee80211_channel_sw_ie *sw_elem, |
371 | struct ieee80211_bss *bss) | 400 | struct ieee80211_bss *bss, |
401 | u64 timestamp) | ||
372 | { | 402 | { |
373 | struct cfg80211_bss *cbss = | 403 | struct cfg80211_bss *cbss = |
374 | container_of((void *)bss, struct cfg80211_bss, priv); | 404 | container_of((void *)bss, struct cfg80211_bss, priv); |
@@ -396,10 +426,29 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
396 | 426 | ||
397 | sdata->local->csa_channel = new_ch; | 427 | sdata->local->csa_channel = new_ch; |
398 | 428 | ||
429 | if (sdata->local->ops->channel_switch) { | ||
430 | /* use driver's channel switch callback */ | ||
431 | struct ieee80211_channel_switch ch_switch; | ||
432 | memset(&ch_switch, 0, sizeof(ch_switch)); | ||
433 | ch_switch.timestamp = timestamp; | ||
434 | if (sw_elem->mode) { | ||
435 | ch_switch.block_tx = true; | ||
436 | ieee80211_stop_queues_by_reason(&sdata->local->hw, | ||
437 | IEEE80211_QUEUE_STOP_REASON_CSA); | ||
438 | } | ||
439 | ch_switch.channel = new_ch; | ||
440 | ch_switch.count = sw_elem->count; | ||
441 | ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED; | ||
442 | drv_channel_switch(sdata->local, &ch_switch); | ||
443 | return; | ||
444 | } | ||
445 | |||
446 | /* channel switch handled in software */ | ||
399 | if (sw_elem->count <= 1) { | 447 | if (sw_elem->count <= 1) { |
400 | ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work); | 448 | ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work); |
401 | } else { | 449 | } else { |
402 | ieee80211_stop_queues_by_reason(&sdata->local->hw, | 450 | if (sw_elem->mode) |
451 | ieee80211_stop_queues_by_reason(&sdata->local->hw, | ||
403 | IEEE80211_QUEUE_STOP_REASON_CSA); | 452 | IEEE80211_QUEUE_STOP_REASON_CSA); |
404 | ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED; | 453 | ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED; |
405 | mod_timer(&ifmgd->chswitch_timer, | 454 | mod_timer(&ifmgd->chswitch_timer, |
@@ -507,7 +556,7 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency) | |||
507 | s32 beaconint_us; | 556 | s32 beaconint_us; |
508 | 557 | ||
509 | if (latency < 0) | 558 | if (latency < 0) |
510 | latency = pm_qos_requirement(PM_QOS_NETWORK_LATENCY); | 559 | latency = pm_qos_request(PM_QOS_NETWORK_LATENCY); |
511 | 560 | ||
512 | beaconint_us = ieee80211_tu_to_usec( | 561 | beaconint_us = ieee80211_tu_to_usec( |
513 | found->vif.bss_conf.beacon_int); | 562 | found->vif.bss_conf.beacon_int); |
@@ -866,7 +915,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, | |||
866 | ieee80211_set_wmm_default(sdata); | 915 | ieee80211_set_wmm_default(sdata); |
867 | 916 | ||
868 | /* channel(_type) changes are handled by ieee80211_hw_config */ | 917 | /* channel(_type) changes are handled by ieee80211_hw_config */ |
869 | local->oper_channel_type = NL80211_CHAN_NO_HT; | 918 | WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT)); |
870 | 919 | ||
871 | /* on the next assoc, re-program HT parameters */ | 920 | /* on the next assoc, re-program HT parameters */ |
872 | sdata->ht_opmode_valid = false; | 921 | sdata->ht_opmode_valid = false; |
@@ -883,8 +932,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, | |||
883 | 932 | ||
884 | ieee80211_hw_config(local, config_changed); | 933 | ieee80211_hw_config(local, config_changed); |
885 | 934 | ||
886 | /* And the BSSID changed -- not very interesting here */ | 935 | /* The BSSID (not really interesting) and HT changed */ |
887 | changed |= BSS_CHANGED_BSSID; | 936 | changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT; |
888 | ieee80211_bss_info_change_notify(sdata, changed); | 937 | ieee80211_bss_info_change_notify(sdata, changed); |
889 | 938 | ||
890 | if (remove_sta) | 939 | if (remove_sta) |
@@ -1315,7 +1364,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, | |||
1315 | ETH_ALEN) == 0)) { | 1364 | ETH_ALEN) == 0)) { |
1316 | struct ieee80211_channel_sw_ie *sw_elem = | 1365 | struct ieee80211_channel_sw_ie *sw_elem = |
1317 | (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem; | 1366 | (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem; |
1318 | ieee80211_sta_process_chanswitch(sdata, sw_elem, bss); | 1367 | ieee80211_sta_process_chanswitch(sdata, sw_elem, |
1368 | bss, rx_status->mactime); | ||
1319 | } | 1369 | } |
1320 | } | 1370 | } |
1321 | 1371 | ||
@@ -1642,13 +1692,52 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
1642 | rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); | 1692 | rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); |
1643 | break; | 1693 | break; |
1644 | case IEEE80211_STYPE_ACTION: | 1694 | case IEEE80211_STYPE_ACTION: |
1645 | if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) | 1695 | switch (mgmt->u.action.category) { |
1696 | case WLAN_CATEGORY_BACK: { | ||
1697 | struct ieee80211_local *local = sdata->local; | ||
1698 | int len = skb->len; | ||
1699 | struct sta_info *sta; | ||
1700 | |||
1701 | rcu_read_lock(); | ||
1702 | sta = sta_info_get(sdata, mgmt->sa); | ||
1703 | if (!sta) { | ||
1704 | rcu_read_unlock(); | ||
1705 | break; | ||
1706 | } | ||
1707 | |||
1708 | local_bh_disable(); | ||
1709 | |||
1710 | switch (mgmt->u.action.u.addba_req.action_code) { | ||
1711 | case WLAN_ACTION_ADDBA_REQ: | ||
1712 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1713 | sizeof(mgmt->u.action.u.addba_req))) | ||
1714 | break; | ||
1715 | ieee80211_process_addba_request(local, sta, mgmt, len); | ||
1716 | break; | ||
1717 | case WLAN_ACTION_ADDBA_RESP: | ||
1718 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1719 | sizeof(mgmt->u.action.u.addba_resp))) | ||
1720 | break; | ||
1721 | ieee80211_process_addba_resp(local, sta, mgmt, len); | ||
1722 | break; | ||
1723 | case WLAN_ACTION_DELBA: | ||
1724 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1725 | sizeof(mgmt->u.action.u.delba))) | ||
1726 | break; | ||
1727 | ieee80211_process_delba(sdata, sta, mgmt, len); | ||
1728 | break; | ||
1729 | } | ||
1730 | local_bh_enable(); | ||
1731 | rcu_read_unlock(); | ||
1646 | break; | 1732 | break; |
1647 | 1733 | } | |
1648 | ieee80211_sta_process_chanswitch(sdata, | 1734 | case WLAN_CATEGORY_SPECTRUM_MGMT: |
1649 | &mgmt->u.action.u.chan_switch.sw_elem, | 1735 | ieee80211_sta_process_chanswitch(sdata, |
1650 | (void *)ifmgd->associated->priv); | 1736 | &mgmt->u.action.u.chan_switch.sw_elem, |
1651 | break; | 1737 | (void *)ifmgd->associated->priv, |
1738 | rx_status->mactime); | ||
1739 | break; | ||
1740 | } | ||
1652 | } | 1741 | } |
1653 | mutex_unlock(&ifmgd->mtx); | 1742 | mutex_unlock(&ifmgd->mtx); |
1654 | 1743 | ||
@@ -1671,9 +1760,45 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
1671 | mutex_unlock(&ifmgd->mtx); | 1760 | mutex_unlock(&ifmgd->mtx); |
1672 | 1761 | ||
1673 | if (skb->len >= 24 + 2 /* mgmt + deauth reason */ && | 1762 | if (skb->len >= 24 + 2 /* mgmt + deauth reason */ && |
1674 | (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) | 1763 | (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) { |
1675 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); | 1764 | struct ieee80211_local *local = sdata->local; |
1765 | struct ieee80211_work *wk; | ||
1766 | |||
1767 | mutex_lock(&local->work_mtx); | ||
1768 | list_for_each_entry(wk, &local->work_list, list) { | ||
1769 | if (wk->sdata != sdata) | ||
1770 | continue; | ||
1771 | |||
1772 | if (wk->type != IEEE80211_WORK_ASSOC) | ||
1773 | continue; | ||
1774 | |||
1775 | if (memcmp(mgmt->bssid, wk->filter_ta, ETH_ALEN)) | ||
1776 | continue; | ||
1777 | if (memcmp(mgmt->sa, wk->filter_ta, ETH_ALEN)) | ||
1778 | continue; | ||
1676 | 1779 | ||
1780 | /* | ||
1781 | * Printing the message only here means we can't | ||
1782 | * spuriously print it, but it also means that it | ||
1783 | * won't be printed when the frame comes in before | ||
1784 | * we even tried to associate or in similar cases. | ||
1785 | * | ||
1786 | * Ultimately, I suspect cfg80211 should print the | ||
1787 | * messages instead. | ||
1788 | */ | ||
1789 | printk(KERN_DEBUG | ||
1790 | "%s: deauthenticated from %pM (Reason: %u)\n", | ||
1791 | sdata->name, mgmt->bssid, | ||
1792 | le16_to_cpu(mgmt->u.deauth.reason_code)); | ||
1793 | |||
1794 | list_del_rcu(&wk->list); | ||
1795 | free_work(wk); | ||
1796 | break; | ||
1797 | } | ||
1798 | mutex_unlock(&local->work_mtx); | ||
1799 | |||
1800 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); | ||
1801 | } | ||
1677 | out: | 1802 | out: |
1678 | kfree_skb(skb); | 1803 | kfree_skb(skb); |
1679 | } | 1804 | } |
@@ -2176,7 +2301,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, | |||
2176 | continue; | 2301 | continue; |
2177 | 2302 | ||
2178 | if (wk->type != IEEE80211_WORK_DIRECT_PROBE && | 2303 | if (wk->type != IEEE80211_WORK_DIRECT_PROBE && |
2179 | wk->type != IEEE80211_WORK_AUTH) | 2304 | wk->type != IEEE80211_WORK_AUTH && |
2305 | wk->type != IEEE80211_WORK_ASSOC) | ||
2180 | continue; | 2306 | continue; |
2181 | 2307 | ||
2182 | if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN)) | 2308 | if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN)) |
@@ -2266,7 +2392,7 @@ int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata, | |||
2266 | if ((chan != local->tmp_channel || | 2392 | if ((chan != local->tmp_channel || |
2267 | channel_type != local->tmp_channel_type) && | 2393 | channel_type != local->tmp_channel_type) && |
2268 | (chan != local->oper_channel || | 2394 | (chan != local->oper_channel || |
2269 | channel_type != local->oper_channel_type)) | 2395 | channel_type != local->_oper_channel_type)) |
2270 | return -EBUSY; | 2396 | return -EBUSY; |
2271 | 2397 | ||
2272 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + len); | 2398 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + len); |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 9a08f2c446c6..be9abc2e6348 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -1253,6 +1253,12 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
1253 | if (skb_linearize(rx->skb)) | 1253 | if (skb_linearize(rx->skb)) |
1254 | return RX_DROP_UNUSABLE; | 1254 | return RX_DROP_UNUSABLE; |
1255 | 1255 | ||
1256 | /* | ||
1257 | * skb_linearize() might change the skb->data and | ||
1258 | * previously cached variables (in this case, hdr) need to | ||
1259 | * be refreshed with the new data. | ||
1260 | */ | ||
1261 | hdr = (struct ieee80211_hdr *)rx->skb->data; | ||
1256 | seq = (sc & IEEE80211_SCTL_SEQ) >> 4; | 1262 | seq = (sc & IEEE80211_SCTL_SEQ) >> 4; |
1257 | 1263 | ||
1258 | if (frag == 0) { | 1264 | if (frag == 0) { |
@@ -1812,17 +1818,26 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) | |||
1812 | return RX_CONTINUE; | 1818 | return RX_CONTINUE; |
1813 | 1819 | ||
1814 | if (ieee80211_is_back_req(bar->frame_control)) { | 1820 | if (ieee80211_is_back_req(bar->frame_control)) { |
1821 | struct { | ||
1822 | __le16 control, start_seq_num; | ||
1823 | } __packed bar_data; | ||
1824 | |||
1815 | if (!rx->sta) | 1825 | if (!rx->sta) |
1816 | return RX_DROP_MONITOR; | 1826 | return RX_DROP_MONITOR; |
1827 | |||
1828 | if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), | ||
1829 | &bar_data, sizeof(bar_data))) | ||
1830 | return RX_DROP_MONITOR; | ||
1831 | |||
1817 | spin_lock(&rx->sta->lock); | 1832 | spin_lock(&rx->sta->lock); |
1818 | tid = le16_to_cpu(bar->control) >> 12; | 1833 | tid = le16_to_cpu(bar_data.control) >> 12; |
1819 | if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) { | 1834 | if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) { |
1820 | spin_unlock(&rx->sta->lock); | 1835 | spin_unlock(&rx->sta->lock); |
1821 | return RX_DROP_MONITOR; | 1836 | return RX_DROP_MONITOR; |
1822 | } | 1837 | } |
1823 | tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid]; | 1838 | tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid]; |
1824 | 1839 | ||
1825 | start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4; | 1840 | start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; |
1826 | 1841 | ||
1827 | /* reset session timer */ | 1842 | /* reset session timer */ |
1828 | if (tid_agg_rx->timeout) | 1843 | if (tid_agg_rx->timeout) |
@@ -1929,6 +1944,9 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
1929 | if (len < IEEE80211_MIN_ACTION_SIZE + 1) | 1944 | if (len < IEEE80211_MIN_ACTION_SIZE + 1) |
1930 | break; | 1945 | break; |
1931 | 1946 | ||
1947 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | ||
1948 | return ieee80211_sta_rx_mgmt(sdata, rx->skb); | ||
1949 | |||
1932 | switch (mgmt->u.action.u.addba_req.action_code) { | 1950 | switch (mgmt->u.action.u.addba_req.action_code) { |
1933 | case WLAN_ACTION_ADDBA_REQ: | 1951 | case WLAN_ACTION_ADDBA_REQ: |
1934 | if (len < (IEEE80211_MIN_ACTION_SIZE + | 1952 | if (len < (IEEE80211_MIN_ACTION_SIZE + |
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index e14c44195ae9..e1b0be7a57b9 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c | |||
@@ -510,7 +510,7 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local, | |||
510 | bad_latency = time_after(jiffies + | 510 | bad_latency = time_after(jiffies + |
511 | ieee80211_scan_get_channel_time(next_chan), | 511 | ieee80211_scan_get_channel_time(next_chan), |
512 | local->leave_oper_channel_time + | 512 | local->leave_oper_channel_time + |
513 | usecs_to_jiffies(pm_qos_requirement(PM_QOS_NETWORK_LATENCY))); | 513 | usecs_to_jiffies(pm_qos_request(PM_QOS_NETWORK_LATENCY))); |
514 | 514 | ||
515 | listen_int_exceeded = time_after(jiffies + | 515 | listen_int_exceeded = time_after(jiffies + |
516 | ieee80211_scan_get_channel_time(next_chan), | 516 | ieee80211_scan_get_channel_time(next_chan), |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 730197591ab5..ba9360a475b0 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -259,7 +259,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | |||
259 | skb_queue_head_init(&sta->tx_filtered); | 259 | skb_queue_head_init(&sta->tx_filtered); |
260 | 260 | ||
261 | for (i = 0; i < NUM_RX_DATA_QUEUES; i++) | 261 | for (i = 0; i < NUM_RX_DATA_QUEUES; i++) |
262 | sta->last_seq_ctrl[i] = cpu_to_le16(USHORT_MAX); | 262 | sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX); |
263 | 263 | ||
264 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 264 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
265 | printk(KERN_DEBUG "%s: Allocated STA %pM\n", | 265 | printk(KERN_DEBUG "%s: Allocated STA %pM\n", |
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 48a5e80957f0..df9d45544ca5 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -145,7 +145,7 @@ enum plink_state { | |||
145 | /** | 145 | /** |
146 | * struct sta_ampdu_mlme - STA aggregation information. | 146 | * struct sta_ampdu_mlme - STA aggregation information. |
147 | * | 147 | * |
148 | * @tid_state_rx: TID's state in Rx session state machine. | 148 | * @tid_active_rx: TID's state in Rx session state machine. |
149 | * @tid_rx: aggregation info for Rx per TID | 149 | * @tid_rx: aggregation info for Rx per TID |
150 | * @tid_state_tx: TID's state in Tx session state machine. | 150 | * @tid_state_tx: TID's state in Tx session state machine. |
151 | * @tid_tx: aggregation info for Tx per TID | 151 | * @tid_tx: aggregation info for Tx per TID |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index f3841f43249e..680bcb7093db 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -2251,8 +2251,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, | |||
2251 | 2251 | ||
2252 | info->control.vif = vif; | 2252 | info->control.vif = vif; |
2253 | 2253 | ||
2254 | info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; | 2254 | info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT | |
2255 | info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; | 2255 | IEEE80211_TX_CTL_ASSIGN_SEQ | |
2256 | IEEE80211_TX_CTL_FIRST_FRAGMENT; | ||
2256 | out: | 2257 | out: |
2257 | rcu_read_unlock(); | 2258 | rcu_read_unlock(); |
2258 | return skb; | 2259 | return skb; |
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 2b75b4fb68f4..5b79d552780a 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -1160,18 +1160,33 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1160 | 1160 | ||
1161 | /* Finally also reconfigure all the BSS information */ | 1161 | /* Finally also reconfigure all the BSS information */ |
1162 | list_for_each_entry(sdata, &local->interfaces, list) { | 1162 | list_for_each_entry(sdata, &local->interfaces, list) { |
1163 | u32 changed = ~0; | 1163 | u32 changed; |
1164 | |||
1164 | if (!ieee80211_sdata_running(sdata)) | 1165 | if (!ieee80211_sdata_running(sdata)) |
1165 | continue; | 1166 | continue; |
1167 | |||
1168 | /* common change flags for all interface types */ | ||
1169 | changed = BSS_CHANGED_ERP_CTS_PROT | | ||
1170 | BSS_CHANGED_ERP_PREAMBLE | | ||
1171 | BSS_CHANGED_ERP_SLOT | | ||
1172 | BSS_CHANGED_HT | | ||
1173 | BSS_CHANGED_BASIC_RATES | | ||
1174 | BSS_CHANGED_BEACON_INT | | ||
1175 | BSS_CHANGED_BSSID | | ||
1176 | BSS_CHANGED_CQM; | ||
1177 | |||
1166 | switch (sdata->vif.type) { | 1178 | switch (sdata->vif.type) { |
1167 | case NL80211_IFTYPE_STATION: | 1179 | case NL80211_IFTYPE_STATION: |
1168 | /* disable beacon change bits */ | 1180 | changed |= BSS_CHANGED_ASSOC; |
1169 | changed &= ~(BSS_CHANGED_BEACON | | 1181 | ieee80211_bss_info_change_notify(sdata, changed); |
1170 | BSS_CHANGED_BEACON_ENABLED); | 1182 | break; |
1171 | /* fall through */ | ||
1172 | case NL80211_IFTYPE_ADHOC: | 1183 | case NL80211_IFTYPE_ADHOC: |
1184 | changed |= BSS_CHANGED_IBSS; | ||
1185 | /* fall through */ | ||
1173 | case NL80211_IFTYPE_AP: | 1186 | case NL80211_IFTYPE_AP: |
1174 | case NL80211_IFTYPE_MESH_POINT: | 1187 | case NL80211_IFTYPE_MESH_POINT: |
1188 | changed |= BSS_CHANGED_BEACON | | ||
1189 | BSS_CHANGED_BEACON_ENABLED; | ||
1175 | ieee80211_bss_info_change_notify(sdata, changed); | 1190 | ieee80211_bss_info_change_notify(sdata, changed); |
1176 | break; | 1191 | break; |
1177 | case NL80211_IFTYPE_WDS: | 1192 | case NL80211_IFTYPE_WDS: |
diff --git a/net/mac80211/work.c b/net/mac80211/work.c index 3dd07600199d..be3d4a698692 100644 --- a/net/mac80211/work.c +++ b/net/mac80211/work.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #define IEEE80211_MAX_PROBE_TRIES 5 | 33 | #define IEEE80211_MAX_PROBE_TRIES 5 |
34 | 34 | ||
35 | enum work_action { | 35 | enum work_action { |
36 | WORK_ACT_MISMATCH, | ||
36 | WORK_ACT_NONE, | 37 | WORK_ACT_NONE, |
37 | WORK_ACT_TIMEOUT, | 38 | WORK_ACT_TIMEOUT, |
38 | WORK_ACT_DONE, | 39 | WORK_ACT_DONE, |
@@ -585,7 +586,7 @@ ieee80211_rx_mgmt_auth(struct ieee80211_work *wk, | |||
585 | u16 auth_alg, auth_transaction, status_code; | 586 | u16 auth_alg, auth_transaction, status_code; |
586 | 587 | ||
587 | if (wk->type != IEEE80211_WORK_AUTH) | 588 | if (wk->type != IEEE80211_WORK_AUTH) |
588 | return WORK_ACT_NONE; | 589 | return WORK_ACT_MISMATCH; |
589 | 590 | ||
590 | if (len < 24 + 6) | 591 | if (len < 24 + 6) |
591 | return WORK_ACT_NONE; | 592 | return WORK_ACT_NONE; |
@@ -636,6 +637,9 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_work *wk, | |||
636 | struct ieee802_11_elems elems; | 637 | struct ieee802_11_elems elems; |
637 | u8 *pos; | 638 | u8 *pos; |
638 | 639 | ||
640 | if (wk->type != IEEE80211_WORK_ASSOC) | ||
641 | return WORK_ACT_MISMATCH; | ||
642 | |||
639 | /* | 643 | /* |
640 | * AssocResp and ReassocResp have identical structure, so process both | 644 | * AssocResp and ReassocResp have identical structure, so process both |
641 | * of them in this function. | 645 | * of them in this function. |
@@ -691,6 +695,12 @@ ieee80211_rx_mgmt_probe_resp(struct ieee80211_work *wk, | |||
691 | 695 | ||
692 | ASSERT_WORK_MTX(local); | 696 | ASSERT_WORK_MTX(local); |
693 | 697 | ||
698 | if (wk->type != IEEE80211_WORK_DIRECT_PROBE) | ||
699 | return WORK_ACT_MISMATCH; | ||
700 | |||
701 | if (len < 24 + 12) | ||
702 | return WORK_ACT_NONE; | ||
703 | |||
694 | baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; | 704 | baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; |
695 | if (baselen > len) | 705 | if (baselen > len) |
696 | return WORK_ACT_NONE; | 706 | return WORK_ACT_NONE; |
@@ -705,7 +715,7 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local, | |||
705 | struct ieee80211_rx_status *rx_status; | 715 | struct ieee80211_rx_status *rx_status; |
706 | struct ieee80211_mgmt *mgmt; | 716 | struct ieee80211_mgmt *mgmt; |
707 | struct ieee80211_work *wk; | 717 | struct ieee80211_work *wk; |
708 | enum work_action rma = WORK_ACT_NONE; | 718 | enum work_action rma; |
709 | u16 fc; | 719 | u16 fc; |
710 | 720 | ||
711 | rx_status = (struct ieee80211_rx_status *) skb->cb; | 721 | rx_status = (struct ieee80211_rx_status *) skb->cb; |
@@ -752,7 +762,17 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local, | |||
752 | break; | 762 | break; |
753 | default: | 763 | default: |
754 | WARN_ON(1); | 764 | WARN_ON(1); |
765 | rma = WORK_ACT_NONE; | ||
755 | } | 766 | } |
767 | |||
768 | /* | ||
769 | * We've either received an unexpected frame, or we have | ||
770 | * multiple work items and need to match the frame to the | ||
771 | * right one. | ||
772 | */ | ||
773 | if (rma == WORK_ACT_MISMATCH) | ||
774 | continue; | ||
775 | |||
756 | /* | 776 | /* |
757 | * We've processed this frame for that work, so it can't | 777 | * We've processed this frame for that work, so it can't |
758 | * belong to another work struct. | 778 | * belong to another work struct. |
@@ -762,6 +782,9 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local, | |||
762 | } | 782 | } |
763 | 783 | ||
764 | switch (rma) { | 784 | switch (rma) { |
785 | case WORK_ACT_MISMATCH: | ||
786 | /* ignore this unmatched frame */ | ||
787 | break; | ||
765 | case WORK_ACT_NONE: | 788 | case WORK_ACT_NONE: |
766 | break; | 789 | break; |
767 | case WORK_ACT_DONE: | 790 | case WORK_ACT_DONE: |
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 18d77b5c351a..8593a77cfea9 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
@@ -314,8 +314,39 @@ config NETFILTER_XTABLES | |||
314 | 314 | ||
315 | if NETFILTER_XTABLES | 315 | if NETFILTER_XTABLES |
316 | 316 | ||
317 | comment "Xtables combined modules" | ||
318 | |||
319 | config NETFILTER_XT_MARK | ||
320 | tristate 'nfmark target and match support' | ||
321 | default m if NETFILTER_ADVANCED=n | ||
322 | ---help--- | ||
323 | This option adds the "MARK" target and "mark" match. | ||
324 | |||
325 | Netfilter mark matching allows you to match packets based on the | ||
326 | "nfmark" value in the packet. | ||
327 | The target allows you to create rules in the "mangle" table which alter | ||
328 | the netfilter mark (nfmark) field associated with the packet. | ||
329 | |||
330 | Prior to routing, the nfmark can influence the routing method (see | ||
331 | "Use netfilter MARK value as routing key") and can also be used by | ||
332 | other subsystems to change their behavior. | ||
333 | |||
334 | config NETFILTER_XT_CONNMARK | ||
335 | tristate 'ctmark target and match support' | ||
336 | depends on NF_CONNTRACK | ||
337 | depends on NETFILTER_ADVANCED | ||
338 | select NF_CONNTRACK_MARK | ||
339 | ---help--- | ||
340 | This option adds the "CONNMARK" target and "connmark" match. | ||
341 | |||
342 | Netfilter allows you to store a mark value per connection (a.k.a. | ||
343 | ctmark), similarly to the packet mark (nfmark). Using this | ||
344 | target and match, you can set and match on this mark. | ||
345 | |||
317 | # alphabetically ordered list of targets | 346 | # alphabetically ordered list of targets |
318 | 347 | ||
348 | comment "Xtables targets" | ||
349 | |||
319 | config NETFILTER_XT_TARGET_CLASSIFY | 350 | config NETFILTER_XT_TARGET_CLASSIFY |
320 | tristate '"CLASSIFY" target support' | 351 | tristate '"CLASSIFY" target support' |
321 | depends on NETFILTER_ADVANCED | 352 | depends on NETFILTER_ADVANCED |
@@ -332,15 +363,11 @@ config NETFILTER_XT_TARGET_CONNMARK | |||
332 | tristate '"CONNMARK" target support' | 363 | tristate '"CONNMARK" target support' |
333 | depends on NF_CONNTRACK | 364 | depends on NF_CONNTRACK |
334 | depends on NETFILTER_ADVANCED | 365 | depends on NETFILTER_ADVANCED |
335 | select NF_CONNTRACK_MARK | 366 | select NETFILTER_XT_CONNMARK |
336 | help | 367 | ---help--- |
337 | This option adds a `CONNMARK' target, which allows one to manipulate | 368 | This is a backwards-compat option for the user's convenience |
338 | the connection mark value. Similar to the MARK target, but | 369 | (e.g. when running oldconfig). It selects |
339 | affects the connection mark value rather than the packet mark value. | 370 | CONFIG_NETFILTER_XT_CONNMARK (combined connmark/CONNMARK module). |
340 | |||
341 | If you want to compile it as a module, say M here and read | ||
342 | <file:Documentation/kbuild/modules.txt>. The module will be called | ||
343 | ipt_CONNMARK. If unsure, say `N'. | ||
344 | 371 | ||
345 | config NETFILTER_XT_TARGET_CONNSECMARK | 372 | config NETFILTER_XT_TARGET_CONNSECMARK |
346 | tristate '"CONNSECMARK" target support' | 373 | tristate '"CONNSECMARK" target support' |
@@ -423,16 +450,12 @@ config NETFILTER_XT_TARGET_LED | |||
423 | 450 | ||
424 | config NETFILTER_XT_TARGET_MARK | 451 | config NETFILTER_XT_TARGET_MARK |
425 | tristate '"MARK" target support' | 452 | tristate '"MARK" target support' |
426 | default m if NETFILTER_ADVANCED=n | 453 | depends on NETFILTER_ADVANCED |
427 | help | 454 | select NETFILTER_XT_MARK |
428 | This option adds a `MARK' target, which allows you to create rules | 455 | ---help--- |
429 | in the `mangle' table which alter the netfilter mark (nfmark) field | 456 | This is a backwards-compat option for the user's convenience |
430 | associated with the packet prior to routing. This can change | 457 | (e.g. when running oldconfig). It selects |
431 | the routing method (see `Use netfilter MARK value as routing | 458 | CONFIG_NETFILTER_XT_MARK (combined mark/MARK module). |
432 | key') and can also be used by other subsystems to change their | ||
433 | behavior. | ||
434 | |||
435 | To compile it as a module, choose M here. If unsure, say N. | ||
436 | 459 | ||
437 | config NETFILTER_XT_TARGET_NFLOG | 460 | config NETFILTER_XT_TARGET_NFLOG |
438 | tristate '"NFLOG" target support' | 461 | tristate '"NFLOG" target support' |
@@ -479,6 +502,15 @@ config NETFILTER_XT_TARGET_RATEEST | |||
479 | 502 | ||
480 | To compile it as a module, choose M here. If unsure, say N. | 503 | To compile it as a module, choose M here. If unsure, say N. |
481 | 504 | ||
505 | config NETFILTER_XT_TARGET_TEE | ||
506 | tristate '"TEE" - packet cloning to alternate destiantion' | ||
507 | depends on NETFILTER_ADVANCED | ||
508 | depends on (IPV6 || IPV6=n) | ||
509 | depends on !NF_CONNTRACK || NF_CONNTRACK | ||
510 | ---help--- | ||
511 | This option adds a "TEE" target with which a packet can be cloned and | ||
512 | this clone be rerouted to another nexthop. | ||
513 | |||
482 | config NETFILTER_XT_TARGET_TPROXY | 514 | config NETFILTER_XT_TARGET_TPROXY |
483 | tristate '"TPROXY" target support (EXPERIMENTAL)' | 515 | tristate '"TPROXY" target support (EXPERIMENTAL)' |
484 | depends on EXPERIMENTAL | 516 | depends on EXPERIMENTAL |
@@ -552,6 +584,10 @@ config NETFILTER_XT_TARGET_TCPOPTSTRIP | |||
552 | This option adds a "TCPOPTSTRIP" target, which allows you to strip | 584 | This option adds a "TCPOPTSTRIP" target, which allows you to strip |
553 | TCP options from TCP packets. | 585 | TCP options from TCP packets. |
554 | 586 | ||
587 | # alphabetically ordered list of matches | ||
588 | |||
589 | comment "Xtables matches" | ||
590 | |||
555 | config NETFILTER_XT_MATCH_CLUSTER | 591 | config NETFILTER_XT_MATCH_CLUSTER |
556 | tristate '"cluster" match support' | 592 | tristate '"cluster" match support' |
557 | depends on NF_CONNTRACK | 593 | depends on NF_CONNTRACK |
@@ -602,14 +638,11 @@ config NETFILTER_XT_MATCH_CONNMARK | |||
602 | tristate '"connmark" connection mark match support' | 638 | tristate '"connmark" connection mark match support' |
603 | depends on NF_CONNTRACK | 639 | depends on NF_CONNTRACK |
604 | depends on NETFILTER_ADVANCED | 640 | depends on NETFILTER_ADVANCED |
605 | select NF_CONNTRACK_MARK | 641 | select NETFILTER_XT_CONNMARK |
606 | help | 642 | ---help--- |
607 | This option adds a `connmark' match, which allows you to match the | 643 | This is a backwards-compat option for the user's convenience |
608 | connection mark value previously set for the session by `CONNMARK'. | 644 | (e.g. when running oldconfig). It selects |
609 | 645 | CONFIG_NETFILTER_XT_CONNMARK (combined connmark/CONNMARK module). | |
610 | If you want to compile it as a module, say M here and read | ||
611 | <file:Documentation/kbuild/modules.txt>. The module will be called | ||
612 | ipt_connmark. If unsure, say `N'. | ||
613 | 646 | ||
614 | config NETFILTER_XT_MATCH_CONNTRACK | 647 | config NETFILTER_XT_MATCH_CONNTRACK |
615 | tristate '"conntrack" connection tracking match support' | 648 | tristate '"conntrack" connection tracking match support' |
@@ -733,13 +766,12 @@ config NETFILTER_XT_MATCH_MAC | |||
733 | 766 | ||
734 | config NETFILTER_XT_MATCH_MARK | 767 | config NETFILTER_XT_MATCH_MARK |
735 | tristate '"mark" match support' | 768 | tristate '"mark" match support' |
736 | default m if NETFILTER_ADVANCED=n | 769 | depends on NETFILTER_ADVANCED |
737 | help | 770 | select NETFILTER_XT_MARK |
738 | Netfilter mark matching allows you to match packets based on the | 771 | ---help--- |
739 | `nfmark' value in the packet. This can be set by the MARK target | 772 | This is a backwards-compat option for the user's convenience |
740 | (see below). | 773 | (e.g. when running oldconfig). It selects |
741 | 774 | CONFIG_NETFILTER_XT_MARK (combined mark/MARK module). | |
742 | To compile it as a module, choose M here. If unsure, say N. | ||
743 | 775 | ||
744 | config NETFILTER_XT_MATCH_MULTIPORT | 776 | config NETFILTER_XT_MATCH_MULTIPORT |
745 | tristate '"multiport" Multiple port match support' | 777 | tristate '"multiport" Multiple port match support' |
@@ -751,6 +783,19 @@ config NETFILTER_XT_MATCH_MULTIPORT | |||
751 | 783 | ||
752 | To compile it as a module, choose M here. If unsure, say N. | 784 | To compile it as a module, choose M here. If unsure, say N. |
753 | 785 | ||
786 | config NETFILTER_XT_MATCH_OSF | ||
787 | tristate '"osf" Passive OS fingerprint match' | ||
788 | depends on NETFILTER_ADVANCED && NETFILTER_NETLINK | ||
789 | help | ||
790 | This option selects the Passive OS Fingerprinting match module | ||
791 | that allows to passively match the remote operating system by | ||
792 | analyzing incoming TCP SYN packets. | ||
793 | |||
794 | Rules and loading software can be downloaded from | ||
795 | http://www.ioremap.net/projects/osf | ||
796 | |||
797 | To compile it as a module, choose M here. If unsure, say N. | ||
798 | |||
754 | config NETFILTER_XT_MATCH_OWNER | 799 | config NETFILTER_XT_MATCH_OWNER |
755 | tristate '"owner" match support' | 800 | tristate '"owner" match support' |
756 | depends on NETFILTER_ADVANCED | 801 | depends on NETFILTER_ADVANCED |
@@ -836,13 +881,6 @@ config NETFILTER_XT_MATCH_RECENT | |||
836 | Short options are available by using 'iptables -m recent -h' | 881 | Short options are available by using 'iptables -m recent -h' |
837 | Official Website: <http://snowman.net/projects/ipt_recent/> | 882 | Official Website: <http://snowman.net/projects/ipt_recent/> |
838 | 883 | ||
839 | config NETFILTER_XT_MATCH_RECENT_PROC_COMPAT | ||
840 | bool 'Enable obsolete /proc/net/ipt_recent' | ||
841 | depends on NETFILTER_XT_MATCH_RECENT && PROC_FS | ||
842 | ---help--- | ||
843 | This option enables the old /proc/net/ipt_recent interface, | ||
844 | which has been obsoleted by /proc/net/xt_recent. | ||
845 | |||
846 | config NETFILTER_XT_MATCH_SCTP | 884 | config NETFILTER_XT_MATCH_SCTP |
847 | tristate '"sctp" protocol match support (EXPERIMENTAL)' | 885 | tristate '"sctp" protocol match support (EXPERIMENTAL)' |
848 | depends on EXPERIMENTAL | 886 | depends on EXPERIMENTAL |
@@ -942,19 +980,6 @@ config NETFILTER_XT_MATCH_U32 | |||
942 | 980 | ||
943 | Details and examples are in the kernel module source. | 981 | Details and examples are in the kernel module source. |
944 | 982 | ||
945 | config NETFILTER_XT_MATCH_OSF | ||
946 | tristate '"osf" Passive OS fingerprint match' | ||
947 | depends on NETFILTER_ADVANCED && NETFILTER_NETLINK | ||
948 | help | ||
949 | This option selects the Passive OS Fingerprinting match module | ||
950 | that allows to passively match the remote operating system by | ||
951 | analyzing incoming TCP SYN packets. | ||
952 | |||
953 | Rules and loading software can be downloaded from | ||
954 | http://www.ioremap.net/projects/osf | ||
955 | |||
956 | To compile it as a module, choose M here. If unsure, say N. | ||
957 | |||
958 | endif # NETFILTER_XTABLES | 983 | endif # NETFILTER_XTABLES |
959 | 984 | ||
960 | endmenu | 985 | endmenu |
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index f873644f02f6..14e3a8fd8180 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile | |||
@@ -40,15 +40,17 @@ obj-$(CONFIG_NETFILTER_TPROXY) += nf_tproxy_core.o | |||
40 | # generic X tables | 40 | # generic X tables |
41 | obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o | 41 | obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o |
42 | 42 | ||
43 | # combos | ||
44 | obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o | ||
45 | obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o | ||
46 | |||
43 | # targets | 47 | # targets |
44 | obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o | 48 | obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o |
45 | obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o | ||
46 | obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o | 49 | obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o |
47 | obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o | 50 | obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o |
48 | obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o | 51 | obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o |
49 | obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o | 52 | obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o |
50 | obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o | 53 | obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o |
51 | obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o | ||
52 | obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o | 54 | obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o |
53 | obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o | 55 | obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o |
54 | obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o | 56 | obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o |
@@ -57,6 +59,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o | |||
57 | obj-$(CONFIG_NETFILTER_XT_TARGET_TPROXY) += xt_TPROXY.o | 59 | obj-$(CONFIG_NETFILTER_XT_TARGET_TPROXY) += xt_TPROXY.o |
58 | obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o | 60 | obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o |
59 | obj-$(CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP) += xt_TCPOPTSTRIP.o | 61 | obj-$(CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP) += xt_TCPOPTSTRIP.o |
62 | obj-$(CONFIG_NETFILTER_XT_TARGET_TEE) += xt_TEE.o | ||
60 | obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o | 63 | obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o |
61 | 64 | ||
62 | # matches | 65 | # matches |
@@ -64,7 +67,6 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CLUSTER) += xt_cluster.o | |||
64 | obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o | 67 | obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o |
65 | obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o | 68 | obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o |
66 | obj-$(CONFIG_NETFILTER_XT_MATCH_CONNLIMIT) += xt_connlimit.o | 69 | obj-$(CONFIG_NETFILTER_XT_MATCH_CONNLIMIT) += xt_connlimit.o |
67 | obj-$(CONFIG_NETFILTER_XT_MATCH_CONNMARK) += xt_connmark.o | ||
68 | obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o | 70 | obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o |
69 | obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o | 71 | obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o |
70 | obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o | 72 | obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o |
@@ -76,7 +78,6 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_IPRANGE) += xt_iprange.o | |||
76 | obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o | 78 | obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o |
77 | obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o | 79 | obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o |
78 | obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o | 80 | obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o |
79 | obj-$(CONFIG_NETFILTER_XT_MATCH_MARK) += xt_mark.o | ||
80 | obj-$(CONFIG_NETFILTER_XT_MATCH_MULTIPORT) += xt_multiport.o | 81 | obj-$(CONFIG_NETFILTER_XT_MATCH_MULTIPORT) += xt_multiport.o |
81 | obj-$(CONFIG_NETFILTER_XT_MATCH_OSF) += xt_osf.o | 82 | obj-$(CONFIG_NETFILTER_XT_MATCH_OSF) += xt_osf.o |
82 | obj-$(CONFIG_NETFILTER_XT_MATCH_OWNER) += xt_owner.o | 83 | obj-$(CONFIG_NETFILTER_XT_MATCH_OWNER) += xt_owner.o |
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index 2c7f185dfae4..2ae747a376a5 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c | |||
@@ -209,8 +209,14 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
209 | */ | 209 | */ |
210 | from.ip = n_cp->vaddr.ip; | 210 | from.ip = n_cp->vaddr.ip; |
211 | port = n_cp->vport; | 211 | port = n_cp->vport; |
212 | sprintf(buf, "%u,%u,%u,%u,%u,%u", NIPQUAD(from.ip), | 212 | snprintf(buf, sizeof(buf), "%u,%u,%u,%u,%u,%u", |
213 | (ntohs(port)>>8)&255, ntohs(port)&255); | 213 | ((unsigned char *)&from.ip)[0], |
214 | ((unsigned char *)&from.ip)[1], | ||
215 | ((unsigned char *)&from.ip)[2], | ||
216 | ((unsigned char *)&from.ip)[3], | ||
217 | ntohs(port) >> 8, | ||
218 | ntohs(port) & 0xFF); | ||
219 | |||
214 | buf_len = strlen(buf); | 220 | buf_len = strlen(buf); |
215 | 221 | ||
216 | /* | 222 | /* |
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c index 7fc49f4cf5ad..2d3d5e4b35f8 100644 --- a/net/netfilter/ipvs/ip_vs_proto.c +++ b/net/netfilter/ipvs/ip_vs_proto.c | |||
@@ -167,26 +167,24 @@ ip_vs_tcpudp_debug_packet_v4(struct ip_vs_protocol *pp, | |||
167 | 167 | ||
168 | ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); | 168 | ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); |
169 | if (ih == NULL) | 169 | if (ih == NULL) |
170 | sprintf(buf, "%s TRUNCATED", pp->name); | 170 | sprintf(buf, "TRUNCATED"); |
171 | else if (ih->frag_off & htons(IP_OFFSET)) | 171 | else if (ih->frag_off & htons(IP_OFFSET)) |
172 | sprintf(buf, "%s %pI4->%pI4 frag", | 172 | sprintf(buf, "%pI4->%pI4 frag", &ih->saddr, &ih->daddr); |
173 | pp->name, &ih->saddr, &ih->daddr); | ||
174 | else { | 173 | else { |
175 | __be16 _ports[2], *pptr | 174 | __be16 _ports[2], *pptr |
176 | ; | 175 | ; |
177 | pptr = skb_header_pointer(skb, offset + ih->ihl*4, | 176 | pptr = skb_header_pointer(skb, offset + ih->ihl*4, |
178 | sizeof(_ports), _ports); | 177 | sizeof(_ports), _ports); |
179 | if (pptr == NULL) | 178 | if (pptr == NULL) |
180 | sprintf(buf, "%s TRUNCATED %pI4->%pI4", | 179 | sprintf(buf, "TRUNCATED %pI4->%pI4", |
181 | pp->name, &ih->saddr, &ih->daddr); | 180 | &ih->saddr, &ih->daddr); |
182 | else | 181 | else |
183 | sprintf(buf, "%s %pI4:%u->%pI4:%u", | 182 | sprintf(buf, "%pI4:%u->%pI4:%u", |
184 | pp->name, | ||
185 | &ih->saddr, ntohs(pptr[0]), | 183 | &ih->saddr, ntohs(pptr[0]), |
186 | &ih->daddr, ntohs(pptr[1])); | 184 | &ih->daddr, ntohs(pptr[1])); |
187 | } | 185 | } |
188 | 186 | ||
189 | pr_debug("%s: %s\n", msg, buf); | 187 | pr_debug("%s: %s %s\n", msg, pp->name, buf); |
190 | } | 188 | } |
191 | 189 | ||
192 | #ifdef CONFIG_IP_VS_IPV6 | 190 | #ifdef CONFIG_IP_VS_IPV6 |
@@ -201,26 +199,24 @@ ip_vs_tcpudp_debug_packet_v6(struct ip_vs_protocol *pp, | |||
201 | 199 | ||
202 | ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); | 200 | ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); |
203 | if (ih == NULL) | 201 | if (ih == NULL) |
204 | sprintf(buf, "%s TRUNCATED", pp->name); | 202 | sprintf(buf, "TRUNCATED"); |
205 | else if (ih->nexthdr == IPPROTO_FRAGMENT) | 203 | else if (ih->nexthdr == IPPROTO_FRAGMENT) |
206 | sprintf(buf, "%s %pI6->%pI6 frag", | 204 | sprintf(buf, "%pI6->%pI6 frag", &ih->saddr, &ih->daddr); |
207 | pp->name, &ih->saddr, &ih->daddr); | ||
208 | else { | 205 | else { |
209 | __be16 _ports[2], *pptr; | 206 | __be16 _ports[2], *pptr; |
210 | 207 | ||
211 | pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr), | 208 | pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr), |
212 | sizeof(_ports), _ports); | 209 | sizeof(_ports), _ports); |
213 | if (pptr == NULL) | 210 | if (pptr == NULL) |
214 | sprintf(buf, "%s TRUNCATED %pI6->%pI6", | 211 | sprintf(buf, "TRUNCATED %pI6->%pI6", |
215 | pp->name, &ih->saddr, &ih->daddr); | 212 | &ih->saddr, &ih->daddr); |
216 | else | 213 | else |
217 | sprintf(buf, "%s %pI6:%u->%pI6:%u", | 214 | sprintf(buf, "%pI6:%u->%pI6:%u", |
218 | pp->name, | ||
219 | &ih->saddr, ntohs(pptr[0]), | 215 | &ih->saddr, ntohs(pptr[0]), |
220 | &ih->daddr, ntohs(pptr[1])); | 216 | &ih->daddr, ntohs(pptr[1])); |
221 | } | 217 | } |
222 | 218 | ||
223 | pr_debug("%s: %s\n", msg, buf); | 219 | pr_debug("%s: %s %s\n", msg, pp->name, buf); |
224 | } | 220 | } |
225 | #endif | 221 | #endif |
226 | 222 | ||
diff --git a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c index c30b43c36cd7..1892dfc12fdd 100644 --- a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c +++ b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c | |||
@@ -136,12 +136,11 @@ ah_esp_debug_packet_v4(struct ip_vs_protocol *pp, const struct sk_buff *skb, | |||
136 | 136 | ||
137 | ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); | 137 | ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); |
138 | if (ih == NULL) | 138 | if (ih == NULL) |
139 | sprintf(buf, "%s TRUNCATED", pp->name); | 139 | sprintf(buf, "TRUNCATED"); |
140 | else | 140 | else |
141 | sprintf(buf, "%s %pI4->%pI4", | 141 | sprintf(buf, "%pI4->%pI4", &ih->saddr, &ih->daddr); |
142 | pp->name, &ih->saddr, &ih->daddr); | ||
143 | 142 | ||
144 | pr_debug("%s: %s\n", msg, buf); | 143 | pr_debug("%s: %s %s\n", msg, pp->name, buf); |
145 | } | 144 | } |
146 | 145 | ||
147 | #ifdef CONFIG_IP_VS_IPV6 | 146 | #ifdef CONFIG_IP_VS_IPV6 |
@@ -154,12 +153,11 @@ ah_esp_debug_packet_v6(struct ip_vs_protocol *pp, const struct sk_buff *skb, | |||
154 | 153 | ||
155 | ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); | 154 | ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); |
156 | if (ih == NULL) | 155 | if (ih == NULL) |
157 | sprintf(buf, "%s TRUNCATED", pp->name); | 156 | sprintf(buf, "TRUNCATED"); |
158 | else | 157 | else |
159 | sprintf(buf, "%s %pI6->%pI6", | 158 | sprintf(buf, "%pI6->%pI6", &ih->saddr, &ih->daddr); |
160 | pp->name, &ih->saddr, &ih->daddr); | ||
161 | 159 | ||
162 | pr_debug("%s: %s\n", msg, buf); | 160 | pr_debug("%s: %s %s\n", msg, pp->name, buf); |
163 | } | 161 | } |
164 | #endif | 162 | #endif |
165 | 163 | ||
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index e450cd6f4eb5..93c15a107b2c 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c | |||
@@ -270,7 +270,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
270 | /* Another hack: avoid icmp_send in ip_fragment */ | 270 | /* Another hack: avoid icmp_send in ip_fragment */ |
271 | skb->local_df = 1; | 271 | skb->local_df = 1; |
272 | 272 | ||
273 | IP_VS_XMIT(PF_INET, skb, rt); | 273 | IP_VS_XMIT(NFPROTO_IPV4, skb, rt); |
274 | 274 | ||
275 | LeaveFunction(10); | 275 | LeaveFunction(10); |
276 | return NF_STOLEN; | 276 | return NF_STOLEN; |
@@ -334,7 +334,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
334 | /* Another hack: avoid icmp_send in ip_fragment */ | 334 | /* Another hack: avoid icmp_send in ip_fragment */ |
335 | skb->local_df = 1; | 335 | skb->local_df = 1; |
336 | 336 | ||
337 | IP_VS_XMIT(PF_INET6, skb, rt); | 337 | IP_VS_XMIT(NFPROTO_IPV6, skb, rt); |
338 | 338 | ||
339 | LeaveFunction(10); | 339 | LeaveFunction(10); |
340 | return NF_STOLEN; | 340 | return NF_STOLEN; |
@@ -410,7 +410,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
410 | /* Another hack: avoid icmp_send in ip_fragment */ | 410 | /* Another hack: avoid icmp_send in ip_fragment */ |
411 | skb->local_df = 1; | 411 | skb->local_df = 1; |
412 | 412 | ||
413 | IP_VS_XMIT(PF_INET, skb, rt); | 413 | IP_VS_XMIT(NFPROTO_IPV4, skb, rt); |
414 | 414 | ||
415 | LeaveFunction(10); | 415 | LeaveFunction(10); |
416 | return NF_STOLEN; | 416 | return NF_STOLEN; |
@@ -486,7 +486,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
486 | /* Another hack: avoid icmp_send in ip_fragment */ | 486 | /* Another hack: avoid icmp_send in ip_fragment */ |
487 | skb->local_df = 1; | 487 | skb->local_df = 1; |
488 | 488 | ||
489 | IP_VS_XMIT(PF_INET6, skb, rt); | 489 | IP_VS_XMIT(NFPROTO_IPV6, skb, rt); |
490 | 490 | ||
491 | LeaveFunction(10); | 491 | LeaveFunction(10); |
492 | return NF_STOLEN; | 492 | return NF_STOLEN; |
@@ -785,7 +785,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
785 | /* Another hack: avoid icmp_send in ip_fragment */ | 785 | /* Another hack: avoid icmp_send in ip_fragment */ |
786 | skb->local_df = 1; | 786 | skb->local_df = 1; |
787 | 787 | ||
788 | IP_VS_XMIT(PF_INET, skb, rt); | 788 | IP_VS_XMIT(NFPROTO_IPV4, skb, rt); |
789 | 789 | ||
790 | LeaveFunction(10); | 790 | LeaveFunction(10); |
791 | return NF_STOLEN; | 791 | return NF_STOLEN; |
@@ -838,7 +838,7 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
838 | /* Another hack: avoid icmp_send in ip_fragment */ | 838 | /* Another hack: avoid icmp_send in ip_fragment */ |
839 | skb->local_df = 1; | 839 | skb->local_df = 1; |
840 | 840 | ||
841 | IP_VS_XMIT(PF_INET6, skb, rt); | 841 | IP_VS_XMIT(NFPROTO_IPV6, skb, rt); |
842 | 842 | ||
843 | LeaveFunction(10); | 843 | LeaveFunction(10); |
844 | return NF_STOLEN; | 844 | return NF_STOLEN; |
@@ -912,7 +912,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
912 | /* Another hack: avoid icmp_send in ip_fragment */ | 912 | /* Another hack: avoid icmp_send in ip_fragment */ |
913 | skb->local_df = 1; | 913 | skb->local_df = 1; |
914 | 914 | ||
915 | IP_VS_XMIT(PF_INET, skb, rt); | 915 | IP_VS_XMIT(NFPROTO_IPV4, skb, rt); |
916 | 916 | ||
917 | rc = NF_STOLEN; | 917 | rc = NF_STOLEN; |
918 | goto out; | 918 | goto out; |
@@ -987,7 +987,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
987 | /* Another hack: avoid icmp_send in ip_fragment */ | 987 | /* Another hack: avoid icmp_send in ip_fragment */ |
988 | skb->local_df = 1; | 988 | skb->local_df = 1; |
989 | 989 | ||
990 | IP_VS_XMIT(PF_INET6, skb, rt); | 990 | IP_VS_XMIT(NFPROTO_IPV6, skb, rt); |
991 | 991 | ||
992 | rc = NF_STOLEN; | 992 | rc = NF_STOLEN; |
993 | goto out; | 993 | goto out; |
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c index 372e80f07a81..13fd2c55e329 100644 --- a/net/netfilter/nf_conntrack_amanda.c +++ b/net/netfilter/nf_conntrack_amanda.c | |||
@@ -108,7 +108,7 @@ static int amanda_help(struct sk_buff *skb, | |||
108 | dataoff = protoff + sizeof(struct udphdr); | 108 | dataoff = protoff + sizeof(struct udphdr); |
109 | if (dataoff >= skb->len) { | 109 | if (dataoff >= skb->len) { |
110 | if (net_ratelimit()) | 110 | if (net_ratelimit()) |
111 | printk("amanda_help: skblen = %u\n", skb->len); | 111 | printk(KERN_ERR "amanda_help: skblen = %u\n", skb->len); |
112 | return NF_ACCEPT; | 112 | return NF_ACCEPT; |
113 | } | 113 | } |
114 | 114 | ||
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 0c9bbe93cc16..eeeb8bc73982 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -319,8 +319,10 @@ begin: | |||
319 | * not the expected one, we must restart lookup. | 319 | * not the expected one, we must restart lookup. |
320 | * We probably met an item that was moved to another chain. | 320 | * We probably met an item that was moved to another chain. |
321 | */ | 321 | */ |
322 | if (get_nulls_value(n) != hash) | 322 | if (get_nulls_value(n) != hash) { |
323 | NF_CT_STAT_INC(net, search_restart); | ||
323 | goto begin; | 324 | goto begin; |
325 | } | ||
324 | local_bh_enable(); | 326 | local_bh_enable(); |
325 | 327 | ||
326 | return NULL; | 328 | return NULL; |
@@ -422,6 +424,16 @@ __nf_conntrack_confirm(struct sk_buff *skb) | |||
422 | 424 | ||
423 | spin_lock_bh(&nf_conntrack_lock); | 425 | spin_lock_bh(&nf_conntrack_lock); |
424 | 426 | ||
427 | /* We have to check the DYING flag inside the lock to prevent | ||
428 | a race against nf_ct_get_next_corpse() possibly called from | ||
429 | user context, else we insert an already 'dead' hash, blocking | ||
430 | further use of that particular connection -JM */ | ||
431 | |||
432 | if (unlikely(nf_ct_is_dying(ct))) { | ||
433 | spin_unlock_bh(&nf_conntrack_lock); | ||
434 | return NF_ACCEPT; | ||
435 | } | ||
436 | |||
425 | /* See if there's one in the list already, including reverse: | 437 | /* See if there's one in the list already, including reverse: |
426 | NAT could have grabbed it without realizing, since we're | 438 | NAT could have grabbed it without realizing, since we're |
427 | not in the hash. If there is, we lost race. */ | 439 | not in the hash. If there is, we lost race. */ |
@@ -1333,7 +1345,7 @@ static int nf_conntrack_init_init_net(void) | |||
1333 | } | 1345 | } |
1334 | nf_conntrack_max = max_factor * nf_conntrack_htable_size; | 1346 | nf_conntrack_max = max_factor * nf_conntrack_htable_size; |
1335 | 1347 | ||
1336 | printk("nf_conntrack version %s (%u buckets, %d max)\n", | 1348 | printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n", |
1337 | NF_CONNTRACK_VERSION, nf_conntrack_htable_size, | 1349 | NF_CONNTRACK_VERSION, nf_conntrack_htable_size, |
1338 | nf_conntrack_max); | 1350 | nf_conntrack_max); |
1339 | 1351 | ||
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c index f516961a83b4..cdcc7649476b 100644 --- a/net/netfilter/nf_conntrack_ecache.c +++ b/net/netfilter/nf_conntrack_ecache.c | |||
@@ -85,7 +85,8 @@ int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new) | |||
85 | struct nf_ct_event_notifier *notify; | 85 | struct nf_ct_event_notifier *notify; |
86 | 86 | ||
87 | mutex_lock(&nf_ct_ecache_mutex); | 87 | mutex_lock(&nf_ct_ecache_mutex); |
88 | notify = rcu_dereference(nf_conntrack_event_cb); | 88 | notify = rcu_dereference_protected(nf_conntrack_event_cb, |
89 | lockdep_is_held(&nf_ct_ecache_mutex)); | ||
89 | if (notify != NULL) { | 90 | if (notify != NULL) { |
90 | ret = -EBUSY; | 91 | ret = -EBUSY; |
91 | goto out_unlock; | 92 | goto out_unlock; |
@@ -105,7 +106,8 @@ void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new) | |||
105 | struct nf_ct_event_notifier *notify; | 106 | struct nf_ct_event_notifier *notify; |
106 | 107 | ||
107 | mutex_lock(&nf_ct_ecache_mutex); | 108 | mutex_lock(&nf_ct_ecache_mutex); |
108 | notify = rcu_dereference(nf_conntrack_event_cb); | 109 | notify = rcu_dereference_protected(nf_conntrack_event_cb, |
110 | lockdep_is_held(&nf_ct_ecache_mutex)); | ||
109 | BUG_ON(notify != new); | 111 | BUG_ON(notify != new); |
110 | rcu_assign_pointer(nf_conntrack_event_cb, NULL); | 112 | rcu_assign_pointer(nf_conntrack_event_cb, NULL); |
111 | mutex_unlock(&nf_ct_ecache_mutex); | 113 | mutex_unlock(&nf_ct_ecache_mutex); |
@@ -118,7 +120,8 @@ int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new) | |||
118 | struct nf_exp_event_notifier *notify; | 120 | struct nf_exp_event_notifier *notify; |
119 | 121 | ||
120 | mutex_lock(&nf_ct_ecache_mutex); | 122 | mutex_lock(&nf_ct_ecache_mutex); |
121 | notify = rcu_dereference(nf_expect_event_cb); | 123 | notify = rcu_dereference_protected(nf_expect_event_cb, |
124 | lockdep_is_held(&nf_ct_ecache_mutex)); | ||
122 | if (notify != NULL) { | 125 | if (notify != NULL) { |
123 | ret = -EBUSY; | 126 | ret = -EBUSY; |
124 | goto out_unlock; | 127 | goto out_unlock; |
@@ -138,7 +141,8 @@ void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new) | |||
138 | struct nf_exp_event_notifier *notify; | 141 | struct nf_exp_event_notifier *notify; |
139 | 142 | ||
140 | mutex_lock(&nf_ct_ecache_mutex); | 143 | mutex_lock(&nf_ct_ecache_mutex); |
141 | notify = rcu_dereference(nf_expect_event_cb); | 144 | notify = rcu_dereference_protected(nf_expect_event_cb, |
145 | lockdep_is_held(&nf_ct_ecache_mutex)); | ||
142 | BUG_ON(notify != new); | 146 | BUG_ON(notify != new); |
143 | rcu_assign_pointer(nf_expect_event_cb, NULL); | 147 | rcu_assign_pointer(nf_expect_event_cb, NULL); |
144 | mutex_unlock(&nf_ct_ecache_mutex); | 148 | mutex_unlock(&nf_ct_ecache_mutex); |
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c index 2ae3169e7633..e17cb7c7dd8f 100644 --- a/net/netfilter/nf_conntrack_ftp.c +++ b/net/netfilter/nf_conntrack_ftp.c | |||
@@ -573,8 +573,8 @@ static int __init nf_conntrack_ftp_init(void) | |||
573 | ftp[i][j].tuple.src.l3num, ports[i]); | 573 | ftp[i][j].tuple.src.l3num, ports[i]); |
574 | ret = nf_conntrack_helper_register(&ftp[i][j]); | 574 | ret = nf_conntrack_helper_register(&ftp[i][j]); |
575 | if (ret) { | 575 | if (ret) { |
576 | printk("nf_ct_ftp: failed to register helper " | 576 | printk(KERN_ERR "nf_ct_ftp: failed to register" |
577 | " for pf: %d port: %d\n", | 577 | " helper for pf: %d port: %d\n", |
578 | ftp[i][j].tuple.src.l3num, ports[i]); | 578 | ftp[i][j].tuple.src.l3num, ports[i]); |
579 | nf_conntrack_ftp_fini(); | 579 | nf_conntrack_ftp_fini(); |
580 | return ret; | 580 | return ret; |
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index a487c8038044..6eaee7c8a337 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c | |||
@@ -194,8 +194,7 @@ static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff, | |||
194 | return 0; | 194 | return 0; |
195 | } | 195 | } |
196 | 196 | ||
197 | if (net_ratelimit()) | 197 | pr_debug("nf_ct_h323: incomplete TPKT (fragmented?)\n"); |
198 | printk("nf_ct_h323: incomplete TPKT (fragmented?)\n"); | ||
199 | goto clear_out; | 198 | goto clear_out; |
200 | } | 199 | } |
201 | 200 | ||
@@ -608,7 +607,7 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff, | |||
608 | drop: | 607 | drop: |
609 | spin_unlock_bh(&nf_h323_lock); | 608 | spin_unlock_bh(&nf_h323_lock); |
610 | if (net_ratelimit()) | 609 | if (net_ratelimit()) |
611 | printk("nf_ct_h245: packet dropped\n"); | 610 | pr_info("nf_ct_h245: packet dropped\n"); |
612 | return NF_DROP; | 611 | return NF_DROP; |
613 | } | 612 | } |
614 | 613 | ||
@@ -1153,7 +1152,7 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff, | |||
1153 | drop: | 1152 | drop: |
1154 | spin_unlock_bh(&nf_h323_lock); | 1153 | spin_unlock_bh(&nf_h323_lock); |
1155 | if (net_ratelimit()) | 1154 | if (net_ratelimit()) |
1156 | printk("nf_ct_q931: packet dropped\n"); | 1155 | pr_info("nf_ct_q931: packet dropped\n"); |
1157 | return NF_DROP; | 1156 | return NF_DROP; |
1158 | } | 1157 | } |
1159 | 1158 | ||
@@ -1728,7 +1727,7 @@ static int ras_help(struct sk_buff *skb, unsigned int protoff, | |||
1728 | drop: | 1727 | drop: |
1729 | spin_unlock_bh(&nf_h323_lock); | 1728 | spin_unlock_bh(&nf_h323_lock); |
1730 | if (net_ratelimit()) | 1729 | if (net_ratelimit()) |
1731 | printk("nf_ct_ras: packet dropped\n"); | 1730 | pr_info("nf_ct_ras: packet dropped\n"); |
1732 | return NF_DROP; | 1731 | return NF_DROP; |
1733 | } | 1732 | } |
1734 | 1733 | ||
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c index 7673930ca342..b394aa318776 100644 --- a/net/netfilter/nf_conntrack_irc.c +++ b/net/netfilter/nf_conntrack_irc.c | |||
@@ -235,7 +235,7 @@ static int __init nf_conntrack_irc_init(void) | |||
235 | char *tmpname; | 235 | char *tmpname; |
236 | 236 | ||
237 | if (max_dcc_channels < 1) { | 237 | if (max_dcc_channels < 1) { |
238 | printk("nf_ct_irc: max_dcc_channels must not be zero\n"); | 238 | printk(KERN_ERR "nf_ct_irc: max_dcc_channels must not be zero\n"); |
239 | return -EINVAL; | 239 | return -EINVAL; |
240 | } | 240 | } |
241 | 241 | ||
@@ -267,7 +267,7 @@ static int __init nf_conntrack_irc_init(void) | |||
267 | 267 | ||
268 | ret = nf_conntrack_helper_register(&irc[i]); | 268 | ret = nf_conntrack_helper_register(&irc[i]); |
269 | if (ret) { | 269 | if (ret) { |
270 | printk("nf_ct_irc: failed to register helper " | 270 | printk(KERN_ERR "nf_ct_irc: failed to register helper " |
271 | "for pf: %u port: %u\n", | 271 | "for pf: %u port: %u\n", |
272 | irc[i].tuple.src.l3num, ports[i]); | 272 | irc[i].tuple.src.l3num, ports[i]); |
273 | nf_conntrack_irc_fini(); | 273 | nf_conntrack_irc_fini(); |
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index afc52f2ee4ac..c42ff6aa441d 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -427,6 +427,17 @@ ctnetlink_proto_size(const struct nf_conn *ct) | |||
427 | } | 427 | } |
428 | 428 | ||
429 | static inline size_t | 429 | static inline size_t |
430 | ctnetlink_counters_size(const struct nf_conn *ct) | ||
431 | { | ||
432 | if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT)) | ||
433 | return 0; | ||
434 | return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */ | ||
435 | + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */ | ||
436 | + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */ | ||
437 | ; | ||
438 | } | ||
439 | |||
440 | static inline size_t | ||
430 | ctnetlink_nlmsg_size(const struct nf_conn *ct) | 441 | ctnetlink_nlmsg_size(const struct nf_conn *ct) |
431 | { | 442 | { |
432 | return NLMSG_ALIGN(sizeof(struct nfgenmsg)) | 443 | return NLMSG_ALIGN(sizeof(struct nfgenmsg)) |
@@ -436,11 +447,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct) | |||
436 | + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */ | 447 | + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */ |
437 | + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */ | 448 | + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */ |
438 | + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */ | 449 | + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */ |
439 | #ifdef CONFIG_NF_CT_ACCT | 450 | + ctnetlink_counters_size(ct) |
440 | + 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */ | ||
441 | + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */ | ||
442 | + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */ | ||
443 | #endif | ||
444 | + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */ | 451 | + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */ |
445 | + nla_total_size(0) /* CTA_PROTOINFO */ | 452 | + nla_total_size(0) /* CTA_PROTOINFO */ |
446 | + nla_total_size(0) /* CTA_HELP */ | 453 | + nla_total_size(0) /* CTA_HELP */ |
@@ -2050,29 +2057,29 @@ static int __init ctnetlink_init(void) | |||
2050 | { | 2057 | { |
2051 | int ret; | 2058 | int ret; |
2052 | 2059 | ||
2053 | printk("ctnetlink v%s: registering with nfnetlink.\n", version); | 2060 | pr_info("ctnetlink v%s: registering with nfnetlink.\n", version); |
2054 | ret = nfnetlink_subsys_register(&ctnl_subsys); | 2061 | ret = nfnetlink_subsys_register(&ctnl_subsys); |
2055 | if (ret < 0) { | 2062 | if (ret < 0) { |
2056 | printk("ctnetlink_init: cannot register with nfnetlink.\n"); | 2063 | pr_err("ctnetlink_init: cannot register with nfnetlink.\n"); |
2057 | goto err_out; | 2064 | goto err_out; |
2058 | } | 2065 | } |
2059 | 2066 | ||
2060 | ret = nfnetlink_subsys_register(&ctnl_exp_subsys); | 2067 | ret = nfnetlink_subsys_register(&ctnl_exp_subsys); |
2061 | if (ret < 0) { | 2068 | if (ret < 0) { |
2062 | printk("ctnetlink_init: cannot register exp with nfnetlink.\n"); | 2069 | pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n"); |
2063 | goto err_unreg_subsys; | 2070 | goto err_unreg_subsys; |
2064 | } | 2071 | } |
2065 | 2072 | ||
2066 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | 2073 | #ifdef CONFIG_NF_CONNTRACK_EVENTS |
2067 | ret = nf_conntrack_register_notifier(&ctnl_notifier); | 2074 | ret = nf_conntrack_register_notifier(&ctnl_notifier); |
2068 | if (ret < 0) { | 2075 | if (ret < 0) { |
2069 | printk("ctnetlink_init: cannot register notifier.\n"); | 2076 | pr_err("ctnetlink_init: cannot register notifier.\n"); |
2070 | goto err_unreg_exp_subsys; | 2077 | goto err_unreg_exp_subsys; |
2071 | } | 2078 | } |
2072 | 2079 | ||
2073 | ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp); | 2080 | ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp); |
2074 | if (ret < 0) { | 2081 | if (ret < 0) { |
2075 | printk("ctnetlink_init: cannot expect register notifier.\n"); | 2082 | pr_err("ctnetlink_init: cannot expect register notifier.\n"); |
2076 | goto err_unreg_notifier; | 2083 | goto err_unreg_notifier; |
2077 | } | 2084 | } |
2078 | #endif | 2085 | #endif |
@@ -2093,7 +2100,7 @@ err_out: | |||
2093 | 2100 | ||
2094 | static void __exit ctnetlink_exit(void) | 2101 | static void __exit ctnetlink_exit(void) |
2095 | { | 2102 | { |
2096 | printk("ctnetlink: unregistering from nfnetlink.\n"); | 2103 | pr_info("ctnetlink: unregistering from nfnetlink.\n"); |
2097 | 2104 | ||
2098 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | 2105 | #ifdef CONFIG_NF_CONNTRACK_EVENTS |
2099 | nf_ct_expect_unregister_notifier(&ctnl_notifier_exp); | 2106 | nf_ct_expect_unregister_notifier(&ctnl_notifier_exp); |
@@ -2102,7 +2109,6 @@ static void __exit ctnetlink_exit(void) | |||
2102 | 2109 | ||
2103 | nfnetlink_subsys_unregister(&ctnl_exp_subsys); | 2110 | nfnetlink_subsys_unregister(&ctnl_exp_subsys); |
2104 | nfnetlink_subsys_unregister(&ctnl_subsys); | 2111 | nfnetlink_subsys_unregister(&ctnl_subsys); |
2105 | return; | ||
2106 | } | 2112 | } |
2107 | 2113 | ||
2108 | module_init(ctnetlink_init); | 2114 | module_init(ctnetlink_init); |
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index a44fa75b5178..5886ba1d52a0 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c | |||
@@ -14,12 +14,10 @@ | |||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
17 | #include <linux/skbuff.h> | ||
18 | #include <linux/vmalloc.h> | 17 | #include <linux/vmalloc.h> |
19 | #include <linux/stddef.h> | 18 | #include <linux/stddef.h> |
20 | #include <linux/err.h> | 19 | #include <linux/err.h> |
21 | #include <linux/percpu.h> | 20 | #include <linux/percpu.h> |
22 | #include <linux/moduleparam.h> | ||
23 | #include <linux/notifier.h> | 21 | #include <linux/notifier.h> |
24 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
25 | #include <linux/netdevice.h> | 23 | #include <linux/netdevice.h> |
@@ -119,9 +117,13 @@ void nf_ct_l3proto_module_put(unsigned short l3proto) | |||
119 | { | 117 | { |
120 | struct nf_conntrack_l3proto *p; | 118 | struct nf_conntrack_l3proto *p; |
121 | 119 | ||
122 | /* rcu_read_lock not necessary since the caller holds a reference */ | 120 | /* rcu_read_lock not necessary since the caller holds a reference, but |
121 | * taken anyways to avoid lockdep warnings in __nf_ct_l3proto_find() | ||
122 | */ | ||
123 | rcu_read_lock(); | ||
123 | p = __nf_ct_l3proto_find(l3proto); | 124 | p = __nf_ct_l3proto_find(l3proto); |
124 | module_put(p->me); | 125 | module_put(p->me); |
126 | rcu_read_unlock(); | ||
125 | } | 127 | } |
126 | EXPORT_SYMBOL_GPL(nf_ct_l3proto_module_put); | 128 | EXPORT_SYMBOL_GPL(nf_ct_l3proto_module_put); |
127 | 129 | ||
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index b68ff15ed979..c6049c2d5ea8 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c | |||
@@ -717,12 +717,12 @@ static int __init nf_conntrack_proto_sctp_init(void) | |||
717 | 717 | ||
718 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp4); | 718 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp4); |
719 | if (ret) { | 719 | if (ret) { |
720 | printk("nf_conntrack_l4proto_sctp4: protocol register failed\n"); | 720 | pr_err("nf_conntrack_l4proto_sctp4: protocol register failed\n"); |
721 | goto out; | 721 | goto out; |
722 | } | 722 | } |
723 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp6); | 723 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp6); |
724 | if (ret) { | 724 | if (ret) { |
725 | printk("nf_conntrack_l4proto_sctp6: protocol register failed\n"); | 725 | pr_err("nf_conntrack_l4proto_sctp6: protocol register failed\n"); |
726 | goto cleanup_sctp4; | 726 | goto cleanup_sctp4; |
727 | } | 727 | } |
728 | 728 | ||
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index c6cd1b84eddd..53d892210a04 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c | |||
@@ -1393,10 +1393,8 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff, | |||
1393 | 1393 | ||
1394 | nf_ct_refresh(ct, skb, sip_timeout * HZ); | 1394 | nf_ct_refresh(ct, skb, sip_timeout * HZ); |
1395 | 1395 | ||
1396 | if (skb_is_nonlinear(skb)) { | 1396 | if (unlikely(skb_linearize(skb))) |
1397 | pr_debug("Copy of skbuff not supported yet.\n"); | 1397 | return NF_DROP; |
1398 | return NF_ACCEPT; | ||
1399 | } | ||
1400 | 1398 | ||
1401 | dptr = skb->data + dataoff; | 1399 | dptr = skb->data + dataoff; |
1402 | datalen = skb->len - dataoff; | 1400 | datalen = skb->len - dataoff; |
@@ -1455,10 +1453,8 @@ static int sip_help_udp(struct sk_buff *skb, unsigned int protoff, | |||
1455 | 1453 | ||
1456 | nf_ct_refresh(ct, skb, sip_timeout * HZ); | 1454 | nf_ct_refresh(ct, skb, sip_timeout * HZ); |
1457 | 1455 | ||
1458 | if (skb_is_nonlinear(skb)) { | 1456 | if (unlikely(skb_linearize(skb))) |
1459 | pr_debug("Copy of skbuff not supported yet.\n"); | 1457 | return NF_DROP; |
1460 | return NF_ACCEPT; | ||
1461 | } | ||
1462 | 1458 | ||
1463 | dptr = skb->data + dataoff; | 1459 | dptr = skb->data + dataoff; |
1464 | datalen = skb->len - dataoff; | 1460 | datalen = skb->len - dataoff; |
@@ -1549,8 +1545,8 @@ static int __init nf_conntrack_sip_init(void) | |||
1549 | 1545 | ||
1550 | ret = nf_conntrack_helper_register(&sip[i][j]); | 1546 | ret = nf_conntrack_helper_register(&sip[i][j]); |
1551 | if (ret) { | 1547 | if (ret) { |
1552 | printk("nf_ct_sip: failed to register helper " | 1548 | printk(KERN_ERR "nf_ct_sip: failed to register" |
1553 | "for pf: %u port: %u\n", | 1549 | " helper for pf: %u port: %u\n", |
1554 | sip[i][j].tuple.src.l3num, ports[i]); | 1550 | sip[i][j].tuple.src.l3num, ports[i]); |
1555 | nf_conntrack_sip_fini(); | 1551 | nf_conntrack_sip_fini(); |
1556 | return ret; | 1552 | return ret; |
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index faa8eb3722b9..eb973fcd67ab 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c | |||
@@ -252,12 +252,12 @@ static int ct_cpu_seq_show(struct seq_file *seq, void *v) | |||
252 | const struct ip_conntrack_stat *st = v; | 252 | const struct ip_conntrack_stat *st = v; |
253 | 253 | ||
254 | if (v == SEQ_START_TOKEN) { | 254 | if (v == SEQ_START_TOKEN) { |
255 | seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete\n"); | 255 | seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n"); |
256 | return 0; | 256 | return 0; |
257 | } | 257 | } |
258 | 258 | ||
259 | seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x " | 259 | seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x " |
260 | "%08x %08x %08x %08x %08x %08x %08x %08x \n", | 260 | "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", |
261 | nr_conntracks, | 261 | nr_conntracks, |
262 | st->searched, | 262 | st->searched, |
263 | st->found, | 263 | st->found, |
@@ -274,7 +274,8 @@ static int ct_cpu_seq_show(struct seq_file *seq, void *v) | |||
274 | 274 | ||
275 | st->expect_new, | 275 | st->expect_new, |
276 | st->expect_create, | 276 | st->expect_create, |
277 | st->expect_delete | 277 | st->expect_delete, |
278 | st->search_restart | ||
278 | ); | 279 | ); |
279 | return 0; | 280 | return 0; |
280 | } | 281 | } |
@@ -445,7 +446,7 @@ out_kmemdup: | |||
445 | if (net_eq(net, &init_net)) | 446 | if (net_eq(net, &init_net)) |
446 | unregister_sysctl_table(nf_ct_netfilter_header); | 447 | unregister_sysctl_table(nf_ct_netfilter_header); |
447 | out: | 448 | out: |
448 | printk("nf_conntrack: can't register to sysctl.\n"); | 449 | printk(KERN_ERR "nf_conntrack: can't register to sysctl.\n"); |
449 | return -ENOMEM; | 450 | return -ENOMEM; |
450 | } | 451 | } |
451 | 452 | ||
diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c index 46e646b2e9b9..75466fd72f4f 100644 --- a/net/netfilter/nf_conntrack_tftp.c +++ b/net/netfilter/nf_conntrack_tftp.c | |||
@@ -138,8 +138,8 @@ static int __init nf_conntrack_tftp_init(void) | |||
138 | 138 | ||
139 | ret = nf_conntrack_helper_register(&tftp[i][j]); | 139 | ret = nf_conntrack_helper_register(&tftp[i][j]); |
140 | if (ret) { | 140 | if (ret) { |
141 | printk("nf_ct_tftp: failed to register helper " | 141 | printk(KERN_ERR "nf_ct_tftp: failed to register" |
142 | "for pf: %u port: %u\n", | 142 | " helper for pf: %u port: %u\n", |
143 | tftp[i][j].tuple.src.l3num, ports[i]); | 143 | tftp[i][j].tuple.src.l3num, ports[i]); |
144 | nf_conntrack_tftp_fini(); | 144 | nf_conntrack_tftp_fini(); |
145 | return ret; | 145 | return ret; |
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h index bf6609978af7..770f76432ad0 100644 --- a/net/netfilter/nf_internals.h +++ b/net/netfilter/nf_internals.h | |||
@@ -6,7 +6,7 @@ | |||
6 | #include <linux/netdevice.h> | 6 | #include <linux/netdevice.h> |
7 | 7 | ||
8 | #ifdef CONFIG_NETFILTER_DEBUG | 8 | #ifdef CONFIG_NETFILTER_DEBUG |
9 | #define NFDEBUG(format, args...) printk(format , ## args) | 9 | #define NFDEBUG(format, args...) printk(KERN_DEBUG format , ## args) |
10 | #else | 10 | #else |
11 | #define NFDEBUG(format, args...) | 11 | #define NFDEBUG(format, args...) |
12 | #endif | 12 | #endif |
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 015725a5cd50..7df37fd786bc 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c | |||
@@ -52,7 +52,8 @@ int nf_log_register(u_int8_t pf, struct nf_logger *logger) | |||
52 | } else { | 52 | } else { |
53 | /* register at end of list to honor first register win */ | 53 | /* register at end of list to honor first register win */ |
54 | list_add_tail(&logger->list[pf], &nf_loggers_l[pf]); | 54 | list_add_tail(&logger->list[pf], &nf_loggers_l[pf]); |
55 | llog = rcu_dereference(nf_loggers[pf]); | 55 | llog = rcu_dereference_protected(nf_loggers[pf], |
56 | lockdep_is_held(&nf_log_mutex)); | ||
56 | if (llog == NULL) | 57 | if (llog == NULL) |
57 | rcu_assign_pointer(nf_loggers[pf], logger); | 58 | rcu_assign_pointer(nf_loggers[pf], logger); |
58 | } | 59 | } |
@@ -70,7 +71,8 @@ void nf_log_unregister(struct nf_logger *logger) | |||
70 | 71 | ||
71 | mutex_lock(&nf_log_mutex); | 72 | mutex_lock(&nf_log_mutex); |
72 | for (i = 0; i < ARRAY_SIZE(nf_loggers); i++) { | 73 | for (i = 0; i < ARRAY_SIZE(nf_loggers); i++) { |
73 | c_logger = rcu_dereference(nf_loggers[i]); | 74 | c_logger = rcu_dereference_protected(nf_loggers[i], |
75 | lockdep_is_held(&nf_log_mutex)); | ||
74 | if (c_logger == logger) | 76 | if (c_logger == logger) |
75 | rcu_assign_pointer(nf_loggers[i], NULL); | 77 | rcu_assign_pointer(nf_loggers[i], NULL); |
76 | list_del(&logger->list[i]); | 78 | list_del(&logger->list[i]); |
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index c49ef219899e..78b3cf9c519c 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/rcupdate.h> | 9 | #include <linux/rcupdate.h> |
10 | #include <net/protocol.h> | 10 | #include <net/protocol.h> |
11 | #include <net/netfilter/nf_queue.h> | 11 | #include <net/netfilter/nf_queue.h> |
12 | #include <net/dst.h> | ||
12 | 13 | ||
13 | #include "nf_internals.h" | 14 | #include "nf_internals.h" |
14 | 15 | ||
@@ -170,6 +171,7 @@ static int __nf_queue(struct sk_buff *skb, | |||
170 | dev_hold(physoutdev); | 171 | dev_hold(physoutdev); |
171 | } | 172 | } |
172 | #endif | 173 | #endif |
174 | skb_dst_force(skb); | ||
173 | afinfo->saveroute(skb, entry); | 175 | afinfo->saveroute(skb, entry); |
174 | status = qh->outfn(entry, queuenum); | 176 | status = qh->outfn(entry, queuenum); |
175 | 177 | ||
@@ -279,7 +281,6 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) | |||
279 | } | 281 | } |
280 | rcu_read_unlock(); | 282 | rcu_read_unlock(); |
281 | kfree(entry); | 283 | kfree(entry); |
282 | return; | ||
283 | } | 284 | } |
284 | EXPORT_SYMBOL(nf_reinject); | 285 | EXPORT_SYMBOL(nf_reinject); |
285 | 286 | ||
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 6afa3d52ea5f..b4a4532823e8 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c | |||
@@ -18,12 +18,9 @@ | |||
18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
19 | #include <linux/socket.h> | 19 | #include <linux/socket.h> |
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/major.h> | ||
22 | #include <linux/timer.h> | ||
23 | #include <linux/string.h> | 21 | #include <linux/string.h> |
24 | #include <linux/sockios.h> | 22 | #include <linux/sockios.h> |
25 | #include <linux/net.h> | 23 | #include <linux/net.h> |
26 | #include <linux/fcntl.h> | ||
27 | #include <linux/skbuff.h> | 24 | #include <linux/skbuff.h> |
28 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
29 | #include <asm/system.h> | 26 | #include <asm/system.h> |
@@ -215,13 +212,13 @@ static struct pernet_operations nfnetlink_net_ops = { | |||
215 | 212 | ||
216 | static int __init nfnetlink_init(void) | 213 | static int __init nfnetlink_init(void) |
217 | { | 214 | { |
218 | printk("Netfilter messages via NETLINK v%s.\n", nfversion); | 215 | pr_info("Netfilter messages via NETLINK v%s.\n", nfversion); |
219 | return register_pernet_subsys(&nfnetlink_net_ops); | 216 | return register_pernet_subsys(&nfnetlink_net_ops); |
220 | } | 217 | } |
221 | 218 | ||
222 | static void __exit nfnetlink_exit(void) | 219 | static void __exit nfnetlink_exit(void) |
223 | { | 220 | { |
224 | printk("Removing netfilter NETLINK layer.\n"); | 221 | pr_info("Removing netfilter NETLINK layer.\n"); |
225 | unregister_pernet_subsys(&nfnetlink_net_ops); | 222 | unregister_pernet_subsys(&nfnetlink_net_ops); |
226 | } | 223 | } |
227 | module_init(nfnetlink_init); | 224 | module_init(nfnetlink_init); |
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 203643fb2c52..fc9a211e629e 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
@@ -297,7 +297,7 @@ nfulnl_alloc_skb(unsigned int inst_size, unsigned int pkt_size) | |||
297 | n = max(inst_size, pkt_size); | 297 | n = max(inst_size, pkt_size); |
298 | skb = alloc_skb(n, GFP_ATOMIC); | 298 | skb = alloc_skb(n, GFP_ATOMIC); |
299 | if (!skb) { | 299 | if (!skb) { |
300 | PRINTR("nfnetlink_log: can't alloc whole buffer (%u bytes)\n", | 300 | pr_notice("nfnetlink_log: can't alloc whole buffer (%u bytes)\n", |
301 | inst_size); | 301 | inst_size); |
302 | 302 | ||
303 | if (n > pkt_size) { | 303 | if (n > pkt_size) { |
@@ -306,7 +306,7 @@ nfulnl_alloc_skb(unsigned int inst_size, unsigned int pkt_size) | |||
306 | 306 | ||
307 | skb = alloc_skb(pkt_size, GFP_ATOMIC); | 307 | skb = alloc_skb(pkt_size, GFP_ATOMIC); |
308 | if (!skb) | 308 | if (!skb) |
309 | PRINTR("nfnetlink_log: can't even alloc %u " | 309 | pr_err("nfnetlink_log: can't even alloc %u " |
310 | "bytes\n", pkt_size); | 310 | "bytes\n", pkt_size); |
311 | } | 311 | } |
312 | } | 312 | } |
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index e70a6ef1f4f2..12e1ab37fcd8 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
@@ -246,8 +246,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, | |||
246 | break; | 246 | break; |
247 | 247 | ||
248 | case NFQNL_COPY_PACKET: | 248 | case NFQNL_COPY_PACKET: |
249 | if ((entskb->ip_summed == CHECKSUM_PARTIAL || | 249 | if (entskb->ip_summed == CHECKSUM_PARTIAL && |
250 | entskb->ip_summed == CHECKSUM_COMPLETE) && | ||
251 | skb_checksum_help(entskb)) { | 250 | skb_checksum_help(entskb)) { |
252 | spin_unlock_bh(&queue->lock); | 251 | spin_unlock_bh(&queue->lock); |
253 | return NULL; | 252 | return NULL; |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 665f5beef6ad..e34622fa0003 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -12,7 +12,7 @@ | |||
12 | * published by the Free Software Foundation. | 12 | * published by the Free Software Foundation. |
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | 15 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/socket.h> | 17 | #include <linux/socket.h> |
18 | #include <linux/net.h> | 18 | #include <linux/net.h> |
@@ -55,12 +55,6 @@ struct xt_af { | |||
55 | 55 | ||
56 | static struct xt_af *xt; | 56 | static struct xt_af *xt; |
57 | 57 | ||
58 | #ifdef DEBUG_IP_FIREWALL_USER | ||
59 | #define duprintf(format, args...) printk(format , ## args) | ||
60 | #else | ||
61 | #define duprintf(format, args...) | ||
62 | #endif | ||
63 | |||
64 | static const char *const xt_prefix[NFPROTO_NUMPROTO] = { | 58 | static const char *const xt_prefix[NFPROTO_NUMPROTO] = { |
65 | [NFPROTO_UNSPEC] = "x", | 59 | [NFPROTO_UNSPEC] = "x", |
66 | [NFPROTO_IPV4] = "ip", | 60 | [NFPROTO_IPV4] = "ip", |
@@ -69,6 +63,9 @@ static const char *const xt_prefix[NFPROTO_NUMPROTO] = { | |||
69 | [NFPROTO_IPV6] = "ip6", | 63 | [NFPROTO_IPV6] = "ip6", |
70 | }; | 64 | }; |
71 | 65 | ||
66 | /* Allow this many total (re)entries. */ | ||
67 | static const unsigned int xt_jumpstack_multiplier = 2; | ||
68 | |||
72 | /* Registration hooks for targets. */ | 69 | /* Registration hooks for targets. */ |
73 | int | 70 | int |
74 | xt_register_target(struct xt_target *target) | 71 | xt_register_target(struct xt_target *target) |
@@ -221,6 +218,17 @@ struct xt_match *xt_find_match(u8 af, const char *name, u8 revision) | |||
221 | } | 218 | } |
222 | EXPORT_SYMBOL(xt_find_match); | 219 | EXPORT_SYMBOL(xt_find_match); |
223 | 220 | ||
221 | struct xt_match * | ||
222 | xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision) | ||
223 | { | ||
224 | struct xt_match *match; | ||
225 | |||
226 | match = try_then_request_module(xt_find_match(nfproto, name, revision), | ||
227 | "%st_%s", xt_prefix[nfproto], name); | ||
228 | return (match != NULL) ? match : ERR_PTR(-ENOENT); | ||
229 | } | ||
230 | EXPORT_SYMBOL_GPL(xt_request_find_match); | ||
231 | |||
224 | /* Find target, grabs ref. Returns ERR_PTR() on error. */ | 232 | /* Find target, grabs ref. Returns ERR_PTR() on error. */ |
225 | struct xt_target *xt_find_target(u8 af, const char *name, u8 revision) | 233 | struct xt_target *xt_find_target(u8 af, const char *name, u8 revision) |
226 | { | 234 | { |
@@ -257,9 +265,7 @@ struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision) | |||
257 | 265 | ||
258 | target = try_then_request_module(xt_find_target(af, name, revision), | 266 | target = try_then_request_module(xt_find_target(af, name, revision), |
259 | "%st_%s", xt_prefix[af], name); | 267 | "%st_%s", xt_prefix[af], name); |
260 | if (IS_ERR(target) || !target) | 268 | return (target != NULL) ? target : ERR_PTR(-ENOENT); |
261 | return NULL; | ||
262 | return target; | ||
263 | } | 269 | } |
264 | EXPORT_SYMBOL_GPL(xt_request_find_target); | 270 | EXPORT_SYMBOL_GPL(xt_request_find_target); |
265 | 271 | ||
@@ -361,6 +367,8 @@ static char *textify_hooks(char *buf, size_t size, unsigned int mask) | |||
361 | int xt_check_match(struct xt_mtchk_param *par, | 367 | int xt_check_match(struct xt_mtchk_param *par, |
362 | unsigned int size, u_int8_t proto, bool inv_proto) | 368 | unsigned int size, u_int8_t proto, bool inv_proto) |
363 | { | 369 | { |
370 | int ret; | ||
371 | |||
364 | if (XT_ALIGN(par->match->matchsize) != size && | 372 | if (XT_ALIGN(par->match->matchsize) != size && |
365 | par->match->matchsize != -1) { | 373 | par->match->matchsize != -1) { |
366 | /* | 374 | /* |
@@ -397,8 +405,14 @@ int xt_check_match(struct xt_mtchk_param *par, | |||
397 | par->match->proto); | 405 | par->match->proto); |
398 | return -EINVAL; | 406 | return -EINVAL; |
399 | } | 407 | } |
400 | if (par->match->checkentry != NULL && !par->match->checkentry(par)) | 408 | if (par->match->checkentry != NULL) { |
401 | return -EINVAL; | 409 | ret = par->match->checkentry(par); |
410 | if (ret < 0) | ||
411 | return ret; | ||
412 | else if (ret > 0) | ||
413 | /* Flag up potential errors. */ | ||
414 | return -EIO; | ||
415 | } | ||
402 | return 0; | 416 | return 0; |
403 | } | 417 | } |
404 | EXPORT_SYMBOL_GPL(xt_check_match); | 418 | EXPORT_SYMBOL_GPL(xt_check_match); |
@@ -518,6 +532,8 @@ EXPORT_SYMBOL_GPL(xt_compat_match_to_user); | |||
518 | int xt_check_target(struct xt_tgchk_param *par, | 532 | int xt_check_target(struct xt_tgchk_param *par, |
519 | unsigned int size, u_int8_t proto, bool inv_proto) | 533 | unsigned int size, u_int8_t proto, bool inv_proto) |
520 | { | 534 | { |
535 | int ret; | ||
536 | |||
521 | if (XT_ALIGN(par->target->targetsize) != size) { | 537 | if (XT_ALIGN(par->target->targetsize) != size) { |
522 | pr_err("%s_tables: %s.%u target: invalid size " | 538 | pr_err("%s_tables: %s.%u target: invalid size " |
523 | "%u (kernel) != (user) %u\n", | 539 | "%u (kernel) != (user) %u\n", |
@@ -549,8 +565,14 @@ int xt_check_target(struct xt_tgchk_param *par, | |||
549 | par->target->proto); | 565 | par->target->proto); |
550 | return -EINVAL; | 566 | return -EINVAL; |
551 | } | 567 | } |
552 | if (par->target->checkentry != NULL && !par->target->checkentry(par)) | 568 | if (par->target->checkentry != NULL) { |
553 | return -EINVAL; | 569 | ret = par->target->checkentry(par); |
570 | if (ret < 0) | ||
571 | return ret; | ||
572 | else if (ret > 0) | ||
573 | /* Flag up potential errors. */ | ||
574 | return -EIO; | ||
575 | } | ||
554 | return 0; | 576 | return 0; |
555 | } | 577 | } |
556 | EXPORT_SYMBOL_GPL(xt_check_target); | 578 | EXPORT_SYMBOL_GPL(xt_check_target); |
@@ -662,6 +684,24 @@ void xt_free_table_info(struct xt_table_info *info) | |||
662 | else | 684 | else |
663 | vfree(info->entries[cpu]); | 685 | vfree(info->entries[cpu]); |
664 | } | 686 | } |
687 | |||
688 | if (info->jumpstack != NULL) { | ||
689 | if (sizeof(void *) * info->stacksize > PAGE_SIZE) { | ||
690 | for_each_possible_cpu(cpu) | ||
691 | vfree(info->jumpstack[cpu]); | ||
692 | } else { | ||
693 | for_each_possible_cpu(cpu) | ||
694 | kfree(info->jumpstack[cpu]); | ||
695 | } | ||
696 | } | ||
697 | |||
698 | if (sizeof(void **) * nr_cpu_ids > PAGE_SIZE) | ||
699 | vfree(info->jumpstack); | ||
700 | else | ||
701 | kfree(info->jumpstack); | ||
702 | |||
703 | free_percpu(info->stackptr); | ||
704 | |||
665 | kfree(info); | 705 | kfree(info); |
666 | } | 706 | } |
667 | EXPORT_SYMBOL(xt_free_table_info); | 707 | EXPORT_SYMBOL(xt_free_table_info); |
@@ -706,6 +746,44 @@ EXPORT_SYMBOL_GPL(xt_compat_unlock); | |||
706 | DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks); | 746 | DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks); |
707 | EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks); | 747 | EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks); |
708 | 748 | ||
749 | static int xt_jumpstack_alloc(struct xt_table_info *i) | ||
750 | { | ||
751 | unsigned int size; | ||
752 | int cpu; | ||
753 | |||
754 | i->stackptr = alloc_percpu(unsigned int); | ||
755 | if (i->stackptr == NULL) | ||
756 | return -ENOMEM; | ||
757 | |||
758 | size = sizeof(void **) * nr_cpu_ids; | ||
759 | if (size > PAGE_SIZE) | ||
760 | i->jumpstack = vmalloc(size); | ||
761 | else | ||
762 | i->jumpstack = kmalloc(size, GFP_KERNEL); | ||
763 | if (i->jumpstack == NULL) | ||
764 | return -ENOMEM; | ||
765 | memset(i->jumpstack, 0, size); | ||
766 | |||
767 | i->stacksize *= xt_jumpstack_multiplier; | ||
768 | size = sizeof(void *) * i->stacksize; | ||
769 | for_each_possible_cpu(cpu) { | ||
770 | if (size > PAGE_SIZE) | ||
771 | i->jumpstack[cpu] = vmalloc_node(size, | ||
772 | cpu_to_node(cpu)); | ||
773 | else | ||
774 | i->jumpstack[cpu] = kmalloc_node(size, | ||
775 | GFP_KERNEL, cpu_to_node(cpu)); | ||
776 | if (i->jumpstack[cpu] == NULL) | ||
777 | /* | ||
778 | * Freeing will be done later on by the callers. The | ||
779 | * chain is: xt_replace_table -> __do_replace -> | ||
780 | * do_replace -> xt_free_table_info. | ||
781 | */ | ||
782 | return -ENOMEM; | ||
783 | } | ||
784 | |||
785 | return 0; | ||
786 | } | ||
709 | 787 | ||
710 | struct xt_table_info * | 788 | struct xt_table_info * |
711 | xt_replace_table(struct xt_table *table, | 789 | xt_replace_table(struct xt_table *table, |
@@ -714,6 +792,13 @@ xt_replace_table(struct xt_table *table, | |||
714 | int *error) | 792 | int *error) |
715 | { | 793 | { |
716 | struct xt_table_info *private; | 794 | struct xt_table_info *private; |
795 | int ret; | ||
796 | |||
797 | ret = xt_jumpstack_alloc(newinfo); | ||
798 | if (ret < 0) { | ||
799 | *error = ret; | ||
800 | return NULL; | ||
801 | } | ||
717 | 802 | ||
718 | /* Do the substitution. */ | 803 | /* Do the substitution. */ |
719 | local_bh_disable(); | 804 | local_bh_disable(); |
@@ -721,7 +806,7 @@ xt_replace_table(struct xt_table *table, | |||
721 | 806 | ||
722 | /* Check inside lock: is the old number correct? */ | 807 | /* Check inside lock: is the old number correct? */ |
723 | if (num_counters != private->number) { | 808 | if (num_counters != private->number) { |
724 | duprintf("num_counters != table->private->number (%u/%u)\n", | 809 | pr_debug("num_counters != table->private->number (%u/%u)\n", |
725 | num_counters, private->number); | 810 | num_counters, private->number); |
726 | local_bh_enable(); | 811 | local_bh_enable(); |
727 | *error = -EAGAIN; | 812 | *error = -EAGAIN; |
@@ -778,7 +863,7 @@ struct xt_table *xt_register_table(struct net *net, | |||
778 | goto unlock; | 863 | goto unlock; |
779 | 864 | ||
780 | private = table->private; | 865 | private = table->private; |
781 | duprintf("table->private->number = %u\n", private->number); | 866 | pr_debug("table->private->number = %u\n", private->number); |
782 | 867 | ||
783 | /* save number of initial entries */ | 868 | /* save number of initial entries */ |
784 | private->initial_entries = private->number; | 869 | private->initial_entries = private->number; |
diff --git a/net/netfilter/xt_CLASSIFY.c b/net/netfilter/xt_CLASSIFY.c index 011bc80dd2a1..c2c0e4abeb99 100644 --- a/net/netfilter/xt_CLASSIFY.c +++ b/net/netfilter/xt_CLASSIFY.c | |||
@@ -27,7 +27,7 @@ MODULE_ALIAS("ipt_CLASSIFY"); | |||
27 | MODULE_ALIAS("ip6t_CLASSIFY"); | 27 | MODULE_ALIAS("ip6t_CLASSIFY"); |
28 | 28 | ||
29 | static unsigned int | 29 | static unsigned int |
30 | classify_tg(struct sk_buff *skb, const struct xt_target_param *par) | 30 | classify_tg(struct sk_buff *skb, const struct xt_action_param *par) |
31 | { | 31 | { |
32 | const struct xt_classify_target_info *clinfo = par->targinfo; | 32 | const struct xt_classify_target_info *clinfo = par->targinfo; |
33 | 33 | ||
diff --git a/net/netfilter/xt_CONNMARK.c b/net/netfilter/xt_CONNMARK.c deleted file mode 100644 index 593457068ae1..000000000000 --- a/net/netfilter/xt_CONNMARK.c +++ /dev/null | |||
@@ -1,113 +0,0 @@ | |||
1 | /* | ||
2 | * xt_CONNMARK - Netfilter module to modify the connection mark values | ||
3 | * | ||
4 | * Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com> | ||
5 | * by Henrik Nordstrom <hno@marasystems.com> | ||
6 | * Copyright © CC Computer Consultants GmbH, 2007 - 2008 | ||
7 | * Jan Engelhardt <jengelh@computergmbh.de> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
22 | */ | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/skbuff.h> | ||
25 | #include <linux/ip.h> | ||
26 | #include <net/checksum.h> | ||
27 | |||
28 | MODULE_AUTHOR("Henrik Nordstrom <hno@marasystems.com>"); | ||
29 | MODULE_DESCRIPTION("Xtables: connection mark modification"); | ||
30 | MODULE_LICENSE("GPL"); | ||
31 | MODULE_ALIAS("ipt_CONNMARK"); | ||
32 | MODULE_ALIAS("ip6t_CONNMARK"); | ||
33 | |||
34 | #include <linux/netfilter/x_tables.h> | ||
35 | #include <linux/netfilter/xt_CONNMARK.h> | ||
36 | #include <net/netfilter/nf_conntrack_ecache.h> | ||
37 | |||
38 | static unsigned int | ||
39 | connmark_tg(struct sk_buff *skb, const struct xt_target_param *par) | ||
40 | { | ||
41 | const struct xt_connmark_tginfo1 *info = par->targinfo; | ||
42 | enum ip_conntrack_info ctinfo; | ||
43 | struct nf_conn *ct; | ||
44 | u_int32_t newmark; | ||
45 | |||
46 | ct = nf_ct_get(skb, &ctinfo); | ||
47 | if (ct == NULL) | ||
48 | return XT_CONTINUE; | ||
49 | |||
50 | switch (info->mode) { | ||
51 | case XT_CONNMARK_SET: | ||
52 | newmark = (ct->mark & ~info->ctmask) ^ info->ctmark; | ||
53 | if (ct->mark != newmark) { | ||
54 | ct->mark = newmark; | ||
55 | nf_conntrack_event_cache(IPCT_MARK, ct); | ||
56 | } | ||
57 | break; | ||
58 | case XT_CONNMARK_SAVE: | ||
59 | newmark = (ct->mark & ~info->ctmask) ^ | ||
60 | (skb->mark & info->nfmask); | ||
61 | if (ct->mark != newmark) { | ||
62 | ct->mark = newmark; | ||
63 | nf_conntrack_event_cache(IPCT_MARK, ct); | ||
64 | } | ||
65 | break; | ||
66 | case XT_CONNMARK_RESTORE: | ||
67 | newmark = (skb->mark & ~info->nfmask) ^ | ||
68 | (ct->mark & info->ctmask); | ||
69 | skb->mark = newmark; | ||
70 | break; | ||
71 | } | ||
72 | |||
73 | return XT_CONTINUE; | ||
74 | } | ||
75 | |||
76 | static bool connmark_tg_check(const struct xt_tgchk_param *par) | ||
77 | { | ||
78 | if (nf_ct_l3proto_try_module_get(par->family) < 0) { | ||
79 | printk(KERN_WARNING "cannot load conntrack support for " | ||
80 | "proto=%u\n", par->family); | ||
81 | return false; | ||
82 | } | ||
83 | return true; | ||
84 | } | ||
85 | |||
86 | static void connmark_tg_destroy(const struct xt_tgdtor_param *par) | ||
87 | { | ||
88 | nf_ct_l3proto_module_put(par->family); | ||
89 | } | ||
90 | |||
91 | static struct xt_target connmark_tg_reg __read_mostly = { | ||
92 | .name = "CONNMARK", | ||
93 | .revision = 1, | ||
94 | .family = NFPROTO_UNSPEC, | ||
95 | .checkentry = connmark_tg_check, | ||
96 | .target = connmark_tg, | ||
97 | .targetsize = sizeof(struct xt_connmark_tginfo1), | ||
98 | .destroy = connmark_tg_destroy, | ||
99 | .me = THIS_MODULE, | ||
100 | }; | ||
101 | |||
102 | static int __init connmark_tg_init(void) | ||
103 | { | ||
104 | return xt_register_target(&connmark_tg_reg); | ||
105 | } | ||
106 | |||
107 | static void __exit connmark_tg_exit(void) | ||
108 | { | ||
109 | xt_unregister_target(&connmark_tg_reg); | ||
110 | } | ||
111 | |||
112 | module_init(connmark_tg_init); | ||
113 | module_exit(connmark_tg_exit); | ||
diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c index b54c3756fdc3..e04dc282e3bb 100644 --- a/net/netfilter/xt_CONNSECMARK.c +++ b/net/netfilter/xt_CONNSECMARK.c | |||
@@ -15,6 +15,7 @@ | |||
15 | * published by the Free Software Foundation. | 15 | * published by the Free Software Foundation. |
16 | * | 16 | * |
17 | */ | 17 | */ |
18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
18 | #include <linux/module.h> | 19 | #include <linux/module.h> |
19 | #include <linux/skbuff.h> | 20 | #include <linux/skbuff.h> |
20 | #include <linux/netfilter/x_tables.h> | 21 | #include <linux/netfilter/x_tables.h> |
@@ -22,8 +23,6 @@ | |||
22 | #include <net/netfilter/nf_conntrack.h> | 23 | #include <net/netfilter/nf_conntrack.h> |
23 | #include <net/netfilter/nf_conntrack_ecache.h> | 24 | #include <net/netfilter/nf_conntrack_ecache.h> |
24 | 25 | ||
25 | #define PFX "CONNSECMARK: " | ||
26 | |||
27 | MODULE_LICENSE("GPL"); | 26 | MODULE_LICENSE("GPL"); |
28 | MODULE_AUTHOR("James Morris <jmorris@redhat.com>"); | 27 | MODULE_AUTHOR("James Morris <jmorris@redhat.com>"); |
29 | MODULE_DESCRIPTION("Xtables: target for copying between connection and security mark"); | 28 | MODULE_DESCRIPTION("Xtables: target for copying between connection and security mark"); |
@@ -65,7 +64,7 @@ static void secmark_restore(struct sk_buff *skb) | |||
65 | } | 64 | } |
66 | 65 | ||
67 | static unsigned int | 66 | static unsigned int |
68 | connsecmark_tg(struct sk_buff *skb, const struct xt_target_param *par) | 67 | connsecmark_tg(struct sk_buff *skb, const struct xt_action_param *par) |
69 | { | 68 | { |
70 | const struct xt_connsecmark_target_info *info = par->targinfo; | 69 | const struct xt_connsecmark_target_info *info = par->targinfo; |
71 | 70 | ||
@@ -85,15 +84,16 @@ connsecmark_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
85 | return XT_CONTINUE; | 84 | return XT_CONTINUE; |
86 | } | 85 | } |
87 | 86 | ||
88 | static bool connsecmark_tg_check(const struct xt_tgchk_param *par) | 87 | static int connsecmark_tg_check(const struct xt_tgchk_param *par) |
89 | { | 88 | { |
90 | const struct xt_connsecmark_target_info *info = par->targinfo; | 89 | const struct xt_connsecmark_target_info *info = par->targinfo; |
90 | int ret; | ||
91 | 91 | ||
92 | if (strcmp(par->table, "mangle") != 0 && | 92 | if (strcmp(par->table, "mangle") != 0 && |
93 | strcmp(par->table, "security") != 0) { | 93 | strcmp(par->table, "security") != 0) { |
94 | printk(KERN_INFO PFX "target only valid in the \'mangle\' " | 94 | pr_info("target only valid in the \'mangle\' " |
95 | "or \'security\' tables, not \'%s\'.\n", par->table); | 95 | "or \'security\' tables, not \'%s\'.\n", par->table); |
96 | return false; | 96 | return -EINVAL; |
97 | } | 97 | } |
98 | 98 | ||
99 | switch (info->mode) { | 99 | switch (info->mode) { |
@@ -102,16 +102,15 @@ static bool connsecmark_tg_check(const struct xt_tgchk_param *par) | |||
102 | break; | 102 | break; |
103 | 103 | ||
104 | default: | 104 | default: |
105 | printk(KERN_INFO PFX "invalid mode: %hu\n", info->mode); | 105 | pr_info("invalid mode: %hu\n", info->mode); |
106 | return false; | 106 | return -EINVAL; |
107 | } | 107 | } |
108 | 108 | ||
109 | if (nf_ct_l3proto_try_module_get(par->family) < 0) { | 109 | ret = nf_ct_l3proto_try_module_get(par->family); |
110 | printk(KERN_WARNING "can't load conntrack support for " | 110 | if (ret < 0) |
111 | "proto=%u\n", par->family); | 111 | pr_info("cannot load conntrack support for proto=%u\n", |
112 | return false; | 112 | par->family); |
113 | } | 113 | return ret; |
114 | return true; | ||
115 | } | 114 | } |
116 | 115 | ||
117 | static void connsecmark_tg_destroy(const struct xt_tgdtor_param *par) | 116 | static void connsecmark_tg_destroy(const struct xt_tgdtor_param *par) |
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index ee18b231b950..562bf3266e04 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <net/netfilter/nf_conntrack_zones.h> | 20 | #include <net/netfilter/nf_conntrack_zones.h> |
21 | 21 | ||
22 | static unsigned int xt_ct_target(struct sk_buff *skb, | 22 | static unsigned int xt_ct_target(struct sk_buff *skb, |
23 | const struct xt_target_param *par) | 23 | const struct xt_action_param *par) |
24 | { | 24 | { |
25 | const struct xt_ct_target_info *info = par->targinfo; | 25 | const struct xt_ct_target_info *info = par->targinfo; |
26 | struct nf_conn *ct = info->ct; | 26 | struct nf_conn *ct = info->ct; |
@@ -38,13 +38,13 @@ static unsigned int xt_ct_target(struct sk_buff *skb, | |||
38 | 38 | ||
39 | static u8 xt_ct_find_proto(const struct xt_tgchk_param *par) | 39 | static u8 xt_ct_find_proto(const struct xt_tgchk_param *par) |
40 | { | 40 | { |
41 | if (par->family == AF_INET) { | 41 | if (par->family == NFPROTO_IPV4) { |
42 | const struct ipt_entry *e = par->entryinfo; | 42 | const struct ipt_entry *e = par->entryinfo; |
43 | 43 | ||
44 | if (e->ip.invflags & IPT_INV_PROTO) | 44 | if (e->ip.invflags & IPT_INV_PROTO) |
45 | return 0; | 45 | return 0; |
46 | return e->ip.proto; | 46 | return e->ip.proto; |
47 | } else if (par->family == AF_INET6) { | 47 | } else if (par->family == NFPROTO_IPV6) { |
48 | const struct ip6t_entry *e = par->entryinfo; | 48 | const struct ip6t_entry *e = par->entryinfo; |
49 | 49 | ||
50 | if (e->ipv6.invflags & IP6T_INV_PROTO) | 50 | if (e->ipv6.invflags & IP6T_INV_PROTO) |
@@ -54,16 +54,17 @@ static u8 xt_ct_find_proto(const struct xt_tgchk_param *par) | |||
54 | return 0; | 54 | return 0; |
55 | } | 55 | } |
56 | 56 | ||
57 | static bool xt_ct_tg_check(const struct xt_tgchk_param *par) | 57 | static int xt_ct_tg_check(const struct xt_tgchk_param *par) |
58 | { | 58 | { |
59 | struct xt_ct_target_info *info = par->targinfo; | 59 | struct xt_ct_target_info *info = par->targinfo; |
60 | struct nf_conntrack_tuple t; | 60 | struct nf_conntrack_tuple t; |
61 | struct nf_conn_help *help; | 61 | struct nf_conn_help *help; |
62 | struct nf_conn *ct; | 62 | struct nf_conn *ct; |
63 | int ret = 0; | ||
63 | u8 proto; | 64 | u8 proto; |
64 | 65 | ||
65 | if (info->flags & ~XT_CT_NOTRACK) | 66 | if (info->flags & ~XT_CT_NOTRACK) |
66 | return false; | 67 | return -EINVAL; |
67 | 68 | ||
68 | if (info->flags & XT_CT_NOTRACK) { | 69 | if (info->flags & XT_CT_NOTRACK) { |
69 | ct = &nf_conntrack_untracked; | 70 | ct = &nf_conntrack_untracked; |
@@ -76,28 +77,34 @@ static bool xt_ct_tg_check(const struct xt_tgchk_param *par) | |||
76 | goto err1; | 77 | goto err1; |
77 | #endif | 78 | #endif |
78 | 79 | ||
79 | if (nf_ct_l3proto_try_module_get(par->family) < 0) | 80 | ret = nf_ct_l3proto_try_module_get(par->family); |
81 | if (ret < 0) | ||
80 | goto err1; | 82 | goto err1; |
81 | 83 | ||
82 | memset(&t, 0, sizeof(t)); | 84 | memset(&t, 0, sizeof(t)); |
83 | ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL); | 85 | ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL); |
86 | ret = PTR_ERR(ct); | ||
84 | if (IS_ERR(ct)) | 87 | if (IS_ERR(ct)) |
85 | goto err2; | 88 | goto err2; |
86 | 89 | ||
90 | ret = 0; | ||
87 | if ((info->ct_events || info->exp_events) && | 91 | if ((info->ct_events || info->exp_events) && |
88 | !nf_ct_ecache_ext_add(ct, info->ct_events, info->exp_events, | 92 | !nf_ct_ecache_ext_add(ct, info->ct_events, info->exp_events, |
89 | GFP_KERNEL)) | 93 | GFP_KERNEL)) |
90 | goto err3; | 94 | goto err3; |
91 | 95 | ||
92 | if (info->helper[0]) { | 96 | if (info->helper[0]) { |
97 | ret = -ENOENT; | ||
93 | proto = xt_ct_find_proto(par); | 98 | proto = xt_ct_find_proto(par); |
94 | if (!proto) | 99 | if (!proto) |
95 | goto err3; | 100 | goto err3; |
96 | 101 | ||
102 | ret = -ENOMEM; | ||
97 | help = nf_ct_helper_ext_add(ct, GFP_KERNEL); | 103 | help = nf_ct_helper_ext_add(ct, GFP_KERNEL); |
98 | if (help == NULL) | 104 | if (help == NULL) |
99 | goto err3; | 105 | goto err3; |
100 | 106 | ||
107 | ret = -ENOENT; | ||
101 | help->helper = nf_conntrack_helper_try_module_get(info->helper, | 108 | help->helper = nf_conntrack_helper_try_module_get(info->helper, |
102 | par->family, | 109 | par->family, |
103 | proto); | 110 | proto); |
@@ -109,14 +116,14 @@ static bool xt_ct_tg_check(const struct xt_tgchk_param *par) | |||
109 | __set_bit(IPS_CONFIRMED_BIT, &ct->status); | 116 | __set_bit(IPS_CONFIRMED_BIT, &ct->status); |
110 | out: | 117 | out: |
111 | info->ct = ct; | 118 | info->ct = ct; |
112 | return true; | 119 | return 0; |
113 | 120 | ||
114 | err3: | 121 | err3: |
115 | nf_conntrack_free(ct); | 122 | nf_conntrack_free(ct); |
116 | err2: | 123 | err2: |
117 | nf_ct_l3proto_module_put(par->family); | 124 | nf_ct_l3proto_module_put(par->family); |
118 | err1: | 125 | err1: |
119 | return false; | 126 | return ret; |
120 | } | 127 | } |
121 | 128 | ||
122 | static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par) | 129 | static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par) |
@@ -138,7 +145,7 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par) | |||
138 | static struct xt_target xt_ct_tg __read_mostly = { | 145 | static struct xt_target xt_ct_tg __read_mostly = { |
139 | .name = "CT", | 146 | .name = "CT", |
140 | .family = NFPROTO_UNSPEC, | 147 | .family = NFPROTO_UNSPEC, |
141 | .targetsize = XT_ALIGN(sizeof(struct xt_ct_target_info)), | 148 | .targetsize = sizeof(struct xt_ct_target_info), |
142 | .checkentry = xt_ct_tg_check, | 149 | .checkentry = xt_ct_tg_check, |
143 | .destroy = xt_ct_tg_destroy, | 150 | .destroy = xt_ct_tg_destroy, |
144 | .target = xt_ct_target, | 151 | .target = xt_ct_target, |
diff --git a/net/netfilter/xt_DSCP.c b/net/netfilter/xt_DSCP.c index 74ce89260056..0a229191e55b 100644 --- a/net/netfilter/xt_DSCP.c +++ b/net/netfilter/xt_DSCP.c | |||
@@ -9,7 +9,7 @@ | |||
9 | * | 9 | * |
10 | * See RFC2474 for a description of the DSCP field within the IP Header. | 10 | * See RFC2474 for a description of the DSCP field within the IP Header. |
11 | */ | 11 | */ |
12 | 12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
15 | #include <linux/ip.h> | 15 | #include <linux/ip.h> |
@@ -28,7 +28,7 @@ MODULE_ALIAS("ipt_TOS"); | |||
28 | MODULE_ALIAS("ip6t_TOS"); | 28 | MODULE_ALIAS("ip6t_TOS"); |
29 | 29 | ||
30 | static unsigned int | 30 | static unsigned int |
31 | dscp_tg(struct sk_buff *skb, const struct xt_target_param *par) | 31 | dscp_tg(struct sk_buff *skb, const struct xt_action_param *par) |
32 | { | 32 | { |
33 | const struct xt_DSCP_info *dinfo = par->targinfo; | 33 | const struct xt_DSCP_info *dinfo = par->targinfo; |
34 | u_int8_t dscp = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT; | 34 | u_int8_t dscp = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT; |
@@ -45,7 +45,7 @@ dscp_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
45 | } | 45 | } |
46 | 46 | ||
47 | static unsigned int | 47 | static unsigned int |
48 | dscp_tg6(struct sk_buff *skb, const struct xt_target_param *par) | 48 | dscp_tg6(struct sk_buff *skb, const struct xt_action_param *par) |
49 | { | 49 | { |
50 | const struct xt_DSCP_info *dinfo = par->targinfo; | 50 | const struct xt_DSCP_info *dinfo = par->targinfo; |
51 | u_int8_t dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> XT_DSCP_SHIFT; | 51 | u_int8_t dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> XT_DSCP_SHIFT; |
@@ -60,19 +60,19 @@ dscp_tg6(struct sk_buff *skb, const struct xt_target_param *par) | |||
60 | return XT_CONTINUE; | 60 | return XT_CONTINUE; |
61 | } | 61 | } |
62 | 62 | ||
63 | static bool dscp_tg_check(const struct xt_tgchk_param *par) | 63 | static int dscp_tg_check(const struct xt_tgchk_param *par) |
64 | { | 64 | { |
65 | const struct xt_DSCP_info *info = par->targinfo; | 65 | const struct xt_DSCP_info *info = par->targinfo; |
66 | 66 | ||
67 | if (info->dscp > XT_DSCP_MAX) { | 67 | if (info->dscp > XT_DSCP_MAX) { |
68 | printk(KERN_WARNING "DSCP: dscp %x out of range\n", info->dscp); | 68 | pr_info("dscp %x out of range\n", info->dscp); |
69 | return false; | 69 | return -EDOM; |
70 | } | 70 | } |
71 | return true; | 71 | return 0; |
72 | } | 72 | } |
73 | 73 | ||
74 | static unsigned int | 74 | static unsigned int |
75 | tos_tg(struct sk_buff *skb, const struct xt_target_param *par) | 75 | tos_tg(struct sk_buff *skb, const struct xt_action_param *par) |
76 | { | 76 | { |
77 | const struct xt_tos_target_info *info = par->targinfo; | 77 | const struct xt_tos_target_info *info = par->targinfo; |
78 | struct iphdr *iph = ip_hdr(skb); | 78 | struct iphdr *iph = ip_hdr(skb); |
@@ -92,7 +92,7 @@ tos_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
92 | } | 92 | } |
93 | 93 | ||
94 | static unsigned int | 94 | static unsigned int |
95 | tos_tg6(struct sk_buff *skb, const struct xt_target_param *par) | 95 | tos_tg6(struct sk_buff *skb, const struct xt_action_param *par) |
96 | { | 96 | { |
97 | const struct xt_tos_target_info *info = par->targinfo; | 97 | const struct xt_tos_target_info *info = par->targinfo; |
98 | struct ipv6hdr *iph = ipv6_hdr(skb); | 98 | struct ipv6hdr *iph = ipv6_hdr(skb); |
diff --git a/net/netfilter/xt_HL.c b/net/netfilter/xt_HL.c index 10e789e2d12a..95b084800fcc 100644 --- a/net/netfilter/xt_HL.c +++ b/net/netfilter/xt_HL.c | |||
@@ -9,7 +9,7 @@ | |||
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
15 | #include <linux/ip.h> | 15 | #include <linux/ip.h> |
@@ -26,7 +26,7 @@ MODULE_DESCRIPTION("Xtables: Hoplimit/TTL Limit field modification target"); | |||
26 | MODULE_LICENSE("GPL"); | 26 | MODULE_LICENSE("GPL"); |
27 | 27 | ||
28 | static unsigned int | 28 | static unsigned int |
29 | ttl_tg(struct sk_buff *skb, const struct xt_target_param *par) | 29 | ttl_tg(struct sk_buff *skb, const struct xt_action_param *par) |
30 | { | 30 | { |
31 | struct iphdr *iph; | 31 | struct iphdr *iph; |
32 | const struct ipt_TTL_info *info = par->targinfo; | 32 | const struct ipt_TTL_info *info = par->targinfo; |
@@ -66,7 +66,7 @@ ttl_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
66 | } | 66 | } |
67 | 67 | ||
68 | static unsigned int | 68 | static unsigned int |
69 | hl_tg6(struct sk_buff *skb, const struct xt_target_param *par) | 69 | hl_tg6(struct sk_buff *skb, const struct xt_action_param *par) |
70 | { | 70 | { |
71 | struct ipv6hdr *ip6h; | 71 | struct ipv6hdr *ip6h; |
72 | const struct ip6t_HL_info *info = par->targinfo; | 72 | const struct ip6t_HL_info *info = par->targinfo; |
@@ -101,35 +101,33 @@ hl_tg6(struct sk_buff *skb, const struct xt_target_param *par) | |||
101 | return XT_CONTINUE; | 101 | return XT_CONTINUE; |
102 | } | 102 | } |
103 | 103 | ||
104 | static bool ttl_tg_check(const struct xt_tgchk_param *par) | 104 | static int ttl_tg_check(const struct xt_tgchk_param *par) |
105 | { | 105 | { |
106 | const struct ipt_TTL_info *info = par->targinfo; | 106 | const struct ipt_TTL_info *info = par->targinfo; |
107 | 107 | ||
108 | if (info->mode > IPT_TTL_MAXMODE) { | 108 | if (info->mode > IPT_TTL_MAXMODE) { |
109 | printk(KERN_WARNING "ipt_TTL: invalid or unknown Mode %u\n", | 109 | pr_info("TTL: invalid or unknown mode %u\n", info->mode); |
110 | info->mode); | 110 | return -EINVAL; |
111 | return false; | ||
112 | } | 111 | } |
113 | if (info->mode != IPT_TTL_SET && info->ttl == 0) | 112 | if (info->mode != IPT_TTL_SET && info->ttl == 0) |
114 | return false; | 113 | return -EINVAL; |
115 | return true; | 114 | return 0; |
116 | } | 115 | } |
117 | 116 | ||
118 | static bool hl_tg6_check(const struct xt_tgchk_param *par) | 117 | static int hl_tg6_check(const struct xt_tgchk_param *par) |
119 | { | 118 | { |
120 | const struct ip6t_HL_info *info = par->targinfo; | 119 | const struct ip6t_HL_info *info = par->targinfo; |
121 | 120 | ||
122 | if (info->mode > IP6T_HL_MAXMODE) { | 121 | if (info->mode > IP6T_HL_MAXMODE) { |
123 | printk(KERN_WARNING "ip6t_HL: invalid or unknown Mode %u\n", | 122 | pr_info("invalid or unknown mode %u\n", info->mode); |
124 | info->mode); | 123 | return -EINVAL; |
125 | return false; | ||
126 | } | 124 | } |
127 | if (info->mode != IP6T_HL_SET && info->hop_limit == 0) { | 125 | if (info->mode != IP6T_HL_SET && info->hop_limit == 0) { |
128 | printk(KERN_WARNING "ip6t_HL: increment/decrement doesn't " | 126 | pr_info("increment/decrement does not " |
129 | "make sense with value 0\n"); | 127 | "make sense with value 0\n"); |
130 | return false; | 128 | return -EINVAL; |
131 | } | 129 | } |
132 | return true; | 130 | return 0; |
133 | } | 131 | } |
134 | 132 | ||
135 | static struct xt_target hl_tg_reg[] __read_mostly = { | 133 | static struct xt_target hl_tg_reg[] __read_mostly = { |
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c index 3271c8e52153..a4140509eea1 100644 --- a/net/netfilter/xt_LED.c +++ b/net/netfilter/xt_LED.c | |||
@@ -18,7 +18,7 @@ | |||
18 | * 02110-1301 USA. | 18 | * 02110-1301 USA. |
19 | * | 19 | * |
20 | */ | 20 | */ |
21 | 21 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/skbuff.h> | 23 | #include <linux/skbuff.h> |
24 | #include <linux/netfilter/x_tables.h> | 24 | #include <linux/netfilter/x_tables.h> |
@@ -32,18 +32,24 @@ MODULE_LICENSE("GPL"); | |||
32 | MODULE_AUTHOR("Adam Nielsen <a.nielsen@shikadi.net>"); | 32 | MODULE_AUTHOR("Adam Nielsen <a.nielsen@shikadi.net>"); |
33 | MODULE_DESCRIPTION("Xtables: trigger LED devices on packet match"); | 33 | MODULE_DESCRIPTION("Xtables: trigger LED devices on packet match"); |
34 | 34 | ||
35 | static LIST_HEAD(xt_led_triggers); | ||
36 | static DEFINE_MUTEX(xt_led_mutex); | ||
37 | |||
35 | /* | 38 | /* |
36 | * This is declared in here (the kernel module) only, to avoid having these | 39 | * This is declared in here (the kernel module) only, to avoid having these |
37 | * dependencies in userspace code. This is what xt_led_info.internal_data | 40 | * dependencies in userspace code. This is what xt_led_info.internal_data |
38 | * points to. | 41 | * points to. |
39 | */ | 42 | */ |
40 | struct xt_led_info_internal { | 43 | struct xt_led_info_internal { |
44 | struct list_head list; | ||
45 | int refcnt; | ||
46 | char *trigger_id; | ||
41 | struct led_trigger netfilter_led_trigger; | 47 | struct led_trigger netfilter_led_trigger; |
42 | struct timer_list timer; | 48 | struct timer_list timer; |
43 | }; | 49 | }; |
44 | 50 | ||
45 | static unsigned int | 51 | static unsigned int |
46 | led_tg(struct sk_buff *skb, const struct xt_target_param *par) | 52 | led_tg(struct sk_buff *skb, const struct xt_action_param *par) |
47 | { | 53 | { |
48 | const struct xt_led_info *ledinfo = par->targinfo; | 54 | const struct xt_led_info *ledinfo = par->targinfo; |
49 | struct xt_led_info_internal *ledinternal = ledinfo->internal_data; | 55 | struct xt_led_info_internal *ledinternal = ledinfo->internal_data; |
@@ -54,7 +60,7 @@ led_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
54 | */ | 60 | */ |
55 | if ((ledinfo->delay > 0) && ledinfo->always_blink && | 61 | if ((ledinfo->delay > 0) && ledinfo->always_blink && |
56 | timer_pending(&ledinternal->timer)) | 62 | timer_pending(&ledinternal->timer)) |
57 | led_trigger_event(&ledinternal->netfilter_led_trigger,LED_OFF); | 63 | led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF); |
58 | 64 | ||
59 | led_trigger_event(&ledinternal->netfilter_led_trigger, LED_FULL); | 65 | led_trigger_event(&ledinternal->netfilter_led_trigger, LED_FULL); |
60 | 66 | ||
@@ -75,54 +81,86 @@ led_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
75 | 81 | ||
76 | static void led_timeout_callback(unsigned long data) | 82 | static void led_timeout_callback(unsigned long data) |
77 | { | 83 | { |
78 | struct xt_led_info *ledinfo = (struct xt_led_info *)data; | 84 | struct xt_led_info_internal *ledinternal = (struct xt_led_info_internal *)data; |
79 | struct xt_led_info_internal *ledinternal = ledinfo->internal_data; | ||
80 | 85 | ||
81 | led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF); | 86 | led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF); |
82 | } | 87 | } |
83 | 88 | ||
84 | static bool led_tg_check(const struct xt_tgchk_param *par) | 89 | static struct xt_led_info_internal *led_trigger_lookup(const char *name) |
90 | { | ||
91 | struct xt_led_info_internal *ledinternal; | ||
92 | |||
93 | list_for_each_entry(ledinternal, &xt_led_triggers, list) { | ||
94 | if (!strcmp(name, ledinternal->netfilter_led_trigger.name)) { | ||
95 | return ledinternal; | ||
96 | } | ||
97 | } | ||
98 | return NULL; | ||
99 | } | ||
100 | |||
101 | static int led_tg_check(const struct xt_tgchk_param *par) | ||
85 | { | 102 | { |
86 | struct xt_led_info *ledinfo = par->targinfo; | 103 | struct xt_led_info *ledinfo = par->targinfo; |
87 | struct xt_led_info_internal *ledinternal; | 104 | struct xt_led_info_internal *ledinternal; |
88 | int err; | 105 | int err; |
89 | 106 | ||
90 | if (ledinfo->id[0] == '\0') { | 107 | if (ledinfo->id[0] == '\0') { |
91 | printk(KERN_ERR KBUILD_MODNAME ": No 'id' parameter given.\n"); | 108 | pr_info("No 'id' parameter given.\n"); |
92 | return false; | 109 | return -EINVAL; |
93 | } | 110 | } |
94 | 111 | ||
95 | ledinternal = kzalloc(sizeof(struct xt_led_info_internal), GFP_KERNEL); | 112 | mutex_lock(&xt_led_mutex); |
96 | if (!ledinternal) { | 113 | |
97 | printk(KERN_CRIT KBUILD_MODNAME ": out of memory\n"); | 114 | ledinternal = led_trigger_lookup(ledinfo->id); |
98 | return false; | 115 | if (ledinternal) { |
116 | ledinternal->refcnt++; | ||
117 | goto out; | ||
99 | } | 118 | } |
100 | 119 | ||
101 | ledinternal->netfilter_led_trigger.name = ledinfo->id; | 120 | err = -ENOMEM; |
121 | ledinternal = kzalloc(sizeof(struct xt_led_info_internal), GFP_KERNEL); | ||
122 | if (!ledinternal) | ||
123 | goto exit_mutex_only; | ||
124 | |||
125 | ledinternal->trigger_id = kstrdup(ledinfo->id, GFP_KERNEL); | ||
126 | if (!ledinternal->trigger_id) | ||
127 | goto exit_internal_alloc; | ||
128 | |||
129 | ledinternal->refcnt = 1; | ||
130 | ledinternal->netfilter_led_trigger.name = ledinternal->trigger_id; | ||
102 | 131 | ||
103 | err = led_trigger_register(&ledinternal->netfilter_led_trigger); | 132 | err = led_trigger_register(&ledinternal->netfilter_led_trigger); |
104 | if (err) { | 133 | if (err) { |
105 | printk(KERN_CRIT KBUILD_MODNAME | 134 | pr_warning("led_trigger_register() failed\n"); |
106 | ": led_trigger_register() failed\n"); | ||
107 | if (err == -EEXIST) | 135 | if (err == -EEXIST) |
108 | printk(KERN_ERR KBUILD_MODNAME | 136 | pr_warning("Trigger name is already in use.\n"); |
109 | ": Trigger name is already in use.\n"); | ||
110 | goto exit_alloc; | 137 | goto exit_alloc; |
111 | } | 138 | } |
112 | 139 | ||
113 | /* See if we need to set up a timer */ | 140 | /* See if we need to set up a timer */ |
114 | if (ledinfo->delay > 0) | 141 | if (ledinfo->delay > 0) |
115 | setup_timer(&ledinternal->timer, led_timeout_callback, | 142 | setup_timer(&ledinternal->timer, led_timeout_callback, |
116 | (unsigned long)ledinfo); | 143 | (unsigned long)ledinternal); |
144 | |||
145 | list_add_tail(&ledinternal->list, &xt_led_triggers); | ||
146 | |||
147 | out: | ||
148 | mutex_unlock(&xt_led_mutex); | ||
117 | 149 | ||
118 | ledinfo->internal_data = ledinternal; | 150 | ledinfo->internal_data = ledinternal; |
119 | 151 | ||
120 | return true; | 152 | return 0; |
121 | 153 | ||
122 | exit_alloc: | 154 | exit_alloc: |
155 | kfree(ledinternal->trigger_id); | ||
156 | |||
157 | exit_internal_alloc: | ||
123 | kfree(ledinternal); | 158 | kfree(ledinternal); |
124 | 159 | ||
125 | return false; | 160 | exit_mutex_only: |
161 | mutex_unlock(&xt_led_mutex); | ||
162 | |||
163 | return err; | ||
126 | } | 164 | } |
127 | 165 | ||
128 | static void led_tg_destroy(const struct xt_tgdtor_param *par) | 166 | static void led_tg_destroy(const struct xt_tgdtor_param *par) |
@@ -130,10 +168,23 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par) | |||
130 | const struct xt_led_info *ledinfo = par->targinfo; | 168 | const struct xt_led_info *ledinfo = par->targinfo; |
131 | struct xt_led_info_internal *ledinternal = ledinfo->internal_data; | 169 | struct xt_led_info_internal *ledinternal = ledinfo->internal_data; |
132 | 170 | ||
171 | mutex_lock(&xt_led_mutex); | ||
172 | |||
173 | if (--ledinternal->refcnt) { | ||
174 | mutex_unlock(&xt_led_mutex); | ||
175 | return; | ||
176 | } | ||
177 | |||
178 | list_del(&ledinternal->list); | ||
179 | |||
133 | if (ledinfo->delay > 0) | 180 | if (ledinfo->delay > 0) |
134 | del_timer_sync(&ledinternal->timer); | 181 | del_timer_sync(&ledinternal->timer); |
135 | 182 | ||
136 | led_trigger_unregister(&ledinternal->netfilter_led_trigger); | 183 | led_trigger_unregister(&ledinternal->netfilter_led_trigger); |
184 | |||
185 | mutex_unlock(&xt_led_mutex); | ||
186 | |||
187 | kfree(ledinternal->trigger_id); | ||
137 | kfree(ledinternal); | 188 | kfree(ledinternal); |
138 | } | 189 | } |
139 | 190 | ||
@@ -142,7 +193,7 @@ static struct xt_target led_tg_reg __read_mostly = { | |||
142 | .revision = 0, | 193 | .revision = 0, |
143 | .family = NFPROTO_UNSPEC, | 194 | .family = NFPROTO_UNSPEC, |
144 | .target = led_tg, | 195 | .target = led_tg, |
145 | .targetsize = XT_ALIGN(sizeof(struct xt_led_info)), | 196 | .targetsize = sizeof(struct xt_led_info), |
146 | .checkentry = led_tg_check, | 197 | .checkentry = led_tg_check, |
147 | .destroy = led_tg_destroy, | 198 | .destroy = led_tg_destroy, |
148 | .me = THIS_MODULE, | 199 | .me = THIS_MODULE, |
diff --git a/net/netfilter/xt_MARK.c b/net/netfilter/xt_MARK.c deleted file mode 100644 index 225f8d11e173..000000000000 --- a/net/netfilter/xt_MARK.c +++ /dev/null | |||
@@ -1,56 +0,0 @@ | |||
1 | /* | ||
2 | * xt_MARK - Netfilter module to modify the NFMARK field of an skb | ||
3 | * | ||
4 | * (C) 1999-2001 Marc Boucher <marc@mbsi.ca> | ||
5 | * Copyright © CC Computer Consultants GmbH, 2007 - 2008 | ||
6 | * Jan Engelhardt <jengelh@computergmbh.de> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/ip.h> | ||
16 | #include <net/checksum.h> | ||
17 | |||
18 | #include <linux/netfilter/x_tables.h> | ||
19 | #include <linux/netfilter/xt_MARK.h> | ||
20 | |||
21 | MODULE_LICENSE("GPL"); | ||
22 | MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>"); | ||
23 | MODULE_DESCRIPTION("Xtables: packet mark modification"); | ||
24 | MODULE_ALIAS("ipt_MARK"); | ||
25 | MODULE_ALIAS("ip6t_MARK"); | ||
26 | |||
27 | static unsigned int | ||
28 | mark_tg(struct sk_buff *skb, const struct xt_target_param *par) | ||
29 | { | ||
30 | const struct xt_mark_tginfo2 *info = par->targinfo; | ||
31 | |||
32 | skb->mark = (skb->mark & ~info->mask) ^ info->mark; | ||
33 | return XT_CONTINUE; | ||
34 | } | ||
35 | |||
36 | static struct xt_target mark_tg_reg __read_mostly = { | ||
37 | .name = "MARK", | ||
38 | .revision = 2, | ||
39 | .family = NFPROTO_UNSPEC, | ||
40 | .target = mark_tg, | ||
41 | .targetsize = sizeof(struct xt_mark_tginfo2), | ||
42 | .me = THIS_MODULE, | ||
43 | }; | ||
44 | |||
45 | static int __init mark_tg_init(void) | ||
46 | { | ||
47 | return xt_register_target(&mark_tg_reg); | ||
48 | } | ||
49 | |||
50 | static void __exit mark_tg_exit(void) | ||
51 | { | ||
52 | xt_unregister_target(&mark_tg_reg); | ||
53 | } | ||
54 | |||
55 | module_init(mark_tg_init); | ||
56 | module_exit(mark_tg_exit); | ||
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c index a57c5cf018ec..a17dd0f589b2 100644 --- a/net/netfilter/xt_NFLOG.c +++ b/net/netfilter/xt_NFLOG.c | |||
@@ -22,7 +22,7 @@ MODULE_ALIAS("ipt_NFLOG"); | |||
22 | MODULE_ALIAS("ip6t_NFLOG"); | 22 | MODULE_ALIAS("ip6t_NFLOG"); |
23 | 23 | ||
24 | static unsigned int | 24 | static unsigned int |
25 | nflog_tg(struct sk_buff *skb, const struct xt_target_param *par) | 25 | nflog_tg(struct sk_buff *skb, const struct xt_action_param *par) |
26 | { | 26 | { |
27 | const struct xt_nflog_info *info = par->targinfo; | 27 | const struct xt_nflog_info *info = par->targinfo; |
28 | struct nf_loginfo li; | 28 | struct nf_loginfo li; |
@@ -37,15 +37,15 @@ nflog_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
37 | return XT_CONTINUE; | 37 | return XT_CONTINUE; |
38 | } | 38 | } |
39 | 39 | ||
40 | static bool nflog_tg_check(const struct xt_tgchk_param *par) | 40 | static int nflog_tg_check(const struct xt_tgchk_param *par) |
41 | { | 41 | { |
42 | const struct xt_nflog_info *info = par->targinfo; | 42 | const struct xt_nflog_info *info = par->targinfo; |
43 | 43 | ||
44 | if (info->flags & ~XT_NFLOG_MASK) | 44 | if (info->flags & ~XT_NFLOG_MASK) |
45 | return false; | 45 | return -EINVAL; |
46 | if (info->prefix[sizeof(info->prefix) - 1] != '\0') | 46 | if (info->prefix[sizeof(info->prefix) - 1] != '\0') |
47 | return false; | 47 | return -EINVAL; |
48 | return true; | 48 | return 0; |
49 | } | 49 | } |
50 | 50 | ||
51 | static struct xt_target nflog_tg_reg __read_mostly = { | 51 | static struct xt_target nflog_tg_reg __read_mostly = { |
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c index 12dcd7007c3e..039cce1bde3d 100644 --- a/net/netfilter/xt_NFQUEUE.c +++ b/net/netfilter/xt_NFQUEUE.c | |||
@@ -31,7 +31,7 @@ static u32 jhash_initval __read_mostly; | |||
31 | static bool rnd_inited __read_mostly; | 31 | static bool rnd_inited __read_mostly; |
32 | 32 | ||
33 | static unsigned int | 33 | static unsigned int |
34 | nfqueue_tg(struct sk_buff *skb, const struct xt_target_param *par) | 34 | nfqueue_tg(struct sk_buff *skb, const struct xt_action_param *par) |
35 | { | 35 | { |
36 | const struct xt_NFQ_info *tinfo = par->targinfo; | 36 | const struct xt_NFQ_info *tinfo = par->targinfo; |
37 | 37 | ||
@@ -49,17 +49,6 @@ static u32 hash_v4(const struct sk_buff *skb) | |||
49 | return jhash_2words((__force u32)ipaddr, iph->protocol, jhash_initval); | 49 | return jhash_2words((__force u32)ipaddr, iph->protocol, jhash_initval); |
50 | } | 50 | } |
51 | 51 | ||
52 | static unsigned int | ||
53 | nfqueue_tg4_v1(struct sk_buff *skb, const struct xt_target_param *par) | ||
54 | { | ||
55 | const struct xt_NFQ_info_v1 *info = par->targinfo; | ||
56 | u32 queue = info->queuenum; | ||
57 | |||
58 | if (info->queues_total > 1) | ||
59 | queue = hash_v4(skb) % info->queues_total + queue; | ||
60 | return NF_QUEUE_NR(queue); | ||
61 | } | ||
62 | |||
63 | #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) | 52 | #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) |
64 | static u32 hash_v6(const struct sk_buff *skb) | 53 | static u32 hash_v6(const struct sk_buff *skb) |
65 | { | 54 | { |
@@ -73,20 +62,26 @@ static u32 hash_v6(const struct sk_buff *skb) | |||
73 | 62 | ||
74 | return jhash2((__force u32 *)addr, ARRAY_SIZE(addr), jhash_initval); | 63 | return jhash2((__force u32 *)addr, ARRAY_SIZE(addr), jhash_initval); |
75 | } | 64 | } |
65 | #endif | ||
76 | 66 | ||
77 | static unsigned int | 67 | static unsigned int |
78 | nfqueue_tg6_v1(struct sk_buff *skb, const struct xt_target_param *par) | 68 | nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par) |
79 | { | 69 | { |
80 | const struct xt_NFQ_info_v1 *info = par->targinfo; | 70 | const struct xt_NFQ_info_v1 *info = par->targinfo; |
81 | u32 queue = info->queuenum; | 71 | u32 queue = info->queuenum; |
82 | 72 | ||
83 | if (info->queues_total > 1) | 73 | if (info->queues_total > 1) { |
84 | queue = hash_v6(skb) % info->queues_total + queue; | 74 | if (par->family == NFPROTO_IPV4) |
75 | queue = hash_v4(skb) % info->queues_total + queue; | ||
76 | #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) | ||
77 | else if (par->family == NFPROTO_IPV6) | ||
78 | queue = hash_v6(skb) % info->queues_total + queue; | ||
79 | #endif | ||
80 | } | ||
85 | return NF_QUEUE_NR(queue); | 81 | return NF_QUEUE_NR(queue); |
86 | } | 82 | } |
87 | #endif | ||
88 | 83 | ||
89 | static bool nfqueue_tg_v1_check(const struct xt_tgchk_param *par) | 84 | static int nfqueue_tg_v1_check(const struct xt_tgchk_param *par) |
90 | { | 85 | { |
91 | const struct xt_NFQ_info_v1 *info = par->targinfo; | 86 | const struct xt_NFQ_info_v1 *info = par->targinfo; |
92 | u32 maxid; | 87 | u32 maxid; |
@@ -97,15 +92,15 @@ static bool nfqueue_tg_v1_check(const struct xt_tgchk_param *par) | |||
97 | } | 92 | } |
98 | if (info->queues_total == 0) { | 93 | if (info->queues_total == 0) { |
99 | pr_err("NFQUEUE: number of total queues is 0\n"); | 94 | pr_err("NFQUEUE: number of total queues is 0\n"); |
100 | return false; | 95 | return -EINVAL; |
101 | } | 96 | } |
102 | maxid = info->queues_total - 1 + info->queuenum; | 97 | maxid = info->queues_total - 1 + info->queuenum; |
103 | if (maxid > 0xffff) { | 98 | if (maxid > 0xffff) { |
104 | pr_err("NFQUEUE: number of queues (%u) out of range (got %u)\n", | 99 | pr_err("NFQUEUE: number of queues (%u) out of range (got %u)\n", |
105 | info->queues_total, maxid); | 100 | info->queues_total, maxid); |
106 | return false; | 101 | return -ERANGE; |
107 | } | 102 | } |
108 | return true; | 103 | return 0; |
109 | } | 104 | } |
110 | 105 | ||
111 | static struct xt_target nfqueue_tg_reg[] __read_mostly = { | 106 | static struct xt_target nfqueue_tg_reg[] __read_mostly = { |
@@ -119,23 +114,12 @@ static struct xt_target nfqueue_tg_reg[] __read_mostly = { | |||
119 | { | 114 | { |
120 | .name = "NFQUEUE", | 115 | .name = "NFQUEUE", |
121 | .revision = 1, | 116 | .revision = 1, |
122 | .family = NFPROTO_IPV4, | 117 | .family = NFPROTO_UNSPEC, |
123 | .checkentry = nfqueue_tg_v1_check, | ||
124 | .target = nfqueue_tg4_v1, | ||
125 | .targetsize = sizeof(struct xt_NFQ_info_v1), | ||
126 | .me = THIS_MODULE, | ||
127 | }, | ||
128 | #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) | ||
129 | { | ||
130 | .name = "NFQUEUE", | ||
131 | .revision = 1, | ||
132 | .family = NFPROTO_IPV6, | ||
133 | .checkentry = nfqueue_tg_v1_check, | 118 | .checkentry = nfqueue_tg_v1_check, |
134 | .target = nfqueue_tg6_v1, | 119 | .target = nfqueue_tg_v1, |
135 | .targetsize = sizeof(struct xt_NFQ_info_v1), | 120 | .targetsize = sizeof(struct xt_NFQ_info_v1), |
136 | .me = THIS_MODULE, | 121 | .me = THIS_MODULE, |
137 | }, | 122 | }, |
138 | #endif | ||
139 | }; | 123 | }; |
140 | 124 | ||
141 | static int __init nfqueue_tg_init(void) | 125 | static int __init nfqueue_tg_init(void) |
diff --git a/net/netfilter/xt_NOTRACK.c b/net/netfilter/xt_NOTRACK.c index e7a0a54fd4ea..512b9123252f 100644 --- a/net/netfilter/xt_NOTRACK.c +++ b/net/netfilter/xt_NOTRACK.c | |||
@@ -13,7 +13,7 @@ MODULE_ALIAS("ipt_NOTRACK"); | |||
13 | MODULE_ALIAS("ip6t_NOTRACK"); | 13 | MODULE_ALIAS("ip6t_NOTRACK"); |
14 | 14 | ||
15 | static unsigned int | 15 | static unsigned int |
16 | notrack_tg(struct sk_buff *skb, const struct xt_target_param *par) | 16 | notrack_tg(struct sk_buff *skb, const struct xt_action_param *par) |
17 | { | 17 | { |
18 | /* Previously seen (loopback)? Ignore. */ | 18 | /* Previously seen (loopback)? Ignore. */ |
19 | if (skb->nfct != NULL) | 19 | if (skb->nfct != NULL) |
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c index d16d55df4f61..69c01e10f8af 100644 --- a/net/netfilter/xt_RATEEST.c +++ b/net/netfilter/xt_RATEEST.c | |||
@@ -73,7 +73,7 @@ void xt_rateest_put(struct xt_rateest *est) | |||
73 | EXPORT_SYMBOL_GPL(xt_rateest_put); | 73 | EXPORT_SYMBOL_GPL(xt_rateest_put); |
74 | 74 | ||
75 | static unsigned int | 75 | static unsigned int |
76 | xt_rateest_tg(struct sk_buff *skb, const struct xt_target_param *par) | 76 | xt_rateest_tg(struct sk_buff *skb, const struct xt_action_param *par) |
77 | { | 77 | { |
78 | const struct xt_rateest_target_info *info = par->targinfo; | 78 | const struct xt_rateest_target_info *info = par->targinfo; |
79 | struct gnet_stats_basic_packed *stats = &info->est->bstats; | 79 | struct gnet_stats_basic_packed *stats = &info->est->bstats; |
@@ -86,7 +86,7 @@ xt_rateest_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
86 | return XT_CONTINUE; | 86 | return XT_CONTINUE; |
87 | } | 87 | } |
88 | 88 | ||
89 | static bool xt_rateest_tg_checkentry(const struct xt_tgchk_param *par) | 89 | static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par) |
90 | { | 90 | { |
91 | struct xt_rateest_target_info *info = par->targinfo; | 91 | struct xt_rateest_target_info *info = par->targinfo; |
92 | struct xt_rateest *est; | 92 | struct xt_rateest *est; |
@@ -94,6 +94,7 @@ static bool xt_rateest_tg_checkentry(const struct xt_tgchk_param *par) | |||
94 | struct nlattr opt; | 94 | struct nlattr opt; |
95 | struct gnet_estimator est; | 95 | struct gnet_estimator est; |
96 | } cfg; | 96 | } cfg; |
97 | int ret; | ||
97 | 98 | ||
98 | if (unlikely(!rnd_inited)) { | 99 | if (unlikely(!rnd_inited)) { |
99 | get_random_bytes(&jhash_rnd, sizeof(jhash_rnd)); | 100 | get_random_bytes(&jhash_rnd, sizeof(jhash_rnd)); |
@@ -110,12 +111,13 @@ static bool xt_rateest_tg_checkentry(const struct xt_tgchk_param *par) | |||
110 | (info->interval != est->params.interval || | 111 | (info->interval != est->params.interval || |
111 | info->ewma_log != est->params.ewma_log)) { | 112 | info->ewma_log != est->params.ewma_log)) { |
112 | xt_rateest_put(est); | 113 | xt_rateest_put(est); |
113 | return false; | 114 | return -EINVAL; |
114 | } | 115 | } |
115 | info->est = est; | 116 | info->est = est; |
116 | return true; | 117 | return 0; |
117 | } | 118 | } |
118 | 119 | ||
120 | ret = -ENOMEM; | ||
119 | est = kzalloc(sizeof(*est), GFP_KERNEL); | 121 | est = kzalloc(sizeof(*est), GFP_KERNEL); |
120 | if (!est) | 122 | if (!est) |
121 | goto err1; | 123 | goto err1; |
@@ -131,19 +133,19 @@ static bool xt_rateest_tg_checkentry(const struct xt_tgchk_param *par) | |||
131 | cfg.est.interval = info->interval; | 133 | cfg.est.interval = info->interval; |
132 | cfg.est.ewma_log = info->ewma_log; | 134 | cfg.est.ewma_log = info->ewma_log; |
133 | 135 | ||
134 | if (gen_new_estimator(&est->bstats, &est->rstats, &est->lock, | 136 | ret = gen_new_estimator(&est->bstats, &est->rstats, |
135 | &cfg.opt) < 0) | 137 | &est->lock, &cfg.opt); |
138 | if (ret < 0) | ||
136 | goto err2; | 139 | goto err2; |
137 | 140 | ||
138 | info->est = est; | 141 | info->est = est; |
139 | xt_rateest_hash_insert(est); | 142 | xt_rateest_hash_insert(est); |
140 | 143 | return 0; | |
141 | return true; | ||
142 | 144 | ||
143 | err2: | 145 | err2: |
144 | kfree(est); | 146 | kfree(est); |
145 | err1: | 147 | err1: |
146 | return false; | 148 | return ret; |
147 | } | 149 | } |
148 | 150 | ||
149 | static void xt_rateest_tg_destroy(const struct xt_tgdtor_param *par) | 151 | static void xt_rateest_tg_destroy(const struct xt_tgdtor_param *par) |
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c index 7a6f9e6f5dfa..23b2d6c486b5 100644 --- a/net/netfilter/xt_SECMARK.c +++ b/net/netfilter/xt_SECMARK.c | |||
@@ -12,6 +12,7 @@ | |||
12 | * published by the Free Software Foundation. | 12 | * published by the Free Software Foundation. |
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
15 | #include <linux/module.h> | 16 | #include <linux/module.h> |
16 | #include <linux/skbuff.h> | 17 | #include <linux/skbuff.h> |
17 | #include <linux/selinux.h> | 18 | #include <linux/selinux.h> |
@@ -29,7 +30,7 @@ MODULE_ALIAS("ip6t_SECMARK"); | |||
29 | static u8 mode; | 30 | static u8 mode; |
30 | 31 | ||
31 | static unsigned int | 32 | static unsigned int |
32 | secmark_tg(struct sk_buff *skb, const struct xt_target_param *par) | 33 | secmark_tg(struct sk_buff *skb, const struct xt_action_param *par) |
33 | { | 34 | { |
34 | u32 secmark = 0; | 35 | u32 secmark = 0; |
35 | const struct xt_secmark_target_info *info = par->targinfo; | 36 | const struct xt_secmark_target_info *info = par->targinfo; |
@@ -49,7 +50,7 @@ secmark_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
49 | return XT_CONTINUE; | 50 | return XT_CONTINUE; |
50 | } | 51 | } |
51 | 52 | ||
52 | static bool checkentry_selinux(struct xt_secmark_target_info *info) | 53 | static int checkentry_selinux(struct xt_secmark_target_info *info) |
53 | { | 54 | { |
54 | int err; | 55 | int err; |
55 | struct xt_secmark_target_selinux_info *sel = &info->u.sel; | 56 | struct xt_secmark_target_selinux_info *sel = &info->u.sel; |
@@ -59,58 +60,59 @@ static bool checkentry_selinux(struct xt_secmark_target_info *info) | |||
59 | err = selinux_string_to_sid(sel->selctx, &sel->selsid); | 60 | err = selinux_string_to_sid(sel->selctx, &sel->selsid); |
60 | if (err) { | 61 | if (err) { |
61 | if (err == -EINVAL) | 62 | if (err == -EINVAL) |
62 | printk(KERN_INFO PFX "invalid SELinux context \'%s\'\n", | 63 | pr_info("invalid SELinux context \'%s\'\n", |
63 | sel->selctx); | 64 | sel->selctx); |
64 | return false; | 65 | return err; |
65 | } | 66 | } |
66 | 67 | ||
67 | if (!sel->selsid) { | 68 | if (!sel->selsid) { |
68 | printk(KERN_INFO PFX "unable to map SELinux context \'%s\'\n", | 69 | pr_info("unable to map SELinux context \'%s\'\n", sel->selctx); |
69 | sel->selctx); | 70 | return -ENOENT; |
70 | return false; | ||
71 | } | 71 | } |
72 | 72 | ||
73 | err = selinux_secmark_relabel_packet_permission(sel->selsid); | 73 | err = selinux_secmark_relabel_packet_permission(sel->selsid); |
74 | if (err) { | 74 | if (err) { |
75 | printk(KERN_INFO PFX "unable to obtain relabeling permission\n"); | 75 | pr_info("unable to obtain relabeling permission\n"); |
76 | return false; | 76 | return err; |
77 | } | 77 | } |
78 | 78 | ||
79 | selinux_secmark_refcount_inc(); | 79 | selinux_secmark_refcount_inc(); |
80 | return true; | 80 | return 0; |
81 | } | 81 | } |
82 | 82 | ||
83 | static bool secmark_tg_check(const struct xt_tgchk_param *par) | 83 | static int secmark_tg_check(const struct xt_tgchk_param *par) |
84 | { | 84 | { |
85 | struct xt_secmark_target_info *info = par->targinfo; | 85 | struct xt_secmark_target_info *info = par->targinfo; |
86 | int err; | ||
86 | 87 | ||
87 | if (strcmp(par->table, "mangle") != 0 && | 88 | if (strcmp(par->table, "mangle") != 0 && |
88 | strcmp(par->table, "security") != 0) { | 89 | strcmp(par->table, "security") != 0) { |
89 | printk(KERN_INFO PFX "target only valid in the \'mangle\' " | 90 | pr_info("target only valid in the \'mangle\' " |
90 | "or \'security\' tables, not \'%s\'.\n", par->table); | 91 | "or \'security\' tables, not \'%s\'.\n", par->table); |
91 | return false; | 92 | return -EINVAL; |
92 | } | 93 | } |
93 | 94 | ||
94 | if (mode && mode != info->mode) { | 95 | if (mode && mode != info->mode) { |
95 | printk(KERN_INFO PFX "mode already set to %hu cannot mix with " | 96 | pr_info("mode already set to %hu cannot mix with " |
96 | "rules for mode %hu\n", mode, info->mode); | 97 | "rules for mode %hu\n", mode, info->mode); |
97 | return false; | 98 | return -EINVAL; |
98 | } | 99 | } |
99 | 100 | ||
100 | switch (info->mode) { | 101 | switch (info->mode) { |
101 | case SECMARK_MODE_SEL: | 102 | case SECMARK_MODE_SEL: |
102 | if (!checkentry_selinux(info)) | 103 | err = checkentry_selinux(info); |
103 | return false; | 104 | if (err <= 0) |
105 | return err; | ||
104 | break; | 106 | break; |
105 | 107 | ||
106 | default: | 108 | default: |
107 | printk(KERN_INFO PFX "invalid mode: %hu\n", info->mode); | 109 | pr_info("invalid mode: %hu\n", info->mode); |
108 | return false; | 110 | return -EINVAL; |
109 | } | 111 | } |
110 | 112 | ||
111 | if (!mode) | 113 | if (!mode) |
112 | mode = info->mode; | 114 | mode = info->mode; |
113 | return true; | 115 | return 0; |
114 | } | 116 | } |
115 | 117 | ||
116 | static void secmark_tg_destroy(const struct xt_tgdtor_param *par) | 118 | static void secmark_tg_destroy(const struct xt_tgdtor_param *par) |
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index c5f4b9919e9a..62ec021fbd50 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | 10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/skbuff.h> | 12 | #include <linux/skbuff.h> |
13 | #include <linux/ip.h> | 13 | #include <linux/ip.h> |
@@ -68,15 +68,14 @@ tcpmss_mangle_packet(struct sk_buff *skb, | |||
68 | if (info->mss == XT_TCPMSS_CLAMP_PMTU) { | 68 | if (info->mss == XT_TCPMSS_CLAMP_PMTU) { |
69 | if (dst_mtu(skb_dst(skb)) <= minlen) { | 69 | if (dst_mtu(skb_dst(skb)) <= minlen) { |
70 | if (net_ratelimit()) | 70 | if (net_ratelimit()) |
71 | printk(KERN_ERR "xt_TCPMSS: " | 71 | pr_err("unknown or invalid path-MTU (%u)\n", |
72 | "unknown or invalid path-MTU (%u)\n", | ||
73 | dst_mtu(skb_dst(skb))); | 72 | dst_mtu(skb_dst(skb))); |
74 | return -1; | 73 | return -1; |
75 | } | 74 | } |
76 | if (in_mtu <= minlen) { | 75 | if (in_mtu <= minlen) { |
77 | if (net_ratelimit()) | 76 | if (net_ratelimit()) |
78 | printk(KERN_ERR "xt_TCPMSS: unknown or " | 77 | pr_err("unknown or invalid path-MTU (%u)\n", |
79 | "invalid path-MTU (%u)\n", in_mtu); | 78 | in_mtu); |
80 | return -1; | 79 | return -1; |
81 | } | 80 | } |
82 | newmss = min(dst_mtu(skb_dst(skb)), in_mtu) - minlen; | 81 | newmss = min(dst_mtu(skb_dst(skb)), in_mtu) - minlen; |
@@ -173,7 +172,7 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb, | |||
173 | } | 172 | } |
174 | 173 | ||
175 | static unsigned int | 174 | static unsigned int |
176 | tcpmss_tg4(struct sk_buff *skb, const struct xt_target_param *par) | 175 | tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par) |
177 | { | 176 | { |
178 | struct iphdr *iph = ip_hdr(skb); | 177 | struct iphdr *iph = ip_hdr(skb); |
179 | __be16 newlen; | 178 | __be16 newlen; |
@@ -196,7 +195,7 @@ tcpmss_tg4(struct sk_buff *skb, const struct xt_target_param *par) | |||
196 | 195 | ||
197 | #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) | 196 | #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) |
198 | static unsigned int | 197 | static unsigned int |
199 | tcpmss_tg6(struct sk_buff *skb, const struct xt_target_param *par) | 198 | tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par) |
200 | { | 199 | { |
201 | struct ipv6hdr *ipv6h = ipv6_hdr(skb); | 200 | struct ipv6hdr *ipv6h = ipv6_hdr(skb); |
202 | u8 nexthdr; | 201 | u8 nexthdr; |
@@ -236,7 +235,7 @@ static inline bool find_syn_match(const struct xt_entry_match *m) | |||
236 | return false; | 235 | return false; |
237 | } | 236 | } |
238 | 237 | ||
239 | static bool tcpmss_tg4_check(const struct xt_tgchk_param *par) | 238 | static int tcpmss_tg4_check(const struct xt_tgchk_param *par) |
240 | { | 239 | { |
241 | const struct xt_tcpmss_info *info = par->targinfo; | 240 | const struct xt_tcpmss_info *info = par->targinfo; |
242 | const struct ipt_entry *e = par->entryinfo; | 241 | const struct ipt_entry *e = par->entryinfo; |
@@ -246,19 +245,19 @@ static bool tcpmss_tg4_check(const struct xt_tgchk_param *par) | |||
246 | (par->hook_mask & ~((1 << NF_INET_FORWARD) | | 245 | (par->hook_mask & ~((1 << NF_INET_FORWARD) | |
247 | (1 << NF_INET_LOCAL_OUT) | | 246 | (1 << NF_INET_LOCAL_OUT) | |
248 | (1 << NF_INET_POST_ROUTING))) != 0) { | 247 | (1 << NF_INET_POST_ROUTING))) != 0) { |
249 | printk("xt_TCPMSS: path-MTU clamping only supported in " | 248 | pr_info("path-MTU clamping only supported in " |
250 | "FORWARD, OUTPUT and POSTROUTING hooks\n"); | 249 | "FORWARD, OUTPUT and POSTROUTING hooks\n"); |
251 | return false; | 250 | return -EINVAL; |
252 | } | 251 | } |
253 | xt_ematch_foreach(ematch, e) | 252 | xt_ematch_foreach(ematch, e) |
254 | if (find_syn_match(ematch)) | 253 | if (find_syn_match(ematch)) |
255 | return true; | 254 | return 0; |
256 | printk("xt_TCPMSS: Only works on TCP SYN packets\n"); | 255 | pr_info("Only works on TCP SYN packets\n"); |
257 | return false; | 256 | return -EINVAL; |
258 | } | 257 | } |
259 | 258 | ||
260 | #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) | 259 | #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) |
261 | static bool tcpmss_tg6_check(const struct xt_tgchk_param *par) | 260 | static int tcpmss_tg6_check(const struct xt_tgchk_param *par) |
262 | { | 261 | { |
263 | const struct xt_tcpmss_info *info = par->targinfo; | 262 | const struct xt_tcpmss_info *info = par->targinfo; |
264 | const struct ip6t_entry *e = par->entryinfo; | 263 | const struct ip6t_entry *e = par->entryinfo; |
@@ -268,15 +267,15 @@ static bool tcpmss_tg6_check(const struct xt_tgchk_param *par) | |||
268 | (par->hook_mask & ~((1 << NF_INET_FORWARD) | | 267 | (par->hook_mask & ~((1 << NF_INET_FORWARD) | |
269 | (1 << NF_INET_LOCAL_OUT) | | 268 | (1 << NF_INET_LOCAL_OUT) | |
270 | (1 << NF_INET_POST_ROUTING))) != 0) { | 269 | (1 << NF_INET_POST_ROUTING))) != 0) { |
271 | printk("xt_TCPMSS: path-MTU clamping only supported in " | 270 | pr_info("path-MTU clamping only supported in " |
272 | "FORWARD, OUTPUT and POSTROUTING hooks\n"); | 271 | "FORWARD, OUTPUT and POSTROUTING hooks\n"); |
273 | return false; | 272 | return -EINVAL; |
274 | } | 273 | } |
275 | xt_ematch_foreach(ematch, e) | 274 | xt_ematch_foreach(ematch, e) |
276 | if (find_syn_match(ematch)) | 275 | if (find_syn_match(ematch)) |
277 | return true; | 276 | return 0; |
278 | printk("xt_TCPMSS: Only works on TCP SYN packets\n"); | 277 | pr_info("Only works on TCP SYN packets\n"); |
279 | return false; | 278 | return -EINVAL; |
280 | } | 279 | } |
281 | #endif | 280 | #endif |
282 | 281 | ||
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c index 9dd8c8ef63eb..9dc9ecfdd546 100644 --- a/net/netfilter/xt_TCPOPTSTRIP.c +++ b/net/netfilter/xt_TCPOPTSTRIP.c | |||
@@ -3,7 +3,6 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2007 Sven Schnelle <svens@bitebene.org> | 4 | * Copyright (C) 2007 Sven Schnelle <svens@bitebene.org> |
5 | * Copyright © CC Computer Consultants GmbH, 2007 | 5 | * Copyright © CC Computer Consultants GmbH, 2007 |
6 | * Contact: Jan Engelhardt <jengelh@computergmbh.de> | ||
7 | * | 6 | * |
8 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
@@ -75,7 +74,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb, | |||
75 | } | 74 | } |
76 | 75 | ||
77 | static unsigned int | 76 | static unsigned int |
78 | tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_target_param *par) | 77 | tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_action_param *par) |
79 | { | 78 | { |
80 | return tcpoptstrip_mangle_packet(skb, par->targinfo, ip_hdrlen(skb), | 79 | return tcpoptstrip_mangle_packet(skb, par->targinfo, ip_hdrlen(skb), |
81 | sizeof(struct iphdr) + sizeof(struct tcphdr)); | 80 | sizeof(struct iphdr) + sizeof(struct tcphdr)); |
@@ -83,7 +82,7 @@ tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_target_param *par) | |||
83 | 82 | ||
84 | #if defined(CONFIG_IP6_NF_MANGLE) || defined(CONFIG_IP6_NF_MANGLE_MODULE) | 83 | #if defined(CONFIG_IP6_NF_MANGLE) || defined(CONFIG_IP6_NF_MANGLE_MODULE) |
85 | static unsigned int | 84 | static unsigned int |
86 | tcpoptstrip_tg6(struct sk_buff *skb, const struct xt_target_param *par) | 85 | tcpoptstrip_tg6(struct sk_buff *skb, const struct xt_action_param *par) |
87 | { | 86 | { |
88 | struct ipv6hdr *ipv6h = ipv6_hdr(skb); | 87 | struct ipv6hdr *ipv6h = ipv6_hdr(skb); |
89 | int tcphoff; | 88 | int tcphoff; |
@@ -136,7 +135,7 @@ static void __exit tcpoptstrip_tg_exit(void) | |||
136 | 135 | ||
137 | module_init(tcpoptstrip_tg_init); | 136 | module_init(tcpoptstrip_tg_init); |
138 | module_exit(tcpoptstrip_tg_exit); | 137 | module_exit(tcpoptstrip_tg_exit); |
139 | MODULE_AUTHOR("Sven Schnelle <svens@bitebene.org>, Jan Engelhardt <jengelh@computergmbh.de>"); | 138 | MODULE_AUTHOR("Sven Schnelle <svens@bitebene.org>, Jan Engelhardt <jengelh@medozas.de>"); |
140 | MODULE_DESCRIPTION("Xtables: TCP option stripping"); | 139 | MODULE_DESCRIPTION("Xtables: TCP option stripping"); |
141 | MODULE_LICENSE("GPL"); | 140 | MODULE_LICENSE("GPL"); |
142 | MODULE_ALIAS("ipt_TCPOPTSTRIP"); | 141 | MODULE_ALIAS("ipt_TCPOPTSTRIP"); |
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c new file mode 100644 index 000000000000..859d9fd429c8 --- /dev/null +++ b/net/netfilter/xt_TEE.c | |||
@@ -0,0 +1,309 @@ | |||
1 | /* | ||
2 | * "TEE" target extension for Xtables | ||
3 | * Copyright © Sebastian Claßen, 2007 | ||
4 | * Jan Engelhardt, 2007-2010 | ||
5 | * | ||
6 | * based on ipt_ROUTE.c from Cédric de Launois | ||
7 | * <delaunois@info.ucl.be> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * version 2 or later, as published by the Free Software Foundation. | ||
12 | */ | ||
13 | #include <linux/ip.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/percpu.h> | ||
16 | #include <linux/route.h> | ||
17 | #include <linux/skbuff.h> | ||
18 | #include <linux/notifier.h> | ||
19 | #include <net/checksum.h> | ||
20 | #include <net/icmp.h> | ||
21 | #include <net/ip.h> | ||
22 | #include <net/ipv6.h> | ||
23 | #include <net/ip6_route.h> | ||
24 | #include <net/route.h> | ||
25 | #include <linux/netfilter/x_tables.h> | ||
26 | #include <linux/netfilter/xt_TEE.h> | ||
27 | |||
28 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | ||
29 | # define WITH_CONNTRACK 1 | ||
30 | # include <net/netfilter/nf_conntrack.h> | ||
31 | #endif | ||
32 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
33 | # define WITH_IPV6 1 | ||
34 | #endif | ||
35 | |||
36 | struct xt_tee_priv { | ||
37 | struct notifier_block notifier; | ||
38 | struct xt_tee_tginfo *tginfo; | ||
39 | int oif; | ||
40 | }; | ||
41 | |||
42 | static const union nf_inet_addr tee_zero_address; | ||
43 | static DEFINE_PER_CPU(bool, tee_active); | ||
44 | |||
45 | static struct net *pick_net(struct sk_buff *skb) | ||
46 | { | ||
47 | #ifdef CONFIG_NET_NS | ||
48 | const struct dst_entry *dst; | ||
49 | |||
50 | if (skb->dev != NULL) | ||
51 | return dev_net(skb->dev); | ||
52 | dst = skb_dst(skb); | ||
53 | if (dst != NULL && dst->dev != NULL) | ||
54 | return dev_net(dst->dev); | ||
55 | #endif | ||
56 | return &init_net; | ||
57 | } | ||
58 | |||
59 | static bool | ||
60 | tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info) | ||
61 | { | ||
62 | const struct iphdr *iph = ip_hdr(skb); | ||
63 | struct net *net = pick_net(skb); | ||
64 | struct rtable *rt; | ||
65 | struct flowi fl; | ||
66 | |||
67 | memset(&fl, 0, sizeof(fl)); | ||
68 | if (info->priv) { | ||
69 | if (info->priv->oif == -1) | ||
70 | return false; | ||
71 | fl.oif = info->priv->oif; | ||
72 | } | ||
73 | fl.nl_u.ip4_u.daddr = info->gw.ip; | ||
74 | fl.nl_u.ip4_u.tos = RT_TOS(iph->tos); | ||
75 | fl.nl_u.ip4_u.scope = RT_SCOPE_UNIVERSE; | ||
76 | if (ip_route_output_key(net, &rt, &fl) != 0) | ||
77 | return false; | ||
78 | |||
79 | skb_dst_drop(skb); | ||
80 | skb_dst_set(skb, &rt->u.dst); | ||
81 | skb->dev = rt->u.dst.dev; | ||
82 | skb->protocol = htons(ETH_P_IP); | ||
83 | return true; | ||
84 | } | ||
85 | |||
86 | static unsigned int | ||
87 | tee_tg4(struct sk_buff *skb, const struct xt_action_param *par) | ||
88 | { | ||
89 | const struct xt_tee_tginfo *info = par->targinfo; | ||
90 | struct iphdr *iph; | ||
91 | |||
92 | if (percpu_read(tee_active)) | ||
93 | return XT_CONTINUE; | ||
94 | /* | ||
95 | * Copy the skb, and route the copy. Will later return %XT_CONTINUE for | ||
96 | * the original skb, which should continue on its way as if nothing has | ||
97 | * happened. The copy should be independently delivered to the TEE | ||
98 | * --gateway. | ||
99 | */ | ||
100 | skb = pskb_copy(skb, GFP_ATOMIC); | ||
101 | if (skb == NULL) | ||
102 | return XT_CONTINUE; | ||
103 | |||
104 | #ifdef WITH_CONNTRACK | ||
105 | /* Avoid counting cloned packets towards the original connection. */ | ||
106 | nf_conntrack_put(skb->nfct); | ||
107 | skb->nfct = &nf_conntrack_untracked.ct_general; | ||
108 | skb->nfctinfo = IP_CT_NEW; | ||
109 | nf_conntrack_get(skb->nfct); | ||
110 | #endif | ||
111 | /* | ||
112 | * If we are in PREROUTING/INPUT, the checksum must be recalculated | ||
113 | * since the length could have changed as a result of defragmentation. | ||
114 | * | ||
115 | * We also decrease the TTL to mitigate potential TEE loops | ||
116 | * between two hosts. | ||
117 | * | ||
118 | * Set %IP_DF so that the original source is notified of a potentially | ||
119 | * decreased MTU on the clone route. IPv6 does this too. | ||
120 | */ | ||
121 | iph = ip_hdr(skb); | ||
122 | iph->frag_off |= htons(IP_DF); | ||
123 | if (par->hooknum == NF_INET_PRE_ROUTING || | ||
124 | par->hooknum == NF_INET_LOCAL_IN) | ||
125 | --iph->ttl; | ||
126 | ip_send_check(iph); | ||
127 | |||
128 | if (tee_tg_route4(skb, info)) { | ||
129 | percpu_write(tee_active, true); | ||
130 | ip_local_out(skb); | ||
131 | percpu_write(tee_active, false); | ||
132 | } else { | ||
133 | kfree_skb(skb); | ||
134 | } | ||
135 | return XT_CONTINUE; | ||
136 | } | ||
137 | |||
138 | #ifdef WITH_IPV6 | ||
139 | static bool | ||
140 | tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info) | ||
141 | { | ||
142 | const struct ipv6hdr *iph = ipv6_hdr(skb); | ||
143 | struct net *net = pick_net(skb); | ||
144 | struct dst_entry *dst; | ||
145 | struct flowi fl; | ||
146 | |||
147 | memset(&fl, 0, sizeof(fl)); | ||
148 | if (info->priv) { | ||
149 | if (info->priv->oif == -1) | ||
150 | return false; | ||
151 | fl.oif = info->priv->oif; | ||
152 | } | ||
153 | fl.nl_u.ip6_u.daddr = info->gw.in6; | ||
154 | fl.nl_u.ip6_u.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) | | ||
155 | (iph->flow_lbl[1] << 8) | iph->flow_lbl[2]; | ||
156 | dst = ip6_route_output(net, NULL, &fl); | ||
157 | if (dst == NULL) | ||
158 | return false; | ||
159 | |||
160 | skb_dst_drop(skb); | ||
161 | skb_dst_set(skb, dst); | ||
162 | skb->dev = dst->dev; | ||
163 | skb->protocol = htons(ETH_P_IPV6); | ||
164 | return true; | ||
165 | } | ||
166 | |||
167 | static unsigned int | ||
168 | tee_tg6(struct sk_buff *skb, const struct xt_action_param *par) | ||
169 | { | ||
170 | const struct xt_tee_tginfo *info = par->targinfo; | ||
171 | |||
172 | if (percpu_read(tee_active)) | ||
173 | return XT_CONTINUE; | ||
174 | skb = pskb_copy(skb, GFP_ATOMIC); | ||
175 | if (skb == NULL) | ||
176 | return XT_CONTINUE; | ||
177 | |||
178 | #ifdef WITH_CONNTRACK | ||
179 | nf_conntrack_put(skb->nfct); | ||
180 | skb->nfct = &nf_conntrack_untracked.ct_general; | ||
181 | skb->nfctinfo = IP_CT_NEW; | ||
182 | nf_conntrack_get(skb->nfct); | ||
183 | #endif | ||
184 | if (par->hooknum == NF_INET_PRE_ROUTING || | ||
185 | par->hooknum == NF_INET_LOCAL_IN) { | ||
186 | struct ipv6hdr *iph = ipv6_hdr(skb); | ||
187 | --iph->hop_limit; | ||
188 | } | ||
189 | if (tee_tg_route6(skb, info)) { | ||
190 | percpu_write(tee_active, true); | ||
191 | ip6_local_out(skb); | ||
192 | percpu_write(tee_active, false); | ||
193 | } else { | ||
194 | kfree_skb(skb); | ||
195 | } | ||
196 | return XT_CONTINUE; | ||
197 | } | ||
198 | #endif /* WITH_IPV6 */ | ||
199 | |||
200 | static int tee_netdev_event(struct notifier_block *this, unsigned long event, | ||
201 | void *ptr) | ||
202 | { | ||
203 | struct net_device *dev = ptr; | ||
204 | struct xt_tee_priv *priv; | ||
205 | |||
206 | priv = container_of(this, struct xt_tee_priv, notifier); | ||
207 | switch (event) { | ||
208 | case NETDEV_REGISTER: | ||
209 | if (!strcmp(dev->name, priv->tginfo->oif)) | ||
210 | priv->oif = dev->ifindex; | ||
211 | break; | ||
212 | case NETDEV_UNREGISTER: | ||
213 | if (dev->ifindex == priv->oif) | ||
214 | priv->oif = -1; | ||
215 | break; | ||
216 | case NETDEV_CHANGENAME: | ||
217 | if (!strcmp(dev->name, priv->tginfo->oif)) | ||
218 | priv->oif = dev->ifindex; | ||
219 | else if (dev->ifindex == priv->oif) | ||
220 | priv->oif = -1; | ||
221 | break; | ||
222 | } | ||
223 | |||
224 | return NOTIFY_DONE; | ||
225 | } | ||
226 | |||
227 | static int tee_tg_check(const struct xt_tgchk_param *par) | ||
228 | { | ||
229 | struct xt_tee_tginfo *info = par->targinfo; | ||
230 | struct xt_tee_priv *priv; | ||
231 | |||
232 | /* 0.0.0.0 and :: not allowed */ | ||
233 | if (memcmp(&info->gw, &tee_zero_address, | ||
234 | sizeof(tee_zero_address)) == 0) | ||
235 | return -EINVAL; | ||
236 | |||
237 | if (info->oif[0]) { | ||
238 | if (info->oif[sizeof(info->oif)-1] != '\0') | ||
239 | return -EINVAL; | ||
240 | |||
241 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
242 | if (priv == NULL) | ||
243 | return -ENOMEM; | ||
244 | |||
245 | priv->tginfo = info; | ||
246 | priv->oif = -1; | ||
247 | priv->notifier.notifier_call = tee_netdev_event; | ||
248 | info->priv = priv; | ||
249 | |||
250 | register_netdevice_notifier(&priv->notifier); | ||
251 | } else | ||
252 | info->priv = NULL; | ||
253 | |||
254 | return 0; | ||
255 | } | ||
256 | |||
257 | static void tee_tg_destroy(const struct xt_tgdtor_param *par) | ||
258 | { | ||
259 | struct xt_tee_tginfo *info = par->targinfo; | ||
260 | |||
261 | if (info->priv) { | ||
262 | unregister_netdevice_notifier(&info->priv->notifier); | ||
263 | kfree(info->priv); | ||
264 | } | ||
265 | } | ||
266 | |||
267 | static struct xt_target tee_tg_reg[] __read_mostly = { | ||
268 | { | ||
269 | .name = "TEE", | ||
270 | .revision = 1, | ||
271 | .family = NFPROTO_IPV4, | ||
272 | .target = tee_tg4, | ||
273 | .targetsize = sizeof(struct xt_tee_tginfo), | ||
274 | .checkentry = tee_tg_check, | ||
275 | .destroy = tee_tg_destroy, | ||
276 | .me = THIS_MODULE, | ||
277 | }, | ||
278 | #ifdef WITH_IPV6 | ||
279 | { | ||
280 | .name = "TEE", | ||
281 | .revision = 1, | ||
282 | .family = NFPROTO_IPV6, | ||
283 | .target = tee_tg6, | ||
284 | .targetsize = sizeof(struct xt_tee_tginfo), | ||
285 | .checkentry = tee_tg_check, | ||
286 | .destroy = tee_tg_destroy, | ||
287 | .me = THIS_MODULE, | ||
288 | }, | ||
289 | #endif | ||
290 | }; | ||
291 | |||
292 | static int __init tee_tg_init(void) | ||
293 | { | ||
294 | return xt_register_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg)); | ||
295 | } | ||
296 | |||
297 | static void __exit tee_tg_exit(void) | ||
298 | { | ||
299 | xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg)); | ||
300 | } | ||
301 | |||
302 | module_init(tee_tg_init); | ||
303 | module_exit(tee_tg_exit); | ||
304 | MODULE_AUTHOR("Sebastian Claßen <sebastian.classen@freenet.ag>"); | ||
305 | MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>"); | ||
306 | MODULE_DESCRIPTION("Xtables: Reroute packet copy"); | ||
307 | MODULE_LICENSE("GPL"); | ||
308 | MODULE_ALIAS("ipt_TEE"); | ||
309 | MODULE_ALIAS("ip6t_TEE"); | ||
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index 1340c2fa3621..e1a0dedac258 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c | |||
@@ -9,7 +9,7 @@ | |||
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
15 | #include <linux/ip.h> | 15 | #include <linux/ip.h> |
@@ -25,7 +25,7 @@ | |||
25 | #include <net/netfilter/nf_tproxy_core.h> | 25 | #include <net/netfilter/nf_tproxy_core.h> |
26 | 26 | ||
27 | static unsigned int | 27 | static unsigned int |
28 | tproxy_tg(struct sk_buff *skb, const struct xt_target_param *par) | 28 | tproxy_tg(struct sk_buff *skb, const struct xt_action_param *par) |
29 | { | 29 | { |
30 | const struct iphdr *iph = ip_hdr(skb); | 30 | const struct iphdr *iph = ip_hdr(skb); |
31 | const struct xt_tproxy_target_info *tgi = par->targinfo; | 31 | const struct xt_tproxy_target_info *tgi = par->targinfo; |
@@ -59,17 +59,17 @@ tproxy_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
59 | return NF_DROP; | 59 | return NF_DROP; |
60 | } | 60 | } |
61 | 61 | ||
62 | static bool tproxy_tg_check(const struct xt_tgchk_param *par) | 62 | static int tproxy_tg_check(const struct xt_tgchk_param *par) |
63 | { | 63 | { |
64 | const struct ipt_ip *i = par->entryinfo; | 64 | const struct ipt_ip *i = par->entryinfo; |
65 | 65 | ||
66 | if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) | 66 | if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) |
67 | && !(i->invflags & IPT_INV_PROTO)) | 67 | && !(i->invflags & IPT_INV_PROTO)) |
68 | return true; | 68 | return 0; |
69 | 69 | ||
70 | pr_info("xt_TPROXY: Can be used only in combination with " | 70 | pr_info("Can be used only in combination with " |
71 | "either -p tcp or -p udp\n"); | 71 | "either -p tcp or -p udp\n"); |
72 | return false; | 72 | return -EINVAL; |
73 | } | 73 | } |
74 | 74 | ||
75 | static struct xt_target tproxy_tg_reg __read_mostly = { | 75 | static struct xt_target tproxy_tg_reg __read_mostly = { |
diff --git a/net/netfilter/xt_TRACE.c b/net/netfilter/xt_TRACE.c index fbb04b86c46b..df48967af382 100644 --- a/net/netfilter/xt_TRACE.c +++ b/net/netfilter/xt_TRACE.c | |||
@@ -11,7 +11,7 @@ MODULE_ALIAS("ipt_TRACE"); | |||
11 | MODULE_ALIAS("ip6t_TRACE"); | 11 | MODULE_ALIAS("ip6t_TRACE"); |
12 | 12 | ||
13 | static unsigned int | 13 | static unsigned int |
14 | trace_tg(struct sk_buff *skb, const struct xt_target_param *par) | 14 | trace_tg(struct sk_buff *skb, const struct xt_action_param *par) |
15 | { | 15 | { |
16 | skb->nf_trace = 1; | 16 | skb->nf_trace = 1; |
17 | return XT_CONTINUE; | 17 | return XT_CONTINUE; |
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c index 225ee3ecd69d..30b95a1c1c89 100644 --- a/net/netfilter/xt_cluster.c +++ b/net/netfilter/xt_cluster.c | |||
@@ -5,6 +5,7 @@ | |||
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | */ | 7 | */ |
8 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
8 | #include <linux/module.h> | 9 | #include <linux/module.h> |
9 | #include <linux/skbuff.h> | 10 | #include <linux/skbuff.h> |
10 | #include <linux/jhash.h> | 11 | #include <linux/jhash.h> |
@@ -85,7 +86,7 @@ xt_cluster_is_multicast_addr(const struct sk_buff *skb, u_int8_t family) | |||
85 | } | 86 | } |
86 | 87 | ||
87 | static bool | 88 | static bool |
88 | xt_cluster_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 89 | xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par) |
89 | { | 90 | { |
90 | struct sk_buff *pskb = (struct sk_buff *)skb; | 91 | struct sk_buff *pskb = (struct sk_buff *)skb; |
91 | const struct xt_cluster_match_info *info = par->matchinfo; | 92 | const struct xt_cluster_match_info *info = par->matchinfo; |
@@ -131,22 +132,22 @@ xt_cluster_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
131 | !!(info->flags & XT_CLUSTER_F_INV); | 132 | !!(info->flags & XT_CLUSTER_F_INV); |
132 | } | 133 | } |
133 | 134 | ||
134 | static bool xt_cluster_mt_checkentry(const struct xt_mtchk_param *par) | 135 | static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par) |
135 | { | 136 | { |
136 | struct xt_cluster_match_info *info = par->matchinfo; | 137 | struct xt_cluster_match_info *info = par->matchinfo; |
137 | 138 | ||
138 | if (info->total_nodes > XT_CLUSTER_NODES_MAX) { | 139 | if (info->total_nodes > XT_CLUSTER_NODES_MAX) { |
139 | printk(KERN_ERR "xt_cluster: you have exceeded the maximum " | 140 | pr_info("you have exceeded the maximum " |
140 | "number of cluster nodes (%u > %u)\n", | 141 | "number of cluster nodes (%u > %u)\n", |
141 | info->total_nodes, XT_CLUSTER_NODES_MAX); | 142 | info->total_nodes, XT_CLUSTER_NODES_MAX); |
142 | return false; | 143 | return -EINVAL; |
143 | } | 144 | } |
144 | if (info->node_mask >= (1ULL << info->total_nodes)) { | 145 | if (info->node_mask >= (1ULL << info->total_nodes)) { |
145 | printk(KERN_ERR "xt_cluster: this node mask cannot be " | 146 | pr_info("this node mask cannot be " |
146 | "higher than the total number of nodes\n"); | 147 | "higher than the total number of nodes\n"); |
147 | return false; | 148 | return -EDOM; |
148 | } | 149 | } |
149 | return true; | 150 | return 0; |
150 | } | 151 | } |
151 | 152 | ||
152 | static struct xt_match xt_cluster_match __read_mostly = { | 153 | static struct xt_match xt_cluster_match __read_mostly = { |
diff --git a/net/netfilter/xt_comment.c b/net/netfilter/xt_comment.c index e82179832acd..5c861d2f21ca 100644 --- a/net/netfilter/xt_comment.c +++ b/net/netfilter/xt_comment.c | |||
@@ -16,7 +16,7 @@ MODULE_ALIAS("ipt_comment"); | |||
16 | MODULE_ALIAS("ip6t_comment"); | 16 | MODULE_ALIAS("ip6t_comment"); |
17 | 17 | ||
18 | static bool | 18 | static bool |
19 | comment_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 19 | comment_mt(const struct sk_buff *skb, struct xt_action_param *par) |
20 | { | 20 | { |
21 | /* We always match */ | 21 | /* We always match */ |
22 | return true; | 22 | return true; |
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c index 955e6598a7f0..73517835303d 100644 --- a/net/netfilter/xt_connbytes.c +++ b/net/netfilter/xt_connbytes.c | |||
@@ -1,6 +1,7 @@ | |||
1 | /* Kernel module to match connection tracking byte counter. | 1 | /* Kernel module to match connection tracking byte counter. |
2 | * GPL (C) 2002 Martin Devera (devik@cdi.cz). | 2 | * GPL (C) 2002 Martin Devera (devik@cdi.cz). |
3 | */ | 3 | */ |
4 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
4 | #include <linux/module.h> | 5 | #include <linux/module.h> |
5 | #include <linux/bitops.h> | 6 | #include <linux/bitops.h> |
6 | #include <linux/skbuff.h> | 7 | #include <linux/skbuff.h> |
@@ -17,7 +18,7 @@ MODULE_ALIAS("ipt_connbytes"); | |||
17 | MODULE_ALIAS("ip6t_connbytes"); | 18 | MODULE_ALIAS("ip6t_connbytes"); |
18 | 19 | ||
19 | static bool | 20 | static bool |
20 | connbytes_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 21 | connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par) |
21 | { | 22 | { |
22 | const struct xt_connbytes_info *sinfo = par->matchinfo; | 23 | const struct xt_connbytes_info *sinfo = par->matchinfo; |
23 | const struct nf_conn *ct; | 24 | const struct nf_conn *ct; |
@@ -92,27 +93,26 @@ connbytes_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
92 | return what >= sinfo->count.from; | 93 | return what >= sinfo->count.from; |
93 | } | 94 | } |
94 | 95 | ||
95 | static bool connbytes_mt_check(const struct xt_mtchk_param *par) | 96 | static int connbytes_mt_check(const struct xt_mtchk_param *par) |
96 | { | 97 | { |
97 | const struct xt_connbytes_info *sinfo = par->matchinfo; | 98 | const struct xt_connbytes_info *sinfo = par->matchinfo; |
99 | int ret; | ||
98 | 100 | ||
99 | if (sinfo->what != XT_CONNBYTES_PKTS && | 101 | if (sinfo->what != XT_CONNBYTES_PKTS && |
100 | sinfo->what != XT_CONNBYTES_BYTES && | 102 | sinfo->what != XT_CONNBYTES_BYTES && |
101 | sinfo->what != XT_CONNBYTES_AVGPKT) | 103 | sinfo->what != XT_CONNBYTES_AVGPKT) |
102 | return false; | 104 | return -EINVAL; |
103 | 105 | ||
104 | if (sinfo->direction != XT_CONNBYTES_DIR_ORIGINAL && | 106 | if (sinfo->direction != XT_CONNBYTES_DIR_ORIGINAL && |
105 | sinfo->direction != XT_CONNBYTES_DIR_REPLY && | 107 | sinfo->direction != XT_CONNBYTES_DIR_REPLY && |
106 | sinfo->direction != XT_CONNBYTES_DIR_BOTH) | 108 | sinfo->direction != XT_CONNBYTES_DIR_BOTH) |
107 | return false; | 109 | return -EINVAL; |
108 | |||
109 | if (nf_ct_l3proto_try_module_get(par->family) < 0) { | ||
110 | printk(KERN_WARNING "can't load conntrack support for " | ||
111 | "proto=%u\n", par->family); | ||
112 | return false; | ||
113 | } | ||
114 | 110 | ||
115 | return true; | 111 | ret = nf_ct_l3proto_try_module_get(par->family); |
112 | if (ret < 0) | ||
113 | pr_info("cannot load conntrack support for proto=%u\n", | ||
114 | par->family); | ||
115 | return ret; | ||
116 | } | 116 | } |
117 | 117 | ||
118 | static void connbytes_mt_destroy(const struct xt_mtdtor_param *par) | 118 | static void connbytes_mt_destroy(const struct xt_mtdtor_param *par) |
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c index 388ca4596098..5c5b6b921b84 100644 --- a/net/netfilter/xt_connlimit.c +++ b/net/netfilter/xt_connlimit.c | |||
@@ -5,13 +5,13 @@ | |||
5 | * Nov 2002: Martin Bene <martin.bene@icomedias.com>: | 5 | * Nov 2002: Martin Bene <martin.bene@icomedias.com>: |
6 | * only ignore TIME_WAIT or gone connections | 6 | * only ignore TIME_WAIT or gone connections |
7 | * (C) CC Computer Consultants GmbH, 2007 | 7 | * (C) CC Computer Consultants GmbH, 2007 |
8 | * Contact: <jengelh@computergmbh.de> | ||
9 | * | 8 | * |
10 | * based on ... | 9 | * based on ... |
11 | * | 10 | * |
12 | * Kernel module to match connection tracking information. | 11 | * Kernel module to match connection tracking information. |
13 | * GPL (C) 1999 Rusty Russell (rusty@rustcorp.com.au). | 12 | * GPL (C) 1999 Rusty Russell (rusty@rustcorp.com.au). |
14 | */ | 13 | */ |
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
15 | #include <linux/in.h> | 15 | #include <linux/in.h> |
16 | #include <linux/in6.h> | 16 | #include <linux/in6.h> |
17 | #include <linux/ip.h> | 17 | #include <linux/ip.h> |
@@ -173,7 +173,7 @@ static int count_them(struct net *net, | |||
173 | } | 173 | } |
174 | 174 | ||
175 | static bool | 175 | static bool |
176 | connlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 176 | connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) |
177 | { | 177 | { |
178 | struct net *net = dev_net(par->in ? par->in : par->out); | 178 | struct net *net = dev_net(par->in ? par->in : par->out); |
179 | const struct xt_connlimit_info *info = par->matchinfo; | 179 | const struct xt_connlimit_info *info = par->matchinfo; |
@@ -206,44 +206,46 @@ connlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
206 | 206 | ||
207 | if (connections < 0) { | 207 | if (connections < 0) { |
208 | /* kmalloc failed, drop it entirely */ | 208 | /* kmalloc failed, drop it entirely */ |
209 | *par->hotdrop = true; | 209 | par->hotdrop = true; |
210 | return false; | 210 | return false; |
211 | } | 211 | } |
212 | 212 | ||
213 | return (connections > info->limit) ^ info->inverse; | 213 | return (connections > info->limit) ^ info->inverse; |
214 | 214 | ||
215 | hotdrop: | 215 | hotdrop: |
216 | *par->hotdrop = true; | 216 | par->hotdrop = true; |
217 | return false; | 217 | return false; |
218 | } | 218 | } |
219 | 219 | ||
220 | static bool connlimit_mt_check(const struct xt_mtchk_param *par) | 220 | static int connlimit_mt_check(const struct xt_mtchk_param *par) |
221 | { | 221 | { |
222 | struct xt_connlimit_info *info = par->matchinfo; | 222 | struct xt_connlimit_info *info = par->matchinfo; |
223 | unsigned int i; | 223 | unsigned int i; |
224 | int ret; | ||
224 | 225 | ||
225 | if (unlikely(!connlimit_rnd_inited)) { | 226 | if (unlikely(!connlimit_rnd_inited)) { |
226 | get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd)); | 227 | get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd)); |
227 | connlimit_rnd_inited = true; | 228 | connlimit_rnd_inited = true; |
228 | } | 229 | } |
229 | if (nf_ct_l3proto_try_module_get(par->family) < 0) { | 230 | ret = nf_ct_l3proto_try_module_get(par->family); |
230 | printk(KERN_WARNING "cannot load conntrack support for " | 231 | if (ret < 0) { |
231 | "address family %u\n", par->family); | 232 | pr_info("cannot load conntrack support for " |
232 | return false; | 233 | "address family %u\n", par->family); |
234 | return ret; | ||
233 | } | 235 | } |
234 | 236 | ||
235 | /* init private data */ | 237 | /* init private data */ |
236 | info->data = kmalloc(sizeof(struct xt_connlimit_data), GFP_KERNEL); | 238 | info->data = kmalloc(sizeof(struct xt_connlimit_data), GFP_KERNEL); |
237 | if (info->data == NULL) { | 239 | if (info->data == NULL) { |
238 | nf_ct_l3proto_module_put(par->family); | 240 | nf_ct_l3proto_module_put(par->family); |
239 | return false; | 241 | return -ENOMEM; |
240 | } | 242 | } |
241 | 243 | ||
242 | spin_lock_init(&info->data->lock); | 244 | spin_lock_init(&info->data->lock); |
243 | for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i) | 245 | for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i) |
244 | INIT_LIST_HEAD(&info->data->iphash[i]); | 246 | INIT_LIST_HEAD(&info->data->iphash[i]); |
245 | 247 | ||
246 | return true; | 248 | return 0; |
247 | } | 249 | } |
248 | 250 | ||
249 | static void connlimit_mt_destroy(const struct xt_mtdtor_param *par) | 251 | static void connlimit_mt_destroy(const struct xt_mtdtor_param *par) |
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c index 122aa8b0147b..7278145e6a68 100644 --- a/net/netfilter/xt_connmark.c +++ b/net/netfilter/xt_connmark.c | |||
@@ -1,10 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * xt_connmark - Netfilter module to match connection mark values | 2 | * xt_connmark - Netfilter module to operate on connection marks |
3 | * | 3 | * |
4 | * Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com> | 4 | * Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com> |
5 | * by Henrik Nordstrom <hno@marasystems.com> | 5 | * by Henrik Nordstrom <hno@marasystems.com> |
6 | * Copyright © CC Computer Consultants GmbH, 2007 - 2008 | 6 | * Copyright © CC Computer Consultants GmbH, 2007 - 2008 |
7 | * Jan Engelhardt <jengelh@computergmbh.de> | 7 | * Jan Engelhardt <jengelh@medozas.de> |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License as published by | 10 | * it under the terms of the GNU General Public License as published by |
@@ -24,17 +24,74 @@ | |||
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/skbuff.h> | 25 | #include <linux/skbuff.h> |
26 | #include <net/netfilter/nf_conntrack.h> | 26 | #include <net/netfilter/nf_conntrack.h> |
27 | #include <net/netfilter/nf_conntrack_ecache.h> | ||
27 | #include <linux/netfilter/x_tables.h> | 28 | #include <linux/netfilter/x_tables.h> |
28 | #include <linux/netfilter/xt_connmark.h> | 29 | #include <linux/netfilter/xt_connmark.h> |
29 | 30 | ||
30 | MODULE_AUTHOR("Henrik Nordstrom <hno@marasystems.com>"); | 31 | MODULE_AUTHOR("Henrik Nordstrom <hno@marasystems.com>"); |
31 | MODULE_DESCRIPTION("Xtables: connection mark match"); | 32 | MODULE_DESCRIPTION("Xtables: connection mark operations"); |
32 | MODULE_LICENSE("GPL"); | 33 | MODULE_LICENSE("GPL"); |
34 | MODULE_ALIAS("ipt_CONNMARK"); | ||
35 | MODULE_ALIAS("ip6t_CONNMARK"); | ||
33 | MODULE_ALIAS("ipt_connmark"); | 36 | MODULE_ALIAS("ipt_connmark"); |
34 | MODULE_ALIAS("ip6t_connmark"); | 37 | MODULE_ALIAS("ip6t_connmark"); |
35 | 38 | ||
39 | static unsigned int | ||
40 | connmark_tg(struct sk_buff *skb, const struct xt_action_param *par) | ||
41 | { | ||
42 | const struct xt_connmark_tginfo1 *info = par->targinfo; | ||
43 | enum ip_conntrack_info ctinfo; | ||
44 | struct nf_conn *ct; | ||
45 | u_int32_t newmark; | ||
46 | |||
47 | ct = nf_ct_get(skb, &ctinfo); | ||
48 | if (ct == NULL) | ||
49 | return XT_CONTINUE; | ||
50 | |||
51 | switch (info->mode) { | ||
52 | case XT_CONNMARK_SET: | ||
53 | newmark = (ct->mark & ~info->ctmask) ^ info->ctmark; | ||
54 | if (ct->mark != newmark) { | ||
55 | ct->mark = newmark; | ||
56 | nf_conntrack_event_cache(IPCT_MARK, ct); | ||
57 | } | ||
58 | break; | ||
59 | case XT_CONNMARK_SAVE: | ||
60 | newmark = (ct->mark & ~info->ctmask) ^ | ||
61 | (skb->mark & info->nfmask); | ||
62 | if (ct->mark != newmark) { | ||
63 | ct->mark = newmark; | ||
64 | nf_conntrack_event_cache(IPCT_MARK, ct); | ||
65 | } | ||
66 | break; | ||
67 | case XT_CONNMARK_RESTORE: | ||
68 | newmark = (skb->mark & ~info->nfmask) ^ | ||
69 | (ct->mark & info->ctmask); | ||
70 | skb->mark = newmark; | ||
71 | break; | ||
72 | } | ||
73 | |||
74 | return XT_CONTINUE; | ||
75 | } | ||
76 | |||
77 | static int connmark_tg_check(const struct xt_tgchk_param *par) | ||
78 | { | ||
79 | int ret; | ||
80 | |||
81 | ret = nf_ct_l3proto_try_module_get(par->family); | ||
82 | if (ret < 0) | ||
83 | pr_info("cannot load conntrack support for proto=%u\n", | ||
84 | par->family); | ||
85 | return ret; | ||
86 | } | ||
87 | |||
88 | static void connmark_tg_destroy(const struct xt_tgdtor_param *par) | ||
89 | { | ||
90 | nf_ct_l3proto_module_put(par->family); | ||
91 | } | ||
92 | |||
36 | static bool | 93 | static bool |
37 | connmark_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 94 | connmark_mt(const struct sk_buff *skb, struct xt_action_param *par) |
38 | { | 95 | { |
39 | const struct xt_connmark_mtinfo1 *info = par->matchinfo; | 96 | const struct xt_connmark_mtinfo1 *info = par->matchinfo; |
40 | enum ip_conntrack_info ctinfo; | 97 | enum ip_conntrack_info ctinfo; |
@@ -47,14 +104,15 @@ connmark_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
47 | return ((ct->mark & info->mask) == info->mark) ^ info->invert; | 104 | return ((ct->mark & info->mask) == info->mark) ^ info->invert; |
48 | } | 105 | } |
49 | 106 | ||
50 | static bool connmark_mt_check(const struct xt_mtchk_param *par) | 107 | static int connmark_mt_check(const struct xt_mtchk_param *par) |
51 | { | 108 | { |
52 | if (nf_ct_l3proto_try_module_get(par->family) < 0) { | 109 | int ret; |
53 | printk(KERN_WARNING "cannot load conntrack support for " | 110 | |
54 | "proto=%u\n", par->family); | 111 | ret = nf_ct_l3proto_try_module_get(par->family); |
55 | return false; | 112 | if (ret < 0) |
56 | } | 113 | pr_info("cannot load conntrack support for proto=%u\n", |
57 | return true; | 114 | par->family); |
115 | return ret; | ||
58 | } | 116 | } |
59 | 117 | ||
60 | static void connmark_mt_destroy(const struct xt_mtdtor_param *par) | 118 | static void connmark_mt_destroy(const struct xt_mtdtor_param *par) |
@@ -62,6 +120,17 @@ static void connmark_mt_destroy(const struct xt_mtdtor_param *par) | |||
62 | nf_ct_l3proto_module_put(par->family); | 120 | nf_ct_l3proto_module_put(par->family); |
63 | } | 121 | } |
64 | 122 | ||
123 | static struct xt_target connmark_tg_reg __read_mostly = { | ||
124 | .name = "CONNMARK", | ||
125 | .revision = 1, | ||
126 | .family = NFPROTO_UNSPEC, | ||
127 | .checkentry = connmark_tg_check, | ||
128 | .target = connmark_tg, | ||
129 | .targetsize = sizeof(struct xt_connmark_tginfo1), | ||
130 | .destroy = connmark_tg_destroy, | ||
131 | .me = THIS_MODULE, | ||
132 | }; | ||
133 | |||
65 | static struct xt_match connmark_mt_reg __read_mostly = { | 134 | static struct xt_match connmark_mt_reg __read_mostly = { |
66 | .name = "connmark", | 135 | .name = "connmark", |
67 | .revision = 1, | 136 | .revision = 1, |
@@ -75,12 +144,23 @@ static struct xt_match connmark_mt_reg __read_mostly = { | |||
75 | 144 | ||
76 | static int __init connmark_mt_init(void) | 145 | static int __init connmark_mt_init(void) |
77 | { | 146 | { |
78 | return xt_register_match(&connmark_mt_reg); | 147 | int ret; |
148 | |||
149 | ret = xt_register_target(&connmark_tg_reg); | ||
150 | if (ret < 0) | ||
151 | return ret; | ||
152 | ret = xt_register_match(&connmark_mt_reg); | ||
153 | if (ret < 0) { | ||
154 | xt_unregister_target(&connmark_tg_reg); | ||
155 | return ret; | ||
156 | } | ||
157 | return 0; | ||
79 | } | 158 | } |
80 | 159 | ||
81 | static void __exit connmark_mt_exit(void) | 160 | static void __exit connmark_mt_exit(void) |
82 | { | 161 | { |
83 | xt_unregister_match(&connmark_mt_reg); | 162 | xt_unregister_match(&connmark_mt_reg); |
163 | xt_unregister_target(&connmark_tg_reg); | ||
84 | } | 164 | } |
85 | 165 | ||
86 | module_init(connmark_mt_init); | 166 | module_init(connmark_mt_init); |
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c index ae66305f0fe5..39681f10291c 100644 --- a/net/netfilter/xt_conntrack.c +++ b/net/netfilter/xt_conntrack.c | |||
@@ -9,7 +9,7 @@ | |||
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
15 | #include <net/ipv6.h> | 15 | #include <net/ipv6.h> |
@@ -113,7 +113,7 @@ ct_proto_port_check(const struct xt_conntrack_mtinfo2 *info, | |||
113 | } | 113 | } |
114 | 114 | ||
115 | static bool | 115 | static bool |
116 | conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par, | 116 | conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par, |
117 | u16 state_mask, u16 status_mask) | 117 | u16 state_mask, u16 status_mask) |
118 | { | 118 | { |
119 | const struct xt_conntrack_mtinfo2 *info = par->matchinfo; | 119 | const struct xt_conntrack_mtinfo2 *info = par->matchinfo; |
@@ -191,7 +191,7 @@ conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par, | |||
191 | } | 191 | } |
192 | 192 | ||
193 | static bool | 193 | static bool |
194 | conntrack_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par) | 194 | conntrack_mt_v1(const struct sk_buff *skb, struct xt_action_param *par) |
195 | { | 195 | { |
196 | const struct xt_conntrack_mtinfo1 *info = par->matchinfo; | 196 | const struct xt_conntrack_mtinfo1 *info = par->matchinfo; |
197 | 197 | ||
@@ -199,21 +199,22 @@ conntrack_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par) | |||
199 | } | 199 | } |
200 | 200 | ||
201 | static bool | 201 | static bool |
202 | conntrack_mt_v2(const struct sk_buff *skb, const struct xt_match_param *par) | 202 | conntrack_mt_v2(const struct sk_buff *skb, struct xt_action_param *par) |
203 | { | 203 | { |
204 | const struct xt_conntrack_mtinfo2 *info = par->matchinfo; | 204 | const struct xt_conntrack_mtinfo2 *info = par->matchinfo; |
205 | 205 | ||
206 | return conntrack_mt(skb, par, info->state_mask, info->status_mask); | 206 | return conntrack_mt(skb, par, info->state_mask, info->status_mask); |
207 | } | 207 | } |
208 | 208 | ||
209 | static bool conntrack_mt_check(const struct xt_mtchk_param *par) | 209 | static int conntrack_mt_check(const struct xt_mtchk_param *par) |
210 | { | 210 | { |
211 | if (nf_ct_l3proto_try_module_get(par->family) < 0) { | 211 | int ret; |
212 | printk(KERN_WARNING "can't load conntrack support for " | 212 | |
213 | "proto=%u\n", par->family); | 213 | ret = nf_ct_l3proto_try_module_get(par->family); |
214 | return false; | 214 | if (ret < 0) |
215 | } | 215 | pr_info("cannot load conntrack support for proto=%u\n", |
216 | return true; | 216 | par->family); |
217 | return ret; | ||
217 | } | 218 | } |
218 | 219 | ||
219 | static void conntrack_mt_destroy(const struct xt_mtdtor_param *par) | 220 | static void conntrack_mt_destroy(const struct xt_mtdtor_param *par) |
diff --git a/net/netfilter/xt_dccp.c b/net/netfilter/xt_dccp.c index 395af5943ffd..b63d2a3d80ba 100644 --- a/net/netfilter/xt_dccp.c +++ b/net/netfilter/xt_dccp.c | |||
@@ -96,7 +96,7 @@ match_option(u_int8_t option, const struct sk_buff *skb, unsigned int protoff, | |||
96 | } | 96 | } |
97 | 97 | ||
98 | static bool | 98 | static bool |
99 | dccp_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 99 | dccp_mt(const struct sk_buff *skb, struct xt_action_param *par) |
100 | { | 100 | { |
101 | const struct xt_dccp_info *info = par->matchinfo; | 101 | const struct xt_dccp_info *info = par->matchinfo; |
102 | const struct dccp_hdr *dh; | 102 | const struct dccp_hdr *dh; |
@@ -107,7 +107,7 @@ dccp_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
107 | 107 | ||
108 | dh = skb_header_pointer(skb, par->thoff, sizeof(_dh), &_dh); | 108 | dh = skb_header_pointer(skb, par->thoff, sizeof(_dh), &_dh); |
109 | if (dh == NULL) { | 109 | if (dh == NULL) { |
110 | *par->hotdrop = true; | 110 | par->hotdrop = true; |
111 | return false; | 111 | return false; |
112 | } | 112 | } |
113 | 113 | ||
@@ -120,17 +120,21 @@ dccp_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
120 | && DCCHECK(match_types(dh, info->typemask), | 120 | && DCCHECK(match_types(dh, info->typemask), |
121 | XT_DCCP_TYPE, info->flags, info->invflags) | 121 | XT_DCCP_TYPE, info->flags, info->invflags) |
122 | && DCCHECK(match_option(info->option, skb, par->thoff, dh, | 122 | && DCCHECK(match_option(info->option, skb, par->thoff, dh, |
123 | par->hotdrop), | 123 | &par->hotdrop), |
124 | XT_DCCP_OPTION, info->flags, info->invflags); | 124 | XT_DCCP_OPTION, info->flags, info->invflags); |
125 | } | 125 | } |
126 | 126 | ||
127 | static bool dccp_mt_check(const struct xt_mtchk_param *par) | 127 | static int dccp_mt_check(const struct xt_mtchk_param *par) |
128 | { | 128 | { |
129 | const struct xt_dccp_info *info = par->matchinfo; | 129 | const struct xt_dccp_info *info = par->matchinfo; |
130 | 130 | ||
131 | return !(info->flags & ~XT_DCCP_VALID_FLAGS) | 131 | if (info->flags & ~XT_DCCP_VALID_FLAGS) |
132 | && !(info->invflags & ~XT_DCCP_VALID_FLAGS) | 132 | return -EINVAL; |
133 | && !(info->invflags & ~info->flags); | 133 | if (info->invflags & ~XT_DCCP_VALID_FLAGS) |
134 | return -EINVAL; | ||
135 | if (info->invflags & ~info->flags) | ||
136 | return -EINVAL; | ||
137 | return 0; | ||
134 | } | 138 | } |
135 | 139 | ||
136 | static struct xt_match dccp_mt_reg[] __read_mostly = { | 140 | static struct xt_match dccp_mt_reg[] __read_mostly = { |
diff --git a/net/netfilter/xt_dscp.c b/net/netfilter/xt_dscp.c index 0280d3a8c161..64670fc5d0e1 100644 --- a/net/netfilter/xt_dscp.c +++ b/net/netfilter/xt_dscp.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | 9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/skbuff.h> | 11 | #include <linux/skbuff.h> |
12 | #include <linux/ip.h> | 12 | #include <linux/ip.h> |
@@ -25,7 +25,7 @@ MODULE_ALIAS("ipt_tos"); | |||
25 | MODULE_ALIAS("ip6t_tos"); | 25 | MODULE_ALIAS("ip6t_tos"); |
26 | 26 | ||
27 | static bool | 27 | static bool |
28 | dscp_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 28 | dscp_mt(const struct sk_buff *skb, struct xt_action_param *par) |
29 | { | 29 | { |
30 | const struct xt_dscp_info *info = par->matchinfo; | 30 | const struct xt_dscp_info *info = par->matchinfo; |
31 | u_int8_t dscp = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT; | 31 | u_int8_t dscp = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT; |
@@ -34,7 +34,7 @@ dscp_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
34 | } | 34 | } |
35 | 35 | ||
36 | static bool | 36 | static bool |
37 | dscp_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | 37 | dscp_mt6(const struct sk_buff *skb, struct xt_action_param *par) |
38 | { | 38 | { |
39 | const struct xt_dscp_info *info = par->matchinfo; | 39 | const struct xt_dscp_info *info = par->matchinfo; |
40 | u_int8_t dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> XT_DSCP_SHIFT; | 40 | u_int8_t dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> XT_DSCP_SHIFT; |
@@ -42,23 +42,23 @@ dscp_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | |||
42 | return (dscp == info->dscp) ^ !!info->invert; | 42 | return (dscp == info->dscp) ^ !!info->invert; |
43 | } | 43 | } |
44 | 44 | ||
45 | static bool dscp_mt_check(const struct xt_mtchk_param *par) | 45 | static int dscp_mt_check(const struct xt_mtchk_param *par) |
46 | { | 46 | { |
47 | const struct xt_dscp_info *info = par->matchinfo; | 47 | const struct xt_dscp_info *info = par->matchinfo; |
48 | 48 | ||
49 | if (info->dscp > XT_DSCP_MAX) { | 49 | if (info->dscp > XT_DSCP_MAX) { |
50 | printk(KERN_ERR "xt_dscp: dscp %x out of range\n", info->dscp); | 50 | pr_info("dscp %x out of range\n", info->dscp); |
51 | return false; | 51 | return -EDOM; |
52 | } | 52 | } |
53 | 53 | ||
54 | return true; | 54 | return 0; |
55 | } | 55 | } |
56 | 56 | ||
57 | static bool tos_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 57 | static bool tos_mt(const struct sk_buff *skb, struct xt_action_param *par) |
58 | { | 58 | { |
59 | const struct xt_tos_match_info *info = par->matchinfo; | 59 | const struct xt_tos_match_info *info = par->matchinfo; |
60 | 60 | ||
61 | if (par->match->family == NFPROTO_IPV4) | 61 | if (par->family == NFPROTO_IPV4) |
62 | return ((ip_hdr(skb)->tos & info->tos_mask) == | 62 | return ((ip_hdr(skb)->tos & info->tos_mask) == |
63 | info->tos_value) ^ !!info->invert; | 63 | info->tos_value) ^ !!info->invert; |
64 | else | 64 | else |
diff --git a/net/netfilter/xt_esp.c b/net/netfilter/xt_esp.c index 609439967c2c..171ba82b5902 100644 --- a/net/netfilter/xt_esp.c +++ b/net/netfilter/xt_esp.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | 9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/skbuff.h> | 11 | #include <linux/skbuff.h> |
12 | #include <linux/in.h> | 12 | #include <linux/in.h> |
@@ -24,25 +24,19 @@ MODULE_DESCRIPTION("Xtables: IPsec-ESP packet match"); | |||
24 | MODULE_ALIAS("ipt_esp"); | 24 | MODULE_ALIAS("ipt_esp"); |
25 | MODULE_ALIAS("ip6t_esp"); | 25 | MODULE_ALIAS("ip6t_esp"); |
26 | 26 | ||
27 | #if 0 | ||
28 | #define duprintf(format, args...) printk(format , ## args) | ||
29 | #else | ||
30 | #define duprintf(format, args...) | ||
31 | #endif | ||
32 | |||
33 | /* Returns 1 if the spi is matched by the range, 0 otherwise */ | 27 | /* Returns 1 if the spi is matched by the range, 0 otherwise */ |
34 | static inline bool | 28 | static inline bool |
35 | spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert) | 29 | spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert) |
36 | { | 30 | { |
37 | bool r; | 31 | bool r; |
38 | duprintf("esp spi_match:%c 0x%x <= 0x%x <= 0x%x", invert ? '!' : ' ', | 32 | pr_debug("spi_match:%c 0x%x <= 0x%x <= 0x%x\n", |
39 | min, spi, max); | 33 | invert ? '!' : ' ', min, spi, max); |
40 | r = (spi >= min && spi <= max) ^ invert; | 34 | r = (spi >= min && spi <= max) ^ invert; |
41 | duprintf(" result %s\n", r ? "PASS" : "FAILED"); | 35 | pr_debug(" result %s\n", r ? "PASS" : "FAILED"); |
42 | return r; | 36 | return r; |
43 | } | 37 | } |
44 | 38 | ||
45 | static bool esp_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 39 | static bool esp_mt(const struct sk_buff *skb, struct xt_action_param *par) |
46 | { | 40 | { |
47 | const struct ip_esp_hdr *eh; | 41 | const struct ip_esp_hdr *eh; |
48 | struct ip_esp_hdr _esp; | 42 | struct ip_esp_hdr _esp; |
@@ -57,8 +51,8 @@ static bool esp_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
57 | /* We've been asked to examine this packet, and we | 51 | /* We've been asked to examine this packet, and we |
58 | * can't. Hence, no choice but to drop. | 52 | * can't. Hence, no choice but to drop. |
59 | */ | 53 | */ |
60 | duprintf("Dropping evil ESP tinygram.\n"); | 54 | pr_debug("Dropping evil ESP tinygram.\n"); |
61 | *par->hotdrop = true; | 55 | par->hotdrop = true; |
62 | return false; | 56 | return false; |
63 | } | 57 | } |
64 | 58 | ||
@@ -66,16 +60,16 @@ static bool esp_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
66 | !!(espinfo->invflags & XT_ESP_INV_SPI)); | 60 | !!(espinfo->invflags & XT_ESP_INV_SPI)); |
67 | } | 61 | } |
68 | 62 | ||
69 | static bool esp_mt_check(const struct xt_mtchk_param *par) | 63 | static int esp_mt_check(const struct xt_mtchk_param *par) |
70 | { | 64 | { |
71 | const struct xt_esp *espinfo = par->matchinfo; | 65 | const struct xt_esp *espinfo = par->matchinfo; |
72 | 66 | ||
73 | if (espinfo->invflags & ~XT_ESP_INV_MASK) { | 67 | if (espinfo->invflags & ~XT_ESP_INV_MASK) { |
74 | duprintf("xt_esp: unknown flags %X\n", espinfo->invflags); | 68 | pr_debug("unknown flags %X\n", espinfo->invflags); |
75 | return false; | 69 | return -EINVAL; |
76 | } | 70 | } |
77 | 71 | ||
78 | return true; | 72 | return 0; |
79 | } | 73 | } |
80 | 74 | ||
81 | static struct xt_match esp_mt_reg[] __read_mostly = { | 75 | static struct xt_match esp_mt_reg[] __read_mostly = { |
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 215a64835de8..b46a8390896d 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c | |||
@@ -7,6 +7,7 @@ | |||
7 | * | 7 | * |
8 | * Development of this code was funded by Astaro AG, http://www.astaro.com/ | 8 | * Development of this code was funded by Astaro AG, http://www.astaro.com/ |
9 | */ | 9 | */ |
10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
10 | #include <linux/module.h> | 11 | #include <linux/module.h> |
11 | #include <linux/spinlock.h> | 12 | #include <linux/spinlock.h> |
12 | #include <linux/random.h> | 13 | #include <linux/random.h> |
@@ -36,7 +37,7 @@ | |||
36 | 37 | ||
37 | MODULE_LICENSE("GPL"); | 38 | MODULE_LICENSE("GPL"); |
38 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | 39 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); |
39 | MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>"); | 40 | MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>"); |
40 | MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match"); | 41 | MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match"); |
41 | MODULE_ALIAS("ipt_hashlimit"); | 42 | MODULE_ALIAS("ipt_hashlimit"); |
42 | MODULE_ALIAS("ip6t_hashlimit"); | 43 | MODULE_ALIAS("ip6t_hashlimit"); |
@@ -80,12 +81,14 @@ struct dsthash_ent { | |||
80 | struct dsthash_dst dst; | 81 | struct dsthash_dst dst; |
81 | 82 | ||
82 | /* modified structure members in the end */ | 83 | /* modified structure members in the end */ |
84 | spinlock_t lock; | ||
83 | unsigned long expires; /* precalculated expiry time */ | 85 | unsigned long expires; /* precalculated expiry time */ |
84 | struct { | 86 | struct { |
85 | unsigned long prev; /* last modification */ | 87 | unsigned long prev; /* last modification */ |
86 | u_int32_t credit; | 88 | u_int32_t credit; |
87 | u_int32_t credit_cap, cost; | 89 | u_int32_t credit_cap, cost; |
88 | } rateinfo; | 90 | } rateinfo; |
91 | struct rcu_head rcu; | ||
89 | }; | 92 | }; |
90 | 93 | ||
91 | struct xt_hashlimit_htable { | 94 | struct xt_hashlimit_htable { |
@@ -142,9 +145,11 @@ dsthash_find(const struct xt_hashlimit_htable *ht, | |||
142 | u_int32_t hash = hash_dst(ht, dst); | 145 | u_int32_t hash = hash_dst(ht, dst); |
143 | 146 | ||
144 | if (!hlist_empty(&ht->hash[hash])) { | 147 | if (!hlist_empty(&ht->hash[hash])) { |
145 | hlist_for_each_entry(ent, pos, &ht->hash[hash], node) | 148 | hlist_for_each_entry_rcu(ent, pos, &ht->hash[hash], node) |
146 | if (dst_cmp(ent, dst)) | 149 | if (dst_cmp(ent, dst)) { |
150 | spin_lock(&ent->lock); | ||
147 | return ent; | 151 | return ent; |
152 | } | ||
148 | } | 153 | } |
149 | return NULL; | 154 | return NULL; |
150 | } | 155 | } |
@@ -156,9 +161,10 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht, | |||
156 | { | 161 | { |
157 | struct dsthash_ent *ent; | 162 | struct dsthash_ent *ent; |
158 | 163 | ||
164 | spin_lock(&ht->lock); | ||
159 | /* initialize hash with random val at the time we allocate | 165 | /* initialize hash with random val at the time we allocate |
160 | * the first hashtable entry */ | 166 | * the first hashtable entry */ |
161 | if (!ht->rnd_initialized) { | 167 | if (unlikely(!ht->rnd_initialized)) { |
162 | get_random_bytes(&ht->rnd, sizeof(ht->rnd)); | 168 | get_random_bytes(&ht->rnd, sizeof(ht->rnd)); |
163 | ht->rnd_initialized = true; | 169 | ht->rnd_initialized = true; |
164 | } | 170 | } |
@@ -166,106 +172,40 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht, | |||
166 | if (ht->cfg.max && ht->count >= ht->cfg.max) { | 172 | if (ht->cfg.max && ht->count >= ht->cfg.max) { |
167 | /* FIXME: do something. question is what.. */ | 173 | /* FIXME: do something. question is what.. */ |
168 | if (net_ratelimit()) | 174 | if (net_ratelimit()) |
169 | printk(KERN_WARNING | 175 | pr_err("max count of %u reached\n", ht->cfg.max); |
170 | "xt_hashlimit: max count of %u reached\n", | 176 | ent = NULL; |
171 | ht->cfg.max); | 177 | } else |
172 | return NULL; | 178 | ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC); |
173 | } | ||
174 | |||
175 | ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC); | ||
176 | if (!ent) { | 179 | if (!ent) { |
177 | if (net_ratelimit()) | 180 | if (net_ratelimit()) |
178 | printk(KERN_ERR | 181 | pr_err("cannot allocate dsthash_ent\n"); |
179 | "xt_hashlimit: can't allocate dsthash_ent\n"); | 182 | } else { |
180 | return NULL; | 183 | memcpy(&ent->dst, dst, sizeof(ent->dst)); |
181 | } | 184 | spin_lock_init(&ent->lock); |
182 | memcpy(&ent->dst, dst, sizeof(ent->dst)); | ||
183 | 185 | ||
184 | hlist_add_head(&ent->node, &ht->hash[hash_dst(ht, dst)]); | 186 | spin_lock(&ent->lock); |
185 | ht->count++; | 187 | hlist_add_head_rcu(&ent->node, &ht->hash[hash_dst(ht, dst)]); |
188 | ht->count++; | ||
189 | } | ||
190 | spin_unlock(&ht->lock); | ||
186 | return ent; | 191 | return ent; |
187 | } | 192 | } |
188 | 193 | ||
189 | static inline void | 194 | static void dsthash_free_rcu(struct rcu_head *head) |
190 | dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent) | ||
191 | { | 195 | { |
192 | hlist_del(&ent->node); | 196 | struct dsthash_ent *ent = container_of(head, struct dsthash_ent, rcu); |
197 | |||
193 | kmem_cache_free(hashlimit_cachep, ent); | 198 | kmem_cache_free(hashlimit_cachep, ent); |
194 | ht->count--; | ||
195 | } | 199 | } |
196 | static void htable_gc(unsigned long htlong); | ||
197 | 200 | ||
198 | static int htable_create_v0(struct net *net, struct xt_hashlimit_info *minfo, u_int8_t family) | 201 | static inline void |
202 | dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent) | ||
199 | { | 203 | { |
200 | struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); | 204 | hlist_del_rcu(&ent->node); |
201 | struct xt_hashlimit_htable *hinfo; | 205 | call_rcu_bh(&ent->rcu, dsthash_free_rcu); |
202 | unsigned int size; | 206 | ht->count--; |
203 | unsigned int i; | ||
204 | |||
205 | if (minfo->cfg.size) | ||
206 | size = minfo->cfg.size; | ||
207 | else { | ||
208 | size = ((totalram_pages << PAGE_SHIFT) / 16384) / | ||
209 | sizeof(struct list_head); | ||
210 | if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE)) | ||
211 | size = 8192; | ||
212 | if (size < 16) | ||
213 | size = 16; | ||
214 | } | ||
215 | /* FIXME: don't use vmalloc() here or anywhere else -HW */ | ||
216 | hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) + | ||
217 | sizeof(struct list_head) * size); | ||
218 | if (!hinfo) { | ||
219 | printk(KERN_ERR "xt_hashlimit: unable to create hashtable\n"); | ||
220 | return -1; | ||
221 | } | ||
222 | minfo->hinfo = hinfo; | ||
223 | |||
224 | /* copy match config into hashtable config */ | ||
225 | hinfo->cfg.mode = minfo->cfg.mode; | ||
226 | hinfo->cfg.avg = minfo->cfg.avg; | ||
227 | hinfo->cfg.burst = minfo->cfg.burst; | ||
228 | hinfo->cfg.max = minfo->cfg.max; | ||
229 | hinfo->cfg.gc_interval = minfo->cfg.gc_interval; | ||
230 | hinfo->cfg.expire = minfo->cfg.expire; | ||
231 | |||
232 | if (family == NFPROTO_IPV4) | ||
233 | hinfo->cfg.srcmask = hinfo->cfg.dstmask = 32; | ||
234 | else | ||
235 | hinfo->cfg.srcmask = hinfo->cfg.dstmask = 128; | ||
236 | |||
237 | hinfo->cfg.size = size; | ||
238 | if (!hinfo->cfg.max) | ||
239 | hinfo->cfg.max = 8 * hinfo->cfg.size; | ||
240 | else if (hinfo->cfg.max < hinfo->cfg.size) | ||
241 | hinfo->cfg.max = hinfo->cfg.size; | ||
242 | |||
243 | for (i = 0; i < hinfo->cfg.size; i++) | ||
244 | INIT_HLIST_HEAD(&hinfo->hash[i]); | ||
245 | |||
246 | hinfo->use = 1; | ||
247 | hinfo->count = 0; | ||
248 | hinfo->family = family; | ||
249 | hinfo->rnd_initialized = false; | ||
250 | spin_lock_init(&hinfo->lock); | ||
251 | hinfo->pde = proc_create_data(minfo->name, 0, | ||
252 | (family == NFPROTO_IPV4) ? | ||
253 | hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit, | ||
254 | &dl_file_ops, hinfo); | ||
255 | if (!hinfo->pde) { | ||
256 | vfree(hinfo); | ||
257 | return -1; | ||
258 | } | ||
259 | hinfo->net = net; | ||
260 | |||
261 | setup_timer(&hinfo->timer, htable_gc, (unsigned long )hinfo); | ||
262 | hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval); | ||
263 | add_timer(&hinfo->timer); | ||
264 | |||
265 | hlist_add_head(&hinfo->node, &hashlimit_net->htables); | ||
266 | |||
267 | return 0; | ||
268 | } | 207 | } |
208 | static void htable_gc(unsigned long htlong); | ||
269 | 209 | ||
270 | static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo, | 210 | static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo, |
271 | u_int8_t family) | 211 | u_int8_t family) |
@@ -288,10 +228,8 @@ static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo, | |||
288 | /* FIXME: don't use vmalloc() here or anywhere else -HW */ | 228 | /* FIXME: don't use vmalloc() here or anywhere else -HW */ |
289 | hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) + | 229 | hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) + |
290 | sizeof(struct list_head) * size); | 230 | sizeof(struct list_head) * size); |
291 | if (hinfo == NULL) { | 231 | if (hinfo == NULL) |
292 | printk(KERN_ERR "xt_hashlimit: unable to create hashtable\n"); | 232 | return -ENOMEM; |
293 | return -1; | ||
294 | } | ||
295 | minfo->hinfo = hinfo; | 233 | minfo->hinfo = hinfo; |
296 | 234 | ||
297 | /* copy match config into hashtable config */ | 235 | /* copy match config into hashtable config */ |
@@ -317,7 +255,7 @@ static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo, | |||
317 | &dl_file_ops, hinfo); | 255 | &dl_file_ops, hinfo); |
318 | if (hinfo->pde == NULL) { | 256 | if (hinfo->pde == NULL) { |
319 | vfree(hinfo); | 257 | vfree(hinfo); |
320 | return -1; | 258 | return -ENOMEM; |
321 | } | 259 | } |
322 | hinfo->net = net; | 260 | hinfo->net = net; |
323 | 261 | ||
@@ -578,58 +516,7 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo, | |||
578 | } | 516 | } |
579 | 517 | ||
580 | static bool | 518 | static bool |
581 | hashlimit_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par) | 519 | hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) |
582 | { | ||
583 | const struct xt_hashlimit_info *r = par->matchinfo; | ||
584 | struct xt_hashlimit_htable *hinfo = r->hinfo; | ||
585 | unsigned long now = jiffies; | ||
586 | struct dsthash_ent *dh; | ||
587 | struct dsthash_dst dst; | ||
588 | |||
589 | if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0) | ||
590 | goto hotdrop; | ||
591 | |||
592 | spin_lock_bh(&hinfo->lock); | ||
593 | dh = dsthash_find(hinfo, &dst); | ||
594 | if (!dh) { | ||
595 | dh = dsthash_alloc_init(hinfo, &dst); | ||
596 | if (!dh) { | ||
597 | spin_unlock_bh(&hinfo->lock); | ||
598 | goto hotdrop; | ||
599 | } | ||
600 | |||
601 | dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire); | ||
602 | dh->rateinfo.prev = jiffies; | ||
603 | dh->rateinfo.credit = user2credits(hinfo->cfg.avg * | ||
604 | hinfo->cfg.burst); | ||
605 | dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg * | ||
606 | hinfo->cfg.burst); | ||
607 | dh->rateinfo.cost = user2credits(hinfo->cfg.avg); | ||
608 | } else { | ||
609 | /* update expiration timeout */ | ||
610 | dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire); | ||
611 | rateinfo_recalc(dh, now); | ||
612 | } | ||
613 | |||
614 | if (dh->rateinfo.credit >= dh->rateinfo.cost) { | ||
615 | /* We're underlimit. */ | ||
616 | dh->rateinfo.credit -= dh->rateinfo.cost; | ||
617 | spin_unlock_bh(&hinfo->lock); | ||
618 | return true; | ||
619 | } | ||
620 | |||
621 | spin_unlock_bh(&hinfo->lock); | ||
622 | |||
623 | /* default case: we're overlimit, thus don't match */ | ||
624 | return false; | ||
625 | |||
626 | hotdrop: | ||
627 | *par->hotdrop = true; | ||
628 | return false; | ||
629 | } | ||
630 | |||
631 | static bool | ||
632 | hashlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par) | ||
633 | { | 520 | { |
634 | const struct xt_hashlimit_mtinfo1 *info = par->matchinfo; | 521 | const struct xt_hashlimit_mtinfo1 *info = par->matchinfo; |
635 | struct xt_hashlimit_htable *hinfo = info->hinfo; | 522 | struct xt_hashlimit_htable *hinfo = info->hinfo; |
@@ -640,15 +527,14 @@ hashlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
640 | if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0) | 527 | if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0) |
641 | goto hotdrop; | 528 | goto hotdrop; |
642 | 529 | ||
643 | spin_lock_bh(&hinfo->lock); | 530 | rcu_read_lock_bh(); |
644 | dh = dsthash_find(hinfo, &dst); | 531 | dh = dsthash_find(hinfo, &dst); |
645 | if (dh == NULL) { | 532 | if (dh == NULL) { |
646 | dh = dsthash_alloc_init(hinfo, &dst); | 533 | dh = dsthash_alloc_init(hinfo, &dst); |
647 | if (dh == NULL) { | 534 | if (dh == NULL) { |
648 | spin_unlock_bh(&hinfo->lock); | 535 | rcu_read_unlock_bh(); |
649 | goto hotdrop; | 536 | goto hotdrop; |
650 | } | 537 | } |
651 | |||
652 | dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire); | 538 | dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire); |
653 | dh->rateinfo.prev = jiffies; | 539 | dh->rateinfo.prev = jiffies; |
654 | dh->rateinfo.credit = user2credits(hinfo->cfg.avg * | 540 | dh->rateinfo.credit = user2credits(hinfo->cfg.avg * |
@@ -665,96 +551,58 @@ hashlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
665 | if (dh->rateinfo.credit >= dh->rateinfo.cost) { | 551 | if (dh->rateinfo.credit >= dh->rateinfo.cost) { |
666 | /* below the limit */ | 552 | /* below the limit */ |
667 | dh->rateinfo.credit -= dh->rateinfo.cost; | 553 | dh->rateinfo.credit -= dh->rateinfo.cost; |
668 | spin_unlock_bh(&hinfo->lock); | 554 | spin_unlock(&dh->lock); |
555 | rcu_read_unlock_bh(); | ||
669 | return !(info->cfg.mode & XT_HASHLIMIT_INVERT); | 556 | return !(info->cfg.mode & XT_HASHLIMIT_INVERT); |
670 | } | 557 | } |
671 | 558 | ||
672 | spin_unlock_bh(&hinfo->lock); | 559 | spin_unlock(&dh->lock); |
560 | rcu_read_unlock_bh(); | ||
673 | /* default match is underlimit - so over the limit, we need to invert */ | 561 | /* default match is underlimit - so over the limit, we need to invert */ |
674 | return info->cfg.mode & XT_HASHLIMIT_INVERT; | 562 | return info->cfg.mode & XT_HASHLIMIT_INVERT; |
675 | 563 | ||
676 | hotdrop: | 564 | hotdrop: |
677 | *par->hotdrop = true; | 565 | par->hotdrop = true; |
678 | return false; | 566 | return false; |
679 | } | 567 | } |
680 | 568 | ||
681 | static bool hashlimit_mt_check_v0(const struct xt_mtchk_param *par) | 569 | static int hashlimit_mt_check(const struct xt_mtchk_param *par) |
682 | { | ||
683 | struct net *net = par->net; | ||
684 | struct xt_hashlimit_info *r = par->matchinfo; | ||
685 | |||
686 | /* Check for overflow. */ | ||
687 | if (r->cfg.burst == 0 || | ||
688 | user2credits(r->cfg.avg * r->cfg.burst) < user2credits(r->cfg.avg)) { | ||
689 | printk(KERN_ERR "xt_hashlimit: overflow, try lower: %u/%u\n", | ||
690 | r->cfg.avg, r->cfg.burst); | ||
691 | return false; | ||
692 | } | ||
693 | if (r->cfg.mode == 0 || | ||
694 | r->cfg.mode > (XT_HASHLIMIT_HASH_DPT | | ||
695 | XT_HASHLIMIT_HASH_DIP | | ||
696 | XT_HASHLIMIT_HASH_SIP | | ||
697 | XT_HASHLIMIT_HASH_SPT)) | ||
698 | return false; | ||
699 | if (!r->cfg.gc_interval) | ||
700 | return false; | ||
701 | if (!r->cfg.expire) | ||
702 | return false; | ||
703 | if (r->name[sizeof(r->name) - 1] != '\0') | ||
704 | return false; | ||
705 | |||
706 | mutex_lock(&hashlimit_mutex); | ||
707 | r->hinfo = htable_find_get(net, r->name, par->match->family); | ||
708 | if (!r->hinfo && htable_create_v0(net, r, par->match->family) != 0) { | ||
709 | mutex_unlock(&hashlimit_mutex); | ||
710 | return false; | ||
711 | } | ||
712 | mutex_unlock(&hashlimit_mutex); | ||
713 | |||
714 | return true; | ||
715 | } | ||
716 | |||
717 | static bool hashlimit_mt_check(const struct xt_mtchk_param *par) | ||
718 | { | 570 | { |
719 | struct net *net = par->net; | 571 | struct net *net = par->net; |
720 | struct xt_hashlimit_mtinfo1 *info = par->matchinfo; | 572 | struct xt_hashlimit_mtinfo1 *info = par->matchinfo; |
573 | int ret; | ||
721 | 574 | ||
722 | /* Check for overflow. */ | 575 | /* Check for overflow. */ |
723 | if (info->cfg.burst == 0 || | 576 | if (info->cfg.burst == 0 || |
724 | user2credits(info->cfg.avg * info->cfg.burst) < | 577 | user2credits(info->cfg.avg * info->cfg.burst) < |
725 | user2credits(info->cfg.avg)) { | 578 | user2credits(info->cfg.avg)) { |
726 | printk(KERN_ERR "xt_hashlimit: overflow, try lower: %u/%u\n", | 579 | pr_info("overflow, try lower: %u/%u\n", |
727 | info->cfg.avg, info->cfg.burst); | 580 | info->cfg.avg, info->cfg.burst); |
728 | return false; | 581 | return -ERANGE; |
729 | } | 582 | } |
730 | if (info->cfg.gc_interval == 0 || info->cfg.expire == 0) | 583 | if (info->cfg.gc_interval == 0 || info->cfg.expire == 0) |
731 | return false; | 584 | return -EINVAL; |
732 | if (info->name[sizeof(info->name)-1] != '\0') | 585 | if (info->name[sizeof(info->name)-1] != '\0') |
733 | return false; | 586 | return -EINVAL; |
734 | if (par->match->family == NFPROTO_IPV4) { | 587 | if (par->family == NFPROTO_IPV4) { |
735 | if (info->cfg.srcmask > 32 || info->cfg.dstmask > 32) | 588 | if (info->cfg.srcmask > 32 || info->cfg.dstmask > 32) |
736 | return false; | 589 | return -EINVAL; |
737 | } else { | 590 | } else { |
738 | if (info->cfg.srcmask > 128 || info->cfg.dstmask > 128) | 591 | if (info->cfg.srcmask > 128 || info->cfg.dstmask > 128) |
739 | return false; | 592 | return -EINVAL; |
740 | } | 593 | } |
741 | 594 | ||
742 | mutex_lock(&hashlimit_mutex); | 595 | mutex_lock(&hashlimit_mutex); |
743 | info->hinfo = htable_find_get(net, info->name, par->match->family); | 596 | info->hinfo = htable_find_get(net, info->name, par->family); |
744 | if (!info->hinfo && htable_create(net, info, par->match->family) != 0) { | 597 | if (info->hinfo == NULL) { |
745 | mutex_unlock(&hashlimit_mutex); | 598 | ret = htable_create(net, info, par->family); |
746 | return false; | 599 | if (ret < 0) { |
600 | mutex_unlock(&hashlimit_mutex); | ||
601 | return ret; | ||
602 | } | ||
747 | } | 603 | } |
748 | mutex_unlock(&hashlimit_mutex); | 604 | mutex_unlock(&hashlimit_mutex); |
749 | return true; | 605 | return 0; |
750 | } | ||
751 | |||
752 | static void | ||
753 | hashlimit_mt_destroy_v0(const struct xt_mtdtor_param *par) | ||
754 | { | ||
755 | const struct xt_hashlimit_info *r = par->matchinfo; | ||
756 | |||
757 | htable_put(r->hinfo); | ||
758 | } | 606 | } |
759 | 607 | ||
760 | static void hashlimit_mt_destroy(const struct xt_mtdtor_param *par) | 608 | static void hashlimit_mt_destroy(const struct xt_mtdtor_param *par) |
@@ -764,47 +612,8 @@ static void hashlimit_mt_destroy(const struct xt_mtdtor_param *par) | |||
764 | htable_put(info->hinfo); | 612 | htable_put(info->hinfo); |
765 | } | 613 | } |
766 | 614 | ||
767 | #ifdef CONFIG_COMPAT | ||
768 | struct compat_xt_hashlimit_info { | ||
769 | char name[IFNAMSIZ]; | ||
770 | struct hashlimit_cfg cfg; | ||
771 | compat_uptr_t hinfo; | ||
772 | compat_uptr_t master; | ||
773 | }; | ||
774 | |||
775 | static void hashlimit_mt_compat_from_user(void *dst, const void *src) | ||
776 | { | ||
777 | int off = offsetof(struct compat_xt_hashlimit_info, hinfo); | ||
778 | |||
779 | memcpy(dst, src, off); | ||
780 | memset(dst + off, 0, sizeof(struct compat_xt_hashlimit_info) - off); | ||
781 | } | ||
782 | |||
783 | static int hashlimit_mt_compat_to_user(void __user *dst, const void *src) | ||
784 | { | ||
785 | int off = offsetof(struct compat_xt_hashlimit_info, hinfo); | ||
786 | |||
787 | return copy_to_user(dst, src, off) ? -EFAULT : 0; | ||
788 | } | ||
789 | #endif | ||
790 | |||
791 | static struct xt_match hashlimit_mt_reg[] __read_mostly = { | 615 | static struct xt_match hashlimit_mt_reg[] __read_mostly = { |
792 | { | 616 | { |
793 | .name = "hashlimit", | ||
794 | .revision = 0, | ||
795 | .family = NFPROTO_IPV4, | ||
796 | .match = hashlimit_mt_v0, | ||
797 | .matchsize = sizeof(struct xt_hashlimit_info), | ||
798 | #ifdef CONFIG_COMPAT | ||
799 | .compatsize = sizeof(struct compat_xt_hashlimit_info), | ||
800 | .compat_from_user = hashlimit_mt_compat_from_user, | ||
801 | .compat_to_user = hashlimit_mt_compat_to_user, | ||
802 | #endif | ||
803 | .checkentry = hashlimit_mt_check_v0, | ||
804 | .destroy = hashlimit_mt_destroy_v0, | ||
805 | .me = THIS_MODULE | ||
806 | }, | ||
807 | { | ||
808 | .name = "hashlimit", | 617 | .name = "hashlimit", |
809 | .revision = 1, | 618 | .revision = 1, |
810 | .family = NFPROTO_IPV4, | 619 | .family = NFPROTO_IPV4, |
@@ -816,20 +625,6 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = { | |||
816 | }, | 625 | }, |
817 | #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) | 626 | #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) |
818 | { | 627 | { |
819 | .name = "hashlimit", | ||
820 | .family = NFPROTO_IPV6, | ||
821 | .match = hashlimit_mt_v0, | ||
822 | .matchsize = sizeof(struct xt_hashlimit_info), | ||
823 | #ifdef CONFIG_COMPAT | ||
824 | .compatsize = sizeof(struct compat_xt_hashlimit_info), | ||
825 | .compat_from_user = hashlimit_mt_compat_from_user, | ||
826 | .compat_to_user = hashlimit_mt_compat_to_user, | ||
827 | #endif | ||
828 | .checkentry = hashlimit_mt_check_v0, | ||
829 | .destroy = hashlimit_mt_destroy_v0, | ||
830 | .me = THIS_MODULE | ||
831 | }, | ||
832 | { | ||
833 | .name = "hashlimit", | 628 | .name = "hashlimit", |
834 | .revision = 1, | 629 | .revision = 1, |
835 | .family = NFPROTO_IPV6, | 630 | .family = NFPROTO_IPV6, |
@@ -888,12 +683,15 @@ static void dl_seq_stop(struct seq_file *s, void *v) | |||
888 | static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, | 683 | static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, |
889 | struct seq_file *s) | 684 | struct seq_file *s) |
890 | { | 685 | { |
686 | int res; | ||
687 | |||
688 | spin_lock(&ent->lock); | ||
891 | /* recalculate to show accurate numbers */ | 689 | /* recalculate to show accurate numbers */ |
892 | rateinfo_recalc(ent, jiffies); | 690 | rateinfo_recalc(ent, jiffies); |
893 | 691 | ||
894 | switch (family) { | 692 | switch (family) { |
895 | case NFPROTO_IPV4: | 693 | case NFPROTO_IPV4: |
896 | return seq_printf(s, "%ld %pI4:%u->%pI4:%u %u %u %u\n", | 694 | res = seq_printf(s, "%ld %pI4:%u->%pI4:%u %u %u %u\n", |
897 | (long)(ent->expires - jiffies)/HZ, | 695 | (long)(ent->expires - jiffies)/HZ, |
898 | &ent->dst.ip.src, | 696 | &ent->dst.ip.src, |
899 | ntohs(ent->dst.src_port), | 697 | ntohs(ent->dst.src_port), |
@@ -901,9 +699,10 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, | |||
901 | ntohs(ent->dst.dst_port), | 699 | ntohs(ent->dst.dst_port), |
902 | ent->rateinfo.credit, ent->rateinfo.credit_cap, | 700 | ent->rateinfo.credit, ent->rateinfo.credit_cap, |
903 | ent->rateinfo.cost); | 701 | ent->rateinfo.cost); |
702 | break; | ||
904 | #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) | 703 | #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) |
905 | case NFPROTO_IPV6: | 704 | case NFPROTO_IPV6: |
906 | return seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n", | 705 | res = seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n", |
907 | (long)(ent->expires - jiffies)/HZ, | 706 | (long)(ent->expires - jiffies)/HZ, |
908 | &ent->dst.ip6.src, | 707 | &ent->dst.ip6.src, |
909 | ntohs(ent->dst.src_port), | 708 | ntohs(ent->dst.src_port), |
@@ -911,11 +710,14 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, | |||
911 | ntohs(ent->dst.dst_port), | 710 | ntohs(ent->dst.dst_port), |
912 | ent->rateinfo.credit, ent->rateinfo.credit_cap, | 711 | ent->rateinfo.credit, ent->rateinfo.credit_cap, |
913 | ent->rateinfo.cost); | 712 | ent->rateinfo.cost); |
713 | break; | ||
914 | #endif | 714 | #endif |
915 | default: | 715 | default: |
916 | BUG(); | 716 | BUG(); |
917 | return 0; | 717 | res = 0; |
918 | } | 718 | } |
719 | spin_unlock(&ent->lock); | ||
720 | return res; | ||
919 | } | 721 | } |
920 | 722 | ||
921 | static int dl_seq_show(struct seq_file *s, void *v) | 723 | static int dl_seq_show(struct seq_file *s, void *v) |
@@ -1024,7 +826,7 @@ static int __init hashlimit_mt_init(void) | |||
1024 | sizeof(struct dsthash_ent), 0, 0, | 826 | sizeof(struct dsthash_ent), 0, 0, |
1025 | NULL); | 827 | NULL); |
1026 | if (!hashlimit_cachep) { | 828 | if (!hashlimit_cachep) { |
1027 | printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n"); | 829 | pr_warning("unable to create slab cache\n"); |
1028 | goto err2; | 830 | goto err2; |
1029 | } | 831 | } |
1030 | return 0; | 832 | return 0; |
@@ -1039,9 +841,11 @@ err1: | |||
1039 | 841 | ||
1040 | static void __exit hashlimit_mt_exit(void) | 842 | static void __exit hashlimit_mt_exit(void) |
1041 | { | 843 | { |
1042 | kmem_cache_destroy(hashlimit_cachep); | ||
1043 | xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg)); | 844 | xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg)); |
1044 | unregister_pernet_subsys(&hashlimit_net_ops); | 845 | unregister_pernet_subsys(&hashlimit_net_ops); |
846 | |||
847 | rcu_barrier_bh(); | ||
848 | kmem_cache_destroy(hashlimit_cachep); | ||
1045 | } | 849 | } |
1046 | 850 | ||
1047 | module_init(hashlimit_mt_init); | 851 | module_init(hashlimit_mt_init); |
diff --git a/net/netfilter/xt_helper.c b/net/netfilter/xt_helper.c index 64fc7f277221..9f4ab00c8050 100644 --- a/net/netfilter/xt_helper.c +++ b/net/netfilter/xt_helper.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | 9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/skbuff.h> | 11 | #include <linux/skbuff.h> |
12 | #include <linux/netfilter.h> | 12 | #include <linux/netfilter.h> |
@@ -24,7 +24,7 @@ MODULE_ALIAS("ip6t_helper"); | |||
24 | 24 | ||
25 | 25 | ||
26 | static bool | 26 | static bool |
27 | helper_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 27 | helper_mt(const struct sk_buff *skb, struct xt_action_param *par) |
28 | { | 28 | { |
29 | const struct xt_helper_info *info = par->matchinfo; | 29 | const struct xt_helper_info *info = par->matchinfo; |
30 | const struct nf_conn *ct; | 30 | const struct nf_conn *ct; |
@@ -54,17 +54,19 @@ helper_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
54 | return ret; | 54 | return ret; |
55 | } | 55 | } |
56 | 56 | ||
57 | static bool helper_mt_check(const struct xt_mtchk_param *par) | 57 | static int helper_mt_check(const struct xt_mtchk_param *par) |
58 | { | 58 | { |
59 | struct xt_helper_info *info = par->matchinfo; | 59 | struct xt_helper_info *info = par->matchinfo; |
60 | int ret; | ||
60 | 61 | ||
61 | if (nf_ct_l3proto_try_module_get(par->family) < 0) { | 62 | ret = nf_ct_l3proto_try_module_get(par->family); |
62 | printk(KERN_WARNING "can't load conntrack support for " | 63 | if (ret < 0) { |
63 | "proto=%u\n", par->family); | 64 | pr_info("cannot load conntrack support for proto=%u\n", |
64 | return false; | 65 | par->family); |
66 | return ret; | ||
65 | } | 67 | } |
66 | info->name[29] = '\0'; | 68 | info->name[29] = '\0'; |
67 | return true; | 69 | return 0; |
68 | } | 70 | } |
69 | 71 | ||
70 | static void helper_mt_destroy(const struct xt_mtdtor_param *par) | 72 | static void helper_mt_destroy(const struct xt_mtdtor_param *par) |
diff --git a/net/netfilter/xt_hl.c b/net/netfilter/xt_hl.c index 7726154c87b2..7d12221ead89 100644 --- a/net/netfilter/xt_hl.c +++ b/net/netfilter/xt_hl.c | |||
@@ -25,7 +25,7 @@ MODULE_LICENSE("GPL"); | |||
25 | MODULE_ALIAS("ipt_ttl"); | 25 | MODULE_ALIAS("ipt_ttl"); |
26 | MODULE_ALIAS("ip6t_hl"); | 26 | MODULE_ALIAS("ip6t_hl"); |
27 | 27 | ||
28 | static bool ttl_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 28 | static bool ttl_mt(const struct sk_buff *skb, struct xt_action_param *par) |
29 | { | 29 | { |
30 | const struct ipt_ttl_info *info = par->matchinfo; | 30 | const struct ipt_ttl_info *info = par->matchinfo; |
31 | const u8 ttl = ip_hdr(skb)->ttl; | 31 | const u8 ttl = ip_hdr(skb)->ttl; |
@@ -39,16 +39,12 @@ static bool ttl_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
39 | return ttl < info->ttl; | 39 | return ttl < info->ttl; |
40 | case IPT_TTL_GT: | 40 | case IPT_TTL_GT: |
41 | return ttl > info->ttl; | 41 | return ttl > info->ttl; |
42 | default: | ||
43 | printk(KERN_WARNING "ipt_ttl: unknown mode %d\n", | ||
44 | info->mode); | ||
45 | return false; | ||
46 | } | 42 | } |
47 | 43 | ||
48 | return false; | 44 | return false; |
49 | } | 45 | } |
50 | 46 | ||
51 | static bool hl_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | 47 | static bool hl_mt6(const struct sk_buff *skb, struct xt_action_param *par) |
52 | { | 48 | { |
53 | const struct ip6t_hl_info *info = par->matchinfo; | 49 | const struct ip6t_hl_info *info = par->matchinfo; |
54 | const struct ipv6hdr *ip6h = ipv6_hdr(skb); | 50 | const struct ipv6hdr *ip6h = ipv6_hdr(skb); |
@@ -56,20 +52,12 @@ static bool hl_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | |||
56 | switch (info->mode) { | 52 | switch (info->mode) { |
57 | case IP6T_HL_EQ: | 53 | case IP6T_HL_EQ: |
58 | return ip6h->hop_limit == info->hop_limit; | 54 | return ip6h->hop_limit == info->hop_limit; |
59 | break; | ||
60 | case IP6T_HL_NE: | 55 | case IP6T_HL_NE: |
61 | return ip6h->hop_limit != info->hop_limit; | 56 | return ip6h->hop_limit != info->hop_limit; |
62 | break; | ||
63 | case IP6T_HL_LT: | 57 | case IP6T_HL_LT: |
64 | return ip6h->hop_limit < info->hop_limit; | 58 | return ip6h->hop_limit < info->hop_limit; |
65 | break; | ||
66 | case IP6T_HL_GT: | 59 | case IP6T_HL_GT: |
67 | return ip6h->hop_limit > info->hop_limit; | 60 | return ip6h->hop_limit > info->hop_limit; |
68 | break; | ||
69 | default: | ||
70 | printk(KERN_WARNING "ip6t_hl: unknown mode %d\n", | ||
71 | info->mode); | ||
72 | return false; | ||
73 | } | 61 | } |
74 | 62 | ||
75 | return false; | 63 | return false; |
diff --git a/net/netfilter/xt_iprange.c b/net/netfilter/xt_iprange.c index ffc96387d556..88f7c3511c72 100644 --- a/net/netfilter/xt_iprange.c +++ b/net/netfilter/xt_iprange.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
11 | #include <linux/module.h> | 12 | #include <linux/module.h> |
12 | #include <linux/skbuff.h> | 13 | #include <linux/skbuff.h> |
13 | #include <linux/ip.h> | 14 | #include <linux/ip.h> |
@@ -16,7 +17,7 @@ | |||
16 | #include <linux/netfilter/xt_iprange.h> | 17 | #include <linux/netfilter/xt_iprange.h> |
17 | 18 | ||
18 | static bool | 19 | static bool |
19 | iprange_mt4(const struct sk_buff *skb, const struct xt_match_param *par) | 20 | iprange_mt4(const struct sk_buff *skb, struct xt_action_param *par) |
20 | { | 21 | { |
21 | const struct xt_iprange_mtinfo *info = par->matchinfo; | 22 | const struct xt_iprange_mtinfo *info = par->matchinfo; |
22 | const struct iphdr *iph = ip_hdr(skb); | 23 | const struct iphdr *iph = ip_hdr(skb); |
@@ -67,7 +68,7 @@ iprange_ipv6_sub(const struct in6_addr *a, const struct in6_addr *b) | |||
67 | } | 68 | } |
68 | 69 | ||
69 | static bool | 70 | static bool |
70 | iprange_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | 71 | iprange_mt6(const struct sk_buff *skb, struct xt_action_param *par) |
71 | { | 72 | { |
72 | const struct xt_iprange_mtinfo *info = par->matchinfo; | 73 | const struct xt_iprange_mtinfo *info = par->matchinfo; |
73 | const struct ipv6hdr *iph = ipv6_hdr(skb); | 74 | const struct ipv6hdr *iph = ipv6_hdr(skb); |
diff --git a/net/netfilter/xt_length.c b/net/netfilter/xt_length.c index c4871ca6c86d..176e5570a999 100644 --- a/net/netfilter/xt_length.c +++ b/net/netfilter/xt_length.c | |||
@@ -21,7 +21,7 @@ MODULE_ALIAS("ipt_length"); | |||
21 | MODULE_ALIAS("ip6t_length"); | 21 | MODULE_ALIAS("ip6t_length"); |
22 | 22 | ||
23 | static bool | 23 | static bool |
24 | length_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 24 | length_mt(const struct sk_buff *skb, struct xt_action_param *par) |
25 | { | 25 | { |
26 | const struct xt_length_info *info = par->matchinfo; | 26 | const struct xt_length_info *info = par->matchinfo; |
27 | u_int16_t pktlen = ntohs(ip_hdr(skb)->tot_len); | 27 | u_int16_t pktlen = ntohs(ip_hdr(skb)->tot_len); |
@@ -30,7 +30,7 @@ length_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
30 | } | 30 | } |
31 | 31 | ||
32 | static bool | 32 | static bool |
33 | length_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | 33 | length_mt6(const struct sk_buff *skb, struct xt_action_param *par) |
34 | { | 34 | { |
35 | const struct xt_length_info *info = par->matchinfo; | 35 | const struct xt_length_info *info = par->matchinfo; |
36 | const u_int16_t pktlen = ntohs(ipv6_hdr(skb)->payload_len) + | 36 | const u_int16_t pktlen = ntohs(ipv6_hdr(skb)->payload_len) + |
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c index e5d7e1ffb1a4..32b7a579a032 100644 --- a/net/netfilter/xt_limit.c +++ b/net/netfilter/xt_limit.c | |||
@@ -5,6 +5,7 @@ | |||
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | */ | 7 | */ |
8 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
8 | 9 | ||
9 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
10 | #include <linux/module.h> | 11 | #include <linux/module.h> |
@@ -64,7 +65,7 @@ static DEFINE_SPINLOCK(limit_lock); | |||
64 | #define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ) | 65 | #define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ) |
65 | 66 | ||
66 | static bool | 67 | static bool |
67 | limit_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 68 | limit_mt(const struct sk_buff *skb, struct xt_action_param *par) |
68 | { | 69 | { |
69 | const struct xt_rateinfo *r = par->matchinfo; | 70 | const struct xt_rateinfo *r = par->matchinfo; |
70 | struct xt_limit_priv *priv = r->master; | 71 | struct xt_limit_priv *priv = r->master; |
@@ -98,7 +99,7 @@ user2credits(u_int32_t user) | |||
98 | return (user * HZ * CREDITS_PER_JIFFY) / XT_LIMIT_SCALE; | 99 | return (user * HZ * CREDITS_PER_JIFFY) / XT_LIMIT_SCALE; |
99 | } | 100 | } |
100 | 101 | ||
101 | static bool limit_mt_check(const struct xt_mtchk_param *par) | 102 | static int limit_mt_check(const struct xt_mtchk_param *par) |
102 | { | 103 | { |
103 | struct xt_rateinfo *r = par->matchinfo; | 104 | struct xt_rateinfo *r = par->matchinfo; |
104 | struct xt_limit_priv *priv; | 105 | struct xt_limit_priv *priv; |
@@ -106,14 +107,14 @@ static bool limit_mt_check(const struct xt_mtchk_param *par) | |||
106 | /* Check for overflow. */ | 107 | /* Check for overflow. */ |
107 | if (r->burst == 0 | 108 | if (r->burst == 0 |
108 | || user2credits(r->avg * r->burst) < user2credits(r->avg)) { | 109 | || user2credits(r->avg * r->burst) < user2credits(r->avg)) { |
109 | printk("Overflow in xt_limit, try lower: %u/%u\n", | 110 | pr_info("Overflow, try lower: %u/%u\n", |
110 | r->avg, r->burst); | 111 | r->avg, r->burst); |
111 | return false; | 112 | return -ERANGE; |
112 | } | 113 | } |
113 | 114 | ||
114 | priv = kmalloc(sizeof(*priv), GFP_KERNEL); | 115 | priv = kmalloc(sizeof(*priv), GFP_KERNEL); |
115 | if (priv == NULL) | 116 | if (priv == NULL) |
116 | return false; | 117 | return -ENOMEM; |
117 | 118 | ||
118 | /* For SMP, we only want to use one set of state. */ | 119 | /* For SMP, we only want to use one set of state. */ |
119 | r->master = priv; | 120 | r->master = priv; |
@@ -125,7 +126,7 @@ static bool limit_mt_check(const struct xt_mtchk_param *par) | |||
125 | r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */ | 126 | r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */ |
126 | r->cost = user2credits(r->avg); | 127 | r->cost = user2credits(r->avg); |
127 | } | 128 | } |
128 | return true; | 129 | return 0; |
129 | } | 130 | } |
130 | 131 | ||
131 | static void limit_mt_destroy(const struct xt_mtdtor_param *par) | 132 | static void limit_mt_destroy(const struct xt_mtdtor_param *par) |
diff --git a/net/netfilter/xt_mac.c b/net/netfilter/xt_mac.c index c2007116ce5b..8160f6b1435d 100644 --- a/net/netfilter/xt_mac.c +++ b/net/netfilter/xt_mac.c | |||
@@ -10,6 +10,7 @@ | |||
10 | 10 | ||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/skbuff.h> | 12 | #include <linux/skbuff.h> |
13 | #include <linux/if_arp.h> | ||
13 | #include <linux/if_ether.h> | 14 | #include <linux/if_ether.h> |
14 | #include <linux/etherdevice.h> | 15 | #include <linux/etherdevice.h> |
15 | 16 | ||
@@ -24,16 +25,20 @@ MODULE_DESCRIPTION("Xtables: MAC address match"); | |||
24 | MODULE_ALIAS("ipt_mac"); | 25 | MODULE_ALIAS("ipt_mac"); |
25 | MODULE_ALIAS("ip6t_mac"); | 26 | MODULE_ALIAS("ip6t_mac"); |
26 | 27 | ||
27 | static bool mac_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 28 | static bool mac_mt(const struct sk_buff *skb, struct xt_action_param *par) |
28 | { | 29 | { |
29 | const struct xt_mac_info *info = par->matchinfo; | 30 | const struct xt_mac_info *info = par->matchinfo; |
30 | 31 | bool ret; | |
31 | /* Is mac pointer valid? */ | 32 | |
32 | return skb_mac_header(skb) >= skb->head && | 33 | if (skb->dev == NULL || skb->dev->type != ARPHRD_ETHER) |
33 | skb_mac_header(skb) + ETH_HLEN <= skb->data | 34 | return false; |
34 | /* If so, compare... */ | 35 | if (skb_mac_header(skb) < skb->head) |
35 | && ((!compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr)) | 36 | return false; |
36 | ^ info->invert); | 37 | if (skb_mac_header(skb) + ETH_HLEN > skb->data) |
38 | return false; | ||
39 | ret = compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr) == 0; | ||
40 | ret ^= info->invert; | ||
41 | return ret; | ||
37 | } | 42 | } |
38 | 43 | ||
39 | static struct xt_match mac_mt_reg __read_mostly = { | 44 | static struct xt_match mac_mt_reg __read_mostly = { |
diff --git a/net/netfilter/xt_mark.c b/net/netfilter/xt_mark.c index 1db07d8125f8..23345238711b 100644 --- a/net/netfilter/xt_mark.c +++ b/net/netfilter/xt_mark.c | |||
@@ -18,18 +18,38 @@ | |||
18 | 18 | ||
19 | MODULE_LICENSE("GPL"); | 19 | MODULE_LICENSE("GPL"); |
20 | MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>"); | 20 | MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>"); |
21 | MODULE_DESCRIPTION("Xtables: packet mark match"); | 21 | MODULE_DESCRIPTION("Xtables: packet mark operations"); |
22 | MODULE_ALIAS("ipt_mark"); | 22 | MODULE_ALIAS("ipt_mark"); |
23 | MODULE_ALIAS("ip6t_mark"); | 23 | MODULE_ALIAS("ip6t_mark"); |
24 | MODULE_ALIAS("ipt_MARK"); | ||
25 | MODULE_ALIAS("ip6t_MARK"); | ||
26 | |||
27 | static unsigned int | ||
28 | mark_tg(struct sk_buff *skb, const struct xt_action_param *par) | ||
29 | { | ||
30 | const struct xt_mark_tginfo2 *info = par->targinfo; | ||
31 | |||
32 | skb->mark = (skb->mark & ~info->mask) ^ info->mark; | ||
33 | return XT_CONTINUE; | ||
34 | } | ||
24 | 35 | ||
25 | static bool | 36 | static bool |
26 | mark_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 37 | mark_mt(const struct sk_buff *skb, struct xt_action_param *par) |
27 | { | 38 | { |
28 | const struct xt_mark_mtinfo1 *info = par->matchinfo; | 39 | const struct xt_mark_mtinfo1 *info = par->matchinfo; |
29 | 40 | ||
30 | return ((skb->mark & info->mask) == info->mark) ^ info->invert; | 41 | return ((skb->mark & info->mask) == info->mark) ^ info->invert; |
31 | } | 42 | } |
32 | 43 | ||
44 | static struct xt_target mark_tg_reg __read_mostly = { | ||
45 | .name = "MARK", | ||
46 | .revision = 2, | ||
47 | .family = NFPROTO_UNSPEC, | ||
48 | .target = mark_tg, | ||
49 | .targetsize = sizeof(struct xt_mark_tginfo2), | ||
50 | .me = THIS_MODULE, | ||
51 | }; | ||
52 | |||
33 | static struct xt_match mark_mt_reg __read_mostly = { | 53 | static struct xt_match mark_mt_reg __read_mostly = { |
34 | .name = "mark", | 54 | .name = "mark", |
35 | .revision = 1, | 55 | .revision = 1, |
@@ -41,12 +61,23 @@ static struct xt_match mark_mt_reg __read_mostly = { | |||
41 | 61 | ||
42 | static int __init mark_mt_init(void) | 62 | static int __init mark_mt_init(void) |
43 | { | 63 | { |
44 | return xt_register_match(&mark_mt_reg); | 64 | int ret; |
65 | |||
66 | ret = xt_register_target(&mark_tg_reg); | ||
67 | if (ret < 0) | ||
68 | return ret; | ||
69 | ret = xt_register_match(&mark_mt_reg); | ||
70 | if (ret < 0) { | ||
71 | xt_unregister_target(&mark_tg_reg); | ||
72 | return ret; | ||
73 | } | ||
74 | return 0; | ||
45 | } | 75 | } |
46 | 76 | ||
47 | static void __exit mark_mt_exit(void) | 77 | static void __exit mark_mt_exit(void) |
48 | { | 78 | { |
49 | xt_unregister_match(&mark_mt_reg); | 79 | xt_unregister_match(&mark_mt_reg); |
80 | xt_unregister_target(&mark_tg_reg); | ||
50 | } | 81 | } |
51 | 82 | ||
52 | module_init(mark_mt_init); | 83 | module_init(mark_mt_init); |
diff --git a/net/netfilter/xt_multiport.c b/net/netfilter/xt_multiport.c index d06bb2dd3900..ac1d3c3d09e7 100644 --- a/net/netfilter/xt_multiport.c +++ b/net/netfilter/xt_multiport.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/udp.h> | 14 | #include <linux/udp.h> |
@@ -26,29 +26,6 @@ MODULE_DESCRIPTION("Xtables: multiple port matching for TCP, UDP, UDP-Lite, SCTP | |||
26 | MODULE_ALIAS("ipt_multiport"); | 26 | MODULE_ALIAS("ipt_multiport"); |
27 | MODULE_ALIAS("ip6t_multiport"); | 27 | MODULE_ALIAS("ip6t_multiport"); |
28 | 28 | ||
29 | #if 0 | ||
30 | #define duprintf(format, args...) printk(format , ## args) | ||
31 | #else | ||
32 | #define duprintf(format, args...) | ||
33 | #endif | ||
34 | |||
35 | /* Returns 1 if the port is matched by the test, 0 otherwise. */ | ||
36 | static inline bool | ||
37 | ports_match_v0(const u_int16_t *portlist, enum xt_multiport_flags flags, | ||
38 | u_int8_t count, u_int16_t src, u_int16_t dst) | ||
39 | { | ||
40 | unsigned int i; | ||
41 | for (i = 0; i < count; i++) { | ||
42 | if (flags != XT_MULTIPORT_DESTINATION && portlist[i] == src) | ||
43 | return true; | ||
44 | |||
45 | if (flags != XT_MULTIPORT_SOURCE && portlist[i] == dst) | ||
46 | return true; | ||
47 | } | ||
48 | |||
49 | return false; | ||
50 | } | ||
51 | |||
52 | /* Returns 1 if the port is matched by the test, 0 otherwise. */ | 29 | /* Returns 1 if the port is matched by the test, 0 otherwise. */ |
53 | static inline bool | 30 | static inline bool |
54 | ports_match_v1(const struct xt_multiport_v1 *minfo, | 31 | ports_match_v1(const struct xt_multiport_v1 *minfo, |
@@ -63,7 +40,7 @@ ports_match_v1(const struct xt_multiport_v1 *minfo, | |||
63 | if (minfo->pflags[i]) { | 40 | if (minfo->pflags[i]) { |
64 | /* range port matching */ | 41 | /* range port matching */ |
65 | e = minfo->ports[++i]; | 42 | e = minfo->ports[++i]; |
66 | duprintf("src or dst matches with %d-%d?\n", s, e); | 43 | pr_debug("src or dst matches with %d-%d?\n", s, e); |
67 | 44 | ||
68 | if (minfo->flags == XT_MULTIPORT_SOURCE | 45 | if (minfo->flags == XT_MULTIPORT_SOURCE |
69 | && src >= s && src <= e) | 46 | && src >= s && src <= e) |
@@ -77,7 +54,7 @@ ports_match_v1(const struct xt_multiport_v1 *minfo, | |||
77 | return true ^ minfo->invert; | 54 | return true ^ minfo->invert; |
78 | } else { | 55 | } else { |
79 | /* exact port matching */ | 56 | /* exact port matching */ |
80 | duprintf("src or dst matches with %d?\n", s); | 57 | pr_debug("src or dst matches with %d?\n", s); |
81 | 58 | ||
82 | if (minfo->flags == XT_MULTIPORT_SOURCE | 59 | if (minfo->flags == XT_MULTIPORT_SOURCE |
83 | && src == s) | 60 | && src == s) |
@@ -95,31 +72,7 @@ ports_match_v1(const struct xt_multiport_v1 *minfo, | |||
95 | } | 72 | } |
96 | 73 | ||
97 | static bool | 74 | static bool |
98 | multiport_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par) | 75 | multiport_mt(const struct sk_buff *skb, struct xt_action_param *par) |
99 | { | ||
100 | const __be16 *pptr; | ||
101 | __be16 _ports[2]; | ||
102 | const struct xt_multiport *multiinfo = par->matchinfo; | ||
103 | |||
104 | if (par->fragoff != 0) | ||
105 | return false; | ||
106 | |||
107 | pptr = skb_header_pointer(skb, par->thoff, sizeof(_ports), _ports); | ||
108 | if (pptr == NULL) { | ||
109 | /* We've been asked to examine this packet, and we | ||
110 | * can't. Hence, no choice but to drop. | ||
111 | */ | ||
112 | duprintf("xt_multiport: Dropping evil offset=0 tinygram.\n"); | ||
113 | *par->hotdrop = true; | ||
114 | return false; | ||
115 | } | ||
116 | |||
117 | return ports_match_v0(multiinfo->ports, multiinfo->flags, | ||
118 | multiinfo->count, ntohs(pptr[0]), ntohs(pptr[1])); | ||
119 | } | ||
120 | |||
121 | static bool | ||
122 | multiport_mt(const struct sk_buff *skb, const struct xt_match_param *par) | ||
123 | { | 76 | { |
124 | const __be16 *pptr; | 77 | const __be16 *pptr; |
125 | __be16 _ports[2]; | 78 | __be16 _ports[2]; |
@@ -133,8 +86,8 @@ multiport_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
133 | /* We've been asked to examine this packet, and we | 86 | /* We've been asked to examine this packet, and we |
134 | * can't. Hence, no choice but to drop. | 87 | * can't. Hence, no choice but to drop. |
135 | */ | 88 | */ |
136 | duprintf("xt_multiport: Dropping evil offset=0 tinygram.\n"); | 89 | pr_debug("Dropping evil offset=0 tinygram.\n"); |
137 | *par->hotdrop = true; | 90 | par->hotdrop = true; |
138 | return false; | 91 | return false; |
139 | } | 92 | } |
140 | 93 | ||
@@ -158,55 +111,28 @@ check(u_int16_t proto, | |||
158 | && count <= XT_MULTI_PORTS; | 111 | && count <= XT_MULTI_PORTS; |
159 | } | 112 | } |
160 | 113 | ||
161 | static bool multiport_mt_check_v0(const struct xt_mtchk_param *par) | 114 | static int multiport_mt_check(const struct xt_mtchk_param *par) |
162 | { | ||
163 | const struct ipt_ip *ip = par->entryinfo; | ||
164 | const struct xt_multiport *multiinfo = par->matchinfo; | ||
165 | |||
166 | return check(ip->proto, ip->invflags, multiinfo->flags, | ||
167 | multiinfo->count); | ||
168 | } | ||
169 | |||
170 | static bool multiport_mt_check(const struct xt_mtchk_param *par) | ||
171 | { | 115 | { |
172 | const struct ipt_ip *ip = par->entryinfo; | 116 | const struct ipt_ip *ip = par->entryinfo; |
173 | const struct xt_multiport_v1 *multiinfo = par->matchinfo; | 117 | const struct xt_multiport_v1 *multiinfo = par->matchinfo; |
174 | 118 | ||
175 | return check(ip->proto, ip->invflags, multiinfo->flags, | 119 | return check(ip->proto, ip->invflags, multiinfo->flags, |
176 | multiinfo->count); | 120 | multiinfo->count) ? 0 : -EINVAL; |
177 | } | 121 | } |
178 | 122 | ||
179 | static bool multiport_mt6_check_v0(const struct xt_mtchk_param *par) | 123 | static int multiport_mt6_check(const struct xt_mtchk_param *par) |
180 | { | ||
181 | const struct ip6t_ip6 *ip = par->entryinfo; | ||
182 | const struct xt_multiport *multiinfo = par->matchinfo; | ||
183 | |||
184 | return check(ip->proto, ip->invflags, multiinfo->flags, | ||
185 | multiinfo->count); | ||
186 | } | ||
187 | |||
188 | static bool multiport_mt6_check(const struct xt_mtchk_param *par) | ||
189 | { | 124 | { |
190 | const struct ip6t_ip6 *ip = par->entryinfo; | 125 | const struct ip6t_ip6 *ip = par->entryinfo; |
191 | const struct xt_multiport_v1 *multiinfo = par->matchinfo; | 126 | const struct xt_multiport_v1 *multiinfo = par->matchinfo; |
192 | 127 | ||
193 | return check(ip->proto, ip->invflags, multiinfo->flags, | 128 | return check(ip->proto, ip->invflags, multiinfo->flags, |
194 | multiinfo->count); | 129 | multiinfo->count) ? 0 : -EINVAL; |
195 | } | 130 | } |
196 | 131 | ||
197 | static struct xt_match multiport_mt_reg[] __read_mostly = { | 132 | static struct xt_match multiport_mt_reg[] __read_mostly = { |
198 | { | 133 | { |
199 | .name = "multiport", | 134 | .name = "multiport", |
200 | .family = NFPROTO_IPV4, | 135 | .family = NFPROTO_IPV4, |
201 | .revision = 0, | ||
202 | .checkentry = multiport_mt_check_v0, | ||
203 | .match = multiport_mt_v0, | ||
204 | .matchsize = sizeof(struct xt_multiport), | ||
205 | .me = THIS_MODULE, | ||
206 | }, | ||
207 | { | ||
208 | .name = "multiport", | ||
209 | .family = NFPROTO_IPV4, | ||
210 | .revision = 1, | 136 | .revision = 1, |
211 | .checkentry = multiport_mt_check, | 137 | .checkentry = multiport_mt_check, |
212 | .match = multiport_mt, | 138 | .match = multiport_mt, |
@@ -216,15 +142,6 @@ static struct xt_match multiport_mt_reg[] __read_mostly = { | |||
216 | { | 142 | { |
217 | .name = "multiport", | 143 | .name = "multiport", |
218 | .family = NFPROTO_IPV6, | 144 | .family = NFPROTO_IPV6, |
219 | .revision = 0, | ||
220 | .checkentry = multiport_mt6_check_v0, | ||
221 | .match = multiport_mt_v0, | ||
222 | .matchsize = sizeof(struct xt_multiport), | ||
223 | .me = THIS_MODULE, | ||
224 | }, | ||
225 | { | ||
226 | .name = "multiport", | ||
227 | .family = NFPROTO_IPV6, | ||
228 | .revision = 1, | 145 | .revision = 1, |
229 | .checkentry = multiport_mt6_check, | 146 | .checkentry = multiport_mt6_check, |
230 | .match = multiport_mt, | 147 | .match = multiport_mt, |
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c index 4169e200588d..4327e101c047 100644 --- a/net/netfilter/xt_osf.c +++ b/net/netfilter/xt_osf.c | |||
@@ -16,7 +16,7 @@ | |||
16 | * along with this program; if not, write to the Free Software | 16 | * along with this program; if not, write to the Free Software |
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
18 | */ | 18 | */ |
19 | 19 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | 22 | ||
@@ -193,8 +193,8 @@ static inline int xt_osf_ttl(const struct sk_buff *skb, const struct xt_osf_info | |||
193 | return ip->ttl == f_ttl; | 193 | return ip->ttl == f_ttl; |
194 | } | 194 | } |
195 | 195 | ||
196 | static bool xt_osf_match_packet(const struct sk_buff *skb, | 196 | static bool |
197 | const struct xt_match_param *p) | 197 | xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p) |
198 | { | 198 | { |
199 | const struct xt_osf_info *info = p->matchinfo; | 199 | const struct xt_osf_info *info = p->matchinfo; |
200 | const struct iphdr *ip = ip_hdr(skb); | 200 | const struct iphdr *ip = ip_hdr(skb); |
@@ -382,14 +382,14 @@ static int __init xt_osf_init(void) | |||
382 | 382 | ||
383 | err = nfnetlink_subsys_register(&xt_osf_nfnetlink); | 383 | err = nfnetlink_subsys_register(&xt_osf_nfnetlink); |
384 | if (err < 0) { | 384 | if (err < 0) { |
385 | printk(KERN_ERR "Failed (%d) to register OSF nsfnetlink helper.\n", err); | 385 | pr_err("Failed to register OSF nsfnetlink helper (%d)\n", err); |
386 | goto err_out_exit; | 386 | goto err_out_exit; |
387 | } | 387 | } |
388 | 388 | ||
389 | err = xt_register_match(&xt_osf_match); | 389 | err = xt_register_match(&xt_osf_match); |
390 | if (err) { | 390 | if (err) { |
391 | printk(KERN_ERR "Failed (%d) to register OS fingerprint " | 391 | pr_err("Failed to register OS fingerprint " |
392 | "matching module.\n", err); | 392 | "matching module (%d)\n", err); |
393 | goto err_out_remove; | 393 | goto err_out_remove; |
394 | } | 394 | } |
395 | 395 | ||
diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c index d24c76dffee2..772d7389b337 100644 --- a/net/netfilter/xt_owner.c +++ b/net/netfilter/xt_owner.c | |||
@@ -18,7 +18,7 @@ | |||
18 | #include <linux/netfilter/xt_owner.h> | 18 | #include <linux/netfilter/xt_owner.h> |
19 | 19 | ||
20 | static bool | 20 | static bool |
21 | owner_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 21 | owner_mt(const struct sk_buff *skb, struct xt_action_param *par) |
22 | { | 22 | { |
23 | const struct xt_owner_match_info *info = par->matchinfo; | 23 | const struct xt_owner_match_info *info = par->matchinfo; |
24 | const struct file *filp; | 24 | const struct file *filp; |
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c index 8d28ca5848bc..d7ca16b8b8df 100644 --- a/net/netfilter/xt_physdev.c +++ b/net/netfilter/xt_physdev.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | 10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/skbuff.h> | 12 | #include <linux/skbuff.h> |
13 | #include <linux/netfilter_bridge.h> | 13 | #include <linux/netfilter_bridge.h> |
@@ -22,7 +22,7 @@ MODULE_ALIAS("ip6t_physdev"); | |||
22 | 22 | ||
23 | 23 | ||
24 | static bool | 24 | static bool |
25 | physdev_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 25 | physdev_mt(const struct sk_buff *skb, struct xt_action_param *par) |
26 | { | 26 | { |
27 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); | 27 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); |
28 | const struct xt_physdev_info *info = par->matchinfo; | 28 | const struct xt_physdev_info *info = par->matchinfo; |
@@ -83,25 +83,25 @@ match_outdev: | |||
83 | return (!!ret ^ !(info->invert & XT_PHYSDEV_OP_OUT)); | 83 | return (!!ret ^ !(info->invert & XT_PHYSDEV_OP_OUT)); |
84 | } | 84 | } |
85 | 85 | ||
86 | static bool physdev_mt_check(const struct xt_mtchk_param *par) | 86 | static int physdev_mt_check(const struct xt_mtchk_param *par) |
87 | { | 87 | { |
88 | const struct xt_physdev_info *info = par->matchinfo; | 88 | const struct xt_physdev_info *info = par->matchinfo; |
89 | 89 | ||
90 | if (!(info->bitmask & XT_PHYSDEV_OP_MASK) || | 90 | if (!(info->bitmask & XT_PHYSDEV_OP_MASK) || |
91 | info->bitmask & ~XT_PHYSDEV_OP_MASK) | 91 | info->bitmask & ~XT_PHYSDEV_OP_MASK) |
92 | return false; | 92 | return -EINVAL; |
93 | if (info->bitmask & XT_PHYSDEV_OP_OUT && | 93 | if (info->bitmask & XT_PHYSDEV_OP_OUT && |
94 | (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) || | 94 | (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) || |
95 | info->invert & XT_PHYSDEV_OP_BRIDGED) && | 95 | info->invert & XT_PHYSDEV_OP_BRIDGED) && |
96 | par->hook_mask & ((1 << NF_INET_LOCAL_OUT) | | 96 | par->hook_mask & ((1 << NF_INET_LOCAL_OUT) | |
97 | (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) { | 97 | (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) { |
98 | printk(KERN_WARNING "physdev match: using --physdev-out in the " | 98 | pr_info("using --physdev-out in the OUTPUT, FORWARD and " |
99 | "OUTPUT, FORWARD and POSTROUTING chains for non-bridged " | 99 | "POSTROUTING chains for non-bridged traffic is not " |
100 | "traffic is not supported anymore.\n"); | 100 | "supported anymore.\n"); |
101 | if (par->hook_mask & (1 << NF_INET_LOCAL_OUT)) | 101 | if (par->hook_mask & (1 << NF_INET_LOCAL_OUT)) |
102 | return false; | 102 | return -EINVAL; |
103 | } | 103 | } |
104 | return true; | 104 | return 0; |
105 | } | 105 | } |
106 | 106 | ||
107 | static struct xt_match physdev_mt_reg __read_mostly = { | 107 | static struct xt_match physdev_mt_reg __read_mostly = { |
diff --git a/net/netfilter/xt_pkttype.c b/net/netfilter/xt_pkttype.c index 69da1d3a1d85..5b645cb598fc 100644 --- a/net/netfilter/xt_pkttype.c +++ b/net/netfilter/xt_pkttype.c | |||
@@ -23,7 +23,7 @@ MODULE_ALIAS("ipt_pkttype"); | |||
23 | MODULE_ALIAS("ip6t_pkttype"); | 23 | MODULE_ALIAS("ip6t_pkttype"); |
24 | 24 | ||
25 | static bool | 25 | static bool |
26 | pkttype_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 26 | pkttype_mt(const struct sk_buff *skb, struct xt_action_param *par) |
27 | { | 27 | { |
28 | const struct xt_pkttype_info *info = par->matchinfo; | 28 | const struct xt_pkttype_info *info = par->matchinfo; |
29 | u_int8_t type; | 29 | u_int8_t type; |
diff --git a/net/netfilter/xt_policy.c b/net/netfilter/xt_policy.c index 4cbfebda8fa1..f23e97bb42d7 100644 --- a/net/netfilter/xt_policy.c +++ b/net/netfilter/xt_policy.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | 9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/skbuff.h> | 12 | #include <linux/skbuff.h> |
@@ -110,15 +110,15 @@ match_policy_out(const struct sk_buff *skb, const struct xt_policy_info *info, | |||
110 | } | 110 | } |
111 | 111 | ||
112 | static bool | 112 | static bool |
113 | policy_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 113 | policy_mt(const struct sk_buff *skb, struct xt_action_param *par) |
114 | { | 114 | { |
115 | const struct xt_policy_info *info = par->matchinfo; | 115 | const struct xt_policy_info *info = par->matchinfo; |
116 | int ret; | 116 | int ret; |
117 | 117 | ||
118 | if (info->flags & XT_POLICY_MATCH_IN) | 118 | if (info->flags & XT_POLICY_MATCH_IN) |
119 | ret = match_policy_in(skb, info, par->match->family); | 119 | ret = match_policy_in(skb, info, par->family); |
120 | else | 120 | else |
121 | ret = match_policy_out(skb, info, par->match->family); | 121 | ret = match_policy_out(skb, info, par->family); |
122 | 122 | ||
123 | if (ret < 0) | 123 | if (ret < 0) |
124 | ret = info->flags & XT_POLICY_MATCH_NONE ? true : false; | 124 | ret = info->flags & XT_POLICY_MATCH_NONE ? true : false; |
@@ -128,32 +128,29 @@ policy_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
128 | return ret; | 128 | return ret; |
129 | } | 129 | } |
130 | 130 | ||
131 | static bool policy_mt_check(const struct xt_mtchk_param *par) | 131 | static int policy_mt_check(const struct xt_mtchk_param *par) |
132 | { | 132 | { |
133 | const struct xt_policy_info *info = par->matchinfo; | 133 | const struct xt_policy_info *info = par->matchinfo; |
134 | 134 | ||
135 | if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT))) { | 135 | if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT))) { |
136 | printk(KERN_ERR "xt_policy: neither incoming nor " | 136 | pr_info("neither incoming nor outgoing policy selected\n"); |
137 | "outgoing policy selected\n"); | 137 | return -EINVAL; |
138 | return false; | ||
139 | } | 138 | } |
140 | if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) | | 139 | if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) | |
141 | (1 << NF_INET_LOCAL_IN)) && info->flags & XT_POLICY_MATCH_OUT) { | 140 | (1 << NF_INET_LOCAL_IN)) && info->flags & XT_POLICY_MATCH_OUT) { |
142 | printk(KERN_ERR "xt_policy: output policy not valid in " | 141 | pr_info("output policy not valid in PREROUTING and INPUT\n"); |
143 | "PRE_ROUTING and INPUT\n"); | 142 | return -EINVAL; |
144 | return false; | ||
145 | } | 143 | } |
146 | if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) | | 144 | if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) | |
147 | (1 << NF_INET_LOCAL_OUT)) && info->flags & XT_POLICY_MATCH_IN) { | 145 | (1 << NF_INET_LOCAL_OUT)) && info->flags & XT_POLICY_MATCH_IN) { |
148 | printk(KERN_ERR "xt_policy: input policy not valid in " | 146 | pr_info("input policy not valid in POSTROUTING and OUTPUT\n"); |
149 | "POST_ROUTING and OUTPUT\n"); | 147 | return -EINVAL; |
150 | return false; | ||
151 | } | 148 | } |
152 | if (info->len > XT_POLICY_MAX_ELEM) { | 149 | if (info->len > XT_POLICY_MAX_ELEM) { |
153 | printk(KERN_ERR "xt_policy: too many policy elements\n"); | 150 | pr_info("too many policy elements\n"); |
154 | return false; | 151 | return -EINVAL; |
155 | } | 152 | } |
156 | return true; | 153 | return 0; |
157 | } | 154 | } |
158 | 155 | ||
159 | static struct xt_match policy_mt_reg[] __read_mostly = { | 156 | static struct xt_match policy_mt_reg[] __read_mostly = { |
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c index 2d5562498c43..b4f7dfea5980 100644 --- a/net/netfilter/xt_quota.c +++ b/net/netfilter/xt_quota.c | |||
@@ -23,7 +23,7 @@ MODULE_ALIAS("ip6t_quota"); | |||
23 | static DEFINE_SPINLOCK(quota_lock); | 23 | static DEFINE_SPINLOCK(quota_lock); |
24 | 24 | ||
25 | static bool | 25 | static bool |
26 | quota_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 26 | quota_mt(const struct sk_buff *skb, struct xt_action_param *par) |
27 | { | 27 | { |
28 | struct xt_quota_info *q = (void *)par->matchinfo; | 28 | struct xt_quota_info *q = (void *)par->matchinfo; |
29 | struct xt_quota_priv *priv = q->master; | 29 | struct xt_quota_priv *priv = q->master; |
@@ -44,19 +44,19 @@ quota_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
44 | return ret; | 44 | return ret; |
45 | } | 45 | } |
46 | 46 | ||
47 | static bool quota_mt_check(const struct xt_mtchk_param *par) | 47 | static int quota_mt_check(const struct xt_mtchk_param *par) |
48 | { | 48 | { |
49 | struct xt_quota_info *q = par->matchinfo; | 49 | struct xt_quota_info *q = par->matchinfo; |
50 | 50 | ||
51 | if (q->flags & ~XT_QUOTA_MASK) | 51 | if (q->flags & ~XT_QUOTA_MASK) |
52 | return false; | 52 | return -EINVAL; |
53 | 53 | ||
54 | q->master = kmalloc(sizeof(*q->master), GFP_KERNEL); | 54 | q->master = kmalloc(sizeof(*q->master), GFP_KERNEL); |
55 | if (q->master == NULL) | 55 | if (q->master == NULL) |
56 | return false; | 56 | return -ENOMEM; |
57 | 57 | ||
58 | q->master->quota = q->quota; | 58 | q->master->quota = q->quota; |
59 | return true; | 59 | return 0; |
60 | } | 60 | } |
61 | 61 | ||
62 | static void quota_mt_destroy(const struct xt_mtdtor_param *par) | 62 | static void quota_mt_destroy(const struct xt_mtdtor_param *par) |
diff --git a/net/netfilter/xt_rateest.c b/net/netfilter/xt_rateest.c index 4fc6a917f6de..76a083184d8e 100644 --- a/net/netfilter/xt_rateest.c +++ b/net/netfilter/xt_rateest.c | |||
@@ -15,7 +15,7 @@ | |||
15 | 15 | ||
16 | 16 | ||
17 | static bool | 17 | static bool |
18 | xt_rateest_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 18 | xt_rateest_mt(const struct sk_buff *skb, struct xt_action_param *par) |
19 | { | 19 | { |
20 | const struct xt_rateest_match_info *info = par->matchinfo; | 20 | const struct xt_rateest_match_info *info = par->matchinfo; |
21 | struct gnet_stats_rate_est *r; | 21 | struct gnet_stats_rate_est *r; |
@@ -74,10 +74,11 @@ xt_rateest_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
74 | return ret; | 74 | return ret; |
75 | } | 75 | } |
76 | 76 | ||
77 | static bool xt_rateest_mt_checkentry(const struct xt_mtchk_param *par) | 77 | static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par) |
78 | { | 78 | { |
79 | struct xt_rateest_match_info *info = par->matchinfo; | 79 | struct xt_rateest_match_info *info = par->matchinfo; |
80 | struct xt_rateest *est1, *est2; | 80 | struct xt_rateest *est1, *est2; |
81 | int ret = false; | ||
81 | 82 | ||
82 | if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS | | 83 | if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS | |
83 | XT_RATEEST_MATCH_REL)) != 1) | 84 | XT_RATEEST_MATCH_REL)) != 1) |
@@ -95,6 +96,7 @@ static bool xt_rateest_mt_checkentry(const struct xt_mtchk_param *par) | |||
95 | goto err1; | 96 | goto err1; |
96 | } | 97 | } |
97 | 98 | ||
99 | ret = -ENOENT; | ||
98 | est1 = xt_rateest_lookup(info->name1); | 100 | est1 = xt_rateest_lookup(info->name1); |
99 | if (!est1) | 101 | if (!est1) |
100 | goto err1; | 102 | goto err1; |
@@ -109,12 +111,12 @@ static bool xt_rateest_mt_checkentry(const struct xt_mtchk_param *par) | |||
109 | 111 | ||
110 | info->est1 = est1; | 112 | info->est1 = est1; |
111 | info->est2 = est2; | 113 | info->est2 = est2; |
112 | return true; | 114 | return 0; |
113 | 115 | ||
114 | err2: | 116 | err2: |
115 | xt_rateest_put(est1); | 117 | xt_rateest_put(est1); |
116 | err1: | 118 | err1: |
117 | return false; | 119 | return -EINVAL; |
118 | } | 120 | } |
119 | 121 | ||
120 | static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par) | 122 | static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par) |
diff --git a/net/netfilter/xt_realm.c b/net/netfilter/xt_realm.c index 484d1689bfde..459a7b256eb2 100644 --- a/net/netfilter/xt_realm.c +++ b/net/netfilter/xt_realm.c | |||
@@ -22,7 +22,7 @@ MODULE_DESCRIPTION("Xtables: Routing realm match"); | |||
22 | MODULE_ALIAS("ipt_realm"); | 22 | MODULE_ALIAS("ipt_realm"); |
23 | 23 | ||
24 | static bool | 24 | static bool |
25 | realm_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 25 | realm_mt(const struct sk_buff *skb, struct xt_action_param *par) |
26 | { | 26 | { |
27 | const struct xt_realm_info *info = par->matchinfo; | 27 | const struct xt_realm_info *info = par->matchinfo; |
28 | const struct dst_entry *dst = skb_dst(skb); | 28 | const struct dst_entry *dst = skb_dst(skb); |
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 834b736857cb..76aec6a44762 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c | |||
@@ -12,6 +12,7 @@ | |||
12 | * Author: Stephen Frost <sfrost@snowman.net> | 12 | * Author: Stephen Frost <sfrost@snowman.net> |
13 | * Copyright 2002-2003, Stephen Frost, 2.5.x port by laforge@netfilter.org | 13 | * Copyright 2002-2003, Stephen Frost, 2.5.x port by laforge@netfilter.org |
14 | */ | 14 | */ |
15 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
15 | #include <linux/init.h> | 16 | #include <linux/init.h> |
16 | #include <linux/ip.h> | 17 | #include <linux/ip.h> |
17 | #include <linux/ipv6.h> | 18 | #include <linux/ipv6.h> |
@@ -35,8 +36,8 @@ | |||
35 | #include <linux/netfilter/xt_recent.h> | 36 | #include <linux/netfilter/xt_recent.h> |
36 | 37 | ||
37 | MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); | 38 | MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); |
38 | MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>"); | 39 | MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>"); |
39 | MODULE_DESCRIPTION("Xtables: \"recently-seen\" host matching for IPv4"); | 40 | MODULE_DESCRIPTION("Xtables: \"recently-seen\" host matching"); |
40 | MODULE_LICENSE("GPL"); | 41 | MODULE_LICENSE("GPL"); |
41 | MODULE_ALIAS("ipt_recent"); | 42 | MODULE_ALIAS("ipt_recent"); |
42 | MODULE_ALIAS("ip6t_recent"); | 43 | MODULE_ALIAS("ip6t_recent"); |
@@ -51,14 +52,14 @@ module_param(ip_list_tot, uint, 0400); | |||
51 | module_param(ip_pkt_list_tot, uint, 0400); | 52 | module_param(ip_pkt_list_tot, uint, 0400); |
52 | module_param(ip_list_hash_size, uint, 0400); | 53 | module_param(ip_list_hash_size, uint, 0400); |
53 | module_param(ip_list_perms, uint, 0400); | 54 | module_param(ip_list_perms, uint, 0400); |
54 | module_param(ip_list_uid, uint, 0400); | 55 | module_param(ip_list_uid, uint, S_IRUGO | S_IWUSR); |
55 | module_param(ip_list_gid, uint, 0400); | 56 | module_param(ip_list_gid, uint, S_IRUGO | S_IWUSR); |
56 | MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list"); | 57 | MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list"); |
57 | MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)"); | 58 | MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)"); |
58 | MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs"); | 59 | MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs"); |
59 | MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files"); | 60 | MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files"); |
60 | MODULE_PARM_DESC(ip_list_uid,"owner of /proc/net/xt_recent/* files"); | 61 | MODULE_PARM_DESC(ip_list_uid, "default owner of /proc/net/xt_recent/* files"); |
61 | MODULE_PARM_DESC(ip_list_gid,"owning group of /proc/net/xt_recent/* files"); | 62 | MODULE_PARM_DESC(ip_list_gid, "default owning group of /proc/net/xt_recent/* files"); |
62 | 63 | ||
63 | struct recent_entry { | 64 | struct recent_entry { |
64 | struct list_head list; | 65 | struct list_head list; |
@@ -84,9 +85,6 @@ struct recent_net { | |||
84 | struct list_head tables; | 85 | struct list_head tables; |
85 | #ifdef CONFIG_PROC_FS | 86 | #ifdef CONFIG_PROC_FS |
86 | struct proc_dir_entry *xt_recent; | 87 | struct proc_dir_entry *xt_recent; |
87 | #ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT | ||
88 | struct proc_dir_entry *ipt_recent; | ||
89 | #endif | ||
90 | #endif | 88 | #endif |
91 | }; | 89 | }; |
92 | 90 | ||
@@ -147,6 +145,25 @@ static void recent_entry_remove(struct recent_table *t, struct recent_entry *e) | |||
147 | t->entries--; | 145 | t->entries--; |
148 | } | 146 | } |
149 | 147 | ||
148 | /* | ||
149 | * Drop entries with timestamps older then 'time'. | ||
150 | */ | ||
151 | static void recent_entry_reap(struct recent_table *t, unsigned long time) | ||
152 | { | ||
153 | struct recent_entry *e; | ||
154 | |||
155 | /* | ||
156 | * The head of the LRU list is always the oldest entry. | ||
157 | */ | ||
158 | e = list_entry(t->lru_list.next, struct recent_entry, lru_list); | ||
159 | |||
160 | /* | ||
161 | * The last time stamp is the most recent. | ||
162 | */ | ||
163 | if (time_after(time, e->stamps[e->index-1])) | ||
164 | recent_entry_remove(t, e); | ||
165 | } | ||
166 | |||
150 | static struct recent_entry * | 167 | static struct recent_entry * |
151 | recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr, | 168 | recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr, |
152 | u_int16_t family, u_int8_t ttl) | 169 | u_int16_t family, u_int8_t ttl) |
@@ -207,7 +224,7 @@ static void recent_table_flush(struct recent_table *t) | |||
207 | } | 224 | } |
208 | 225 | ||
209 | static bool | 226 | static bool |
210 | recent_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 227 | recent_mt(const struct sk_buff *skb, struct xt_action_param *par) |
211 | { | 228 | { |
212 | struct net *net = dev_net(par->in ? par->in : par->out); | 229 | struct net *net = dev_net(par->in ? par->in : par->out); |
213 | struct recent_net *recent_net = recent_pernet(net); | 230 | struct recent_net *recent_net = recent_pernet(net); |
@@ -218,7 +235,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
218 | u_int8_t ttl; | 235 | u_int8_t ttl; |
219 | bool ret = info->invert; | 236 | bool ret = info->invert; |
220 | 237 | ||
221 | if (par->match->family == NFPROTO_IPV4) { | 238 | if (par->family == NFPROTO_IPV4) { |
222 | const struct iphdr *iph = ip_hdr(skb); | 239 | const struct iphdr *iph = ip_hdr(skb); |
223 | 240 | ||
224 | if (info->side == XT_RECENT_DEST) | 241 | if (info->side == XT_RECENT_DEST) |
@@ -244,14 +261,14 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
244 | 261 | ||
245 | spin_lock_bh(&recent_lock); | 262 | spin_lock_bh(&recent_lock); |
246 | t = recent_table_lookup(recent_net, info->name); | 263 | t = recent_table_lookup(recent_net, info->name); |
247 | e = recent_entry_lookup(t, &addr, par->match->family, | 264 | e = recent_entry_lookup(t, &addr, par->family, |
248 | (info->check_set & XT_RECENT_TTL) ? ttl : 0); | 265 | (info->check_set & XT_RECENT_TTL) ? ttl : 0); |
249 | if (e == NULL) { | 266 | if (e == NULL) { |
250 | if (!(info->check_set & XT_RECENT_SET)) | 267 | if (!(info->check_set & XT_RECENT_SET)) |
251 | goto out; | 268 | goto out; |
252 | e = recent_entry_init(t, &addr, par->match->family, ttl); | 269 | e = recent_entry_init(t, &addr, par->family, ttl); |
253 | if (e == NULL) | 270 | if (e == NULL) |
254 | *par->hotdrop = true; | 271 | par->hotdrop = true; |
255 | ret = !ret; | 272 | ret = !ret; |
256 | goto out; | 273 | goto out; |
257 | } | 274 | } |
@@ -273,6 +290,10 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
273 | break; | 290 | break; |
274 | } | 291 | } |
275 | } | 292 | } |
293 | |||
294 | /* info->seconds must be non-zero */ | ||
295 | if (info->check_set & XT_RECENT_REAP) | ||
296 | recent_entry_reap(t, time); | ||
276 | } | 297 | } |
277 | 298 | ||
278 | if (info->check_set & XT_RECENT_SET || | 299 | if (info->check_set & XT_RECENT_SET || |
@@ -285,7 +306,7 @@ out: | |||
285 | return ret; | 306 | return ret; |
286 | } | 307 | } |
287 | 308 | ||
288 | static bool recent_mt_check(const struct xt_mtchk_param *par) | 309 | static int recent_mt_check(const struct xt_mtchk_param *par) |
289 | { | 310 | { |
290 | struct recent_net *recent_net = recent_pernet(par->net); | 311 | struct recent_net *recent_net = recent_pernet(par->net); |
291 | const struct xt_recent_mtinfo *info = par->matchinfo; | 312 | const struct xt_recent_mtinfo *info = par->matchinfo; |
@@ -294,41 +315,51 @@ static bool recent_mt_check(const struct xt_mtchk_param *par) | |||
294 | struct proc_dir_entry *pde; | 315 | struct proc_dir_entry *pde; |
295 | #endif | 316 | #endif |
296 | unsigned i; | 317 | unsigned i; |
297 | bool ret = false; | 318 | int ret = -EINVAL; |
298 | 319 | ||
299 | if (unlikely(!hash_rnd_inited)) { | 320 | if (unlikely(!hash_rnd_inited)) { |
300 | get_random_bytes(&hash_rnd, sizeof(hash_rnd)); | 321 | get_random_bytes(&hash_rnd, sizeof(hash_rnd)); |
301 | hash_rnd_inited = true; | 322 | hash_rnd_inited = true; |
302 | } | 323 | } |
324 | if (info->check_set & ~XT_RECENT_VALID_FLAGS) { | ||
325 | pr_info("Unsupported user space flags (%08x)\n", | ||
326 | info->check_set); | ||
327 | return -EINVAL; | ||
328 | } | ||
303 | if (hweight8(info->check_set & | 329 | if (hweight8(info->check_set & |
304 | (XT_RECENT_SET | XT_RECENT_REMOVE | | 330 | (XT_RECENT_SET | XT_RECENT_REMOVE | |
305 | XT_RECENT_CHECK | XT_RECENT_UPDATE)) != 1) | 331 | XT_RECENT_CHECK | XT_RECENT_UPDATE)) != 1) |
306 | return false; | 332 | return -EINVAL; |
307 | if ((info->check_set & (XT_RECENT_SET | XT_RECENT_REMOVE)) && | 333 | if ((info->check_set & (XT_RECENT_SET | XT_RECENT_REMOVE)) && |
308 | (info->seconds || info->hit_count)) | 334 | (info->seconds || info->hit_count || |
309 | return false; | 335 | (info->check_set & XT_RECENT_MODIFIERS))) |
336 | return -EINVAL; | ||
337 | if ((info->check_set & XT_RECENT_REAP) && !info->seconds) | ||
338 | return -EINVAL; | ||
310 | if (info->hit_count > ip_pkt_list_tot) { | 339 | if (info->hit_count > ip_pkt_list_tot) { |
311 | pr_info(KBUILD_MODNAME ": hitcount (%u) is larger than " | 340 | pr_info("hitcount (%u) is larger than " |
312 | "packets to be remembered (%u)\n", | 341 | "packets to be remembered (%u)\n", |
313 | info->hit_count, ip_pkt_list_tot); | 342 | info->hit_count, ip_pkt_list_tot); |
314 | return false; | 343 | return -EINVAL; |
315 | } | 344 | } |
316 | if (info->name[0] == '\0' || | 345 | if (info->name[0] == '\0' || |
317 | strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN) | 346 | strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN) |
318 | return false; | 347 | return -EINVAL; |
319 | 348 | ||
320 | mutex_lock(&recent_mutex); | 349 | mutex_lock(&recent_mutex); |
321 | t = recent_table_lookup(recent_net, info->name); | 350 | t = recent_table_lookup(recent_net, info->name); |
322 | if (t != NULL) { | 351 | if (t != NULL) { |
323 | t->refcnt++; | 352 | t->refcnt++; |
324 | ret = true; | 353 | ret = 0; |
325 | goto out; | 354 | goto out; |
326 | } | 355 | } |
327 | 356 | ||
328 | t = kzalloc(sizeof(*t) + sizeof(t->iphash[0]) * ip_list_hash_size, | 357 | t = kzalloc(sizeof(*t) + sizeof(t->iphash[0]) * ip_list_hash_size, |
329 | GFP_KERNEL); | 358 | GFP_KERNEL); |
330 | if (t == NULL) | 359 | if (t == NULL) { |
360 | ret = -ENOMEM; | ||
331 | goto out; | 361 | goto out; |
362 | } | ||
332 | t->refcnt = 1; | 363 | t->refcnt = 1; |
333 | strcpy(t->name, info->name); | 364 | strcpy(t->name, info->name); |
334 | INIT_LIST_HEAD(&t->lru_list); | 365 | INIT_LIST_HEAD(&t->lru_list); |
@@ -339,26 +370,16 @@ static bool recent_mt_check(const struct xt_mtchk_param *par) | |||
339 | &recent_mt_fops, t); | 370 | &recent_mt_fops, t); |
340 | if (pde == NULL) { | 371 | if (pde == NULL) { |
341 | kfree(t); | 372 | kfree(t); |
342 | goto out; | 373 | ret = -ENOMEM; |
343 | } | ||
344 | pde->uid = ip_list_uid; | ||
345 | pde->gid = ip_list_gid; | ||
346 | #ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT | ||
347 | pde = proc_create_data(t->name, ip_list_perms, recent_net->ipt_recent, | ||
348 | &recent_old_fops, t); | ||
349 | if (pde == NULL) { | ||
350 | remove_proc_entry(t->name, recent_net->xt_recent); | ||
351 | kfree(t); | ||
352 | goto out; | 374 | goto out; |
353 | } | 375 | } |
354 | pde->uid = ip_list_uid; | 376 | pde->uid = ip_list_uid; |
355 | pde->gid = ip_list_gid; | 377 | pde->gid = ip_list_gid; |
356 | #endif | 378 | #endif |
357 | #endif | ||
358 | spin_lock_bh(&recent_lock); | 379 | spin_lock_bh(&recent_lock); |
359 | list_add_tail(&t->list, &recent_net->tables); | 380 | list_add_tail(&t->list, &recent_net->tables); |
360 | spin_unlock_bh(&recent_lock); | 381 | spin_unlock_bh(&recent_lock); |
361 | ret = true; | 382 | ret = 0; |
362 | out: | 383 | out: |
363 | mutex_unlock(&recent_mutex); | 384 | mutex_unlock(&recent_mutex); |
364 | return ret; | 385 | return ret; |
@@ -377,9 +398,6 @@ static void recent_mt_destroy(const struct xt_mtdtor_param *par) | |||
377 | list_del(&t->list); | 398 | list_del(&t->list); |
378 | spin_unlock_bh(&recent_lock); | 399 | spin_unlock_bh(&recent_lock); |
379 | #ifdef CONFIG_PROC_FS | 400 | #ifdef CONFIG_PROC_FS |
380 | #ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT | ||
381 | remove_proc_entry(t->name, recent_net->ipt_recent); | ||
382 | #endif | ||
383 | remove_proc_entry(t->name, recent_net->xt_recent); | 401 | remove_proc_entry(t->name, recent_net->xt_recent); |
384 | #endif | 402 | #endif |
385 | recent_table_flush(t); | 403 | recent_table_flush(t); |
@@ -471,84 +489,6 @@ static int recent_seq_open(struct inode *inode, struct file *file) | |||
471 | return 0; | 489 | return 0; |
472 | } | 490 | } |
473 | 491 | ||
474 | #ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT | ||
475 | static int recent_old_seq_open(struct inode *inode, struct file *filp) | ||
476 | { | ||
477 | static bool warned_of_old; | ||
478 | |||
479 | if (unlikely(!warned_of_old)) { | ||
480 | printk(KERN_INFO KBUILD_MODNAME ": Use of /proc/net/ipt_recent" | ||
481 | " is deprecated; use /proc/net/xt_recent.\n"); | ||
482 | warned_of_old = true; | ||
483 | } | ||
484 | return recent_seq_open(inode, filp); | ||
485 | } | ||
486 | |||
487 | static ssize_t recent_old_proc_write(struct file *file, | ||
488 | const char __user *input, | ||
489 | size_t size, loff_t *loff) | ||
490 | { | ||
491 | const struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode); | ||
492 | struct recent_table *t = pde->data; | ||
493 | struct recent_entry *e; | ||
494 | char buf[sizeof("+255.255.255.255")], *c = buf; | ||
495 | union nf_inet_addr addr = {}; | ||
496 | int add; | ||
497 | |||
498 | if (size > sizeof(buf)) | ||
499 | size = sizeof(buf); | ||
500 | if (copy_from_user(buf, input, size)) | ||
501 | return -EFAULT; | ||
502 | |||
503 | c = skip_spaces(c); | ||
504 | |||
505 | if (size - (c - buf) < 5) | ||
506 | return c - buf; | ||
507 | if (!strncmp(c, "clear", 5)) { | ||
508 | c += 5; | ||
509 | spin_lock_bh(&recent_lock); | ||
510 | recent_table_flush(t); | ||
511 | spin_unlock_bh(&recent_lock); | ||
512 | return c - buf; | ||
513 | } | ||
514 | |||
515 | switch (*c) { | ||
516 | case '-': | ||
517 | add = 0; | ||
518 | c++; | ||
519 | break; | ||
520 | case '+': | ||
521 | c++; | ||
522 | default: | ||
523 | add = 1; | ||
524 | break; | ||
525 | } | ||
526 | addr.ip = in_aton(c); | ||
527 | |||
528 | spin_lock_bh(&recent_lock); | ||
529 | e = recent_entry_lookup(t, &addr, NFPROTO_IPV4, 0); | ||
530 | if (e == NULL) { | ||
531 | if (add) | ||
532 | recent_entry_init(t, &addr, NFPROTO_IPV4, 0); | ||
533 | } else { | ||
534 | if (add) | ||
535 | recent_entry_update(t, e); | ||
536 | else | ||
537 | recent_entry_remove(t, e); | ||
538 | } | ||
539 | spin_unlock_bh(&recent_lock); | ||
540 | return size; | ||
541 | } | ||
542 | |||
543 | static const struct file_operations recent_old_fops = { | ||
544 | .open = recent_old_seq_open, | ||
545 | .read = seq_read, | ||
546 | .write = recent_old_proc_write, | ||
547 | .release = seq_release_private, | ||
548 | .owner = THIS_MODULE, | ||
549 | }; | ||
550 | #endif | ||
551 | |||
552 | static ssize_t | 492 | static ssize_t |
553 | recent_mt_proc_write(struct file *file, const char __user *input, | 493 | recent_mt_proc_write(struct file *file, const char __user *input, |
554 | size_t size, loff_t *loff) | 494 | size_t size, loff_t *loff) |
@@ -585,7 +525,7 @@ recent_mt_proc_write(struct file *file, const char __user *input, | |||
585 | add = true; | 525 | add = true; |
586 | break; | 526 | break; |
587 | default: | 527 | default: |
588 | printk(KERN_INFO KBUILD_MODNAME ": Need +ip, -ip or /\n"); | 528 | pr_info("Need \"+ip\", \"-ip\" or \"/\"\n"); |
589 | return -EINVAL; | 529 | return -EINVAL; |
590 | } | 530 | } |
591 | 531 | ||
@@ -600,8 +540,7 @@ recent_mt_proc_write(struct file *file, const char __user *input, | |||
600 | } | 540 | } |
601 | 541 | ||
602 | if (!succ) { | 542 | if (!succ) { |
603 | printk(KERN_INFO KBUILD_MODNAME ": illegal address written " | 543 | pr_info("illegal address written to procfs\n"); |
604 | "to procfs\n"); | ||
605 | return -EINVAL; | 544 | return -EINVAL; |
606 | } | 545 | } |
607 | 546 | ||
@@ -637,21 +576,11 @@ static int __net_init recent_proc_net_init(struct net *net) | |||
637 | recent_net->xt_recent = proc_mkdir("xt_recent", net->proc_net); | 576 | recent_net->xt_recent = proc_mkdir("xt_recent", net->proc_net); |
638 | if (!recent_net->xt_recent) | 577 | if (!recent_net->xt_recent) |
639 | return -ENOMEM; | 578 | return -ENOMEM; |
640 | #ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT | ||
641 | recent_net->ipt_recent = proc_mkdir("ipt_recent", net->proc_net); | ||
642 | if (!recent_net->ipt_recent) { | ||
643 | proc_net_remove(net, "xt_recent"); | ||
644 | return -ENOMEM; | ||
645 | } | ||
646 | #endif | ||
647 | return 0; | 579 | return 0; |
648 | } | 580 | } |
649 | 581 | ||
650 | static void __net_exit recent_proc_net_exit(struct net *net) | 582 | static void __net_exit recent_proc_net_exit(struct net *net) |
651 | { | 583 | { |
652 | #ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT | ||
653 | proc_net_remove(net, "ipt_recent"); | ||
654 | #endif | ||
655 | proc_net_remove(net, "xt_recent"); | 584 | proc_net_remove(net, "xt_recent"); |
656 | } | 585 | } |
657 | #else | 586 | #else |
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c index a189ada9128f..c04fcf385c59 100644 --- a/net/netfilter/xt_sctp.c +++ b/net/netfilter/xt_sctp.c | |||
@@ -1,3 +1,4 @@ | |||
1 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
1 | #include <linux/module.h> | 2 | #include <linux/module.h> |
2 | #include <linux/skbuff.h> | 3 | #include <linux/skbuff.h> |
3 | #include <net/ip.h> | 4 | #include <net/ip.h> |
@@ -15,12 +16,6 @@ MODULE_DESCRIPTION("Xtables: SCTP protocol packet match"); | |||
15 | MODULE_ALIAS("ipt_sctp"); | 16 | MODULE_ALIAS("ipt_sctp"); |
16 | MODULE_ALIAS("ip6t_sctp"); | 17 | MODULE_ALIAS("ip6t_sctp"); |
17 | 18 | ||
18 | #ifdef DEBUG_SCTP | ||
19 | #define duprintf(format, args...) printk(format , ## args) | ||
20 | #else | ||
21 | #define duprintf(format, args...) | ||
22 | #endif | ||
23 | |||
24 | #define SCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \ | 19 | #define SCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \ |
25 | || (!!((invflag) & (option)) ^ (cond))) | 20 | || (!!((invflag) & (option)) ^ (cond))) |
26 | 21 | ||
@@ -52,7 +47,7 @@ match_packet(const struct sk_buff *skb, | |||
52 | const struct xt_sctp_flag_info *flag_info = info->flag_info; | 47 | const struct xt_sctp_flag_info *flag_info = info->flag_info; |
53 | int flag_count = info->flag_count; | 48 | int flag_count = info->flag_count; |
54 | 49 | ||
55 | #ifdef DEBUG_SCTP | 50 | #ifdef DEBUG |
56 | int i = 0; | 51 | int i = 0; |
57 | #endif | 52 | #endif |
58 | 53 | ||
@@ -62,17 +57,19 @@ match_packet(const struct sk_buff *skb, | |||
62 | do { | 57 | do { |
63 | sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch); | 58 | sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch); |
64 | if (sch == NULL || sch->length == 0) { | 59 | if (sch == NULL || sch->length == 0) { |
65 | duprintf("Dropping invalid SCTP packet.\n"); | 60 | pr_debug("Dropping invalid SCTP packet.\n"); |
66 | *hotdrop = true; | 61 | *hotdrop = true; |
67 | return false; | 62 | return false; |
68 | } | 63 | } |
69 | 64 | #ifdef DEBUG | |
70 | duprintf("Chunk num: %d\toffset: %d\ttype: %d\tlength: %d\tflags: %x\n", | 65 | pr_debug("Chunk num: %d\toffset: %d\ttype: %d\tlength: %d" |
71 | ++i, offset, sch->type, htons(sch->length), sch->flags); | 66 | "\tflags: %x\n", |
72 | 67 | ++i, offset, sch->type, htons(sch->length), | |
68 | sch->flags); | ||
69 | #endif | ||
73 | offset += (ntohs(sch->length) + 3) & ~3; | 70 | offset += (ntohs(sch->length) + 3) & ~3; |
74 | 71 | ||
75 | duprintf("skb->len: %d\toffset: %d\n", skb->len, offset); | 72 | pr_debug("skb->len: %d\toffset: %d\n", skb->len, offset); |
76 | 73 | ||
77 | if (SCTP_CHUNKMAP_IS_SET(info->chunkmap, sch->type)) { | 74 | if (SCTP_CHUNKMAP_IS_SET(info->chunkmap, sch->type)) { |
78 | switch (chunk_match_type) { | 75 | switch (chunk_match_type) { |
@@ -117,24 +114,24 @@ match_packet(const struct sk_buff *skb, | |||
117 | } | 114 | } |
118 | 115 | ||
119 | static bool | 116 | static bool |
120 | sctp_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 117 | sctp_mt(const struct sk_buff *skb, struct xt_action_param *par) |
121 | { | 118 | { |
122 | const struct xt_sctp_info *info = par->matchinfo; | 119 | const struct xt_sctp_info *info = par->matchinfo; |
123 | const sctp_sctphdr_t *sh; | 120 | const sctp_sctphdr_t *sh; |
124 | sctp_sctphdr_t _sh; | 121 | sctp_sctphdr_t _sh; |
125 | 122 | ||
126 | if (par->fragoff != 0) { | 123 | if (par->fragoff != 0) { |
127 | duprintf("Dropping non-first fragment.. FIXME\n"); | 124 | pr_debug("Dropping non-first fragment.. FIXME\n"); |
128 | return false; | 125 | return false; |
129 | } | 126 | } |
130 | 127 | ||
131 | sh = skb_header_pointer(skb, par->thoff, sizeof(_sh), &_sh); | 128 | sh = skb_header_pointer(skb, par->thoff, sizeof(_sh), &_sh); |
132 | if (sh == NULL) { | 129 | if (sh == NULL) { |
133 | duprintf("Dropping evil TCP offset=0 tinygram.\n"); | 130 | pr_debug("Dropping evil TCP offset=0 tinygram.\n"); |
134 | *par->hotdrop = true; | 131 | par->hotdrop = true; |
135 | return false; | 132 | return false; |
136 | } | 133 | } |
137 | duprintf("spt: %d\tdpt: %d\n", ntohs(sh->source), ntohs(sh->dest)); | 134 | pr_debug("spt: %d\tdpt: %d\n", ntohs(sh->source), ntohs(sh->dest)); |
138 | 135 | ||
139 | return SCCHECK(ntohs(sh->source) >= info->spts[0] | 136 | return SCCHECK(ntohs(sh->source) >= info->spts[0] |
140 | && ntohs(sh->source) <= info->spts[1], | 137 | && ntohs(sh->source) <= info->spts[1], |
@@ -143,22 +140,26 @@ sctp_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
143 | && ntohs(sh->dest) <= info->dpts[1], | 140 | && ntohs(sh->dest) <= info->dpts[1], |
144 | XT_SCTP_DEST_PORTS, info->flags, info->invflags) | 141 | XT_SCTP_DEST_PORTS, info->flags, info->invflags) |
145 | && SCCHECK(match_packet(skb, par->thoff + sizeof(sctp_sctphdr_t), | 142 | && SCCHECK(match_packet(skb, par->thoff + sizeof(sctp_sctphdr_t), |
146 | info, par->hotdrop), | 143 | info, &par->hotdrop), |
147 | XT_SCTP_CHUNK_TYPES, info->flags, info->invflags); | 144 | XT_SCTP_CHUNK_TYPES, info->flags, info->invflags); |
148 | } | 145 | } |
149 | 146 | ||
150 | static bool sctp_mt_check(const struct xt_mtchk_param *par) | 147 | static int sctp_mt_check(const struct xt_mtchk_param *par) |
151 | { | 148 | { |
152 | const struct xt_sctp_info *info = par->matchinfo; | 149 | const struct xt_sctp_info *info = par->matchinfo; |
153 | 150 | ||
154 | return !(info->flags & ~XT_SCTP_VALID_FLAGS) | 151 | if (info->flags & ~XT_SCTP_VALID_FLAGS) |
155 | && !(info->invflags & ~XT_SCTP_VALID_FLAGS) | 152 | return -EINVAL; |
156 | && !(info->invflags & ~info->flags) | 153 | if (info->invflags & ~XT_SCTP_VALID_FLAGS) |
157 | && ((!(info->flags & XT_SCTP_CHUNK_TYPES)) || | 154 | return -EINVAL; |
158 | (info->chunk_match_type & | 155 | if (info->invflags & ~info->flags) |
159 | (SCTP_CHUNK_MATCH_ALL | 156 | return -EINVAL; |
160 | | SCTP_CHUNK_MATCH_ANY | 157 | if (!(info->flags & XT_SCTP_CHUNK_TYPES)) |
161 | | SCTP_CHUNK_MATCH_ONLY))); | 158 | return 0; |
159 | if (info->chunk_match_type & (SCTP_CHUNK_MATCH_ALL | | ||
160 | SCTP_CHUNK_MATCH_ANY | SCTP_CHUNK_MATCH_ONLY)) | ||
161 | return 0; | ||
162 | return -EINVAL; | ||
162 | } | 163 | } |
163 | 164 | ||
164 | static struct xt_match sctp_mt_reg[] __read_mostly = { | 165 | static struct xt_match sctp_mt_reg[] __read_mostly = { |
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 6a902564d24f..3d54c236a1ba 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c | |||
@@ -9,7 +9,7 @@ | |||
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
15 | #include <linux/netfilter/x_tables.h> | 15 | #include <linux/netfilter/x_tables.h> |
@@ -88,7 +88,7 @@ extract_icmp_fields(const struct sk_buff *skb, | |||
88 | 88 | ||
89 | 89 | ||
90 | static bool | 90 | static bool |
91 | socket_match(const struct sk_buff *skb, const struct xt_match_param *par, | 91 | socket_match(const struct sk_buff *skb, struct xt_action_param *par, |
92 | const struct xt_socket_mtinfo1 *info) | 92 | const struct xt_socket_mtinfo1 *info) |
93 | { | 93 | { |
94 | const struct iphdr *iph = ip_hdr(skb); | 94 | const struct iphdr *iph = ip_hdr(skb); |
@@ -165,8 +165,7 @@ socket_match(const struct sk_buff *skb, const struct xt_match_param *par, | |||
165 | sk = NULL; | 165 | sk = NULL; |
166 | } | 166 | } |
167 | 167 | ||
168 | pr_debug("socket match: proto %u %08x:%u -> %08x:%u " | 168 | pr_debug("proto %u %08x:%u -> %08x:%u (orig %08x:%u) sock %p\n", |
169 | "(orig %08x:%u) sock %p\n", | ||
170 | protocol, ntohl(saddr), ntohs(sport), | 169 | protocol, ntohl(saddr), ntohs(sport), |
171 | ntohl(daddr), ntohs(dport), | 170 | ntohl(daddr), ntohs(dport), |
172 | ntohl(iph->daddr), hp ? ntohs(hp->dest) : 0, sk); | 171 | ntohl(iph->daddr), hp ? ntohs(hp->dest) : 0, sk); |
@@ -175,13 +174,13 @@ socket_match(const struct sk_buff *skb, const struct xt_match_param *par, | |||
175 | } | 174 | } |
176 | 175 | ||
177 | static bool | 176 | static bool |
178 | socket_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par) | 177 | socket_mt_v0(const struct sk_buff *skb, struct xt_action_param *par) |
179 | { | 178 | { |
180 | return socket_match(skb, par, NULL); | 179 | return socket_match(skb, par, NULL); |
181 | } | 180 | } |
182 | 181 | ||
183 | static bool | 182 | static bool |
184 | socket_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par) | 183 | socket_mt_v1(const struct sk_buff *skb, struct xt_action_param *par) |
185 | { | 184 | { |
186 | return socket_match(skb, par, par->matchinfo); | 185 | return socket_match(skb, par, par->matchinfo); |
187 | } | 186 | } |
diff --git a/net/netfilter/xt_state.c b/net/netfilter/xt_state.c index 4c946cbd731f..e12e053d3782 100644 --- a/net/netfilter/xt_state.c +++ b/net/netfilter/xt_state.c | |||
@@ -21,7 +21,7 @@ MODULE_ALIAS("ipt_state"); | |||
21 | MODULE_ALIAS("ip6t_state"); | 21 | MODULE_ALIAS("ip6t_state"); |
22 | 22 | ||
23 | static bool | 23 | static bool |
24 | state_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 24 | state_mt(const struct sk_buff *skb, struct xt_action_param *par) |
25 | { | 25 | { |
26 | const struct xt_state_info *sinfo = par->matchinfo; | 26 | const struct xt_state_info *sinfo = par->matchinfo; |
27 | enum ip_conntrack_info ctinfo; | 27 | enum ip_conntrack_info ctinfo; |
@@ -37,50 +37,40 @@ state_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
37 | return (sinfo->statemask & statebit); | 37 | return (sinfo->statemask & statebit); |
38 | } | 38 | } |
39 | 39 | ||
40 | static bool state_mt_check(const struct xt_mtchk_param *par) | 40 | static int state_mt_check(const struct xt_mtchk_param *par) |
41 | { | 41 | { |
42 | if (nf_ct_l3proto_try_module_get(par->match->family) < 0) { | 42 | int ret; |
43 | printk(KERN_WARNING "can't load conntrack support for " | 43 | |
44 | "proto=%u\n", par->match->family); | 44 | ret = nf_ct_l3proto_try_module_get(par->family); |
45 | return false; | 45 | if (ret < 0) |
46 | } | 46 | pr_info("cannot load conntrack support for proto=%u\n", |
47 | return true; | 47 | par->family); |
48 | return ret; | ||
48 | } | 49 | } |
49 | 50 | ||
50 | static void state_mt_destroy(const struct xt_mtdtor_param *par) | 51 | static void state_mt_destroy(const struct xt_mtdtor_param *par) |
51 | { | 52 | { |
52 | nf_ct_l3proto_module_put(par->match->family); | 53 | nf_ct_l3proto_module_put(par->family); |
53 | } | 54 | } |
54 | 55 | ||
55 | static struct xt_match state_mt_reg[] __read_mostly = { | 56 | static struct xt_match state_mt_reg __read_mostly = { |
56 | { | 57 | .name = "state", |
57 | .name = "state", | 58 | .family = NFPROTO_UNSPEC, |
58 | .family = NFPROTO_IPV4, | 59 | .checkentry = state_mt_check, |
59 | .checkentry = state_mt_check, | 60 | .match = state_mt, |
60 | .match = state_mt, | 61 | .destroy = state_mt_destroy, |
61 | .destroy = state_mt_destroy, | 62 | .matchsize = sizeof(struct xt_state_info), |
62 | .matchsize = sizeof(struct xt_state_info), | 63 | .me = THIS_MODULE, |
63 | .me = THIS_MODULE, | ||
64 | }, | ||
65 | { | ||
66 | .name = "state", | ||
67 | .family = NFPROTO_IPV6, | ||
68 | .checkentry = state_mt_check, | ||
69 | .match = state_mt, | ||
70 | .destroy = state_mt_destroy, | ||
71 | .matchsize = sizeof(struct xt_state_info), | ||
72 | .me = THIS_MODULE, | ||
73 | }, | ||
74 | }; | 64 | }; |
75 | 65 | ||
76 | static int __init state_mt_init(void) | 66 | static int __init state_mt_init(void) |
77 | { | 67 | { |
78 | return xt_register_matches(state_mt_reg, ARRAY_SIZE(state_mt_reg)); | 68 | return xt_register_match(&state_mt_reg); |
79 | } | 69 | } |
80 | 70 | ||
81 | static void __exit state_mt_exit(void) | 71 | static void __exit state_mt_exit(void) |
82 | { | 72 | { |
83 | xt_unregister_matches(state_mt_reg, ARRAY_SIZE(state_mt_reg)); | 73 | xt_unregister_match(&state_mt_reg); |
84 | } | 74 | } |
85 | 75 | ||
86 | module_init(state_mt_init); | 76 | module_init(state_mt_init); |
diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c index 937ce0633e99..96e62b8fd6b1 100644 --- a/net/netfilter/xt_statistic.c +++ b/net/netfilter/xt_statistic.c | |||
@@ -30,7 +30,7 @@ MODULE_ALIAS("ip6t_statistic"); | |||
30 | static DEFINE_SPINLOCK(nth_lock); | 30 | static DEFINE_SPINLOCK(nth_lock); |
31 | 31 | ||
32 | static bool | 32 | static bool |
33 | statistic_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 33 | statistic_mt(const struct sk_buff *skb, struct xt_action_param *par) |
34 | { | 34 | { |
35 | const struct xt_statistic_info *info = par->matchinfo; | 35 | const struct xt_statistic_info *info = par->matchinfo; |
36 | bool ret = info->flags & XT_STATISTIC_INVERT; | 36 | bool ret = info->flags & XT_STATISTIC_INVERT; |
@@ -53,22 +53,20 @@ statistic_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
53 | return ret; | 53 | return ret; |
54 | } | 54 | } |
55 | 55 | ||
56 | static bool statistic_mt_check(const struct xt_mtchk_param *par) | 56 | static int statistic_mt_check(const struct xt_mtchk_param *par) |
57 | { | 57 | { |
58 | struct xt_statistic_info *info = par->matchinfo; | 58 | struct xt_statistic_info *info = par->matchinfo; |
59 | 59 | ||
60 | if (info->mode > XT_STATISTIC_MODE_MAX || | 60 | if (info->mode > XT_STATISTIC_MODE_MAX || |
61 | info->flags & ~XT_STATISTIC_MASK) | 61 | info->flags & ~XT_STATISTIC_MASK) |
62 | return false; | 62 | return -EINVAL; |
63 | 63 | ||
64 | info->master = kzalloc(sizeof(*info->master), GFP_KERNEL); | 64 | info->master = kzalloc(sizeof(*info->master), GFP_KERNEL); |
65 | if (info->master == NULL) { | 65 | if (info->master == NULL) |
66 | printk(KERN_ERR KBUILD_MODNAME ": Out of memory\n"); | 66 | return -ENOMEM; |
67 | return false; | ||
68 | } | ||
69 | info->master->count = info->u.nth.count; | 67 | info->master->count = info->u.nth.count; |
70 | 68 | ||
71 | return true; | 69 | return 0; |
72 | } | 70 | } |
73 | 71 | ||
74 | static void statistic_mt_destroy(const struct xt_mtdtor_param *par) | 72 | static void statistic_mt_destroy(const struct xt_mtdtor_param *par) |
diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c index 96801ffd8af8..d3c48b14ab94 100644 --- a/net/netfilter/xt_string.c +++ b/net/netfilter/xt_string.c | |||
@@ -23,16 +23,14 @@ MODULE_ALIAS("ipt_string"); | |||
23 | MODULE_ALIAS("ip6t_string"); | 23 | MODULE_ALIAS("ip6t_string"); |
24 | 24 | ||
25 | static bool | 25 | static bool |
26 | string_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 26 | string_mt(const struct sk_buff *skb, struct xt_action_param *par) |
27 | { | 27 | { |
28 | const struct xt_string_info *conf = par->matchinfo; | 28 | const struct xt_string_info *conf = par->matchinfo; |
29 | struct ts_state state; | 29 | struct ts_state state; |
30 | int invert; | 30 | bool invert; |
31 | 31 | ||
32 | memset(&state, 0, sizeof(struct ts_state)); | 32 | memset(&state, 0, sizeof(struct ts_state)); |
33 | 33 | invert = conf->u.v1.flags & XT_STRING_FLAG_INVERT; | |
34 | invert = (par->match->revision == 0 ? conf->u.v0.invert : | ||
35 | conf->u.v1.flags & XT_STRING_FLAG_INVERT); | ||
36 | 34 | ||
37 | return (skb_find_text((struct sk_buff *)skb, conf->from_offset, | 35 | return (skb_find_text((struct sk_buff *)skb, conf->from_offset, |
38 | conf->to_offset, conf->config, &state) | 36 | conf->to_offset, conf->config, &state) |
@@ -41,7 +39,7 @@ string_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
41 | 39 | ||
42 | #define STRING_TEXT_PRIV(m) ((struct xt_string_info *)(m)) | 40 | #define STRING_TEXT_PRIV(m) ((struct xt_string_info *)(m)) |
43 | 41 | ||
44 | static bool string_mt_check(const struct xt_mtchk_param *par) | 42 | static int string_mt_check(const struct xt_mtchk_param *par) |
45 | { | 43 | { |
46 | struct xt_string_info *conf = par->matchinfo; | 44 | struct xt_string_info *conf = par->matchinfo; |
47 | struct ts_config *ts_conf; | 45 | struct ts_config *ts_conf; |
@@ -49,26 +47,23 @@ static bool string_mt_check(const struct xt_mtchk_param *par) | |||
49 | 47 | ||
50 | /* Damn, can't handle this case properly with iptables... */ | 48 | /* Damn, can't handle this case properly with iptables... */ |
51 | if (conf->from_offset > conf->to_offset) | 49 | if (conf->from_offset > conf->to_offset) |
52 | return false; | 50 | return -EINVAL; |
53 | if (conf->algo[XT_STRING_MAX_ALGO_NAME_SIZE - 1] != '\0') | 51 | if (conf->algo[XT_STRING_MAX_ALGO_NAME_SIZE - 1] != '\0') |
54 | return false; | 52 | return -EINVAL; |
55 | if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE) | 53 | if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE) |
56 | return false; | 54 | return -EINVAL; |
57 | if (par->match->revision == 1) { | 55 | if (conf->u.v1.flags & |
58 | if (conf->u.v1.flags & | 56 | ~(XT_STRING_FLAG_IGNORECASE | XT_STRING_FLAG_INVERT)) |
59 | ~(XT_STRING_FLAG_IGNORECASE | XT_STRING_FLAG_INVERT)) | 57 | return -EINVAL; |
60 | return false; | 58 | if (conf->u.v1.flags & XT_STRING_FLAG_IGNORECASE) |
61 | if (conf->u.v1.flags & XT_STRING_FLAG_IGNORECASE) | 59 | flags |= TS_IGNORECASE; |
62 | flags |= TS_IGNORECASE; | ||
63 | } | ||
64 | ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen, | 60 | ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen, |
65 | GFP_KERNEL, flags); | 61 | GFP_KERNEL, flags); |
66 | if (IS_ERR(ts_conf)) | 62 | if (IS_ERR(ts_conf)) |
67 | return false; | 63 | return PTR_ERR(ts_conf); |
68 | 64 | ||
69 | conf->config = ts_conf; | 65 | conf->config = ts_conf; |
70 | 66 | return 0; | |
71 | return true; | ||
72 | } | 67 | } |
73 | 68 | ||
74 | static void string_mt_destroy(const struct xt_mtdtor_param *par) | 69 | static void string_mt_destroy(const struct xt_mtdtor_param *par) |
@@ -76,38 +71,25 @@ static void string_mt_destroy(const struct xt_mtdtor_param *par) | |||
76 | textsearch_destroy(STRING_TEXT_PRIV(par->matchinfo)->config); | 71 | textsearch_destroy(STRING_TEXT_PRIV(par->matchinfo)->config); |
77 | } | 72 | } |
78 | 73 | ||
79 | static struct xt_match xt_string_mt_reg[] __read_mostly = { | 74 | static struct xt_match xt_string_mt_reg __read_mostly = { |
80 | { | 75 | .name = "string", |
81 | .name = "string", | 76 | .revision = 1, |
82 | .revision = 0, | 77 | .family = NFPROTO_UNSPEC, |
83 | .family = NFPROTO_UNSPEC, | 78 | .checkentry = string_mt_check, |
84 | .checkentry = string_mt_check, | 79 | .match = string_mt, |
85 | .match = string_mt, | 80 | .destroy = string_mt_destroy, |
86 | .destroy = string_mt_destroy, | 81 | .matchsize = sizeof(struct xt_string_info), |
87 | .matchsize = sizeof(struct xt_string_info), | 82 | .me = THIS_MODULE, |
88 | .me = THIS_MODULE | ||
89 | }, | ||
90 | { | ||
91 | .name = "string", | ||
92 | .revision = 1, | ||
93 | .family = NFPROTO_UNSPEC, | ||
94 | .checkentry = string_mt_check, | ||
95 | .match = string_mt, | ||
96 | .destroy = string_mt_destroy, | ||
97 | .matchsize = sizeof(struct xt_string_info), | ||
98 | .me = THIS_MODULE | ||
99 | }, | ||
100 | }; | 83 | }; |
101 | 84 | ||
102 | static int __init string_mt_init(void) | 85 | static int __init string_mt_init(void) |
103 | { | 86 | { |
104 | return xt_register_matches(xt_string_mt_reg, | 87 | return xt_register_match(&xt_string_mt_reg); |
105 | ARRAY_SIZE(xt_string_mt_reg)); | ||
106 | } | 88 | } |
107 | 89 | ||
108 | static void __exit string_mt_exit(void) | 90 | static void __exit string_mt_exit(void) |
109 | { | 91 | { |
110 | xt_unregister_matches(xt_string_mt_reg, ARRAY_SIZE(xt_string_mt_reg)); | 92 | xt_unregister_match(&xt_string_mt_reg); |
111 | } | 93 | } |
112 | 94 | ||
113 | module_init(string_mt_init); | 95 | module_init(string_mt_init); |
diff --git a/net/netfilter/xt_tcpmss.c b/net/netfilter/xt_tcpmss.c index 4809b34b10f8..c53d4d18eadf 100644 --- a/net/netfilter/xt_tcpmss.c +++ b/net/netfilter/xt_tcpmss.c | |||
@@ -25,7 +25,7 @@ MODULE_ALIAS("ipt_tcpmss"); | |||
25 | MODULE_ALIAS("ip6t_tcpmss"); | 25 | MODULE_ALIAS("ip6t_tcpmss"); |
26 | 26 | ||
27 | static bool | 27 | static bool |
28 | tcpmss_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 28 | tcpmss_mt(const struct sk_buff *skb, struct xt_action_param *par) |
29 | { | 29 | { |
30 | const struct xt_tcpmss_match_info *info = par->matchinfo; | 30 | const struct xt_tcpmss_match_info *info = par->matchinfo; |
31 | const struct tcphdr *th; | 31 | const struct tcphdr *th; |
@@ -73,7 +73,7 @@ out: | |||
73 | return info->invert; | 73 | return info->invert; |
74 | 74 | ||
75 | dropit: | 75 | dropit: |
76 | *par->hotdrop = true; | 76 | par->hotdrop = true; |
77 | return false; | 77 | return false; |
78 | } | 78 | } |
79 | 79 | ||
diff --git a/net/netfilter/xt_tcpudp.c b/net/netfilter/xt_tcpudp.c index 1ebdc4934eed..c14d4645daa3 100644 --- a/net/netfilter/xt_tcpudp.c +++ b/net/netfilter/xt_tcpudp.c | |||
@@ -1,3 +1,4 @@ | |||
1 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
1 | #include <linux/types.h> | 2 | #include <linux/types.h> |
2 | #include <linux/module.h> | 3 | #include <linux/module.h> |
3 | #include <net/ip.h> | 4 | #include <net/ip.h> |
@@ -19,13 +20,6 @@ MODULE_ALIAS("ipt_tcp"); | |||
19 | MODULE_ALIAS("ip6t_udp"); | 20 | MODULE_ALIAS("ip6t_udp"); |
20 | MODULE_ALIAS("ip6t_tcp"); | 21 | MODULE_ALIAS("ip6t_tcp"); |
21 | 22 | ||
22 | #ifdef DEBUG_IP_FIREWALL_USER | ||
23 | #define duprintf(format, args...) printk(format , ## args) | ||
24 | #else | ||
25 | #define duprintf(format, args...) | ||
26 | #endif | ||
27 | |||
28 | |||
29 | /* Returns 1 if the port is matched by the range, 0 otherwise */ | 23 | /* Returns 1 if the port is matched by the range, 0 otherwise */ |
30 | static inline bool | 24 | static inline bool |
31 | port_match(u_int16_t min, u_int16_t max, u_int16_t port, bool invert) | 25 | port_match(u_int16_t min, u_int16_t max, u_int16_t port, bool invert) |
@@ -46,7 +40,7 @@ tcp_find_option(u_int8_t option, | |||
46 | u_int8_t _opt[60 - sizeof(struct tcphdr)]; | 40 | u_int8_t _opt[60 - sizeof(struct tcphdr)]; |
47 | unsigned int i; | 41 | unsigned int i; |
48 | 42 | ||
49 | duprintf("tcp_match: finding option\n"); | 43 | pr_debug("finding option\n"); |
50 | 44 | ||
51 | if (!optlen) | 45 | if (!optlen) |
52 | return invert; | 46 | return invert; |
@@ -68,7 +62,7 @@ tcp_find_option(u_int8_t option, | |||
68 | return invert; | 62 | return invert; |
69 | } | 63 | } |
70 | 64 | ||
71 | static bool tcp_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 65 | static bool tcp_mt(const struct sk_buff *skb, struct xt_action_param *par) |
72 | { | 66 | { |
73 | const struct tcphdr *th; | 67 | const struct tcphdr *th; |
74 | struct tcphdr _tcph; | 68 | struct tcphdr _tcph; |
@@ -82,8 +76,8 @@ static bool tcp_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
82 | flag overwrite to pass the direction checks. | 76 | flag overwrite to pass the direction checks. |
83 | */ | 77 | */ |
84 | if (par->fragoff == 1) { | 78 | if (par->fragoff == 1) { |
85 | duprintf("Dropping evil TCP offset=1 frag.\n"); | 79 | pr_debug("Dropping evil TCP offset=1 frag.\n"); |
86 | *par->hotdrop = true; | 80 | par->hotdrop = true; |
87 | } | 81 | } |
88 | /* Must not be a fragment. */ | 82 | /* Must not be a fragment. */ |
89 | return false; | 83 | return false; |
@@ -95,8 +89,8 @@ static bool tcp_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
95 | if (th == NULL) { | 89 | if (th == NULL) { |
96 | /* We've been asked to examine this packet, and we | 90 | /* We've been asked to examine this packet, and we |
97 | can't. Hence, no choice but to drop. */ | 91 | can't. Hence, no choice but to drop. */ |
98 | duprintf("Dropping evil TCP offset=0 tinygram.\n"); | 92 | pr_debug("Dropping evil TCP offset=0 tinygram.\n"); |
99 | *par->hotdrop = true; | 93 | par->hotdrop = true; |
100 | return false; | 94 | return false; |
101 | } | 95 | } |
102 | 96 | ||
@@ -114,27 +108,27 @@ static bool tcp_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
114 | return false; | 108 | return false; |
115 | if (tcpinfo->option) { | 109 | if (tcpinfo->option) { |
116 | if (th->doff * 4 < sizeof(_tcph)) { | 110 | if (th->doff * 4 < sizeof(_tcph)) { |
117 | *par->hotdrop = true; | 111 | par->hotdrop = true; |
118 | return false; | 112 | return false; |
119 | } | 113 | } |
120 | if (!tcp_find_option(tcpinfo->option, skb, par->thoff, | 114 | if (!tcp_find_option(tcpinfo->option, skb, par->thoff, |
121 | th->doff*4 - sizeof(_tcph), | 115 | th->doff*4 - sizeof(_tcph), |
122 | tcpinfo->invflags & XT_TCP_INV_OPTION, | 116 | tcpinfo->invflags & XT_TCP_INV_OPTION, |
123 | par->hotdrop)) | 117 | &par->hotdrop)) |
124 | return false; | 118 | return false; |
125 | } | 119 | } |
126 | return true; | 120 | return true; |
127 | } | 121 | } |
128 | 122 | ||
129 | static bool tcp_mt_check(const struct xt_mtchk_param *par) | 123 | static int tcp_mt_check(const struct xt_mtchk_param *par) |
130 | { | 124 | { |
131 | const struct xt_tcp *tcpinfo = par->matchinfo; | 125 | const struct xt_tcp *tcpinfo = par->matchinfo; |
132 | 126 | ||
133 | /* Must specify no unknown invflags */ | 127 | /* Must specify no unknown invflags */ |
134 | return !(tcpinfo->invflags & ~XT_TCP_INV_MASK); | 128 | return (tcpinfo->invflags & ~XT_TCP_INV_MASK) ? -EINVAL : 0; |
135 | } | 129 | } |
136 | 130 | ||
137 | static bool udp_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 131 | static bool udp_mt(const struct sk_buff *skb, struct xt_action_param *par) |
138 | { | 132 | { |
139 | const struct udphdr *uh; | 133 | const struct udphdr *uh; |
140 | struct udphdr _udph; | 134 | struct udphdr _udph; |
@@ -148,8 +142,8 @@ static bool udp_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
148 | if (uh == NULL) { | 142 | if (uh == NULL) { |
149 | /* We've been asked to examine this packet, and we | 143 | /* We've been asked to examine this packet, and we |
150 | can't. Hence, no choice but to drop. */ | 144 | can't. Hence, no choice but to drop. */ |
151 | duprintf("Dropping evil UDP tinygram.\n"); | 145 | pr_debug("Dropping evil UDP tinygram.\n"); |
152 | *par->hotdrop = true; | 146 | par->hotdrop = true; |
153 | return false; | 147 | return false; |
154 | } | 148 | } |
155 | 149 | ||
@@ -161,12 +155,12 @@ static bool udp_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
161 | !!(udpinfo->invflags & XT_UDP_INV_DSTPT)); | 155 | !!(udpinfo->invflags & XT_UDP_INV_DSTPT)); |
162 | } | 156 | } |
163 | 157 | ||
164 | static bool udp_mt_check(const struct xt_mtchk_param *par) | 158 | static int udp_mt_check(const struct xt_mtchk_param *par) |
165 | { | 159 | { |
166 | const struct xt_udp *udpinfo = par->matchinfo; | 160 | const struct xt_udp *udpinfo = par->matchinfo; |
167 | 161 | ||
168 | /* Must specify no unknown invflags */ | 162 | /* Must specify no unknown invflags */ |
169 | return !(udpinfo->invflags & ~XT_UDP_INV_MASK); | 163 | return (udpinfo->invflags & ~XT_UDP_INV_MASK) ? -EINVAL : 0; |
170 | } | 164 | } |
171 | 165 | ||
172 | static struct xt_match tcpudp_mt_reg[] __read_mostly = { | 166 | static struct xt_match tcpudp_mt_reg[] __read_mostly = { |
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c index 93acaa59d108..c48975ff8ea2 100644 --- a/net/netfilter/xt_time.c +++ b/net/netfilter/xt_time.c | |||
@@ -1,7 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * xt_time | 2 | * xt_time |
3 | * Copyright © CC Computer Consultants GmbH, 2007 | 3 | * Copyright © CC Computer Consultants GmbH, 2007 |
4 | * Contact: <jengelh@computergmbh.de> | ||
5 | * | 4 | * |
6 | * based on ipt_time by Fabrice MARIE <fabrice@netfilter.org> | 5 | * based on ipt_time by Fabrice MARIE <fabrice@netfilter.org> |
7 | * This is a module which is used for time matching | 6 | * This is a module which is used for time matching |
@@ -149,11 +148,10 @@ static void localtime_3(struct xtm *r, time_t time) | |||
149 | } | 148 | } |
150 | 149 | ||
151 | r->month = i + 1; | 150 | r->month = i + 1; |
152 | return; | ||
153 | } | 151 | } |
154 | 152 | ||
155 | static bool | 153 | static bool |
156 | time_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 154 | time_mt(const struct sk_buff *skb, struct xt_action_param *par) |
157 | { | 155 | { |
158 | const struct xt_time_info *info = par->matchinfo; | 156 | const struct xt_time_info *info = par->matchinfo; |
159 | unsigned int packet_time; | 157 | unsigned int packet_time; |
@@ -218,18 +216,18 @@ time_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
218 | return true; | 216 | return true; |
219 | } | 217 | } |
220 | 218 | ||
221 | static bool time_mt_check(const struct xt_mtchk_param *par) | 219 | static int time_mt_check(const struct xt_mtchk_param *par) |
222 | { | 220 | { |
223 | const struct xt_time_info *info = par->matchinfo; | 221 | const struct xt_time_info *info = par->matchinfo; |
224 | 222 | ||
225 | if (info->daytime_start > XT_TIME_MAX_DAYTIME || | 223 | if (info->daytime_start > XT_TIME_MAX_DAYTIME || |
226 | info->daytime_stop > XT_TIME_MAX_DAYTIME) { | 224 | info->daytime_stop > XT_TIME_MAX_DAYTIME) { |
227 | printk(KERN_WARNING "xt_time: invalid argument - start or " | 225 | pr_info("invalid argument - start or " |
228 | "stop time greater than 23:59:59\n"); | 226 | "stop time greater than 23:59:59\n"); |
229 | return false; | 227 | return -EDOM; |
230 | } | 228 | } |
231 | 229 | ||
232 | return true; | 230 | return 0; |
233 | } | 231 | } |
234 | 232 | ||
235 | static struct xt_match xt_time_mt_reg __read_mostly = { | 233 | static struct xt_match xt_time_mt_reg __read_mostly = { |
@@ -264,7 +262,7 @@ static void __exit time_mt_exit(void) | |||
264 | 262 | ||
265 | module_init(time_mt_init); | 263 | module_init(time_mt_init); |
266 | module_exit(time_mt_exit); | 264 | module_exit(time_mt_exit); |
267 | MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>"); | 265 | MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>"); |
268 | MODULE_DESCRIPTION("Xtables: time-based matching"); | 266 | MODULE_DESCRIPTION("Xtables: time-based matching"); |
269 | MODULE_LICENSE("GPL"); | 267 | MODULE_LICENSE("GPL"); |
270 | MODULE_ALIAS("ipt_time"); | 268 | MODULE_ALIAS("ipt_time"); |
diff --git a/net/netfilter/xt_u32.c b/net/netfilter/xt_u32.c index 24a527624500..a95b50342dbb 100644 --- a/net/netfilter/xt_u32.c +++ b/net/netfilter/xt_u32.c | |||
@@ -3,7 +3,6 @@ | |||
3 | * | 3 | * |
4 | * Original author: Don Cohen <don@isis.cs3-inc.com> | 4 | * Original author: Don Cohen <don@isis.cs3-inc.com> |
5 | * (C) CC Computer Consultants GmbH, 2007 | 5 | * (C) CC Computer Consultants GmbH, 2007 |
6 | * Contact: <jengelh@computergmbh.de> | ||
7 | */ | 6 | */ |
8 | 7 | ||
9 | #include <linux/module.h> | 8 | #include <linux/module.h> |
@@ -87,7 +86,7 @@ static bool u32_match_it(const struct xt_u32 *data, | |||
87 | return true; | 86 | return true; |
88 | } | 87 | } |
89 | 88 | ||
90 | static bool u32_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 89 | static bool u32_mt(const struct sk_buff *skb, struct xt_action_param *par) |
91 | { | 90 | { |
92 | const struct xt_u32 *data = par->matchinfo; | 91 | const struct xt_u32 *data = par->matchinfo; |
93 | bool ret; | 92 | bool ret; |
@@ -117,7 +116,7 @@ static void __exit u32_mt_exit(void) | |||
117 | 116 | ||
118 | module_init(u32_mt_init); | 117 | module_init(u32_mt_init); |
119 | module_exit(u32_mt_exit); | 118 | module_exit(u32_mt_exit); |
120 | MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>"); | 119 | MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>"); |
121 | MODULE_DESCRIPTION("Xtables: arbitrary byte matching"); | 120 | MODULE_DESCRIPTION("Xtables: arbitrary byte matching"); |
122 | MODULE_LICENSE("GPL"); | 121 | MODULE_LICENSE("GPL"); |
123 | MODULE_ALIAS("ipt_u32"); | 122 | MODULE_ALIAS("ipt_u32"); |
diff --git a/net/netlabel/netlabel_addrlist.h b/net/netlabel/netlabel_addrlist.h index 07ae7fd82be1..1c1c093cf279 100644 --- a/net/netlabel/netlabel_addrlist.h +++ b/net/netlabel/netlabel_addrlist.h | |||
@@ -130,7 +130,6 @@ static inline void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf, | |||
130 | int src, const char *dev, | 130 | int src, const char *dev, |
131 | __be32 addr, __be32 mask) | 131 | __be32 addr, __be32 mask) |
132 | { | 132 | { |
133 | return; | ||
134 | } | 133 | } |
135 | #endif | 134 | #endif |
136 | 135 | ||
@@ -203,7 +202,6 @@ static inline void netlbl_af6list_audit_addr(struct audit_buffer *audit_buf, | |||
203 | const struct in6_addr *addr, | 202 | const struct in6_addr *addr, |
204 | const struct in6_addr *mask) | 203 | const struct in6_addr *mask) |
205 | { | 204 | { |
206 | return; | ||
207 | } | 205 | } |
208 | #endif | 206 | #endif |
209 | #endif /* IPV6 */ | 207 | #endif /* IPV6 */ |
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index a3d64aabe2f7..e2b0a680dd56 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c | |||
@@ -670,7 +670,6 @@ static void netlbl_unlhsh_condremove_iface(struct netlbl_unlhsh_iface *iface) | |||
670 | 670 | ||
671 | unlhsh_condremove_failure: | 671 | unlhsh_condremove_failure: |
672 | spin_unlock(&netlbl_unlhsh_lock); | 672 | spin_unlock(&netlbl_unlhsh_lock); |
673 | return; | ||
674 | } | 673 | } |
675 | 674 | ||
676 | /** | 675 | /** |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 6464a1972a69..a2eb965207d3 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -978,6 +978,8 @@ struct netlink_broadcast_data { | |||
978 | int delivered; | 978 | int delivered; |
979 | gfp_t allocation; | 979 | gfp_t allocation; |
980 | struct sk_buff *skb, *skb2; | 980 | struct sk_buff *skb, *skb2; |
981 | int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data); | ||
982 | void *tx_data; | ||
981 | }; | 983 | }; |
982 | 984 | ||
983 | static inline int do_one_broadcast(struct sock *sk, | 985 | static inline int do_one_broadcast(struct sock *sk, |
@@ -1020,6 +1022,9 @@ static inline int do_one_broadcast(struct sock *sk, | |||
1020 | p->failure = 1; | 1022 | p->failure = 1; |
1021 | if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR) | 1023 | if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR) |
1022 | p->delivery_failure = 1; | 1024 | p->delivery_failure = 1; |
1025 | } else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) { | ||
1026 | kfree_skb(p->skb2); | ||
1027 | p->skb2 = NULL; | ||
1023 | } else if (sk_filter(sk, p->skb2)) { | 1028 | } else if (sk_filter(sk, p->skb2)) { |
1024 | kfree_skb(p->skb2); | 1029 | kfree_skb(p->skb2); |
1025 | p->skb2 = NULL; | 1030 | p->skb2 = NULL; |
@@ -1038,8 +1043,10 @@ out: | |||
1038 | return 0; | 1043 | return 0; |
1039 | } | 1044 | } |
1040 | 1045 | ||
1041 | int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid, | 1046 | int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid, |
1042 | u32 group, gfp_t allocation) | 1047 | u32 group, gfp_t allocation, |
1048 | int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data), | ||
1049 | void *filter_data) | ||
1043 | { | 1050 | { |
1044 | struct net *net = sock_net(ssk); | 1051 | struct net *net = sock_net(ssk); |
1045 | struct netlink_broadcast_data info; | 1052 | struct netlink_broadcast_data info; |
@@ -1059,6 +1066,8 @@ int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid, | |||
1059 | info.allocation = allocation; | 1066 | info.allocation = allocation; |
1060 | info.skb = skb; | 1067 | info.skb = skb; |
1061 | info.skb2 = NULL; | 1068 | info.skb2 = NULL; |
1069 | info.tx_filter = filter; | ||
1070 | info.tx_data = filter_data; | ||
1062 | 1071 | ||
1063 | /* While we sleep in clone, do not allow to change socket list */ | 1072 | /* While we sleep in clone, do not allow to change socket list */ |
1064 | 1073 | ||
@@ -1083,6 +1092,14 @@ int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid, | |||
1083 | } | 1092 | } |
1084 | return -ESRCH; | 1093 | return -ESRCH; |
1085 | } | 1094 | } |
1095 | EXPORT_SYMBOL(netlink_broadcast_filtered); | ||
1096 | |||
1097 | int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid, | ||
1098 | u32 group, gfp_t allocation) | ||
1099 | { | ||
1100 | return netlink_broadcast_filtered(ssk, skb, pid, group, allocation, | ||
1101 | NULL, NULL); | ||
1102 | } | ||
1086 | EXPORT_SYMBOL(netlink_broadcast); | 1103 | EXPORT_SYMBOL(netlink_broadcast); |
1087 | 1104 | ||
1088 | struct netlink_set_err_data { | 1105 | struct netlink_set_err_data { |
diff --git a/net/phonet/pep.c b/net/phonet/pep.c index af4d38bc3b22..94d72e85a475 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c | |||
@@ -626,6 +626,7 @@ static void pep_sock_close(struct sock *sk, long timeout) | |||
626 | struct pep_sock *pn = pep_sk(sk); | 626 | struct pep_sock *pn = pep_sk(sk); |
627 | int ifindex = 0; | 627 | int ifindex = 0; |
628 | 628 | ||
629 | sock_hold(sk); /* keep a reference after sk_common_release() */ | ||
629 | sk_common_release(sk); | 630 | sk_common_release(sk); |
630 | 631 | ||
631 | lock_sock(sk); | 632 | lock_sock(sk); |
@@ -644,6 +645,7 @@ static void pep_sock_close(struct sock *sk, long timeout) | |||
644 | 645 | ||
645 | if (ifindex) | 646 | if (ifindex) |
646 | gprs_detach(sk); | 647 | gprs_detach(sk); |
648 | sock_put(sk); | ||
647 | } | 649 | } |
648 | 650 | ||
649 | static int pep_wait_connreq(struct sock *sk, int noblock) | 651 | static int pep_wait_connreq(struct sock *sk, int noblock) |
@@ -1043,12 +1045,12 @@ static void pep_sock_unhash(struct sock *sk) | |||
1043 | lock_sock(sk); | 1045 | lock_sock(sk); |
1044 | if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) { | 1046 | if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) { |
1045 | skparent = pn->listener; | 1047 | skparent = pn->listener; |
1046 | sk_del_node_init(sk); | ||
1047 | release_sock(sk); | 1048 | release_sock(sk); |
1048 | 1049 | ||
1049 | sk = skparent; | ||
1050 | pn = pep_sk(skparent); | 1050 | pn = pep_sk(skparent); |
1051 | lock_sock(sk); | 1051 | lock_sock(skparent); |
1052 | sk_del_node_init(sk); | ||
1053 | sk = skparent; | ||
1052 | } | 1054 | } |
1053 | /* Unhash a listening sock only when it is closed | 1055 | /* Unhash a listening sock only when it is closed |
1054 | * and all of its active connected pipes are closed. */ | 1056 | * and all of its active connected pipes are closed. */ |
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index 10ed0d55f759..f68832798db2 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c | |||
@@ -475,6 +475,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id, | |||
475 | err = rds_ib_setup_qp(conn); | 475 | err = rds_ib_setup_qp(conn); |
476 | if (err) { | 476 | if (err) { |
477 | rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err); | 477 | rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err); |
478 | mutex_unlock(&conn->c_cm_lock); | ||
478 | goto out; | 479 | goto out; |
479 | } | 480 | } |
480 | 481 | ||
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c index a9d951b4fbae..b5dd6ac39be8 100644 --- a/net/rds/iw_cm.c +++ b/net/rds/iw_cm.c | |||
@@ -452,6 +452,7 @@ int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id, | |||
452 | err = rds_iw_setup_qp(conn); | 452 | err = rds_iw_setup_qp(conn); |
453 | if (err) { | 453 | if (err) { |
454 | rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", err); | 454 | rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", err); |
455 | mutex_unlock(&conn->c_cm_lock); | ||
455 | goto out; | 456 | goto out; |
456 | } | 457 | } |
457 | 458 | ||
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c index 056256285987..c397524c039c 100644 --- a/net/rds/tcp_connect.c +++ b/net/rds/tcp_connect.c | |||
@@ -141,7 +141,7 @@ void rds_tcp_conn_shutdown(struct rds_connection *conn) | |||
141 | 141 | ||
142 | release_sock(sock->sk); | 142 | release_sock(sock->sk); |
143 | sock_release(sock); | 143 | sock_release(sock); |
144 | }; | 144 | } |
145 | 145 | ||
146 | if (tc->t_tinc) { | 146 | if (tc->t_tinc) { |
147 | rds_inc_put(&tc->t_tinc->ti_inc); | 147 | rds_inc_put(&tc->t_tinc->ti_inc); |
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 019045174fc3..972378f47f3c 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
@@ -153,7 +153,7 @@ int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb, | |||
153 | } else if (type == RTM_GETACTION) { | 153 | } else if (type == RTM_GETACTION) { |
154 | return tcf_dump_walker(skb, cb, a, hinfo); | 154 | return tcf_dump_walker(skb, cb, a, hinfo); |
155 | } else { | 155 | } else { |
156 | printk("tcf_generic_walker: unknown action %d\n", type); | 156 | WARN(1, "tcf_generic_walker: unknown action %d\n", type); |
157 | return -EINVAL; | 157 | return -EINVAL; |
158 | } | 158 | } |
159 | } | 159 | } |
@@ -403,8 +403,9 @@ void tcf_action_destroy(struct tc_action *act, int bind) | |||
403 | module_put(a->ops->owner); | 403 | module_put(a->ops->owner); |
404 | act = act->next; | 404 | act = act->next; |
405 | kfree(a); | 405 | kfree(a); |
406 | } else { /*FIXME: Remove later - catch insertion bugs*/ | 406 | } else { |
407 | printk("tcf_action_destroy: BUG? destroying NULL ops\n"); | 407 | /*FIXME: Remove later - catch insertion bugs*/ |
408 | WARN(1, "tcf_action_destroy: BUG? destroying NULL ops\n"); | ||
408 | act = act->next; | 409 | act = act->next; |
409 | kfree(a); | 410 | kfree(a); |
410 | } | 411 | } |
@@ -744,7 +745,7 @@ static struct tc_action *create_a(int i) | |||
744 | 745 | ||
745 | act = kzalloc(sizeof(*act), GFP_KERNEL); | 746 | act = kzalloc(sizeof(*act), GFP_KERNEL); |
746 | if (act == NULL) { | 747 | if (act == NULL) { |
747 | printk("create_a: failed to alloc!\n"); | 748 | pr_debug("create_a: failed to alloc!\n"); |
748 | return NULL; | 749 | return NULL; |
749 | } | 750 | } |
750 | act->order = i; | 751 | act->order = i; |
@@ -766,13 +767,13 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, | |||
766 | int err = -ENOMEM; | 767 | int err = -ENOMEM; |
767 | 768 | ||
768 | if (a == NULL) { | 769 | if (a == NULL) { |
769 | printk("tca_action_flush: couldnt create tc_action\n"); | 770 | pr_debug("tca_action_flush: couldnt create tc_action\n"); |
770 | return err; | 771 | return err; |
771 | } | 772 | } |
772 | 773 | ||
773 | skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); | 774 | skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
774 | if (!skb) { | 775 | if (!skb) { |
775 | printk("tca_action_flush: failed skb alloc\n"); | 776 | pr_debug("tca_action_flush: failed skb alloc\n"); |
776 | kfree(a); | 777 | kfree(a); |
777 | return err; | 778 | return err; |
778 | } | 779 | } |
@@ -979,7 +980,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
979 | return ret; | 980 | return ret; |
980 | 981 | ||
981 | if (tca[TCA_ACT_TAB] == NULL) { | 982 | if (tca[TCA_ACT_TAB] == NULL) { |
982 | printk("tc_ctl_action: received NO action attribs\n"); | 983 | pr_notice("tc_ctl_action: received NO action attribs\n"); |
983 | return -EINVAL; | 984 | return -EINVAL; |
984 | } | 985 | } |
985 | 986 | ||
@@ -1056,7 +1057,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) | |||
1056 | struct nlattr *kind = find_dump_kind(cb->nlh); | 1057 | struct nlattr *kind = find_dump_kind(cb->nlh); |
1057 | 1058 | ||
1058 | if (kind == NULL) { | 1059 | if (kind == NULL) { |
1059 | printk("tc_dump_action: action bad kind\n"); | 1060 | pr_info("tc_dump_action: action bad kind\n"); |
1060 | return 0; | 1061 | return 0; |
1061 | } | 1062 | } |
1062 | 1063 | ||
@@ -1069,7 +1070,8 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) | |||
1069 | a.ops = a_o; | 1070 | a.ops = a_o; |
1070 | 1071 | ||
1071 | if (a_o->walk == NULL) { | 1072 | if (a_o->walk == NULL) { |
1072 | printk("tc_dump_action: %s !capable of dumping table\n", a_o->kind); | 1073 | WARN(1, "tc_dump_action: %s !capable of dumping table\n", |
1074 | a_o->kind); | ||
1073 | goto nla_put_failure; | 1075 | goto nla_put_failure; |
1074 | } | 1076 | } |
1075 | 1077 | ||
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index e7f796aec657..8406c6654990 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c | |||
@@ -202,9 +202,9 @@ MODULE_LICENSE("GPL"); | |||
202 | static int __init gact_init_module(void) | 202 | static int __init gact_init_module(void) |
203 | { | 203 | { |
204 | #ifdef CONFIG_GACT_PROB | 204 | #ifdef CONFIG_GACT_PROB |
205 | printk("GACT probability on\n"); | 205 | printk(KERN_INFO "GACT probability on\n"); |
206 | #else | 206 | #else |
207 | printk("GACT probability NOT on\n"); | 207 | printk(KERN_INFO "GACT probability NOT on\n"); |
208 | #endif | 208 | #endif |
209 | return tcf_register_action(&act_gact_ops); | 209 | return tcf_register_action(&act_gact_ops); |
210 | } | 210 | } |
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index da27a170b6b7..c7e59e6ec349 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c | |||
@@ -47,8 +47,8 @@ static int ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int | |||
47 | 47 | ||
48 | target = xt_request_find_target(AF_INET, t->u.user.name, | 48 | target = xt_request_find_target(AF_INET, t->u.user.name, |
49 | t->u.user.revision); | 49 | t->u.user.revision); |
50 | if (!target) | 50 | if (IS_ERR(target)) |
51 | return -ENOENT; | 51 | return PTR_ERR(target); |
52 | 52 | ||
53 | t->u.kernel.target = target; | 53 | t->u.kernel.target = target; |
54 | par.table = table; | 54 | par.table = table; |
@@ -199,7 +199,7 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a, | |||
199 | { | 199 | { |
200 | int ret = 0, result = 0; | 200 | int ret = 0, result = 0; |
201 | struct tcf_ipt *ipt = a->priv; | 201 | struct tcf_ipt *ipt = a->priv; |
202 | struct xt_target_param par; | 202 | struct xt_action_param par; |
203 | 203 | ||
204 | if (skb_cloned(skb)) { | 204 | if (skb_cloned(skb)) { |
205 | if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) | 205 | if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) |
@@ -235,7 +235,8 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a, | |||
235 | break; | 235 | break; |
236 | default: | 236 | default: |
237 | if (net_ratelimit()) | 237 | if (net_ratelimit()) |
238 | printk("Bogus netfilter code %d assume ACCEPT\n", ret); | 238 | pr_notice("tc filter: Bogus netfilter code" |
239 | " %d assume ACCEPT\n", ret); | ||
239 | result = TC_POLICE_OK; | 240 | result = TC_POLICE_OK; |
240 | break; | 241 | break; |
241 | } | 242 | } |
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index c046682054eb..c0b6863e3b87 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c | |||
@@ -164,8 +164,8 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a, | |||
164 | dev = m->tcfm_dev; | 164 | dev = m->tcfm_dev; |
165 | if (!(dev->flags & IFF_UP)) { | 165 | if (!(dev->flags & IFF_UP)) { |
166 | if (net_ratelimit()) | 166 | if (net_ratelimit()) |
167 | printk("mirred to Houston: device %s is gone!\n", | 167 | pr_notice("tc mirred to Houston: device %s is gone!\n", |
168 | dev->name); | 168 | dev->name); |
169 | goto out; | 169 | goto out; |
170 | } | 170 | } |
171 | 171 | ||
@@ -252,7 +252,7 @@ MODULE_LICENSE("GPL"); | |||
252 | 252 | ||
253 | static int __init mirred_init_module(void) | 253 | static int __init mirred_init_module(void) |
254 | { | 254 | { |
255 | printk("Mirror/redirect action on\n"); | 255 | pr_info("Mirror/redirect action on\n"); |
256 | return tcf_register_action(&act_mirred_ops); | 256 | return tcf_register_action(&act_mirred_ops); |
257 | } | 257 | } |
258 | 258 | ||
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index d885ba311564..570949417f38 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c | |||
@@ -159,6 +159,9 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a, | |||
159 | iph->daddr = new_addr; | 159 | iph->daddr = new_addr; |
160 | 160 | ||
161 | csum_replace4(&iph->check, addr, new_addr); | 161 | csum_replace4(&iph->check, addr, new_addr); |
162 | } else if ((iph->frag_off & htons(IP_OFFSET)) || | ||
163 | iph->protocol != IPPROTO_ICMP) { | ||
164 | goto out; | ||
162 | } | 165 | } |
163 | 166 | ||
164 | ihl = iph->ihl * 4; | 167 | ihl = iph->ihl * 4; |
@@ -247,6 +250,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a, | |||
247 | break; | 250 | break; |
248 | } | 251 | } |
249 | 252 | ||
253 | out: | ||
250 | return action; | 254 | return action; |
251 | 255 | ||
252 | drop: | 256 | drop: |
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index b7dcfedc802e..50e3d945e1f4 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c | |||
@@ -125,7 +125,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, | |||
125 | { | 125 | { |
126 | struct tcf_pedit *p = a->priv; | 126 | struct tcf_pedit *p = a->priv; |
127 | int i, munged = 0; | 127 | int i, munged = 0; |
128 | u8 *pptr; | 128 | unsigned int off; |
129 | 129 | ||
130 | if (!(skb->tc_verd & TC_OK2MUNGE)) { | 130 | if (!(skb->tc_verd & TC_OK2MUNGE)) { |
131 | /* should we set skb->cloned? */ | 131 | /* should we set skb->cloned? */ |
@@ -134,7 +134,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, | |||
134 | } | 134 | } |
135 | } | 135 | } |
136 | 136 | ||
137 | pptr = skb_network_header(skb); | 137 | off = skb_network_offset(skb); |
138 | 138 | ||
139 | spin_lock(&p->tcf_lock); | 139 | spin_lock(&p->tcf_lock); |
140 | 140 | ||
@@ -144,41 +144,46 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, | |||
144 | struct tc_pedit_key *tkey = p->tcfp_keys; | 144 | struct tc_pedit_key *tkey = p->tcfp_keys; |
145 | 145 | ||
146 | for (i = p->tcfp_nkeys; i > 0; i--, tkey++) { | 146 | for (i = p->tcfp_nkeys; i > 0; i--, tkey++) { |
147 | u32 *ptr; | 147 | u32 *ptr, _data; |
148 | int offset = tkey->off; | 148 | int offset = tkey->off; |
149 | 149 | ||
150 | if (tkey->offmask) { | 150 | if (tkey->offmask) { |
151 | if (skb->len > tkey->at) { | 151 | char *d, _d; |
152 | char *j = pptr + tkey->at; | 152 | |
153 | offset += ((*j & tkey->offmask) >> | 153 | d = skb_header_pointer(skb, off + tkey->at, 1, |
154 | tkey->shift); | 154 | &_d); |
155 | } else { | 155 | if (!d) |
156 | goto bad; | 156 | goto bad; |
157 | } | 157 | offset += (*d & tkey->offmask) >> tkey->shift; |
158 | } | 158 | } |
159 | 159 | ||
160 | if (offset % 4) { | 160 | if (offset % 4) { |
161 | printk("offset must be on 32 bit boundaries\n"); | 161 | pr_info("tc filter pedit" |
162 | " offset must be on 32 bit boundaries\n"); | ||
162 | goto bad; | 163 | goto bad; |
163 | } | 164 | } |
164 | if (offset > 0 && offset > skb->len) { | 165 | if (offset > 0 && offset > skb->len) { |
165 | printk("offset %d cant exceed pkt length %d\n", | 166 | pr_info("tc filter pedit" |
167 | " offset %d cant exceed pkt length %d\n", | ||
166 | offset, skb->len); | 168 | offset, skb->len); |
167 | goto bad; | 169 | goto bad; |
168 | } | 170 | } |
169 | 171 | ||
170 | ptr = (u32 *)(pptr+offset); | 172 | ptr = skb_header_pointer(skb, off + offset, 4, &_data); |
173 | if (!ptr) | ||
174 | goto bad; | ||
171 | /* just do it, baby */ | 175 | /* just do it, baby */ |
172 | *ptr = ((*ptr & tkey->mask) ^ tkey->val); | 176 | *ptr = ((*ptr & tkey->mask) ^ tkey->val); |
177 | if (ptr == &_data) | ||
178 | skb_store_bits(skb, off + offset, ptr, 4); | ||
173 | munged++; | 179 | munged++; |
174 | } | 180 | } |
175 | 181 | ||
176 | if (munged) | 182 | if (munged) |
177 | skb->tc_verd = SET_TC_MUNGED(skb->tc_verd); | 183 | skb->tc_verd = SET_TC_MUNGED(skb->tc_verd); |
178 | goto done; | 184 | goto done; |
179 | } else { | 185 | } else |
180 | printk("pedit BUG: index %d\n", p->tcf_index); | 186 | WARN(1, "pedit BUG: index %d\n", p->tcf_index); |
181 | } | ||
182 | 187 | ||
183 | bad: | 188 | bad: |
184 | p->tcf_qstats.overlimits++; | 189 | p->tcf_qstats.overlimits++; |
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 622ca809c15c..1b4bc691d7d1 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c | |||
@@ -49,7 +49,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result | |||
49 | * Example if this was the 3rd packet and the string was "hello" | 49 | * Example if this was the 3rd packet and the string was "hello" |
50 | * then it would look like "hello_3" (without quotes) | 50 | * then it would look like "hello_3" (without quotes) |
51 | **/ | 51 | **/ |
52 | printk("simple: %s_%d\n", | 52 | pr_info("simple: %s_%d\n", |
53 | (char *)d->tcfd_defdata, d->tcf_bstats.packets); | 53 | (char *)d->tcfd_defdata, d->tcf_bstats.packets); |
54 | spin_unlock(&d->tcf_lock); | 54 | spin_unlock(&d->tcf_lock); |
55 | return d->tcf_action; | 55 | return d->tcf_action; |
@@ -205,7 +205,7 @@ static int __init simp_init_module(void) | |||
205 | { | 205 | { |
206 | int ret = tcf_register_action(&act_simp_ops); | 206 | int ret = tcf_register_action(&act_simp_ops); |
207 | if (!ret) | 207 | if (!ret) |
208 | printk("Simple TC action Loaded\n"); | 208 | pr_info("Simple TC action Loaded\n"); |
209 | return ret; | 209 | return ret; |
210 | } | 210 | } |
211 | 211 | ||
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index 221180384fd7..78ef2c5e130b 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c | |||
@@ -16,14 +16,11 @@ | |||
16 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
17 | #include <linux/skbuff.h> | 17 | #include <linux/skbuff.h> |
18 | #include <linux/cgroup.h> | 18 | #include <linux/cgroup.h> |
19 | #include <linux/rcupdate.h> | ||
19 | #include <net/rtnetlink.h> | 20 | #include <net/rtnetlink.h> |
20 | #include <net/pkt_cls.h> | 21 | #include <net/pkt_cls.h> |
21 | 22 | #include <net/sock.h> | |
22 | struct cgroup_cls_state | 23 | #include <net/cls_cgroup.h> |
23 | { | ||
24 | struct cgroup_subsys_state css; | ||
25 | u32 classid; | ||
26 | }; | ||
27 | 24 | ||
28 | static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, | 25 | static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, |
29 | struct cgroup *cgrp); | 26 | struct cgroup *cgrp); |
@@ -112,6 +109,10 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp, | |||
112 | struct cls_cgroup_head *head = tp->root; | 109 | struct cls_cgroup_head *head = tp->root; |
113 | u32 classid; | 110 | u32 classid; |
114 | 111 | ||
112 | rcu_read_lock(); | ||
113 | classid = task_cls_state(current)->classid; | ||
114 | rcu_read_unlock(); | ||
115 | |||
115 | /* | 116 | /* |
116 | * Due to the nature of the classifier it is required to ignore all | 117 | * Due to the nature of the classifier it is required to ignore all |
117 | * packets originating from softirq context as accessing `current' | 118 | * packets originating from softirq context as accessing `current' |
@@ -122,12 +123,12 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp, | |||
122 | * calls by looking at the number of nested bh disable calls because | 123 | * calls by looking at the number of nested bh disable calls because |
123 | * softirqs always disables bh. | 124 | * softirqs always disables bh. |
124 | */ | 125 | */ |
125 | if (softirq_count() != SOFTIRQ_OFFSET) | 126 | if (softirq_count() != SOFTIRQ_OFFSET) { |
126 | return -1; | 127 | /* If there is an sk_classid we'll use that. */ |
127 | 128 | if (!skb->sk) | |
128 | rcu_read_lock(); | 129 | return -1; |
129 | classid = task_cls_state(current)->classid; | 130 | classid = skb->sk->sk_classid; |
130 | rcu_read_unlock(); | 131 | } |
131 | 132 | ||
132 | if (!classid) | 133 | if (!classid) |
133 | return -1; | 134 | return -1; |
@@ -289,18 +290,35 @@ static struct tcf_proto_ops cls_cgroup_ops __read_mostly = { | |||
289 | 290 | ||
290 | static int __init init_cgroup_cls(void) | 291 | static int __init init_cgroup_cls(void) |
291 | { | 292 | { |
292 | int ret = register_tcf_proto_ops(&cls_cgroup_ops); | 293 | int ret; |
293 | if (ret) | 294 | |
294 | return ret; | ||
295 | ret = cgroup_load_subsys(&net_cls_subsys); | 295 | ret = cgroup_load_subsys(&net_cls_subsys); |
296 | if (ret) | 296 | if (ret) |
297 | unregister_tcf_proto_ops(&cls_cgroup_ops); | 297 | goto out; |
298 | |||
299 | #ifndef CONFIG_NET_CLS_CGROUP | ||
300 | /* We can't use rcu_assign_pointer because this is an int. */ | ||
301 | smp_wmb(); | ||
302 | net_cls_subsys_id = net_cls_subsys.subsys_id; | ||
303 | #endif | ||
304 | |||
305 | ret = register_tcf_proto_ops(&cls_cgroup_ops); | ||
306 | if (ret) | ||
307 | cgroup_unload_subsys(&net_cls_subsys); | ||
308 | |||
309 | out: | ||
298 | return ret; | 310 | return ret; |
299 | } | 311 | } |
300 | 312 | ||
301 | static void __exit exit_cgroup_cls(void) | 313 | static void __exit exit_cgroup_cls(void) |
302 | { | 314 | { |
303 | unregister_tcf_proto_ops(&cls_cgroup_ops); | 315 | unregister_tcf_proto_ops(&cls_cgroup_ops); |
316 | |||
317 | #ifndef CONFIG_NET_CLS_CGROUP | ||
318 | net_cls_subsys_id = -1; | ||
319 | synchronize_rcu(); | ||
320 | #endif | ||
321 | |||
304 | cgroup_unload_subsys(&net_cls_subsys); | 322 | cgroup_unload_subsys(&net_cls_subsys); |
305 | } | 323 | } |
306 | 324 | ||
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 6ed61b10e002..f73542d2cdd0 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c | |||
@@ -602,7 +602,6 @@ static unsigned long flow_get(struct tcf_proto *tp, u32 handle) | |||
602 | 602 | ||
603 | static void flow_put(struct tcf_proto *tp, unsigned long f) | 603 | static void flow_put(struct tcf_proto *tp, unsigned long f) |
604 | { | 604 | { |
605 | return; | ||
606 | } | 605 | } |
607 | 606 | ||
608 | static int flow_dump(struct tcf_proto *tp, unsigned long fh, | 607 | static int flow_dump(struct tcf_proto *tp, unsigned long fh, |
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 593eac056e8d..4f522143811e 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -98,11 +98,11 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re | |||
98 | { | 98 | { |
99 | struct { | 99 | struct { |
100 | struct tc_u_knode *knode; | 100 | struct tc_u_knode *knode; |
101 | u8 *ptr; | 101 | unsigned int off; |
102 | } stack[TC_U32_MAXDEPTH]; | 102 | } stack[TC_U32_MAXDEPTH]; |
103 | 103 | ||
104 | struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root; | 104 | struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root; |
105 | u8 *ptr = skb_network_header(skb); | 105 | unsigned int off = skb_network_offset(skb); |
106 | struct tc_u_knode *n; | 106 | struct tc_u_knode *n; |
107 | int sdepth = 0; | 107 | int sdepth = 0; |
108 | int off2 = 0; | 108 | int off2 = 0; |
@@ -134,8 +134,14 @@ next_knode: | |||
134 | #endif | 134 | #endif |
135 | 135 | ||
136 | for (i = n->sel.nkeys; i>0; i--, key++) { | 136 | for (i = n->sel.nkeys; i>0; i--, key++) { |
137 | 137 | unsigned int toff; | |
138 | if ((*(__be32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) { | 138 | __be32 *data, _data; |
139 | |||
140 | toff = off + key->off + (off2 & key->offmask); | ||
141 | data = skb_header_pointer(skb, toff, 4, &_data); | ||
142 | if (!data) | ||
143 | goto out; | ||
144 | if ((*data ^ key->val) & key->mask) { | ||
139 | n = n->next; | 145 | n = n->next; |
140 | goto next_knode; | 146 | goto next_knode; |
141 | } | 147 | } |
@@ -174,29 +180,45 @@ check_terminal: | |||
174 | if (sdepth >= TC_U32_MAXDEPTH) | 180 | if (sdepth >= TC_U32_MAXDEPTH) |
175 | goto deadloop; | 181 | goto deadloop; |
176 | stack[sdepth].knode = n; | 182 | stack[sdepth].knode = n; |
177 | stack[sdepth].ptr = ptr; | 183 | stack[sdepth].off = off; |
178 | sdepth++; | 184 | sdepth++; |
179 | 185 | ||
180 | ht = n->ht_down; | 186 | ht = n->ht_down; |
181 | sel = 0; | 187 | sel = 0; |
182 | if (ht->divisor) | 188 | if (ht->divisor) { |
183 | sel = ht->divisor&u32_hash_fold(*(__be32*)(ptr+n->sel.hoff), &n->sel,n->fshift); | 189 | __be32 *data, _data; |
184 | 190 | ||
191 | data = skb_header_pointer(skb, off + n->sel.hoff, 4, | ||
192 | &_data); | ||
193 | if (!data) | ||
194 | goto out; | ||
195 | sel = ht->divisor & u32_hash_fold(*data, &n->sel, | ||
196 | n->fshift); | ||
197 | } | ||
185 | if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT))) | 198 | if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT))) |
186 | goto next_ht; | 199 | goto next_ht; |
187 | 200 | ||
188 | if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) { | 201 | if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) { |
189 | off2 = n->sel.off + 3; | 202 | off2 = n->sel.off + 3; |
190 | if (n->sel.flags&TC_U32_VAROFFSET) | 203 | if (n->sel.flags & TC_U32_VAROFFSET) { |
191 | off2 += ntohs(n->sel.offmask & *(__be16*)(ptr+n->sel.offoff)) >>n->sel.offshift; | 204 | __be16 *data, _data; |
205 | |||
206 | data = skb_header_pointer(skb, | ||
207 | off + n->sel.offoff, | ||
208 | 2, &_data); | ||
209 | if (!data) | ||
210 | goto out; | ||
211 | off2 += ntohs(n->sel.offmask & *data) >> | ||
212 | n->sel.offshift; | ||
213 | } | ||
192 | off2 &= ~3; | 214 | off2 &= ~3; |
193 | } | 215 | } |
194 | if (n->sel.flags&TC_U32_EAT) { | 216 | if (n->sel.flags&TC_U32_EAT) { |
195 | ptr += off2; | 217 | off += off2; |
196 | off2 = 0; | 218 | off2 = 0; |
197 | } | 219 | } |
198 | 220 | ||
199 | if (ptr < skb_tail_pointer(skb)) | 221 | if (off < skb->len) |
200 | goto next_ht; | 222 | goto next_ht; |
201 | } | 223 | } |
202 | 224 | ||
@@ -204,14 +226,15 @@ check_terminal: | |||
204 | if (sdepth--) { | 226 | if (sdepth--) { |
205 | n = stack[sdepth].knode; | 227 | n = stack[sdepth].knode; |
206 | ht = n->ht_up; | 228 | ht = n->ht_up; |
207 | ptr = stack[sdepth].ptr; | 229 | off = stack[sdepth].off; |
208 | goto check_terminal; | 230 | goto check_terminal; |
209 | } | 231 | } |
232 | out: | ||
210 | return -1; | 233 | return -1; |
211 | 234 | ||
212 | deadloop: | 235 | deadloop: |
213 | if (net_ratelimit()) | 236 | if (net_ratelimit()) |
214 | printk("cls_u32: dead loop\n"); | 237 | printk(KERN_WARNING "cls_u32: dead loop\n"); |
215 | return -1; | 238 | return -1; |
216 | } | 239 | } |
217 | 240 | ||
@@ -768,15 +791,15 @@ static struct tcf_proto_ops cls_u32_ops __read_mostly = { | |||
768 | 791 | ||
769 | static int __init init_u32(void) | 792 | static int __init init_u32(void) |
770 | { | 793 | { |
771 | printk("u32 classifier\n"); | 794 | pr_info("u32 classifier\n"); |
772 | #ifdef CONFIG_CLS_U32_PERF | 795 | #ifdef CONFIG_CLS_U32_PERF |
773 | printk(" Performance counters on\n"); | 796 | pr_info(" Performance counters on\n"); |
774 | #endif | 797 | #endif |
775 | #ifdef CONFIG_NET_CLS_IND | 798 | #ifdef CONFIG_NET_CLS_IND |
776 | printk(" input device check on\n"); | 799 | pr_info(" input device check on\n"); |
777 | #endif | 800 | #endif |
778 | #ifdef CONFIG_NET_CLS_ACT | 801 | #ifdef CONFIG_NET_CLS_ACT |
779 | printk(" Actions configured\n"); | 802 | pr_info(" Actions configured\n"); |
780 | #endif | 803 | #endif |
781 | return register_tcf_proto_ops(&cls_u32_ops); | 804 | return register_tcf_proto_ops(&cls_u32_ops); |
782 | } | 805 | } |
diff --git a/net/sched/ematch.c b/net/sched/ematch.c index e782bdeedc58..5e37da961f80 100644 --- a/net/sched/ematch.c +++ b/net/sched/ematch.c | |||
@@ -527,7 +527,8 @@ pop_stack: | |||
527 | 527 | ||
528 | stack_overflow: | 528 | stack_overflow: |
529 | if (net_ratelimit()) | 529 | if (net_ratelimit()) |
530 | printk("Local stack overflow, increase NET_EMATCH_STACK\n"); | 530 | printk(KERN_WARNING "tc ematch: local stack overflow," |
531 | " increase NET_EMATCH_STACK\n"); | ||
531 | return -1; | 532 | return -1; |
532 | } | 533 | } |
533 | EXPORT_SYMBOL(__tcf_em_tree_match); | 534 | EXPORT_SYMBOL(__tcf_em_tree_match); |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 9839b26674f4..b9e8c3b7d406 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -1195,6 +1195,11 @@ nla_put_failure: | |||
1195 | return -1; | 1195 | return -1; |
1196 | } | 1196 | } |
1197 | 1197 | ||
1198 | static bool tc_qdisc_dump_ignore(struct Qdisc *q) | ||
1199 | { | ||
1200 | return (q->flags & TCQ_F_BUILTIN) ? true : false; | ||
1201 | } | ||
1202 | |||
1198 | static int qdisc_notify(struct net *net, struct sk_buff *oskb, | 1203 | static int qdisc_notify(struct net *net, struct sk_buff *oskb, |
1199 | struct nlmsghdr *n, u32 clid, | 1204 | struct nlmsghdr *n, u32 clid, |
1200 | struct Qdisc *old, struct Qdisc *new) | 1205 | struct Qdisc *old, struct Qdisc *new) |
@@ -1206,11 +1211,11 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb, | |||
1206 | if (!skb) | 1211 | if (!skb) |
1207 | return -ENOBUFS; | 1212 | return -ENOBUFS; |
1208 | 1213 | ||
1209 | if (old && old->handle) { | 1214 | if (old && !tc_qdisc_dump_ignore(old)) { |
1210 | if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0) | 1215 | if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0) |
1211 | goto err_out; | 1216 | goto err_out; |
1212 | } | 1217 | } |
1213 | if (new) { | 1218 | if (new && !tc_qdisc_dump_ignore(new)) { |
1214 | if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) | 1219 | if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) |
1215 | goto err_out; | 1220 | goto err_out; |
1216 | } | 1221 | } |
@@ -1223,11 +1228,6 @@ err_out: | |||
1223 | return -EINVAL; | 1228 | return -EINVAL; |
1224 | } | 1229 | } |
1225 | 1230 | ||
1226 | static bool tc_qdisc_dump_ignore(struct Qdisc *q) | ||
1227 | { | ||
1228 | return (q->flags & TCQ_F_BUILTIN) ? true : false; | ||
1229 | } | ||
1230 | |||
1231 | static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb, | 1231 | static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb, |
1232 | struct netlink_callback *cb, | 1232 | struct netlink_callback *cb, |
1233 | int *q_idx_p, int s_q_idx) | 1233 | int *q_idx_p, int s_q_idx) |
@@ -1637,9 +1637,12 @@ reclassify: | |||
1637 | tp = otp; | 1637 | tp = otp; |
1638 | 1638 | ||
1639 | if (verd++ >= MAX_REC_LOOP) { | 1639 | if (verd++ >= MAX_REC_LOOP) { |
1640 | printk("rule prio %u protocol %02x reclassify loop, " | 1640 | if (net_ratelimit()) |
1641 | "packet dropped\n", | 1641 | printk(KERN_NOTICE |
1642 | tp->prio&0xffff, ntohs(tp->protocol)); | 1642 | "%s: packet reclassify loop" |
1643 | " rule prio %u protocol %02x\n", | ||
1644 | tp->q->ops->id, | ||
1645 | tp->prio & 0xffff, ntohs(tp->protocol)); | ||
1643 | return TC_ACT_SHOT; | 1646 | return TC_ACT_SHOT; |
1644 | } | 1647 | } |
1645 | skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd); | 1648 | skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd); |
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index a969b111bd76..a63029ef3edd 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/list.h> | 26 | #include <linux/list.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <net/pkt_sched.h> | 28 | #include <net/pkt_sched.h> |
29 | #include <net/dst.h> | ||
29 | 30 | ||
30 | /* Main transmission queue. */ | 31 | /* Main transmission queue. */ |
31 | 32 | ||
@@ -40,6 +41,7 @@ | |||
40 | 41 | ||
41 | static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) | 42 | static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) |
42 | { | 43 | { |
44 | skb_dst_force(skb); | ||
43 | q->gso_skb = skb; | 45 | q->gso_skb = skb; |
44 | q->qstats.requeues++; | 46 | q->qstats.requeues++; |
45 | q->q.qlen++; /* it's still part of the queue */ | 47 | q->q.qlen++; /* it's still part of the queue */ |
@@ -179,7 +181,7 @@ static inline int qdisc_restart(struct Qdisc *q) | |||
179 | skb = dequeue_skb(q); | 181 | skb = dequeue_skb(q); |
180 | if (unlikely(!skb)) | 182 | if (unlikely(!skb)) |
181 | return 0; | 183 | return 0; |
182 | 184 | WARN_ON_ONCE(skb_dst_is_noref(skb)); | |
183 | root_lock = qdisc_lock(q); | 185 | root_lock = qdisc_lock(q); |
184 | dev = qdisc_dev(q); | 186 | dev = qdisc_dev(q); |
185 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | 187 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); |
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index b38b39c60752..abd904be4287 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
@@ -617,7 +617,6 @@ rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y) | |||
617 | rtsc->y = y; | 617 | rtsc->y = y; |
618 | rtsc->dx = dx; | 618 | rtsc->dx = dx; |
619 | rtsc->dy = dy; | 619 | rtsc->dy = dy; |
620 | return; | ||
621 | } | 620 | } |
622 | 621 | ||
623 | static void | 622 | static void |
@@ -1155,7 +1154,7 @@ static struct hfsc_class * | |||
1155 | hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | 1154 | hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) |
1156 | { | 1155 | { |
1157 | struct hfsc_sched *q = qdisc_priv(sch); | 1156 | struct hfsc_sched *q = qdisc_priv(sch); |
1158 | struct hfsc_class *cl; | 1157 | struct hfsc_class *head, *cl; |
1159 | struct tcf_result res; | 1158 | struct tcf_result res; |
1160 | struct tcf_proto *tcf; | 1159 | struct tcf_proto *tcf; |
1161 | int result; | 1160 | int result; |
@@ -1166,6 +1165,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
1166 | return cl; | 1165 | return cl; |
1167 | 1166 | ||
1168 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; | 1167 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; |
1168 | head = &q->root; | ||
1169 | tcf = q->root.filter_list; | 1169 | tcf = q->root.filter_list; |
1170 | while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { | 1170 | while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { |
1171 | #ifdef CONFIG_NET_CLS_ACT | 1171 | #ifdef CONFIG_NET_CLS_ACT |
@@ -1180,6 +1180,8 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
1180 | if ((cl = (struct hfsc_class *)res.class) == NULL) { | 1180 | if ((cl = (struct hfsc_class *)res.class) == NULL) { |
1181 | if ((cl = hfsc_find_class(res.classid, sch)) == NULL) | 1181 | if ((cl = hfsc_find_class(res.classid, sch)) == NULL) |
1182 | break; /* filter selected invalid classid */ | 1182 | break; /* filter selected invalid classid */ |
1183 | if (cl->level >= head->level) | ||
1184 | break; /* filter may only point downwards */ | ||
1183 | } | 1185 | } |
1184 | 1186 | ||
1185 | if (cl->level == 0) | 1187 | if (cl->level == 0) |
@@ -1187,6 +1189,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
1187 | 1189 | ||
1188 | /* apply inner filter chain */ | 1190 | /* apply inner filter chain */ |
1189 | tcf = cl->filter_list; | 1191 | tcf = cl->filter_list; |
1192 | head = cl; | ||
1190 | } | 1193 | } |
1191 | 1194 | ||
1192 | /* classification failed, try default class */ | 1195 | /* classification failed, try default class */ |
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index a9e646bdb605..f10e34a68445 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c | |||
@@ -44,7 +44,6 @@ static void ingress_put(struct Qdisc *sch, unsigned long cl) | |||
44 | 44 | ||
45 | static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker) | 45 | static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker) |
46 | { | 46 | { |
47 | return; | ||
48 | } | 47 | } |
49 | 48 | ||
50 | static struct tcf_proto **ingress_find_tcf(struct Qdisc *sch, unsigned long cl) | 49 | static struct tcf_proto **ingress_find_tcf(struct Qdisc *sch, unsigned long cl) |
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c index b2aba3f5e6fa..fe91e50f9d98 100644 --- a/net/sched/sch_mq.c +++ b/net/sched/sch_mq.c | |||
@@ -174,7 +174,6 @@ static unsigned long mq_get(struct Qdisc *sch, u32 classid) | |||
174 | 174 | ||
175 | static void mq_put(struct Qdisc *sch, unsigned long cl) | 175 | static void mq_put(struct Qdisc *sch, unsigned long cl) |
176 | { | 176 | { |
177 | return; | ||
178 | } | 177 | } |
179 | 178 | ||
180 | static int mq_dump_class(struct Qdisc *sch, unsigned long cl, | 179 | static int mq_dump_class(struct Qdisc *sch, unsigned long cl, |
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index c50876cd8704..6ae251279fc2 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c | |||
@@ -340,7 +340,6 @@ static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent, | |||
340 | 340 | ||
341 | static void multiq_put(struct Qdisc *q, unsigned long cl) | 341 | static void multiq_put(struct Qdisc *q, unsigned long cl) |
342 | { | 342 | { |
343 | return; | ||
344 | } | 343 | } |
345 | 344 | ||
346 | static int multiq_dump_class(struct Qdisc *sch, unsigned long cl, | 345 | static int multiq_dump_class(struct Qdisc *sch, unsigned long cl, |
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 81672e0c1b25..0748fb1e3a49 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
@@ -303,7 +303,6 @@ static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 clas | |||
303 | 303 | ||
304 | static void prio_put(struct Qdisc *q, unsigned long cl) | 304 | static void prio_put(struct Qdisc *q, unsigned long cl) |
305 | { | 305 | { |
306 | return; | ||
307 | } | 306 | } |
308 | 307 | ||
309 | static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, | 308 | static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, |
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 072cdf442f8e..8d42bb3ba540 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c | |||
@@ -303,7 +303,6 @@ static unsigned long red_get(struct Qdisc *sch, u32 classid) | |||
303 | 303 | ||
304 | static void red_put(struct Qdisc *sch, unsigned long arg) | 304 | static void red_put(struct Qdisc *sch, unsigned long arg) |
305 | { | 305 | { |
306 | return; | ||
307 | } | 306 | } |
308 | 307 | ||
309 | static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker) | 308 | static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker) |
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 8fb8107ab188..0991c640cd3e 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
@@ -273,7 +273,11 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt) | |||
273 | if (max_size < 0) | 273 | if (max_size < 0) |
274 | goto done; | 274 | goto done; |
275 | 275 | ||
276 | if (qopt->limit > 0) { | 276 | if (q->qdisc != &noop_qdisc) { |
277 | err = fifo_set_limit(q->qdisc, qopt->limit); | ||
278 | if (err) | ||
279 | goto done; | ||
280 | } else if (qopt->limit > 0) { | ||
277 | child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit); | 281 | child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit); |
278 | if (IS_ERR(child)) { | 282 | if (IS_ERR(child)) { |
279 | err = PTR_ERR(child); | 283 | err = PTR_ERR(child); |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 3912420cedcc..e41feff19e43 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -816,8 +816,6 @@ void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc, | |||
816 | if (t != primary) | 816 | if (t != primary) |
817 | sctp_assoc_rm_peer(asoc, t); | 817 | sctp_assoc_rm_peer(asoc, t); |
818 | } | 818 | } |
819 | |||
820 | return; | ||
821 | } | 819 | } |
822 | 820 | ||
823 | /* Engage in transport control operations. | 821 | /* Engage in transport control operations. |
diff --git a/net/sctp/input.c b/net/sctp/input.c index 2a570184e5a9..ea2192444ce6 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -440,11 +440,25 @@ void sctp_icmp_proto_unreachable(struct sock *sk, | |||
440 | { | 440 | { |
441 | SCTP_DEBUG_PRINTK("%s\n", __func__); | 441 | SCTP_DEBUG_PRINTK("%s\n", __func__); |
442 | 442 | ||
443 | sctp_do_sm(SCTP_EVENT_T_OTHER, | 443 | if (sock_owned_by_user(sk)) { |
444 | SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), | 444 | if (timer_pending(&t->proto_unreach_timer)) |
445 | asoc->state, asoc->ep, asoc, t, | 445 | return; |
446 | GFP_ATOMIC); | 446 | else { |
447 | if (!mod_timer(&t->proto_unreach_timer, | ||
448 | jiffies + (HZ/20))) | ||
449 | sctp_association_hold(asoc); | ||
450 | } | ||
451 | |||
452 | } else { | ||
453 | if (timer_pending(&t->proto_unreach_timer) && | ||
454 | del_timer(&t->proto_unreach_timer)) | ||
455 | sctp_association_put(asoc); | ||
447 | 456 | ||
457 | sctp_do_sm(SCTP_EVENT_T_OTHER, | ||
458 | SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), | ||
459 | asoc->state, asoc->ep, asoc, t, | ||
460 | GFP_ATOMIC); | ||
461 | } | ||
448 | } | 462 | } |
449 | 463 | ||
450 | /* Common lookup code for icmp/icmpv6 error handler. */ | 464 | /* Common lookup code for icmp/icmpv6 error handler. */ |
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 5d057178ce0c..c04b2eb59186 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
@@ -80,7 +80,6 @@ static inline void sctp_outq_head_data(struct sctp_outq *q, | |||
80 | { | 80 | { |
81 | list_add(&ch->list, &q->out_chunk_list); | 81 | list_add(&ch->list, &q->out_chunk_list); |
82 | q->out_qlen += ch->skb->len; | 82 | q->out_qlen += ch->skb->len; |
83 | return; | ||
84 | } | 83 | } |
85 | 84 | ||
86 | /* Take data from the front of the queue. */ | 85 | /* Take data from the front of the queue. */ |
@@ -103,7 +102,6 @@ static inline void sctp_outq_tail_data(struct sctp_outq *q, | |||
103 | { | 102 | { |
104 | list_add_tail(&ch->list, &q->out_chunk_list); | 103 | list_add_tail(&ch->list, &q->out_chunk_list); |
105 | q->out_qlen += ch->skb->len; | 104 | q->out_qlen += ch->skb->len; |
106 | return; | ||
107 | } | 105 | } |
108 | 106 | ||
109 | /* | 107 | /* |
diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 784bcc9a979d..61aacfbbaa92 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c | |||
@@ -181,7 +181,6 @@ static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos) | |||
181 | 181 | ||
182 | static void sctp_eps_seq_stop(struct seq_file *seq, void *v) | 182 | static void sctp_eps_seq_stop(struct seq_file *seq, void *v) |
183 | { | 183 | { |
184 | return; | ||
185 | } | 184 | } |
186 | 185 | ||
187 | 186 | ||
@@ -286,7 +285,6 @@ static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos) | |||
286 | 285 | ||
287 | static void sctp_assocs_seq_stop(struct seq_file *seq, void *v) | 286 | static void sctp_assocs_seq_stop(struct seq_file *seq, void *v) |
288 | { | 287 | { |
289 | return; | ||
290 | } | 288 | } |
291 | 289 | ||
292 | 290 | ||
@@ -409,7 +407,6 @@ static void *sctp_remaddr_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
409 | 407 | ||
410 | static void sctp_remaddr_seq_stop(struct seq_file *seq, void *v) | 408 | static void sctp_remaddr_seq_stop(struct seq_file *seq, void *v) |
411 | { | 409 | { |
412 | return; | ||
413 | } | 410 | } |
414 | 411 | ||
415 | static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) | 412 | static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index d8261f3d7715..bd2a50b482ac 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -141,7 +141,7 @@ int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code, | |||
141 | len = sizeof(sctp_errhdr_t) + paylen; | 141 | len = sizeof(sctp_errhdr_t) + paylen; |
142 | err.length = htons(len); | 142 | err.length = htons(len); |
143 | 143 | ||
144 | if (skb_tailroom(chunk->skb) > len) | 144 | if (skb_tailroom(chunk->skb) < len) |
145 | return -ENOSPC; | 145 | return -ENOSPC; |
146 | chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk, | 146 | chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk, |
147 | sizeof(sctp_errhdr_t), | 147 | sizeof(sctp_errhdr_t), |
@@ -1415,7 +1415,7 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data) | |||
1415 | void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk, | 1415 | void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk, |
1416 | int len, const void *data) | 1416 | int len, const void *data) |
1417 | { | 1417 | { |
1418 | if (skb_tailroom(chunk->skb) > len) | 1418 | if (skb_tailroom(chunk->skb) >= len) |
1419 | return sctp_addto_chunk(chunk, len, data); | 1419 | return sctp_addto_chunk(chunk, len, data); |
1420 | else | 1420 | else |
1421 | return NULL; | 1421 | return NULL; |
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 3b7230ef77c2..f5e5e27cac5e 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -397,6 +397,41 @@ out_unlock: | |||
397 | sctp_transport_put(transport); | 397 | sctp_transport_put(transport); |
398 | } | 398 | } |
399 | 399 | ||
400 | /* Handle the timeout of the ICMP protocol unreachable timer. Trigger | ||
401 | * the correct state machine transition that will close the association. | ||
402 | */ | ||
403 | void sctp_generate_proto_unreach_event(unsigned long data) | ||
404 | { | ||
405 | struct sctp_transport *transport = (struct sctp_transport *) data; | ||
406 | struct sctp_association *asoc = transport->asoc; | ||
407 | |||
408 | sctp_bh_lock_sock(asoc->base.sk); | ||
409 | if (sock_owned_by_user(asoc->base.sk)) { | ||
410 | SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__); | ||
411 | |||
412 | /* Try again later. */ | ||
413 | if (!mod_timer(&transport->proto_unreach_timer, | ||
414 | jiffies + (HZ/20))) | ||
415 | sctp_association_hold(asoc); | ||
416 | goto out_unlock; | ||
417 | } | ||
418 | |||
419 | /* Is this structure just waiting around for us to actually | ||
420 | * get destroyed? | ||
421 | */ | ||
422 | if (asoc->base.dead) | ||
423 | goto out_unlock; | ||
424 | |||
425 | sctp_do_sm(SCTP_EVENT_T_OTHER, | ||
426 | SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), | ||
427 | asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); | ||
428 | |||
429 | out_unlock: | ||
430 | sctp_bh_unlock_sock(asoc->base.sk); | ||
431 | sctp_association_put(asoc); | ||
432 | } | ||
433 | |||
434 | |||
400 | /* Inject a SACK Timeout event into the state machine. */ | 435 | /* Inject a SACK Timeout event into the state machine. */ |
401 | static void sctp_generate_sack_event(unsigned long data) | 436 | static void sctp_generate_sack_event(unsigned long data) |
402 | { | 437 | { |
@@ -857,8 +892,6 @@ static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq, | |||
857 | sctp_walk_fwdtsn(skip, chunk) { | 892 | sctp_walk_fwdtsn(skip, chunk) { |
858 | sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn)); | 893 | sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn)); |
859 | } | 894 | } |
860 | |||
861 | return; | ||
862 | } | 895 | } |
863 | 896 | ||
864 | /* Helper function to remove the association non-primary peer | 897 | /* Helper function to remove the association non-primary peer |
@@ -877,8 +910,6 @@ static void sctp_cmd_del_non_primary(struct sctp_association *asoc) | |||
877 | sctp_assoc_del_peer(asoc, &t->ipaddr); | 910 | sctp_assoc_del_peer(asoc, &t->ipaddr); |
878 | } | 911 | } |
879 | } | 912 | } |
880 | |||
881 | return; | ||
882 | } | 913 | } |
883 | 914 | ||
884 | /* Helper function to set sk_err on a 1-1 style socket. */ | 915 | /* Helper function to set sk_err on a 1-1 style socket. */ |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index ba1add0b13c3..ca44917872d2 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -5433,6 +5433,8 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) | |||
5433 | rover++; | 5433 | rover++; |
5434 | if ((rover < low) || (rover > high)) | 5434 | if ((rover < low) || (rover > high)) |
5435 | rover = low; | 5435 | rover = low; |
5436 | if (inet_is_reserved_local_port(rover)) | ||
5437 | continue; | ||
5436 | index = sctp_phashfn(rover); | 5438 | index = sctp_phashfn(rover); |
5437 | head = &sctp_port_hashtable[index]; | 5439 | head = &sctp_port_hashtable[index]; |
5438 | sctp_spin_lock(&head->lock); | 5440 | sctp_spin_lock(&head->lock); |
diff --git a/net/sctp/transport.c b/net/sctp/transport.c index fccf4947aff1..132046cb82fc 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c | |||
@@ -92,6 +92,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer, | |||
92 | (unsigned long)peer); | 92 | (unsigned long)peer); |
93 | setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event, | 93 | setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event, |
94 | (unsigned long)peer); | 94 | (unsigned long)peer); |
95 | setup_timer(&peer->proto_unreach_timer, | ||
96 | sctp_generate_proto_unreach_event, (unsigned long)peer); | ||
95 | 97 | ||
96 | /* Initialize the 64-bit random nonce sent with heartbeat. */ | 98 | /* Initialize the 64-bit random nonce sent with heartbeat. */ |
97 | get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce)); | 99 | get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce)); |
@@ -146,6 +148,10 @@ void sctp_transport_free(struct sctp_transport *transport) | |||
146 | del_timer(&transport->T3_rtx_timer)) | 148 | del_timer(&transport->T3_rtx_timer)) |
147 | sctp_transport_put(transport); | 149 | sctp_transport_put(transport); |
148 | 150 | ||
151 | /* Delete the ICMP proto unreachable timer if it's active. */ | ||
152 | if (timer_pending(&transport->proto_unreach_timer) && | ||
153 | del_timer(&transport->proto_unreach_timer)) | ||
154 | sctp_association_put(transport->asoc); | ||
149 | 155 | ||
150 | sctp_transport_put(transport); | 156 | sctp_transport_put(transport); |
151 | } | 157 | } |
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index 3a448536f0b6..c7f7e49609cb 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c | |||
@@ -955,7 +955,6 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn) | |||
955 | * ordering and deliver them if needed. | 955 | * ordering and deliver them if needed. |
956 | */ | 956 | */ |
957 | sctp_ulpq_reap_ordered(ulpq, sid); | 957 | sctp_ulpq_reap_ordered(ulpq, sid); |
958 | return; | ||
959 | } | 958 | } |
960 | 959 | ||
961 | static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, | 960 | static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, |
@@ -1064,7 +1063,6 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, | |||
1064 | } | 1063 | } |
1065 | 1064 | ||
1066 | sk_mem_reclaim(asoc->base.sk); | 1065 | sk_mem_reclaim(asoc->base.sk); |
1067 | return; | ||
1068 | } | 1066 | } |
1069 | 1067 | ||
1070 | 1068 | ||
diff --git a/net/socket.c b/net/socket.c index dae8c6b84a09..367d5477d00f 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -94,6 +94,7 @@ | |||
94 | 94 | ||
95 | #include <net/compat.h> | 95 | #include <net/compat.h> |
96 | #include <net/wext.h> | 96 | #include <net/wext.h> |
97 | #include <net/cls_cgroup.h> | ||
97 | 98 | ||
98 | #include <net/sock.h> | 99 | #include <net/sock.h> |
99 | #include <linux/netfilter.h> | 100 | #include <linux/netfilter.h> |
@@ -558,6 +559,8 @@ static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
558 | struct sock_iocb *si = kiocb_to_siocb(iocb); | 559 | struct sock_iocb *si = kiocb_to_siocb(iocb); |
559 | int err; | 560 | int err; |
560 | 561 | ||
562 | sock_update_classid(sock->sk); | ||
563 | |||
561 | si->sock = sock; | 564 | si->sock = sock; |
562 | si->scm = NULL; | 565 | si->scm = NULL; |
563 | si->msg = msg; | 566 | si->msg = msg; |
@@ -684,6 +687,8 @@ static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock, | |||
684 | { | 687 | { |
685 | struct sock_iocb *si = kiocb_to_siocb(iocb); | 688 | struct sock_iocb *si = kiocb_to_siocb(iocb); |
686 | 689 | ||
690 | sock_update_classid(sock->sk); | ||
691 | |||
687 | si->sock = sock; | 692 | si->sock = sock; |
688 | si->scm = NULL; | 693 | si->scm = NULL; |
689 | si->msg = msg; | 694 | si->msg = msg; |
@@ -777,6 +782,8 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos, | |||
777 | if (unlikely(!sock->ops->splice_read)) | 782 | if (unlikely(!sock->ops->splice_read)) |
778 | return -EINVAL; | 783 | return -EINVAL; |
779 | 784 | ||
785 | sock_update_classid(sock->sk); | ||
786 | |||
780 | return sock->ops->splice_read(sock, ppos, pipe, len, flags); | 787 | return sock->ops->splice_read(sock, ppos, pipe, len, flags); |
781 | } | 788 | } |
782 | 789 | ||
@@ -2615,7 +2622,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd, | |||
2615 | return dev_ioctl(net, cmd, uifr); | 2622 | return dev_ioctl(net, cmd, uifr); |
2616 | default: | 2623 | default: |
2617 | return -EINVAL; | 2624 | return -EINVAL; |
2618 | }; | 2625 | } |
2619 | } | 2626 | } |
2620 | 2627 | ||
2621 | static int siocdevprivate_ioctl(struct net *net, unsigned int cmd, | 2628 | static int siocdevprivate_ioctl(struct net *net, unsigned int cmd, |
@@ -3069,6 +3076,8 @@ int kernel_setsockopt(struct socket *sock, int level, int optname, | |||
3069 | int kernel_sendpage(struct socket *sock, struct page *page, int offset, | 3076 | int kernel_sendpage(struct socket *sock, struct page *page, int offset, |
3070 | size_t size, int flags) | 3077 | size_t size, int flags) |
3071 | { | 3078 | { |
3079 | sock_update_classid(sock->sk); | ||
3080 | |||
3072 | if (sock->ops->sendpage) | 3081 | if (sock->ops->sendpage) |
3073 | return sock->ops->sendpage(sock, page, offset, size, flags); | 3082 | return sock->ops->sendpage(sock, page, offset, size, flags); |
3074 | 3083 | ||
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index f394fc190a49..73affb8624fa 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c | |||
@@ -236,10 +236,15 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan) | |||
236 | 236 | ||
237 | list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) { | 237 | list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) { |
238 | 238 | ||
239 | /* Enforce a 60 second garbage collection moratorium */ | 239 | if (nr_to_scan-- == 0) |
240 | if (time_in_range_open(cred->cr_expire, expired, jiffies) && | 240 | break; |
241 | /* | ||
242 | * Enforce a 60 second garbage collection moratorium | ||
243 | * Note that the cred_unused list must be time-ordered. | ||
244 | */ | ||
245 | if (time_in_range(cred->cr_expire, expired, jiffies) && | ||
241 | test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) | 246 | test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) |
242 | continue; | 247 | return 0; |
243 | 248 | ||
244 | list_del_init(&cred->cr_lru); | 249 | list_del_init(&cred->cr_lru); |
245 | number_cred_unused--; | 250 | number_cred_unused--; |
@@ -252,13 +257,10 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan) | |||
252 | get_rpccred(cred); | 257 | get_rpccred(cred); |
253 | list_add_tail(&cred->cr_lru, free); | 258 | list_add_tail(&cred->cr_lru, free); |
254 | rpcauth_unhash_cred_locked(cred); | 259 | rpcauth_unhash_cred_locked(cred); |
255 | nr_to_scan--; | ||
256 | } | 260 | } |
257 | spin_unlock(cache_lock); | 261 | spin_unlock(cache_lock); |
258 | if (nr_to_scan == 0) | ||
259 | break; | ||
260 | } | 262 | } |
261 | return nr_to_scan; | 263 | return (number_cred_unused / 100) * sysctl_vfs_cache_pressure; |
262 | } | 264 | } |
263 | 265 | ||
264 | /* | 266 | /* |
@@ -270,11 +272,12 @@ rpcauth_cache_shrinker(int nr_to_scan, gfp_t gfp_mask) | |||
270 | LIST_HEAD(free); | 272 | LIST_HEAD(free); |
271 | int res; | 273 | int res; |
272 | 274 | ||
275 | if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL) | ||
276 | return (nr_to_scan == 0) ? 0 : -1; | ||
273 | if (list_empty(&cred_unused)) | 277 | if (list_empty(&cred_unused)) |
274 | return 0; | 278 | return 0; |
275 | spin_lock(&rpc_credcache_lock); | 279 | spin_lock(&rpc_credcache_lock); |
276 | nr_to_scan = rpcauth_prune_expired(&free, nr_to_scan); | 280 | res = rpcauth_prune_expired(&free, nr_to_scan); |
277 | res = (number_cred_unused / 100) * sysctl_vfs_cache_pressure; | ||
278 | spin_unlock(&rpc_credcache_lock); | 281 | spin_unlock(&rpc_credcache_lock); |
279 | rpcauth_destroy_credlist(&free); | 282 | rpcauth_destroy_credlist(&free); |
280 | return res; | 283 | return res; |
diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile index 4de8bcf26fa7..74a231735f67 100644 --- a/net/sunrpc/auth_gss/Makefile +++ b/net/sunrpc/auth_gss/Makefile | |||
@@ -10,7 +10,7 @@ auth_rpcgss-objs := auth_gss.o gss_generic_token.o \ | |||
10 | obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o | 10 | obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o |
11 | 11 | ||
12 | rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \ | 12 | rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \ |
13 | gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o | 13 | gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o gss_krb5_keys.o |
14 | 14 | ||
15 | obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o | 15 | obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o |
16 | 16 | ||
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index c389ccf6437d..8da2a0e68574 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -57,11 +57,14 @@ static const struct rpc_authops authgss_ops; | |||
57 | static const struct rpc_credops gss_credops; | 57 | static const struct rpc_credops gss_credops; |
58 | static const struct rpc_credops gss_nullops; | 58 | static const struct rpc_credops gss_nullops; |
59 | 59 | ||
60 | #define GSS_RETRY_EXPIRED 5 | ||
61 | static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED; | ||
62 | |||
60 | #ifdef RPC_DEBUG | 63 | #ifdef RPC_DEBUG |
61 | # define RPCDBG_FACILITY RPCDBG_AUTH | 64 | # define RPCDBG_FACILITY RPCDBG_AUTH |
62 | #endif | 65 | #endif |
63 | 66 | ||
64 | #define GSS_CRED_SLACK 1024 | 67 | #define GSS_CRED_SLACK (RPC_MAX_AUTH_SIZE * 2) |
65 | /* length of a krb5 verifier (48), plus data added before arguments when | 68 | /* length of a krb5 verifier (48), plus data added before arguments when |
66 | * using integrity (two 4-byte integers): */ | 69 | * using integrity (two 4-byte integers): */ |
67 | #define GSS_VERF_SLACK 100 | 70 | #define GSS_VERF_SLACK 100 |
@@ -229,7 +232,7 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct | |||
229 | p = ERR_PTR(-EFAULT); | 232 | p = ERR_PTR(-EFAULT); |
230 | goto err; | 233 | goto err; |
231 | } | 234 | } |
232 | ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx); | 235 | ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, GFP_NOFS); |
233 | if (ret < 0) { | 236 | if (ret < 0) { |
234 | p = ERR_PTR(ret); | 237 | p = ERR_PTR(ret); |
235 | goto err; | 238 | goto err; |
@@ -350,6 +353,24 @@ gss_unhash_msg(struct gss_upcall_msg *gss_msg) | |||
350 | } | 353 | } |
351 | 354 | ||
352 | static void | 355 | static void |
356 | gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg) | ||
357 | { | ||
358 | switch (gss_msg->msg.errno) { | ||
359 | case 0: | ||
360 | if (gss_msg->ctx == NULL) | ||
361 | break; | ||
362 | clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags); | ||
363 | gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx); | ||
364 | break; | ||
365 | case -EKEYEXPIRED: | ||
366 | set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags); | ||
367 | } | ||
368 | gss_cred->gc_upcall_timestamp = jiffies; | ||
369 | gss_cred->gc_upcall = NULL; | ||
370 | rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); | ||
371 | } | ||
372 | |||
373 | static void | ||
353 | gss_upcall_callback(struct rpc_task *task) | 374 | gss_upcall_callback(struct rpc_task *task) |
354 | { | 375 | { |
355 | struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred, | 376 | struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred, |
@@ -358,13 +379,9 @@ gss_upcall_callback(struct rpc_task *task) | |||
358 | struct inode *inode = &gss_msg->inode->vfs_inode; | 379 | struct inode *inode = &gss_msg->inode->vfs_inode; |
359 | 380 | ||
360 | spin_lock(&inode->i_lock); | 381 | spin_lock(&inode->i_lock); |
361 | if (gss_msg->ctx) | 382 | gss_handle_downcall_result(gss_cred, gss_msg); |
362 | gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_msg->ctx); | ||
363 | else | ||
364 | task->tk_status = gss_msg->msg.errno; | ||
365 | gss_cred->gc_upcall = NULL; | ||
366 | rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); | ||
367 | spin_unlock(&inode->i_lock); | 383 | spin_unlock(&inode->i_lock); |
384 | task->tk_status = gss_msg->msg.errno; | ||
368 | gss_release_msg(gss_msg); | 385 | gss_release_msg(gss_msg); |
369 | } | 386 | } |
370 | 387 | ||
@@ -377,11 +394,12 @@ static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg) | |||
377 | static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg, | 394 | static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg, |
378 | struct rpc_clnt *clnt, int machine_cred) | 395 | struct rpc_clnt *clnt, int machine_cred) |
379 | { | 396 | { |
397 | struct gss_api_mech *mech = gss_msg->auth->mech; | ||
380 | char *p = gss_msg->databuf; | 398 | char *p = gss_msg->databuf; |
381 | int len = 0; | 399 | int len = 0; |
382 | 400 | ||
383 | gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ", | 401 | gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ", |
384 | gss_msg->auth->mech->gm_name, | 402 | mech->gm_name, |
385 | gss_msg->uid); | 403 | gss_msg->uid); |
386 | p += gss_msg->msg.len; | 404 | p += gss_msg->msg.len; |
387 | if (clnt->cl_principal) { | 405 | if (clnt->cl_principal) { |
@@ -398,6 +416,11 @@ static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg, | |||
398 | p += len; | 416 | p += len; |
399 | gss_msg->msg.len += len; | 417 | gss_msg->msg.len += len; |
400 | } | 418 | } |
419 | if (mech->gm_upcall_enctypes) { | ||
420 | len = sprintf(p, mech->gm_upcall_enctypes); | ||
421 | p += len; | ||
422 | gss_msg->msg.len += len; | ||
423 | } | ||
401 | len = sprintf(p, "\n"); | 424 | len = sprintf(p, "\n"); |
402 | gss_msg->msg.len += len; | 425 | gss_msg->msg.len += len; |
403 | 426 | ||
@@ -507,18 +530,16 @@ gss_refresh_upcall(struct rpc_task *task) | |||
507 | spin_lock(&inode->i_lock); | 530 | spin_lock(&inode->i_lock); |
508 | if (gss_cred->gc_upcall != NULL) | 531 | if (gss_cred->gc_upcall != NULL) |
509 | rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL); | 532 | rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL); |
510 | else if (gss_msg->ctx != NULL) { | 533 | else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) { |
511 | gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_msg->ctx); | ||
512 | gss_cred->gc_upcall = NULL; | ||
513 | rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); | ||
514 | } else if (gss_msg->msg.errno >= 0) { | ||
515 | task->tk_timeout = 0; | 534 | task->tk_timeout = 0; |
516 | gss_cred->gc_upcall = gss_msg; | 535 | gss_cred->gc_upcall = gss_msg; |
517 | /* gss_upcall_callback will release the reference to gss_upcall_msg */ | 536 | /* gss_upcall_callback will release the reference to gss_upcall_msg */ |
518 | atomic_inc(&gss_msg->count); | 537 | atomic_inc(&gss_msg->count); |
519 | rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback); | 538 | rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback); |
520 | } else | 539 | } else { |
540 | gss_handle_downcall_result(gss_cred, gss_msg); | ||
521 | err = gss_msg->msg.errno; | 541 | err = gss_msg->msg.errno; |
542 | } | ||
522 | spin_unlock(&inode->i_lock); | 543 | spin_unlock(&inode->i_lock); |
523 | gss_release_msg(gss_msg); | 544 | gss_release_msg(gss_msg); |
524 | out: | 545 | out: |
@@ -1117,6 +1138,23 @@ static int gss_renew_cred(struct rpc_task *task) | |||
1117 | return 0; | 1138 | return 0; |
1118 | } | 1139 | } |
1119 | 1140 | ||
1141 | static int gss_cred_is_negative_entry(struct rpc_cred *cred) | ||
1142 | { | ||
1143 | if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) { | ||
1144 | unsigned long now = jiffies; | ||
1145 | unsigned long begin, expire; | ||
1146 | struct gss_cred *gss_cred; | ||
1147 | |||
1148 | gss_cred = container_of(cred, struct gss_cred, gc_base); | ||
1149 | begin = gss_cred->gc_upcall_timestamp; | ||
1150 | expire = begin + gss_expired_cred_retry_delay * HZ; | ||
1151 | |||
1152 | if (time_in_range_open(now, begin, expire)) | ||
1153 | return 1; | ||
1154 | } | ||
1155 | return 0; | ||
1156 | } | ||
1157 | |||
1120 | /* | 1158 | /* |
1121 | * Refresh credentials. XXX - finish | 1159 | * Refresh credentials. XXX - finish |
1122 | */ | 1160 | */ |
@@ -1126,6 +1164,9 @@ gss_refresh(struct rpc_task *task) | |||
1126 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 1164 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
1127 | int ret = 0; | 1165 | int ret = 0; |
1128 | 1166 | ||
1167 | if (gss_cred_is_negative_entry(cred)) | ||
1168 | return -EKEYEXPIRED; | ||
1169 | |||
1129 | if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) && | 1170 | if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) && |
1130 | !test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) { | 1171 | !test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) { |
1131 | ret = gss_renew_cred(task); | 1172 | ret = gss_renew_cred(task); |
@@ -1316,15 +1357,21 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
1316 | inpages = snd_buf->pages + first; | 1357 | inpages = snd_buf->pages + first; |
1317 | snd_buf->pages = rqstp->rq_enc_pages; | 1358 | snd_buf->pages = rqstp->rq_enc_pages; |
1318 | snd_buf->page_base -= first << PAGE_CACHE_SHIFT; | 1359 | snd_buf->page_base -= first << PAGE_CACHE_SHIFT; |
1319 | /* Give the tail its own page, in case we need extra space in the | 1360 | /* |
1320 | * head when wrapping: */ | 1361 | * Give the tail its own page, in case we need extra space in the |
1362 | * head when wrapping: | ||
1363 | * | ||
1364 | * call_allocate() allocates twice the slack space required | ||
1365 | * by the authentication flavor to rq_callsize. | ||
1366 | * For GSS, slack is GSS_CRED_SLACK. | ||
1367 | */ | ||
1321 | if (snd_buf->page_len || snd_buf->tail[0].iov_len) { | 1368 | if (snd_buf->page_len || snd_buf->tail[0].iov_len) { |
1322 | tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]); | 1369 | tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]); |
1323 | memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len); | 1370 | memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len); |
1324 | snd_buf->tail[0].iov_base = tmp; | 1371 | snd_buf->tail[0].iov_base = tmp; |
1325 | } | 1372 | } |
1326 | maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages); | 1373 | maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages); |
1327 | /* RPC_SLACK_SPACE should prevent this ever happening: */ | 1374 | /* slack space should prevent this ever happening: */ |
1328 | BUG_ON(snd_buf->len > snd_buf->buflen); | 1375 | BUG_ON(snd_buf->len > snd_buf->buflen); |
1329 | status = -EIO; | 1376 | status = -EIO; |
1330 | /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was | 1377 | /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was |
@@ -1573,5 +1620,11 @@ static void __exit exit_rpcsec_gss(void) | |||
1573 | } | 1620 | } |
1574 | 1621 | ||
1575 | MODULE_LICENSE("GPL"); | 1622 | MODULE_LICENSE("GPL"); |
1623 | module_param_named(expired_cred_retry_delay, | ||
1624 | gss_expired_cred_retry_delay, | ||
1625 | uint, 0644); | ||
1626 | MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until " | ||
1627 | "the RPC engine retries an expired credential"); | ||
1628 | |||
1576 | module_init(init_rpcsec_gss) | 1629 | module_init(init_rpcsec_gss) |
1577 | module_exit(exit_rpcsec_gss) | 1630 | module_exit(exit_rpcsec_gss) |
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index e9b636176687..75ee993ea057 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * linux/net/sunrpc/gss_krb5_crypto.c | 2 | * linux/net/sunrpc/gss_krb5_crypto.c |
3 | * | 3 | * |
4 | * Copyright (c) 2000 The Regents of the University of Michigan. | 4 | * Copyright (c) 2000-2008 The Regents of the University of Michigan. |
5 | * All rights reserved. | 5 | * All rights reserved. |
6 | * | 6 | * |
7 | * Andy Adamson <andros@umich.edu> | 7 | * Andy Adamson <andros@umich.edu> |
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/crypto.h> | 41 | #include <linux/crypto.h> |
42 | #include <linux/highmem.h> | 42 | #include <linux/highmem.h> |
43 | #include <linux/pagemap.h> | 43 | #include <linux/pagemap.h> |
44 | #include <linux/random.h> | ||
44 | #include <linux/sunrpc/gss_krb5.h> | 45 | #include <linux/sunrpc/gss_krb5.h> |
45 | #include <linux/sunrpc/xdr.h> | 46 | #include <linux/sunrpc/xdr.h> |
46 | 47 | ||
@@ -58,13 +59,13 @@ krb5_encrypt( | |||
58 | { | 59 | { |
59 | u32 ret = -EINVAL; | 60 | u32 ret = -EINVAL; |
60 | struct scatterlist sg[1]; | 61 | struct scatterlist sg[1]; |
61 | u8 local_iv[16] = {0}; | 62 | u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0}; |
62 | struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv }; | 63 | struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv }; |
63 | 64 | ||
64 | if (length % crypto_blkcipher_blocksize(tfm) != 0) | 65 | if (length % crypto_blkcipher_blocksize(tfm) != 0) |
65 | goto out; | 66 | goto out; |
66 | 67 | ||
67 | if (crypto_blkcipher_ivsize(tfm) > 16) { | 68 | if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) { |
68 | dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n", | 69 | dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n", |
69 | crypto_blkcipher_ivsize(tfm)); | 70 | crypto_blkcipher_ivsize(tfm)); |
70 | goto out; | 71 | goto out; |
@@ -92,13 +93,13 @@ krb5_decrypt( | |||
92 | { | 93 | { |
93 | u32 ret = -EINVAL; | 94 | u32 ret = -EINVAL; |
94 | struct scatterlist sg[1]; | 95 | struct scatterlist sg[1]; |
95 | u8 local_iv[16] = {0}; | 96 | u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0}; |
96 | struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv }; | 97 | struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv }; |
97 | 98 | ||
98 | if (length % crypto_blkcipher_blocksize(tfm) != 0) | 99 | if (length % crypto_blkcipher_blocksize(tfm) != 0) |
99 | goto out; | 100 | goto out; |
100 | 101 | ||
101 | if (crypto_blkcipher_ivsize(tfm) > 16) { | 102 | if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) { |
102 | dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n", | 103 | dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n", |
103 | crypto_blkcipher_ivsize(tfm)); | 104 | crypto_blkcipher_ivsize(tfm)); |
104 | goto out; | 105 | goto out; |
@@ -123,21 +124,155 @@ checksummer(struct scatterlist *sg, void *data) | |||
123 | return crypto_hash_update(desc, sg, sg->length); | 124 | return crypto_hash_update(desc, sg, sg->length); |
124 | } | 125 | } |
125 | 126 | ||
126 | /* checksum the plaintext data and hdrlen bytes of the token header */ | 127 | static int |
127 | s32 | 128 | arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4]) |
128 | make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body, | 129 | { |
129 | int body_offset, struct xdr_netobj *cksum) | 130 | unsigned int ms_usage; |
131 | |||
132 | switch (usage) { | ||
133 | case KG_USAGE_SIGN: | ||
134 | ms_usage = 15; | ||
135 | break; | ||
136 | case KG_USAGE_SEAL: | ||
137 | ms_usage = 13; | ||
138 | break; | ||
139 | default: | ||
140 | return EINVAL;; | ||
141 | } | ||
142 | salt[0] = (ms_usage >> 0) & 0xff; | ||
143 | salt[1] = (ms_usage >> 8) & 0xff; | ||
144 | salt[2] = (ms_usage >> 16) & 0xff; | ||
145 | salt[3] = (ms_usage >> 24) & 0xff; | ||
146 | |||
147 | return 0; | ||
148 | } | ||
149 | |||
150 | static u32 | ||
151 | make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen, | ||
152 | struct xdr_buf *body, int body_offset, u8 *cksumkey, | ||
153 | unsigned int usage, struct xdr_netobj *cksumout) | ||
130 | { | 154 | { |
131 | struct hash_desc desc; /* XXX add to ctx? */ | 155 | struct hash_desc desc; |
132 | struct scatterlist sg[1]; | 156 | struct scatterlist sg[1]; |
133 | int err; | 157 | int err; |
158 | u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; | ||
159 | u8 rc4salt[4]; | ||
160 | struct crypto_hash *md5; | ||
161 | struct crypto_hash *hmac_md5; | ||
162 | |||
163 | if (cksumkey == NULL) | ||
164 | return GSS_S_FAILURE; | ||
165 | |||
166 | if (cksumout->len < kctx->gk5e->cksumlength) { | ||
167 | dprintk("%s: checksum buffer length, %u, too small for %s\n", | ||
168 | __func__, cksumout->len, kctx->gk5e->name); | ||
169 | return GSS_S_FAILURE; | ||
170 | } | ||
171 | |||
172 | if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) { | ||
173 | dprintk("%s: invalid usage value %u\n", __func__, usage); | ||
174 | return GSS_S_FAILURE; | ||
175 | } | ||
176 | |||
177 | md5 = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); | ||
178 | if (IS_ERR(md5)) | ||
179 | return GSS_S_FAILURE; | ||
180 | |||
181 | hmac_md5 = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, | ||
182 | CRYPTO_ALG_ASYNC); | ||
183 | if (IS_ERR(hmac_md5)) { | ||
184 | crypto_free_hash(md5); | ||
185 | return GSS_S_FAILURE; | ||
186 | } | ||
187 | |||
188 | desc.tfm = md5; | ||
189 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | ||
190 | |||
191 | err = crypto_hash_init(&desc); | ||
192 | if (err) | ||
193 | goto out; | ||
194 | sg_init_one(sg, rc4salt, 4); | ||
195 | err = crypto_hash_update(&desc, sg, 4); | ||
196 | if (err) | ||
197 | goto out; | ||
198 | |||
199 | sg_init_one(sg, header, hdrlen); | ||
200 | err = crypto_hash_update(&desc, sg, hdrlen); | ||
201 | if (err) | ||
202 | goto out; | ||
203 | err = xdr_process_buf(body, body_offset, body->len - body_offset, | ||
204 | checksummer, &desc); | ||
205 | if (err) | ||
206 | goto out; | ||
207 | err = crypto_hash_final(&desc, checksumdata); | ||
208 | if (err) | ||
209 | goto out; | ||
210 | |||
211 | desc.tfm = hmac_md5; | ||
212 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | ||
213 | |||
214 | err = crypto_hash_init(&desc); | ||
215 | if (err) | ||
216 | goto out; | ||
217 | err = crypto_hash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength); | ||
218 | if (err) | ||
219 | goto out; | ||
220 | |||
221 | sg_init_one(sg, checksumdata, crypto_hash_digestsize(md5)); | ||
222 | err = crypto_hash_digest(&desc, sg, crypto_hash_digestsize(md5), | ||
223 | checksumdata); | ||
224 | if (err) | ||
225 | goto out; | ||
226 | |||
227 | memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength); | ||
228 | cksumout->len = kctx->gk5e->cksumlength; | ||
229 | out: | ||
230 | crypto_free_hash(md5); | ||
231 | crypto_free_hash(hmac_md5); | ||
232 | return err ? GSS_S_FAILURE : 0; | ||
233 | } | ||
234 | |||
235 | /* | ||
236 | * checksum the plaintext data and hdrlen bytes of the token header | ||
237 | * The checksum is performed over the first 8 bytes of the | ||
238 | * gss token header and then over the data body | ||
239 | */ | ||
240 | u32 | ||
241 | make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen, | ||
242 | struct xdr_buf *body, int body_offset, u8 *cksumkey, | ||
243 | unsigned int usage, struct xdr_netobj *cksumout) | ||
244 | { | ||
245 | struct hash_desc desc; | ||
246 | struct scatterlist sg[1]; | ||
247 | int err; | ||
248 | u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; | ||
249 | unsigned int checksumlen; | ||
250 | |||
251 | if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR) | ||
252 | return make_checksum_hmac_md5(kctx, header, hdrlen, | ||
253 | body, body_offset, | ||
254 | cksumkey, usage, cksumout); | ||
255 | |||
256 | if (cksumout->len < kctx->gk5e->cksumlength) { | ||
257 | dprintk("%s: checksum buffer length, %u, too small for %s\n", | ||
258 | __func__, cksumout->len, kctx->gk5e->name); | ||
259 | return GSS_S_FAILURE; | ||
260 | } | ||
134 | 261 | ||
135 | desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC); | 262 | desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); |
136 | if (IS_ERR(desc.tfm)) | 263 | if (IS_ERR(desc.tfm)) |
137 | return GSS_S_FAILURE; | 264 | return GSS_S_FAILURE; |
138 | cksum->len = crypto_hash_digestsize(desc.tfm); | ||
139 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 265 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
140 | 266 | ||
267 | checksumlen = crypto_hash_digestsize(desc.tfm); | ||
268 | |||
269 | if (cksumkey != NULL) { | ||
270 | err = crypto_hash_setkey(desc.tfm, cksumkey, | ||
271 | kctx->gk5e->keylength); | ||
272 | if (err) | ||
273 | goto out; | ||
274 | } | ||
275 | |||
141 | err = crypto_hash_init(&desc); | 276 | err = crypto_hash_init(&desc); |
142 | if (err) | 277 | if (err) |
143 | goto out; | 278 | goto out; |
@@ -149,15 +284,109 @@ make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body, | |||
149 | checksummer, &desc); | 284 | checksummer, &desc); |
150 | if (err) | 285 | if (err) |
151 | goto out; | 286 | goto out; |
152 | err = crypto_hash_final(&desc, cksum->data); | 287 | err = crypto_hash_final(&desc, checksumdata); |
288 | if (err) | ||
289 | goto out; | ||
153 | 290 | ||
291 | switch (kctx->gk5e->ctype) { | ||
292 | case CKSUMTYPE_RSA_MD5: | ||
293 | err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata, | ||
294 | checksumdata, checksumlen); | ||
295 | if (err) | ||
296 | goto out; | ||
297 | memcpy(cksumout->data, | ||
298 | checksumdata + checksumlen - kctx->gk5e->cksumlength, | ||
299 | kctx->gk5e->cksumlength); | ||
300 | break; | ||
301 | case CKSUMTYPE_HMAC_SHA1_DES3: | ||
302 | memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength); | ||
303 | break; | ||
304 | default: | ||
305 | BUG(); | ||
306 | break; | ||
307 | } | ||
308 | cksumout->len = kctx->gk5e->cksumlength; | ||
309 | out: | ||
310 | crypto_free_hash(desc.tfm); | ||
311 | return err ? GSS_S_FAILURE : 0; | ||
312 | } | ||
313 | |||
314 | /* | ||
315 | * checksum the plaintext data and hdrlen bytes of the token header | ||
316 | * Per rfc4121, sec. 4.2.4, the checksum is performed over the data | ||
317 | * body then over the first 16 octets of the MIC token | ||
318 | * Inclusion of the header data in the calculation of the | ||
319 | * checksum is optional. | ||
320 | */ | ||
321 | u32 | ||
322 | make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen, | ||
323 | struct xdr_buf *body, int body_offset, u8 *cksumkey, | ||
324 | unsigned int usage, struct xdr_netobj *cksumout) | ||
325 | { | ||
326 | struct hash_desc desc; | ||
327 | struct scatterlist sg[1]; | ||
328 | int err; | ||
329 | u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; | ||
330 | unsigned int checksumlen; | ||
331 | |||
332 | if (kctx->gk5e->keyed_cksum == 0) { | ||
333 | dprintk("%s: expected keyed hash for %s\n", | ||
334 | __func__, kctx->gk5e->name); | ||
335 | return GSS_S_FAILURE; | ||
336 | } | ||
337 | if (cksumkey == NULL) { | ||
338 | dprintk("%s: no key supplied for %s\n", | ||
339 | __func__, kctx->gk5e->name); | ||
340 | return GSS_S_FAILURE; | ||
341 | } | ||
342 | |||
343 | desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, | ||
344 | CRYPTO_ALG_ASYNC); | ||
345 | if (IS_ERR(desc.tfm)) | ||
346 | return GSS_S_FAILURE; | ||
347 | checksumlen = crypto_hash_digestsize(desc.tfm); | ||
348 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | ||
349 | |||
350 | err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength); | ||
351 | if (err) | ||
352 | goto out; | ||
353 | |||
354 | err = crypto_hash_init(&desc); | ||
355 | if (err) | ||
356 | goto out; | ||
357 | err = xdr_process_buf(body, body_offset, body->len - body_offset, | ||
358 | checksummer, &desc); | ||
359 | if (err) | ||
360 | goto out; | ||
361 | if (header != NULL) { | ||
362 | sg_init_one(sg, header, hdrlen); | ||
363 | err = crypto_hash_update(&desc, sg, hdrlen); | ||
364 | if (err) | ||
365 | goto out; | ||
366 | } | ||
367 | err = crypto_hash_final(&desc, checksumdata); | ||
368 | if (err) | ||
369 | goto out; | ||
370 | |||
371 | cksumout->len = kctx->gk5e->cksumlength; | ||
372 | |||
373 | switch (kctx->gk5e->ctype) { | ||
374 | case CKSUMTYPE_HMAC_SHA1_96_AES128: | ||
375 | case CKSUMTYPE_HMAC_SHA1_96_AES256: | ||
376 | /* note that this truncates the hash */ | ||
377 | memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength); | ||
378 | break; | ||
379 | default: | ||
380 | BUG(); | ||
381 | break; | ||
382 | } | ||
154 | out: | 383 | out: |
155 | crypto_free_hash(desc.tfm); | 384 | crypto_free_hash(desc.tfm); |
156 | return err ? GSS_S_FAILURE : 0; | 385 | return err ? GSS_S_FAILURE : 0; |
157 | } | 386 | } |
158 | 387 | ||
159 | struct encryptor_desc { | 388 | struct encryptor_desc { |
160 | u8 iv[8]; /* XXX hard-coded blocksize */ | 389 | u8 iv[GSS_KRB5_MAX_BLOCKSIZE]; |
161 | struct blkcipher_desc desc; | 390 | struct blkcipher_desc desc; |
162 | int pos; | 391 | int pos; |
163 | struct xdr_buf *outbuf; | 392 | struct xdr_buf *outbuf; |
@@ -198,7 +427,7 @@ encryptor(struct scatterlist *sg, void *data) | |||
198 | desc->fraglen += sg->length; | 427 | desc->fraglen += sg->length; |
199 | desc->pos += sg->length; | 428 | desc->pos += sg->length; |
200 | 429 | ||
201 | fraglen = thislen & 7; /* XXX hardcoded blocksize */ | 430 | fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1); |
202 | thislen -= fraglen; | 431 | thislen -= fraglen; |
203 | 432 | ||
204 | if (thislen == 0) | 433 | if (thislen == 0) |
@@ -256,7 +485,7 @@ gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf, | |||
256 | } | 485 | } |
257 | 486 | ||
258 | struct decryptor_desc { | 487 | struct decryptor_desc { |
259 | u8 iv[8]; /* XXX hard-coded blocksize */ | 488 | u8 iv[GSS_KRB5_MAX_BLOCKSIZE]; |
260 | struct blkcipher_desc desc; | 489 | struct blkcipher_desc desc; |
261 | struct scatterlist frags[4]; | 490 | struct scatterlist frags[4]; |
262 | int fragno; | 491 | int fragno; |
@@ -278,7 +507,7 @@ decryptor(struct scatterlist *sg, void *data) | |||
278 | desc->fragno++; | 507 | desc->fragno++; |
279 | desc->fraglen += sg->length; | 508 | desc->fraglen += sg->length; |
280 | 509 | ||
281 | fraglen = thislen & 7; /* XXX hardcoded blocksize */ | 510 | fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1); |
282 | thislen -= fraglen; | 511 | thislen -= fraglen; |
283 | 512 | ||
284 | if (thislen == 0) | 513 | if (thislen == 0) |
@@ -325,3 +554,437 @@ gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf, | |||
325 | 554 | ||
326 | return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc); | 555 | return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc); |
327 | } | 556 | } |
557 | |||
558 | /* | ||
559 | * This function makes the assumption that it was ultimately called | ||
560 | * from gss_wrap(). | ||
561 | * | ||
562 | * The client auth_gss code moves any existing tail data into a | ||
563 | * separate page before calling gss_wrap. | ||
564 | * The server svcauth_gss code ensures that both the head and the | ||
565 | * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap. | ||
566 | * | ||
567 | * Even with that guarantee, this function may be called more than | ||
568 | * once in the processing of gss_wrap(). The best we can do is | ||
569 | * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the | ||
570 | * largest expected shift will fit within RPC_MAX_AUTH_SIZE. | ||
571 | * At run-time we can verify that a single invocation of this | ||
572 | * function doesn't attempt to use more the RPC_MAX_AUTH_SIZE. | ||
573 | */ | ||
574 | |||
575 | int | ||
576 | xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen) | ||
577 | { | ||
578 | u8 *p; | ||
579 | |||
580 | if (shiftlen == 0) | ||
581 | return 0; | ||
582 | |||
583 | BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE); | ||
584 | BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE); | ||
585 | |||
586 | p = buf->head[0].iov_base + base; | ||
587 | |||
588 | memmove(p + shiftlen, p, buf->head[0].iov_len - base); | ||
589 | |||
590 | buf->head[0].iov_len += shiftlen; | ||
591 | buf->len += shiftlen; | ||
592 | |||
593 | return 0; | ||
594 | } | ||
595 | |||
596 | static u32 | ||
597 | gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf, | ||
598 | u32 offset, u8 *iv, struct page **pages, int encrypt) | ||
599 | { | ||
600 | u32 ret; | ||
601 | struct scatterlist sg[1]; | ||
602 | struct blkcipher_desc desc = { .tfm = cipher, .info = iv }; | ||
603 | u8 data[crypto_blkcipher_blocksize(cipher) * 2]; | ||
604 | struct page **save_pages; | ||
605 | u32 len = buf->len - offset; | ||
606 | |||
607 | BUG_ON(len > crypto_blkcipher_blocksize(cipher) * 2); | ||
608 | |||
609 | /* | ||
610 | * For encryption, we want to read from the cleartext | ||
611 | * page cache pages, and write the encrypted data to | ||
612 | * the supplied xdr_buf pages. | ||
613 | */ | ||
614 | save_pages = buf->pages; | ||
615 | if (encrypt) | ||
616 | buf->pages = pages; | ||
617 | |||
618 | ret = read_bytes_from_xdr_buf(buf, offset, data, len); | ||
619 | buf->pages = save_pages; | ||
620 | if (ret) | ||
621 | goto out; | ||
622 | |||
623 | sg_init_one(sg, data, len); | ||
624 | |||
625 | if (encrypt) | ||
626 | ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len); | ||
627 | else | ||
628 | ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len); | ||
629 | |||
630 | if (ret) | ||
631 | goto out; | ||
632 | |||
633 | ret = write_bytes_to_xdr_buf(buf, offset, data, len); | ||
634 | |||
635 | out: | ||
636 | return ret; | ||
637 | } | ||
638 | |||
639 | u32 | ||
640 | gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset, | ||
641 | struct xdr_buf *buf, int ec, struct page **pages) | ||
642 | { | ||
643 | u32 err; | ||
644 | struct xdr_netobj hmac; | ||
645 | u8 *cksumkey; | ||
646 | u8 *ecptr; | ||
647 | struct crypto_blkcipher *cipher, *aux_cipher; | ||
648 | int blocksize; | ||
649 | struct page **save_pages; | ||
650 | int nblocks, nbytes; | ||
651 | struct encryptor_desc desc; | ||
652 | u32 cbcbytes; | ||
653 | unsigned int usage; | ||
654 | |||
655 | if (kctx->initiate) { | ||
656 | cipher = kctx->initiator_enc; | ||
657 | aux_cipher = kctx->initiator_enc_aux; | ||
658 | cksumkey = kctx->initiator_integ; | ||
659 | usage = KG_USAGE_INITIATOR_SEAL; | ||
660 | } else { | ||
661 | cipher = kctx->acceptor_enc; | ||
662 | aux_cipher = kctx->acceptor_enc_aux; | ||
663 | cksumkey = kctx->acceptor_integ; | ||
664 | usage = KG_USAGE_ACCEPTOR_SEAL; | ||
665 | } | ||
666 | blocksize = crypto_blkcipher_blocksize(cipher); | ||
667 | |||
668 | /* hide the gss token header and insert the confounder */ | ||
669 | offset += GSS_KRB5_TOK_HDR_LEN; | ||
670 | if (xdr_extend_head(buf, offset, kctx->gk5e->conflen)) | ||
671 | return GSS_S_FAILURE; | ||
672 | gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen); | ||
673 | offset -= GSS_KRB5_TOK_HDR_LEN; | ||
674 | |||
675 | if (buf->tail[0].iov_base != NULL) { | ||
676 | ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len; | ||
677 | } else { | ||
678 | buf->tail[0].iov_base = buf->head[0].iov_base | ||
679 | + buf->head[0].iov_len; | ||
680 | buf->tail[0].iov_len = 0; | ||
681 | ecptr = buf->tail[0].iov_base; | ||
682 | } | ||
683 | |||
684 | memset(ecptr, 'X', ec); | ||
685 | buf->tail[0].iov_len += ec; | ||
686 | buf->len += ec; | ||
687 | |||
688 | /* copy plaintext gss token header after filler (if any) */ | ||
689 | memcpy(ecptr + ec, buf->head[0].iov_base + offset, | ||
690 | GSS_KRB5_TOK_HDR_LEN); | ||
691 | buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN; | ||
692 | buf->len += GSS_KRB5_TOK_HDR_LEN; | ||
693 | |||
694 | /* Do the HMAC */ | ||
695 | hmac.len = GSS_KRB5_MAX_CKSUM_LEN; | ||
696 | hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len; | ||
697 | |||
698 | /* | ||
699 | * When we are called, pages points to the real page cache | ||
700 | * data -- which we can't go and encrypt! buf->pages points | ||
701 | * to scratch pages which we are going to send off to the | ||
702 | * client/server. Swap in the plaintext pages to calculate | ||
703 | * the hmac. | ||
704 | */ | ||
705 | save_pages = buf->pages; | ||
706 | buf->pages = pages; | ||
707 | |||
708 | err = make_checksum_v2(kctx, NULL, 0, buf, | ||
709 | offset + GSS_KRB5_TOK_HDR_LEN, | ||
710 | cksumkey, usage, &hmac); | ||
711 | buf->pages = save_pages; | ||
712 | if (err) | ||
713 | return GSS_S_FAILURE; | ||
714 | |||
715 | nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN; | ||
716 | nblocks = (nbytes + blocksize - 1) / blocksize; | ||
717 | cbcbytes = 0; | ||
718 | if (nblocks > 2) | ||
719 | cbcbytes = (nblocks - 2) * blocksize; | ||
720 | |||
721 | memset(desc.iv, 0, sizeof(desc.iv)); | ||
722 | |||
723 | if (cbcbytes) { | ||
724 | desc.pos = offset + GSS_KRB5_TOK_HDR_LEN; | ||
725 | desc.fragno = 0; | ||
726 | desc.fraglen = 0; | ||
727 | desc.pages = pages; | ||
728 | desc.outbuf = buf; | ||
729 | desc.desc.info = desc.iv; | ||
730 | desc.desc.flags = 0; | ||
731 | desc.desc.tfm = aux_cipher; | ||
732 | |||
733 | sg_init_table(desc.infrags, 4); | ||
734 | sg_init_table(desc.outfrags, 4); | ||
735 | |||
736 | err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN, | ||
737 | cbcbytes, encryptor, &desc); | ||
738 | if (err) | ||
739 | goto out_err; | ||
740 | } | ||
741 | |||
742 | /* Make sure IV carries forward from any CBC results. */ | ||
743 | err = gss_krb5_cts_crypt(cipher, buf, | ||
744 | offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes, | ||
745 | desc.iv, pages, 1); | ||
746 | if (err) { | ||
747 | err = GSS_S_FAILURE; | ||
748 | goto out_err; | ||
749 | } | ||
750 | |||
751 | /* Now update buf to account for HMAC */ | ||
752 | buf->tail[0].iov_len += kctx->gk5e->cksumlength; | ||
753 | buf->len += kctx->gk5e->cksumlength; | ||
754 | |||
755 | out_err: | ||
756 | if (err) | ||
757 | err = GSS_S_FAILURE; | ||
758 | return err; | ||
759 | } | ||
760 | |||
761 | u32 | ||
762 | gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, | ||
763 | u32 *headskip, u32 *tailskip) | ||
764 | { | ||
765 | struct xdr_buf subbuf; | ||
766 | u32 ret = 0; | ||
767 | u8 *cksum_key; | ||
768 | struct crypto_blkcipher *cipher, *aux_cipher; | ||
769 | struct xdr_netobj our_hmac_obj; | ||
770 | u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN]; | ||
771 | u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN]; | ||
772 | int nblocks, blocksize, cbcbytes; | ||
773 | struct decryptor_desc desc; | ||
774 | unsigned int usage; | ||
775 | |||
776 | if (kctx->initiate) { | ||
777 | cipher = kctx->acceptor_enc; | ||
778 | aux_cipher = kctx->acceptor_enc_aux; | ||
779 | cksum_key = kctx->acceptor_integ; | ||
780 | usage = KG_USAGE_ACCEPTOR_SEAL; | ||
781 | } else { | ||
782 | cipher = kctx->initiator_enc; | ||
783 | aux_cipher = kctx->initiator_enc_aux; | ||
784 | cksum_key = kctx->initiator_integ; | ||
785 | usage = KG_USAGE_INITIATOR_SEAL; | ||
786 | } | ||
787 | blocksize = crypto_blkcipher_blocksize(cipher); | ||
788 | |||
789 | |||
790 | /* create a segment skipping the header and leaving out the checksum */ | ||
791 | xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN, | ||
792 | (buf->len - offset - GSS_KRB5_TOK_HDR_LEN - | ||
793 | kctx->gk5e->cksumlength)); | ||
794 | |||
795 | nblocks = (subbuf.len + blocksize - 1) / blocksize; | ||
796 | |||
797 | cbcbytes = 0; | ||
798 | if (nblocks > 2) | ||
799 | cbcbytes = (nblocks - 2) * blocksize; | ||
800 | |||
801 | memset(desc.iv, 0, sizeof(desc.iv)); | ||
802 | |||
803 | if (cbcbytes) { | ||
804 | desc.fragno = 0; | ||
805 | desc.fraglen = 0; | ||
806 | desc.desc.info = desc.iv; | ||
807 | desc.desc.flags = 0; | ||
808 | desc.desc.tfm = aux_cipher; | ||
809 | |||
810 | sg_init_table(desc.frags, 4); | ||
811 | |||
812 | ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc); | ||
813 | if (ret) | ||
814 | goto out_err; | ||
815 | } | ||
816 | |||
817 | /* Make sure IV carries forward from any CBC results. */ | ||
818 | ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0); | ||
819 | if (ret) | ||
820 | goto out_err; | ||
821 | |||
822 | |||
823 | /* Calculate our hmac over the plaintext data */ | ||
824 | our_hmac_obj.len = sizeof(our_hmac); | ||
825 | our_hmac_obj.data = our_hmac; | ||
826 | |||
827 | ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0, | ||
828 | cksum_key, usage, &our_hmac_obj); | ||
829 | if (ret) | ||
830 | goto out_err; | ||
831 | |||
832 | /* Get the packet's hmac value */ | ||
833 | ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength, | ||
834 | pkt_hmac, kctx->gk5e->cksumlength); | ||
835 | if (ret) | ||
836 | goto out_err; | ||
837 | |||
838 | if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) { | ||
839 | ret = GSS_S_BAD_SIG; | ||
840 | goto out_err; | ||
841 | } | ||
842 | *headskip = kctx->gk5e->conflen; | ||
843 | *tailskip = kctx->gk5e->cksumlength; | ||
844 | out_err: | ||
845 | if (ret && ret != GSS_S_BAD_SIG) | ||
846 | ret = GSS_S_FAILURE; | ||
847 | return ret; | ||
848 | } | ||
849 | |||
850 | /* | ||
851 | * Compute Kseq given the initial session key and the checksum. | ||
852 | * Set the key of the given cipher. | ||
853 | */ | ||
854 | int | ||
855 | krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher, | ||
856 | unsigned char *cksum) | ||
857 | { | ||
858 | struct crypto_hash *hmac; | ||
859 | struct hash_desc desc; | ||
860 | struct scatterlist sg[1]; | ||
861 | u8 Kseq[GSS_KRB5_MAX_KEYLEN]; | ||
862 | u32 zeroconstant = 0; | ||
863 | int err; | ||
864 | |||
865 | dprintk("%s: entered\n", __func__); | ||
866 | |||
867 | hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); | ||
868 | if (IS_ERR(hmac)) { | ||
869 | dprintk("%s: error %ld, allocating hash '%s'\n", | ||
870 | __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name); | ||
871 | return PTR_ERR(hmac); | ||
872 | } | ||
873 | |||
874 | desc.tfm = hmac; | ||
875 | desc.flags = 0; | ||
876 | |||
877 | err = crypto_hash_init(&desc); | ||
878 | if (err) | ||
879 | goto out_err; | ||
880 | |||
881 | /* Compute intermediate Kseq from session key */ | ||
882 | err = crypto_hash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength); | ||
883 | if (err) | ||
884 | goto out_err; | ||
885 | |||
886 | sg_init_table(sg, 1); | ||
887 | sg_set_buf(sg, &zeroconstant, 4); | ||
888 | |||
889 | err = crypto_hash_digest(&desc, sg, 4, Kseq); | ||
890 | if (err) | ||
891 | goto out_err; | ||
892 | |||
893 | /* Compute final Kseq from the checksum and intermediate Kseq */ | ||
894 | err = crypto_hash_setkey(hmac, Kseq, kctx->gk5e->keylength); | ||
895 | if (err) | ||
896 | goto out_err; | ||
897 | |||
898 | sg_set_buf(sg, cksum, 8); | ||
899 | |||
900 | err = crypto_hash_digest(&desc, sg, 8, Kseq); | ||
901 | if (err) | ||
902 | goto out_err; | ||
903 | |||
904 | err = crypto_blkcipher_setkey(cipher, Kseq, kctx->gk5e->keylength); | ||
905 | if (err) | ||
906 | goto out_err; | ||
907 | |||
908 | err = 0; | ||
909 | |||
910 | out_err: | ||
911 | crypto_free_hash(hmac); | ||
912 | dprintk("%s: returning %d\n", __func__, err); | ||
913 | return err; | ||
914 | } | ||
915 | |||
916 | /* | ||
917 | * Compute Kcrypt given the initial session key and the plaintext seqnum. | ||
918 | * Set the key of cipher kctx->enc. | ||
919 | */ | ||
920 | int | ||
921 | krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher, | ||
922 | s32 seqnum) | ||
923 | { | ||
924 | struct crypto_hash *hmac; | ||
925 | struct hash_desc desc; | ||
926 | struct scatterlist sg[1]; | ||
927 | u8 Kcrypt[GSS_KRB5_MAX_KEYLEN]; | ||
928 | u8 zeroconstant[4] = {0}; | ||
929 | u8 seqnumarray[4]; | ||
930 | int err, i; | ||
931 | |||
932 | dprintk("%s: entered, seqnum %u\n", __func__, seqnum); | ||
933 | |||
934 | hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); | ||
935 | if (IS_ERR(hmac)) { | ||
936 | dprintk("%s: error %ld, allocating hash '%s'\n", | ||
937 | __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name); | ||
938 | return PTR_ERR(hmac); | ||
939 | } | ||
940 | |||
941 | desc.tfm = hmac; | ||
942 | desc.flags = 0; | ||
943 | |||
944 | err = crypto_hash_init(&desc); | ||
945 | if (err) | ||
946 | goto out_err; | ||
947 | |||
948 | /* Compute intermediate Kcrypt from session key */ | ||
949 | for (i = 0; i < kctx->gk5e->keylength; i++) | ||
950 | Kcrypt[i] = kctx->Ksess[i] ^ 0xf0; | ||
951 | |||
952 | err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength); | ||
953 | if (err) | ||
954 | goto out_err; | ||
955 | |||
956 | sg_init_table(sg, 1); | ||
957 | sg_set_buf(sg, zeroconstant, 4); | ||
958 | |||
959 | err = crypto_hash_digest(&desc, sg, 4, Kcrypt); | ||
960 | if (err) | ||
961 | goto out_err; | ||
962 | |||
963 | /* Compute final Kcrypt from the seqnum and intermediate Kcrypt */ | ||
964 | err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength); | ||
965 | if (err) | ||
966 | goto out_err; | ||
967 | |||
968 | seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff); | ||
969 | seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff); | ||
970 | seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff); | ||
971 | seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff); | ||
972 | |||
973 | sg_set_buf(sg, seqnumarray, 4); | ||
974 | |||
975 | err = crypto_hash_digest(&desc, sg, 4, Kcrypt); | ||
976 | if (err) | ||
977 | goto out_err; | ||
978 | |||
979 | err = crypto_blkcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength); | ||
980 | if (err) | ||
981 | goto out_err; | ||
982 | |||
983 | err = 0; | ||
984 | |||
985 | out_err: | ||
986 | crypto_free_hash(hmac); | ||
987 | dprintk("%s: returning %d\n", __func__, err); | ||
988 | return err; | ||
989 | } | ||
990 | |||
diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c new file mode 100644 index 000000000000..76e42e6be755 --- /dev/null +++ b/net/sunrpc/auth_gss/gss_krb5_keys.c | |||
@@ -0,0 +1,336 @@ | |||
1 | /* | ||
2 | * COPYRIGHT (c) 2008 | ||
3 | * The Regents of the University of Michigan | ||
4 | * ALL RIGHTS RESERVED | ||
5 | * | ||
6 | * Permission is granted to use, copy, create derivative works | ||
7 | * and redistribute this software and such derivative works | ||
8 | * for any purpose, so long as the name of The University of | ||
9 | * Michigan is not used in any advertising or publicity | ||
10 | * pertaining to the use of distribution of this software | ||
11 | * without specific, written prior authorization. If the | ||
12 | * above copyright notice or any other identification of the | ||
13 | * University of Michigan is included in any copy of any | ||
14 | * portion of this software, then the disclaimer below must | ||
15 | * also be included. | ||
16 | * | ||
17 | * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION | ||
18 | * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY | ||
19 | * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF | ||
20 | * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING | ||
21 | * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF | ||
22 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE | ||
23 | * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE | ||
24 | * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR | ||
25 | * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING | ||
26 | * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN | ||
27 | * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF | ||
28 | * SUCH DAMAGES. | ||
29 | */ | ||
30 | |||
31 | /* | ||
32 | * Copyright (C) 1998 by the FundsXpress, INC. | ||
33 | * | ||
34 | * All rights reserved. | ||
35 | * | ||
36 | * Export of this software from the United States of America may require | ||
37 | * a specific license from the United States Government. It is the | ||
38 | * responsibility of any person or organization contemplating export to | ||
39 | * obtain such a license before exporting. | ||
40 | * | ||
41 | * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and | ||
42 | * distribute this software and its documentation for any purpose and | ||
43 | * without fee is hereby granted, provided that the above copyright | ||
44 | * notice appear in all copies and that both that copyright notice and | ||
45 | * this permission notice appear in supporting documentation, and that | ||
46 | * the name of FundsXpress. not be used in advertising or publicity pertaining | ||
47 | * to distribution of the software without specific, written prior | ||
48 | * permission. FundsXpress makes no representations about the suitability of | ||
49 | * this software for any purpose. It is provided "as is" without express | ||
50 | * or implied warranty. | ||
51 | * | ||
52 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
53 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
54 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
55 | */ | ||
56 | |||
57 | #include <linux/err.h> | ||
58 | #include <linux/types.h> | ||
59 | #include <linux/crypto.h> | ||
60 | #include <linux/sunrpc/gss_krb5.h> | ||
61 | #include <linux/sunrpc/xdr.h> | ||
62 | |||
63 | #ifdef RPC_DEBUG | ||
64 | # define RPCDBG_FACILITY RPCDBG_AUTH | ||
65 | #endif | ||
66 | |||
/*
 * This is the n-fold function as described in rfc3961, sec 5.1
 * Taken from MIT Kerberos and modified.
 *
 * Folds the inbits-bit input string @in into the outbits-bit output
 * @out by summing lcm(inbits, outbits) bits' worth of 13-bit-rotated
 * copies of the input, one output-sized chunk at a time, using
 * ones'-complement (end-around carry) addition.  Both bit counts are
 * expected to be multiples of 8.
 */

static void krb5_nfold(u32 inbits, const u8 *in,
		       u32 outbits, u8 *out)
{
	int a, b, c, lcm;
	int byte, i, msbit;

	/* the code below is more readable if I make these bytes
	   instead of bits */

	inbits >>= 3;
	outbits >>= 3;

	/* first compute lcm(n,k) */

	/* Euclid's algorithm: when the loop ends, a == gcd(outbits, inbits) */
	a = outbits;
	b = inbits;

	while (b != 0) {
		c = b;
		b = a%b;
		a = c;
	}

	/* lcm(x, y) == x*y / gcd(x, y) */
	lcm = outbits*inbits/a;

	/* now do the real work */

	memset(out, 0, outbits);
	byte = 0;	/* running sum; carry accumulates above bit 7 */

	/* this will end up cycling through k lcm(k,n)/k times, which
	   is correct */
	for (i = lcm-1; i >= 0; i--) {
		/* compute the msbit in k which gets added into this byte */
		msbit = (
			/* first, start with the msbit in the first,
			 * unrotated byte */
			 ((inbits << 3) - 1)
			/* then, for each byte, shift to the right
			 * for each repetition */
			+ (((inbits << 3) + 13) * (i/inbits))
			/* last, pick out the correct byte within
			 * that shifted repetition */
			+ ((inbits - (i % inbits)) << 3)
			) % (inbits << 3);

		/* pull out the byte value itself */
		byte += (((in[((inbits - 1) - (msbit >> 3)) % inbits] << 8)|
				  (in[((inbits) - (msbit >> 3)) % inbits]))
				 >> ((msbit & 7) + 1)) & 0xff;

		/* do the addition */
		byte += out[i % outbits];
		out[i % outbits] = byte & 0xff;

		/* keep around the carry bit, if any */
		byte >>= 8;

	}

	/* if there's a carry bit left over, add it back in */
	if (byte) {
		for (i = outbits - 1; i >= 0; i--) {
			/* do the addition */
			byte += out[i];
			out[i] = byte & 0xff;

			/* keep around the carry bit, if any */
			byte >>= 8;
		}
	}
}
144 | |||
145 | /* | ||
146 | * This is the DK (derive_key) function as described in rfc3961, sec 5.1 | ||
147 | * Taken from MIT Kerberos and modified. | ||
148 | */ | ||
149 | |||
150 | u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e, | ||
151 | const struct xdr_netobj *inkey, | ||
152 | struct xdr_netobj *outkey, | ||
153 | const struct xdr_netobj *in_constant, | ||
154 | gfp_t gfp_mask) | ||
155 | { | ||
156 | size_t blocksize, keybytes, keylength, n; | ||
157 | unsigned char *inblockdata, *outblockdata, *rawkey; | ||
158 | struct xdr_netobj inblock, outblock; | ||
159 | struct crypto_blkcipher *cipher; | ||
160 | u32 ret = EINVAL; | ||
161 | |||
162 | blocksize = gk5e->blocksize; | ||
163 | keybytes = gk5e->keybytes; | ||
164 | keylength = gk5e->keylength; | ||
165 | |||
166 | if ((inkey->len != keylength) || (outkey->len != keylength)) | ||
167 | goto err_return; | ||
168 | |||
169 | cipher = crypto_alloc_blkcipher(gk5e->encrypt_name, 0, | ||
170 | CRYPTO_ALG_ASYNC); | ||
171 | if (IS_ERR(cipher)) | ||
172 | goto err_return; | ||
173 | if (crypto_blkcipher_setkey(cipher, inkey->data, inkey->len)) | ||
174 | goto err_return; | ||
175 | |||
176 | /* allocate and set up buffers */ | ||
177 | |||
178 | ret = ENOMEM; | ||
179 | inblockdata = kmalloc(blocksize, gfp_mask); | ||
180 | if (inblockdata == NULL) | ||
181 | goto err_free_cipher; | ||
182 | |||
183 | outblockdata = kmalloc(blocksize, gfp_mask); | ||
184 | if (outblockdata == NULL) | ||
185 | goto err_free_in; | ||
186 | |||
187 | rawkey = kmalloc(keybytes, gfp_mask); | ||
188 | if (rawkey == NULL) | ||
189 | goto err_free_out; | ||
190 | |||
191 | inblock.data = (char *) inblockdata; | ||
192 | inblock.len = blocksize; | ||
193 | |||
194 | outblock.data = (char *) outblockdata; | ||
195 | outblock.len = blocksize; | ||
196 | |||
197 | /* initialize the input block */ | ||
198 | |||
199 | if (in_constant->len == inblock.len) { | ||
200 | memcpy(inblock.data, in_constant->data, inblock.len); | ||
201 | } else { | ||
202 | krb5_nfold(in_constant->len * 8, in_constant->data, | ||
203 | inblock.len * 8, inblock.data); | ||
204 | } | ||
205 | |||
206 | /* loop encrypting the blocks until enough key bytes are generated */ | ||
207 | |||
208 | n = 0; | ||
209 | while (n < keybytes) { | ||
210 | (*(gk5e->encrypt))(cipher, NULL, inblock.data, | ||
211 | outblock.data, inblock.len); | ||
212 | |||
213 | if ((keybytes - n) <= outblock.len) { | ||
214 | memcpy(rawkey + n, outblock.data, (keybytes - n)); | ||
215 | break; | ||
216 | } | ||
217 | |||
218 | memcpy(rawkey + n, outblock.data, outblock.len); | ||
219 | memcpy(inblock.data, outblock.data, outblock.len); | ||
220 | n += outblock.len; | ||
221 | } | ||
222 | |||
223 | /* postprocess the key */ | ||
224 | |||
225 | inblock.data = (char *) rawkey; | ||
226 | inblock.len = keybytes; | ||
227 | |||
228 | BUG_ON(gk5e->mk_key == NULL); | ||
229 | ret = (*(gk5e->mk_key))(gk5e, &inblock, outkey); | ||
230 | if (ret) { | ||
231 | dprintk("%s: got %d from mk_key function for '%s'\n", | ||
232 | __func__, ret, gk5e->encrypt_name); | ||
233 | goto err_free_raw; | ||
234 | } | ||
235 | |||
236 | /* clean memory, free resources and exit */ | ||
237 | |||
238 | ret = 0; | ||
239 | |||
240 | err_free_raw: | ||
241 | memset(rawkey, 0, keybytes); | ||
242 | kfree(rawkey); | ||
243 | err_free_out: | ||
244 | memset(outblockdata, 0, blocksize); | ||
245 | kfree(outblockdata); | ||
246 | err_free_in: | ||
247 | memset(inblockdata, 0, blocksize); | ||
248 | kfree(inblockdata); | ||
249 | err_free_cipher: | ||
250 | crypto_free_blkcipher(cipher); | ||
251 | err_return: | ||
252 | return ret; | ||
253 | } | ||
254 | |||
255 | #define smask(step) ((1<<step)-1) | ||
256 | #define pstep(x, step) (((x)&smask(step))^(((x)>>step)&smask(step))) | ||
257 | #define parity_char(x) pstep(pstep(pstep((x), 4), 2), 1) | ||
258 | |||
259 | static void mit_des_fixup_key_parity(u8 key[8]) | ||
260 | { | ||
261 | int i; | ||
262 | for (i = 0; i < 8; i++) { | ||
263 | key[i] &= 0xfe; | ||
264 | key[i] |= 1^parity_char(key[i]); | ||
265 | } | ||
266 | } | ||
267 | |||
/*
 * This is the des3 key derivation postprocess function
 *
 * Expands 21 bytes (3 x 7) of pseudo-random input into a 24-byte
 * triple-DES key (RFC 3961 sec 6.3 random-to-key): each 7-byte group
 * supplies 56 key bits, the eighth byte of each DES key is assembled
 * from the low bits of the first seven, and every byte is then forced
 * to odd parity.
 *
 * Returns 0 on success, or the positive error EINVAL when either
 * length is wrong.
 */
u32 gss_krb5_des3_make_key(const struct gss_krb5_enctype *gk5e,
			   struct xdr_netobj *randombits,
			   struct xdr_netobj *key)
{
	int i;
	u32 ret = EINVAL;

	if (key->len != 24) {
		dprintk("%s: key->len is %d\n", __func__, key->len);
		goto err_out;
	}
	if (randombits->len != 21) {
		dprintk("%s: randombits->len is %d\n",
			__func__, randombits->len);
		goto err_out;
	}

	/* take the seven bytes, move them around into the top 7 bits of the
	   8 key bytes, then compute the parity bits.  Do this three times. */

	for (i = 0; i < 3; i++) {
		memcpy(key->data + i*8, randombits->data + i*7, 7);
		/* eighth byte: low bit of each of the seven copied bytes,
		 * packed into bits 1..7 (bit 0 becomes the parity bit) */
		key->data[i*8+7] = (((key->data[i*8]&1)<<1) |
				    ((key->data[i*8+1]&1)<<2) |
				    ((key->data[i*8+2]&1)<<3) |
				    ((key->data[i*8+3]&1)<<4) |
				    ((key->data[i*8+4]&1)<<5) |
				    ((key->data[i*8+5]&1)<<6) |
				    ((key->data[i*8+6]&1)<<7));

		mit_des_fixup_key_parity(key->data + i*8);
	}
	ret = 0;
err_out:
	return ret;
}
307 | |||
308 | /* | ||
309 | * This is the aes key derivation postprocess function | ||
310 | */ | ||
311 | u32 gss_krb5_aes_make_key(const struct gss_krb5_enctype *gk5e, | ||
312 | struct xdr_netobj *randombits, | ||
313 | struct xdr_netobj *key) | ||
314 | { | ||
315 | u32 ret = EINVAL; | ||
316 | |||
317 | if (key->len != 16 && key->len != 32) { | ||
318 | dprintk("%s: key->len is %d\n", __func__, key->len); | ||
319 | goto err_out; | ||
320 | } | ||
321 | if (randombits->len != 16 && randombits->len != 32) { | ||
322 | dprintk("%s: randombits->len is %d\n", | ||
323 | __func__, randombits->len); | ||
324 | goto err_out; | ||
325 | } | ||
326 | if (randombits->len != key->len) { | ||
327 | dprintk("%s: randombits->len is %d, key->len is %d\n", | ||
328 | __func__, randombits->len, key->len); | ||
329 | goto err_out; | ||
330 | } | ||
331 | memcpy(key->data, randombits->data, key->len); | ||
332 | ret = 0; | ||
333 | err_out: | ||
334 | return ret; | ||
335 | } | ||
336 | |||
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 2deb0ed72ff4..032644610524 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * linux/net/sunrpc/gss_krb5_mech.c | 2 | * linux/net/sunrpc/gss_krb5_mech.c |
3 | * | 3 | * |
4 | * Copyright (c) 2001 The Regents of the University of Michigan. | 4 | * Copyright (c) 2001-2008 The Regents of the University of Michigan. |
5 | * All rights reserved. | 5 | * All rights reserved. |
6 | * | 6 | * |
7 | * Andy Adamson <andros@umich.edu> | 7 | * Andy Adamson <andros@umich.edu> |
@@ -48,6 +48,143 @@ | |||
48 | # define RPCDBG_FACILITY RPCDBG_AUTH | 48 | # define RPCDBG_FACILITY RPCDBG_AUTH |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | static struct gss_api_mech gss_kerberos_mech; /* forward declaration */ | ||
52 | |||
/*
 * Table of Kerberos 5 enctypes this implementation supports.  Each
 * entry names the kernel crypto transforms to use and fixes all the
 * per-enctype sizes (key, block, confounder, checksum) plus the
 * function hooks for encryption and key derivation.  Entries with
 * mk_key == NULL cannot be used with krb5_derive_key() (it BUG()s on
 * a NULL hook).
 */
static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
	/*
	 * DES (All DES enctypes are mapped to the same gss functionality)
	 */
	{
	  .etype = ENCTYPE_DES_CBC_RAW,
	  .ctype = CKSUMTYPE_RSA_MD5,
	  .name = "des-cbc-crc",
	  .encrypt_name = "cbc(des)",
	  .cksum_name = "md5",
	  .encrypt = krb5_encrypt,
	  .decrypt = krb5_decrypt,
	  .mk_key = NULL,		/* no key derivation for DES */
	  .signalg = SGN_ALG_DES_MAC_MD5,
	  .sealalg = SEAL_ALG_DES,
	  .keybytes = 7,		/* 56 random bits make one DES key */
	  .keylength = 8,		/* ...stored as 8 parity-adjusted bytes */
	  .blocksize = 8,
	  .conflen = 8,
	  .cksumlength = 8,
	  .keyed_cksum = 0,		/* checksum is not keyed */
	},
	/*
	 * RC4-HMAC
	 */
	{
	  .etype = ENCTYPE_ARCFOUR_HMAC,
	  .ctype = CKSUMTYPE_HMAC_MD5_ARCFOUR,
	  .name = "rc4-hmac",
	  .encrypt_name = "ecb(arc4)",
	  .cksum_name = "hmac(md5)",
	  .encrypt = krb5_encrypt,
	  .decrypt = krb5_decrypt,
	  .mk_key = NULL,		/* RC4 keys are derived per-token */
	  .signalg = SGN_ALG_HMAC_MD5,
	  .sealalg = SEAL_ALG_MICROSOFT_RC4,
	  .keybytes = 16,
	  .keylength = 16,
	  .blocksize = 1,		/* stream cipher */
	  .conflen = 8,
	  .cksumlength = 8,
	  .keyed_cksum = 1,
	},
	/*
	 * 3DES
	 */
	{
	  .etype = ENCTYPE_DES3_CBC_RAW,
	  .ctype = CKSUMTYPE_HMAC_SHA1_DES3,
	  .name = "des3-hmac-sha1",
	  .encrypt_name = "cbc(des3_ede)",
	  .cksum_name = "hmac(sha1)",
	  .encrypt = krb5_encrypt,
	  .decrypt = krb5_decrypt,
	  .mk_key = gss_krb5_des3_make_key,
	  .signalg = SGN_ALG_HMAC_SHA1_DES3_KD,
	  .sealalg = SEAL_ALG_DES3KD,
	  .keybytes = 21,		/* 3 x 7 random bytes... */
	  .keylength = 24,		/* ...expand to 3 x 8 key bytes */
	  .blocksize = 8,
	  .conflen = 8,
	  .cksumlength = 20,		/* full SHA-1 output */
	  .keyed_cksum = 1,
	},
	/*
	 * AES128
	 */
	{
	  .etype = ENCTYPE_AES128_CTS_HMAC_SHA1_96,
	  .ctype = CKSUMTYPE_HMAC_SHA1_96_AES128,
	  .name = "aes128-cts",
	  .encrypt_name = "cts(cbc(aes))",
	  .cksum_name = "hmac(sha1)",
	  .encrypt = krb5_encrypt,
	  .decrypt = krb5_decrypt,
	  .mk_key = gss_krb5_aes_make_key,
	  .encrypt_v2 = gss_krb5_aes_encrypt,	/* v2 token path */
	  .decrypt_v2 = gss_krb5_aes_decrypt,
	  .signalg = -1,		/* v2 tokens carry no sign/seal alg */
	  .sealalg = -1,
	  .keybytes = 16,
	  .keylength = 16,
	  .blocksize = 16,
	  .conflen = 16,
	  .cksumlength = 12,		/* truncated HMAC-SHA1-96 */
	  .keyed_cksum = 1,
	},
	/*
	 * AES256
	 */
	{
	  .etype = ENCTYPE_AES256_CTS_HMAC_SHA1_96,
	  .ctype = CKSUMTYPE_HMAC_SHA1_96_AES256,
	  .name = "aes256-cts",
	  .encrypt_name = "cts(cbc(aes))",
	  .cksum_name = "hmac(sha1)",
	  .encrypt = krb5_encrypt,
	  .decrypt = krb5_decrypt,
	  .mk_key = gss_krb5_aes_make_key,
	  .encrypt_v2 = gss_krb5_aes_encrypt,
	  .decrypt_v2 = gss_krb5_aes_decrypt,
	  .signalg = -1,
	  .sealalg = -1,
	  .keybytes = 32,
	  .keylength = 32,
	  .blocksize = 16,
	  .conflen = 16,
	  .cksumlength = 12,
	  .keyed_cksum = 1,
	},
};
164 | |||
165 | static const int num_supported_enctypes = | ||
166 | ARRAY_SIZE(supported_gss_krb5_enctypes); | ||
167 | |||
168 | static int | ||
169 | supported_gss_krb5_enctype(int etype) | ||
170 | { | ||
171 | int i; | ||
172 | for (i = 0; i < num_supported_enctypes; i++) | ||
173 | if (supported_gss_krb5_enctypes[i].etype == etype) | ||
174 | return 1; | ||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | static const struct gss_krb5_enctype * | ||
179 | get_gss_krb5_enctype(int etype) | ||
180 | { | ||
181 | int i; | ||
182 | for (i = 0; i < num_supported_enctypes; i++) | ||
183 | if (supported_gss_krb5_enctypes[i].etype == etype) | ||
184 | return &supported_gss_krb5_enctypes[i]; | ||
185 | return NULL; | ||
186 | } | ||
187 | |||
51 | static const void * | 188 | static const void * |
52 | simple_get_bytes(const void *p, const void *end, void *res, int len) | 189 | simple_get_bytes(const void *p, const void *end, void *res, int len) |
53 | { | 190 | { |
@@ -78,35 +215,45 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res) | |||
78 | } | 215 | } |
79 | 216 | ||
80 | static inline const void * | 217 | static inline const void * |
81 | get_key(const void *p, const void *end, struct crypto_blkcipher **res) | 218 | get_key(const void *p, const void *end, |
219 | struct krb5_ctx *ctx, struct crypto_blkcipher **res) | ||
82 | { | 220 | { |
83 | struct xdr_netobj key; | 221 | struct xdr_netobj key; |
84 | int alg; | 222 | int alg; |
85 | char *alg_name; | ||
86 | 223 | ||
87 | p = simple_get_bytes(p, end, &alg, sizeof(alg)); | 224 | p = simple_get_bytes(p, end, &alg, sizeof(alg)); |
88 | if (IS_ERR(p)) | 225 | if (IS_ERR(p)) |
89 | goto out_err; | 226 | goto out_err; |
227 | |||
228 | switch (alg) { | ||
229 | case ENCTYPE_DES_CBC_CRC: | ||
230 | case ENCTYPE_DES_CBC_MD4: | ||
231 | case ENCTYPE_DES_CBC_MD5: | ||
232 | /* Map all these key types to ENCTYPE_DES_CBC_RAW */ | ||
233 | alg = ENCTYPE_DES_CBC_RAW; | ||
234 | break; | ||
235 | } | ||
236 | |||
237 | if (!supported_gss_krb5_enctype(alg)) { | ||
238 | printk(KERN_WARNING "gss_kerberos_mech: unsupported " | ||
239 | "encryption key algorithm %d\n", alg); | ||
240 | goto out_err; | ||
241 | } | ||
90 | p = simple_get_netobj(p, end, &key); | 242 | p = simple_get_netobj(p, end, &key); |
91 | if (IS_ERR(p)) | 243 | if (IS_ERR(p)) |
92 | goto out_err; | 244 | goto out_err; |
93 | 245 | ||
94 | switch (alg) { | 246 | *res = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0, |
95 | case ENCTYPE_DES_CBC_RAW: | 247 | CRYPTO_ALG_ASYNC); |
96 | alg_name = "cbc(des)"; | ||
97 | break; | ||
98 | default: | ||
99 | printk("gss_kerberos_mech: unsupported algorithm %d\n", alg); | ||
100 | goto out_err_free_key; | ||
101 | } | ||
102 | *res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC); | ||
103 | if (IS_ERR(*res)) { | 248 | if (IS_ERR(*res)) { |
104 | printk("gss_kerberos_mech: unable to initialize crypto algorithm %s\n", alg_name); | 249 | printk(KERN_WARNING "gss_kerberos_mech: unable to initialize " |
250 | "crypto algorithm %s\n", ctx->gk5e->encrypt_name); | ||
105 | *res = NULL; | 251 | *res = NULL; |
106 | goto out_err_free_key; | 252 | goto out_err_free_key; |
107 | } | 253 | } |
108 | if (crypto_blkcipher_setkey(*res, key.data, key.len)) { | 254 | if (crypto_blkcipher_setkey(*res, key.data, key.len)) { |
109 | printk("gss_kerberos_mech: error setting key for crypto algorithm %s\n", alg_name); | 255 | printk(KERN_WARNING "gss_kerberos_mech: error setting key for " |
256 | "crypto algorithm %s\n", ctx->gk5e->encrypt_name); | ||
110 | goto out_err_free_tfm; | 257 | goto out_err_free_tfm; |
111 | } | 258 | } |
112 | 259 | ||
@@ -123,56 +270,55 @@ out_err: | |||
123 | } | 270 | } |
124 | 271 | ||
125 | static int | 272 | static int |
126 | gss_import_sec_context_kerberos(const void *p, | 273 | gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx) |
127 | size_t len, | ||
128 | struct gss_ctx *ctx_id) | ||
129 | { | 274 | { |
130 | const void *end = (const void *)((const char *)p + len); | ||
131 | struct krb5_ctx *ctx; | ||
132 | int tmp; | 275 | int tmp; |
133 | 276 | ||
134 | if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) { | ||
135 | p = ERR_PTR(-ENOMEM); | ||
136 | goto out_err; | ||
137 | } | ||
138 | |||
139 | p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); | 277 | p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); |
140 | if (IS_ERR(p)) | 278 | if (IS_ERR(p)) |
141 | goto out_err_free_ctx; | 279 | goto out_err; |
280 | |||
281 | /* Old format supports only DES! Any other enctype uses new format */ | ||
282 | ctx->enctype = ENCTYPE_DES_CBC_RAW; | ||
283 | |||
284 | ctx->gk5e = get_gss_krb5_enctype(ctx->enctype); | ||
285 | if (ctx->gk5e == NULL) | ||
286 | goto out_err; | ||
287 | |||
142 | /* The downcall format was designed before we completely understood | 288 | /* The downcall format was designed before we completely understood |
143 | * the uses of the context fields; so it includes some stuff we | 289 | * the uses of the context fields; so it includes some stuff we |
144 | * just give some minimal sanity-checking, and some we ignore | 290 | * just give some minimal sanity-checking, and some we ignore |
145 | * completely (like the next twenty bytes): */ | 291 | * completely (like the next twenty bytes): */ |
146 | if (unlikely(p + 20 > end || p + 20 < p)) | 292 | if (unlikely(p + 20 > end || p + 20 < p)) |
147 | goto out_err_free_ctx; | 293 | goto out_err; |
148 | p += 20; | 294 | p += 20; |
149 | p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); | 295 | p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); |
150 | if (IS_ERR(p)) | 296 | if (IS_ERR(p)) |
151 | goto out_err_free_ctx; | 297 | goto out_err; |
152 | if (tmp != SGN_ALG_DES_MAC_MD5) { | 298 | if (tmp != SGN_ALG_DES_MAC_MD5) { |
153 | p = ERR_PTR(-ENOSYS); | 299 | p = ERR_PTR(-ENOSYS); |
154 | goto out_err_free_ctx; | 300 | goto out_err; |
155 | } | 301 | } |
156 | p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); | 302 | p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); |
157 | if (IS_ERR(p)) | 303 | if (IS_ERR(p)) |
158 | goto out_err_free_ctx; | 304 | goto out_err; |
159 | if (tmp != SEAL_ALG_DES) { | 305 | if (tmp != SEAL_ALG_DES) { |
160 | p = ERR_PTR(-ENOSYS); | 306 | p = ERR_PTR(-ENOSYS); |
161 | goto out_err_free_ctx; | 307 | goto out_err; |
162 | } | 308 | } |
163 | p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime)); | 309 | p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime)); |
164 | if (IS_ERR(p)) | 310 | if (IS_ERR(p)) |
165 | goto out_err_free_ctx; | 311 | goto out_err; |
166 | p = simple_get_bytes(p, end, &ctx->seq_send, sizeof(ctx->seq_send)); | 312 | p = simple_get_bytes(p, end, &ctx->seq_send, sizeof(ctx->seq_send)); |
167 | if (IS_ERR(p)) | 313 | if (IS_ERR(p)) |
168 | goto out_err_free_ctx; | 314 | goto out_err; |
169 | p = simple_get_netobj(p, end, &ctx->mech_used); | 315 | p = simple_get_netobj(p, end, &ctx->mech_used); |
170 | if (IS_ERR(p)) | 316 | if (IS_ERR(p)) |
171 | goto out_err_free_ctx; | 317 | goto out_err; |
172 | p = get_key(p, end, &ctx->enc); | 318 | p = get_key(p, end, ctx, &ctx->enc); |
173 | if (IS_ERR(p)) | 319 | if (IS_ERR(p)) |
174 | goto out_err_free_mech; | 320 | goto out_err_free_mech; |
175 | p = get_key(p, end, &ctx->seq); | 321 | p = get_key(p, end, ctx, &ctx->seq); |
176 | if (IS_ERR(p)) | 322 | if (IS_ERR(p)) |
177 | goto out_err_free_key1; | 323 | goto out_err_free_key1; |
178 | if (p != end) { | 324 | if (p != end) { |
@@ -180,9 +326,6 @@ gss_import_sec_context_kerberos(const void *p, | |||
180 | goto out_err_free_key2; | 326 | goto out_err_free_key2; |
181 | } | 327 | } |
182 | 328 | ||
183 | ctx_id->internal_ctx_id = ctx; | ||
184 | |||
185 | dprintk("RPC: Successfully imported new context.\n"); | ||
186 | return 0; | 329 | return 0; |
187 | 330 | ||
188 | out_err_free_key2: | 331 | out_err_free_key2: |
@@ -191,18 +334,378 @@ out_err_free_key1: | |||
191 | crypto_free_blkcipher(ctx->enc); | 334 | crypto_free_blkcipher(ctx->enc); |
192 | out_err_free_mech: | 335 | out_err_free_mech: |
193 | kfree(ctx->mech_used.data); | 336 | kfree(ctx->mech_used.data); |
194 | out_err_free_ctx: | ||
195 | kfree(ctx); | ||
196 | out_err: | 337 | out_err: |
197 | return PTR_ERR(p); | 338 | return PTR_ERR(p); |
198 | } | 339 | } |
199 | 340 | ||
341 | struct crypto_blkcipher * | ||
342 | context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key) | ||
343 | { | ||
344 | struct crypto_blkcipher *cp; | ||
345 | |||
346 | cp = crypto_alloc_blkcipher(cname, 0, CRYPTO_ALG_ASYNC); | ||
347 | if (IS_ERR(cp)) { | ||
348 | dprintk("gss_kerberos_mech: unable to initialize " | ||
349 | "crypto algorithm %s\n", cname); | ||
350 | return NULL; | ||
351 | } | ||
352 | if (crypto_blkcipher_setkey(cp, key, ctx->gk5e->keylength)) { | ||
353 | dprintk("gss_kerberos_mech: error setting key for " | ||
354 | "crypto algorithm %s\n", cname); | ||
355 | crypto_free_blkcipher(cp); | ||
356 | return NULL; | ||
357 | } | ||
358 | return cp; | ||
359 | } | ||
360 | |||
361 | static inline void | ||
362 | set_cdata(u8 cdata[GSS_KRB5_K5CLENGTH], u32 usage, u8 seed) | ||
363 | { | ||
364 | cdata[0] = (usage>>24)&0xff; | ||
365 | cdata[1] = (usage>>16)&0xff; | ||
366 | cdata[2] = (usage>>8)&0xff; | ||
367 | cdata[3] = usage&0xff; | ||
368 | cdata[4] = seed; | ||
369 | } | ||
370 | |||
/*
 * Set up the context keys for a 3DES (new-style) context.
 *
 * For 3DES the raw session key Ksess itself keys both the
 * sequence-number cipher (ctx->seq) and the bulk-data cipher
 * (ctx->enc); only the checksum key is derived (RFC 3961 DK with the
 * KG_USAGE_SIGN / SEED_CHECKSUM constant) and stored in ctx->cksum.
 *
 * Returns 0 on success or -EINVAL on any failure; the specific
 * krb5_derive_key() error is only logged, not propagated.
 */
static int
context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask)
{
	struct xdr_netobj c, keyin, keyout;
	u8 cdata[GSS_KRB5_K5CLENGTH];
	u32 err;

	/* 5-byte usage constant, filled in by set_cdata() before use */
	c.len = GSS_KRB5_K5CLENGTH;
	c.data = cdata;

	keyin.data = ctx->Ksess;
	keyin.len = ctx->gk5e->keylength;
	keyout.len = ctx->gk5e->keylength;

	/* seq uses the raw key */
	ctx->seq = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name,
					   ctx->Ksess);
	if (ctx->seq == NULL)
		goto out_err;

	ctx->enc = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name,
					   ctx->Ksess);
	if (ctx->enc == NULL)
		goto out_free_seq;

	/* derive cksum */
	set_cdata(cdata, KG_USAGE_SIGN, KEY_USAGE_SEED_CHECKSUM);
	keyout.data = ctx->cksum;
	err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
	if (err) {
		dprintk("%s: Error %d deriving cksum key\n",
			__func__, err);
		goto out_free_enc;
	}

	return 0;

out_free_enc:
	crypto_free_blkcipher(ctx->enc);
out_free_seq:
	crypto_free_blkcipher(ctx->seq);
out_err:
	return -EINVAL;
}
415 | |||
/*
 * Note that RC4 depends on deriving keys using the sequence
 * number or the checksum of a token. Therefore, the final keys
 * cannot be calculated until the token is being constructed!
 */
/*
 * Derive what can be derived now for an RC4-HMAC context: the signing
 * key Ksign -- HMAC-MD5 of the literal string "signaturekey"
 * (including its NUL terminator) keyed with the session key -- is
 * stored in ctx->cksum, and blkcipher transforms for data (ctx->enc)
 * and sequence-number (ctx->seq) encryption are allocated; their keys
 * are set later, per-token.
 *
 * Returns 0 on success or a negative errno.
 */
static int
context_derive_keys_rc4(struct krb5_ctx *ctx)
{
	struct crypto_hash *hmac;
	char sigkeyconstant[] = "signaturekey";
	int slen = strlen(sigkeyconstant) + 1;	/* include null terminator */
	struct hash_desc desc;
	struct scatterlist sg[1];
	int err;

	dprintk("RPC: %s: entered\n", __func__);
	/*
	 * derive cksum (aka Ksign) key
	 */
	hmac = crypto_alloc_hash(ctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), ctx->gk5e->cksum_name);
		err = PTR_ERR(hmac);
		goto out_err;
	}

	err = crypto_hash_setkey(hmac, ctx->Ksess, ctx->gk5e->keylength);
	if (err)
		goto out_err_free_hmac;

	sg_init_table(sg, 1);
	sg_set_buf(sg, sigkeyconstant, slen);

	desc.tfm = hmac;
	desc.flags = 0;

	err = crypto_hash_init(&desc);
	if (err)
		goto out_err_free_hmac;

	err = crypto_hash_digest(&desc, sg, slen, ctx->cksum);
	if (err)
		goto out_err_free_hmac;
	/*
	 * allocate hash, and blkciphers for data and seqnum encryption
	 */
	ctx->enc = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
					  CRYPTO_ALG_ASYNC);
	if (IS_ERR(ctx->enc)) {
		err = PTR_ERR(ctx->enc);
		goto out_err_free_hmac;
	}

	ctx->seq = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
					  CRYPTO_ALG_ASYNC);
	if (IS_ERR(ctx->seq)) {
		crypto_free_blkcipher(ctx->enc);
		err = PTR_ERR(ctx->seq);
		goto out_err_free_hmac;
	}

	dprintk("RPC: %s: returning success\n", __func__);

	err = 0;

	/* NB: success deliberately falls through -- the hmac transform
	 * was only needed to compute Ksign and is freed on every path */
out_err_free_hmac:
	crypto_free_hash(hmac);
out_err:
	dprintk("RPC: %s: returning %d\n", __func__, err);
	return err;
}
488 | |||
/*
 * Derive the full key set for the new-style (v2 token) enctypes from
 * the session key Ksess: seal (encryption), sign (checksum) and
 * integrity keys for both the initiator and acceptor directions, each
 * via RFC 3961 DK with the appropriate usage/seed constant.  Cipher
 * transforms are allocated for the two seal keys; the sign and integ
 * keys are kept only as raw bytes in the context.  For the AES CTS
 * enctypes an auxiliary plain cbc(aes) transform is also allocated
 * per direction (presumably for the non-final-block portion of long
 * messages -- confirm against the gss_krb5_aes_encrypt/decrypt paths).
 *
 * Returns 0 on success or -EINVAL on any failure; underlying
 * krb5_derive_key() errors are only logged.
 */
static int
context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask)
{
	struct xdr_netobj c, keyin, keyout;
	u8 cdata[GSS_KRB5_K5CLENGTH];
	u32 err;

	/* 5-byte usage constant, refilled by set_cdata() per derivation */
	c.len = GSS_KRB5_K5CLENGTH;
	c.data = cdata;

	keyin.data = ctx->Ksess;
	keyin.len = ctx->gk5e->keylength;
	keyout.len = ctx->gk5e->keylength;

	/* initiator seal encryption */
	set_cdata(cdata, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_ENCRYPTION);
	keyout.data = ctx->initiator_seal;
	err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
	if (err) {
		dprintk("%s: Error %d deriving initiator_seal key\n",
			__func__, err);
		goto out_err;
	}
	ctx->initiator_enc = context_v2_alloc_cipher(ctx,
						     ctx->gk5e->encrypt_name,
						     ctx->initiator_seal);
	if (ctx->initiator_enc == NULL)
		goto out_err;

	/* acceptor seal encryption */
	set_cdata(cdata, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_ENCRYPTION);
	keyout.data = ctx->acceptor_seal;
	err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
	if (err) {
		dprintk("%s: Error %d deriving acceptor_seal key\n",
			__func__, err);
		goto out_free_initiator_enc;
	}
	ctx->acceptor_enc = context_v2_alloc_cipher(ctx,
						    ctx->gk5e->encrypt_name,
						    ctx->acceptor_seal);
	if (ctx->acceptor_enc == NULL)
		goto out_free_initiator_enc;

	/* initiator sign checksum */
	set_cdata(cdata, KG_USAGE_INITIATOR_SIGN, KEY_USAGE_SEED_CHECKSUM);
	keyout.data = ctx->initiator_sign;
	err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
	if (err) {
		dprintk("%s: Error %d deriving initiator_sign key\n",
			__func__, err);
		goto out_free_acceptor_enc;
	}

	/* acceptor sign checksum */
	set_cdata(cdata, KG_USAGE_ACCEPTOR_SIGN, KEY_USAGE_SEED_CHECKSUM);
	keyout.data = ctx->acceptor_sign;
	err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
	if (err) {
		dprintk("%s: Error %d deriving acceptor_sign key\n",
			__func__, err);
		goto out_free_acceptor_enc;
	}

	/* initiator seal integrity */
	set_cdata(cdata, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_INTEGRITY);
	keyout.data = ctx->initiator_integ;
	err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
	if (err) {
		dprintk("%s: Error %d deriving initiator_integ key\n",
			__func__, err);
		goto out_free_acceptor_enc;
	}

	/* acceptor seal integrity */
	set_cdata(cdata, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_INTEGRITY);
	keyout.data = ctx->acceptor_integ;
	err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
	if (err) {
		dprintk("%s: Error %d deriving acceptor_integ key\n",
			__func__, err);
		goto out_free_acceptor_enc;
	}

	switch (ctx->enctype) {
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		/* extra plain-CBC transforms keyed with the seal keys */
		ctx->initiator_enc_aux =
			context_v2_alloc_cipher(ctx, "cbc(aes)",
						ctx->initiator_seal);
		if (ctx->initiator_enc_aux == NULL)
			goto out_free_acceptor_enc;
		ctx->acceptor_enc_aux =
			context_v2_alloc_cipher(ctx, "cbc(aes)",
						ctx->acceptor_seal);
		if (ctx->acceptor_enc_aux == NULL) {
			crypto_free_blkcipher(ctx->initiator_enc_aux);
			goto out_free_acceptor_enc;
		}
	}

	return 0;

out_free_acceptor_enc:
	crypto_free_blkcipher(ctx->acceptor_enc);
out_free_initiator_enc:
	crypto_free_blkcipher(ctx->initiator_enc);
out_err:
	return -EINVAL;
}
599 | |||
600 | static int | ||
601 | gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx, | ||
602 | gfp_t gfp_mask) | ||
603 | { | ||
604 | int keylen; | ||
605 | |||
606 | p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags)); | ||
607 | if (IS_ERR(p)) | ||
608 | goto out_err; | ||
609 | ctx->initiate = ctx->flags & KRB5_CTX_FLAG_INITIATOR; | ||
610 | |||
611 | p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime)); | ||
612 | if (IS_ERR(p)) | ||
613 | goto out_err; | ||
614 | p = simple_get_bytes(p, end, &ctx->seq_send64, sizeof(ctx->seq_send64)); | ||
615 | if (IS_ERR(p)) | ||
616 | goto out_err; | ||
617 | /* set seq_send for use by "older" enctypes */ | ||
618 | ctx->seq_send = ctx->seq_send64; | ||
619 | if (ctx->seq_send64 != ctx->seq_send) { | ||
620 | dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__, | ||
621 | (long unsigned)ctx->seq_send64, ctx->seq_send); | ||
622 | goto out_err; | ||
623 | } | ||
624 | p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype)); | ||
625 | if (IS_ERR(p)) | ||
626 | goto out_err; | ||
627 | /* Map ENCTYPE_DES3_CBC_SHA1 to ENCTYPE_DES3_CBC_RAW */ | ||
628 | if (ctx->enctype == ENCTYPE_DES3_CBC_SHA1) | ||
629 | ctx->enctype = ENCTYPE_DES3_CBC_RAW; | ||
630 | ctx->gk5e = get_gss_krb5_enctype(ctx->enctype); | ||
631 | if (ctx->gk5e == NULL) { | ||
632 | dprintk("gss_kerberos_mech: unsupported krb5 enctype %u\n", | ||
633 | ctx->enctype); | ||
634 | p = ERR_PTR(-EINVAL); | ||
635 | goto out_err; | ||
636 | } | ||
637 | keylen = ctx->gk5e->keylength; | ||
638 | |||
639 | p = simple_get_bytes(p, end, ctx->Ksess, keylen); | ||
640 | if (IS_ERR(p)) | ||
641 | goto out_err; | ||
642 | |||
643 | if (p != end) { | ||
644 | p = ERR_PTR(-EINVAL); | ||
645 | goto out_err; | ||
646 | } | ||
647 | |||
648 | ctx->mech_used.data = kmemdup(gss_kerberos_mech.gm_oid.data, | ||
649 | gss_kerberos_mech.gm_oid.len, gfp_mask); | ||
650 | if (unlikely(ctx->mech_used.data == NULL)) { | ||
651 | p = ERR_PTR(-ENOMEM); | ||
652 | goto out_err; | ||
653 | } | ||
654 | ctx->mech_used.len = gss_kerberos_mech.gm_oid.len; | ||
655 | |||
656 | switch (ctx->enctype) { | ||
657 | case ENCTYPE_DES3_CBC_RAW: | ||
658 | return context_derive_keys_des3(ctx, gfp_mask); | ||
659 | case ENCTYPE_ARCFOUR_HMAC: | ||
660 | return context_derive_keys_rc4(ctx); | ||
661 | case ENCTYPE_AES128_CTS_HMAC_SHA1_96: | ||
662 | case ENCTYPE_AES256_CTS_HMAC_SHA1_96: | ||
663 | return context_derive_keys_new(ctx, gfp_mask); | ||
664 | default: | ||
665 | return -EINVAL; | ||
666 | } | ||
667 | |||
668 | out_err: | ||
669 | return PTR_ERR(p); | ||
670 | } | ||
671 | |||
672 | static int | ||
673 | gss_import_sec_context_kerberos(const void *p, size_t len, | ||
674 | struct gss_ctx *ctx_id, | ||
675 | gfp_t gfp_mask) | ||
676 | { | ||
677 | const void *end = (const void *)((const char *)p + len); | ||
678 | struct krb5_ctx *ctx; | ||
679 | int ret; | ||
680 | |||
681 | ctx = kzalloc(sizeof(*ctx), gfp_mask); | ||
682 | if (ctx == NULL) | ||
683 | return -ENOMEM; | ||
684 | |||
685 | if (len == 85) | ||
686 | ret = gss_import_v1_context(p, end, ctx); | ||
687 | else | ||
688 | ret = gss_import_v2_context(p, end, ctx, gfp_mask); | ||
689 | |||
690 | if (ret == 0) | ||
691 | ctx_id->internal_ctx_id = ctx; | ||
692 | else | ||
693 | kfree(ctx); | ||
694 | |||
695 | dprintk("RPC: %s: returning %d\n", __func__, ret); | ||
696 | return ret; | ||
697 | } | ||
698 | |||
200 | static void | 699 | static void |
201 | gss_delete_sec_context_kerberos(void *internal_ctx) { | 700 | gss_delete_sec_context_kerberos(void *internal_ctx) { |
202 | struct krb5_ctx *kctx = internal_ctx; | 701 | struct krb5_ctx *kctx = internal_ctx; |
203 | 702 | ||
204 | crypto_free_blkcipher(kctx->seq); | 703 | crypto_free_blkcipher(kctx->seq); |
205 | crypto_free_blkcipher(kctx->enc); | 704 | crypto_free_blkcipher(kctx->enc); |
705 | crypto_free_blkcipher(kctx->acceptor_enc); | ||
706 | crypto_free_blkcipher(kctx->initiator_enc); | ||
707 | crypto_free_blkcipher(kctx->acceptor_enc_aux); | ||
708 | crypto_free_blkcipher(kctx->initiator_enc_aux); | ||
206 | kfree(kctx->mech_used.data); | 709 | kfree(kctx->mech_used.data); |
207 | kfree(kctx); | 710 | kfree(kctx); |
208 | } | 711 | } |
@@ -241,6 +744,7 @@ static struct gss_api_mech gss_kerberos_mech = { | |||
241 | .gm_ops = &gss_kerberos_ops, | 744 | .gm_ops = &gss_kerberos_ops, |
242 | .gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs), | 745 | .gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs), |
243 | .gm_pfs = gss_kerberos_pfs, | 746 | .gm_pfs = gss_kerberos_pfs, |
747 | .gm_upcall_enctypes = "enctypes=18,17,16,23,3,1,2 ", | ||
244 | }; | 748 | }; |
245 | 749 | ||
246 | static int __init init_kerberos_module(void) | 750 | static int __init init_kerberos_module(void) |
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index 88fe6e75ed7e..d7941eab7796 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/k5seal.c | 4 | * Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/k5seal.c |
5 | * | 5 | * |
6 | * Copyright (c) 2000 The Regents of the University of Michigan. | 6 | * Copyright (c) 2000-2008 The Regents of the University of Michigan. |
7 | * All rights reserved. | 7 | * All rights reserved. |
8 | * | 8 | * |
9 | * Andy Adamson <andros@umich.edu> | 9 | * Andy Adamson <andros@umich.edu> |
@@ -70,53 +70,154 @@ | |||
70 | 70 | ||
71 | DEFINE_SPINLOCK(krb5_seq_lock); | 71 | DEFINE_SPINLOCK(krb5_seq_lock); |
72 | 72 | ||
73 | u32 | 73 | static char * |
74 | gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, | 74 | setup_token(struct krb5_ctx *ctx, struct xdr_netobj *token) |
75 | { | ||
76 | __be16 *ptr, *krb5_hdr; | ||
77 | int body_size = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength; | ||
78 | |||
79 | token->len = g_token_size(&ctx->mech_used, body_size); | ||
80 | |||
81 | ptr = (__be16 *)token->data; | ||
82 | g_make_token_header(&ctx->mech_used, body_size, (unsigned char **)&ptr); | ||
83 | |||
84 | /* ptr now at start of header described in rfc 1964, section 1.2.1: */ | ||
85 | krb5_hdr = ptr; | ||
86 | *ptr++ = KG_TOK_MIC_MSG; | ||
87 | *ptr++ = cpu_to_le16(ctx->gk5e->signalg); | ||
88 | *ptr++ = SEAL_ALG_NONE; | ||
89 | *ptr++ = 0xffff; | ||
90 | |||
91 | return (char *)krb5_hdr; | ||
92 | } | ||
93 | |||
94 | static void * | ||
95 | setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token) | ||
96 | { | ||
97 | __be16 *ptr, *krb5_hdr; | ||
98 | u8 *p, flags = 0x00; | ||
99 | |||
100 | if ((ctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0) | ||
101 | flags |= 0x01; | ||
102 | if (ctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) | ||
103 | flags |= 0x04; | ||
104 | |||
105 | /* Per rfc 4121, sec 4.2.6.1, there is no header, | ||
106 | * just start the token */ | ||
107 | krb5_hdr = ptr = (__be16 *)token->data; | ||
108 | |||
109 | *ptr++ = KG2_TOK_MIC; | ||
110 | p = (u8 *)ptr; | ||
111 | *p++ = flags; | ||
112 | *p++ = 0xff; | ||
113 | ptr = (__be16 *)p; | ||
114 | *ptr++ = 0xffff; | ||
115 | *ptr++ = 0xffff; | ||
116 | |||
117 | token->len = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength; | ||
118 | return krb5_hdr; | ||
119 | } | ||
120 | |||
121 | static u32 | ||
122 | gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text, | ||
75 | struct xdr_netobj *token) | 123 | struct xdr_netobj *token) |
76 | { | 124 | { |
77 | struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; | 125 | char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; |
78 | char cksumdata[16]; | 126 | struct xdr_netobj md5cksum = {.len = sizeof(cksumdata), |
79 | struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; | 127 | .data = cksumdata}; |
80 | unsigned char *ptr, *msg_start; | 128 | void *ptr; |
81 | s32 now; | 129 | s32 now; |
82 | u32 seq_send; | 130 | u32 seq_send; |
131 | u8 *cksumkey; | ||
83 | 132 | ||
84 | dprintk("RPC: gss_krb5_seal\n"); | 133 | dprintk("RPC: %s\n", __func__); |
85 | BUG_ON(ctx == NULL); | 134 | BUG_ON(ctx == NULL); |
86 | 135 | ||
87 | now = get_seconds(); | 136 | now = get_seconds(); |
88 | 137 | ||
89 | token->len = g_token_size(&ctx->mech_used, GSS_KRB5_TOK_HDR_LEN + 8); | 138 | ptr = setup_token(ctx, token); |
90 | 139 | ||
91 | ptr = token->data; | 140 | if (ctx->gk5e->keyed_cksum) |
92 | g_make_token_header(&ctx->mech_used, GSS_KRB5_TOK_HDR_LEN + 8, &ptr); | 141 | cksumkey = ctx->cksum; |
142 | else | ||
143 | cksumkey = NULL; | ||
93 | 144 | ||
94 | /* ptr now at header described in rfc 1964, section 1.2.1: */ | 145 | if (make_checksum(ctx, ptr, 8, text, 0, cksumkey, |
95 | ptr[0] = (unsigned char) ((KG_TOK_MIC_MSG >> 8) & 0xff); | 146 | KG_USAGE_SIGN, &md5cksum)) |
96 | ptr[1] = (unsigned char) (KG_TOK_MIC_MSG & 0xff); | 147 | return GSS_S_FAILURE; |
97 | 148 | ||
98 | msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8; | 149 | memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len); |
99 | 150 | ||
100 | *(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5); | 151 | spin_lock(&krb5_seq_lock); |
101 | memset(ptr + 4, 0xff, 4); | 152 | seq_send = ctx->seq_send++; |
153 | spin_unlock(&krb5_seq_lock); | ||
102 | 154 | ||
103 | if (make_checksum("md5", ptr, 8, text, 0, &md5cksum)) | 155 | if (krb5_make_seq_num(ctx, ctx->seq, ctx->initiate ? 0 : 0xff, |
156 | seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)) | ||
104 | return GSS_S_FAILURE; | 157 | return GSS_S_FAILURE; |
105 | 158 | ||
106 | if (krb5_encrypt(ctx->seq, NULL, md5cksum.data, | 159 | return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; |
107 | md5cksum.data, md5cksum.len)) | 160 | } |
108 | return GSS_S_FAILURE; | 161 | |
162 | u32 | ||
163 | gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text, | ||
164 | struct xdr_netobj *token) | ||
165 | { | ||
166 | char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; | ||
167 | struct xdr_netobj cksumobj = { .len = sizeof(cksumdata), | ||
168 | .data = cksumdata}; | ||
169 | void *krb5_hdr; | ||
170 | s32 now; | ||
171 | u64 seq_send; | ||
172 | u8 *cksumkey; | ||
173 | unsigned int cksum_usage; | ||
109 | 174 | ||
110 | memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8); | 175 | dprintk("RPC: %s\n", __func__); |
111 | 176 | ||
177 | krb5_hdr = setup_token_v2(ctx, token); | ||
178 | |||
179 | /* Set up the sequence number. Now 64-bits in clear | ||
180 | * text and w/o direction indicator */ | ||
112 | spin_lock(&krb5_seq_lock); | 181 | spin_lock(&krb5_seq_lock); |
113 | seq_send = ctx->seq_send++; | 182 | seq_send = ctx->seq_send64++; |
114 | spin_unlock(&krb5_seq_lock); | 183 | spin_unlock(&krb5_seq_lock); |
115 | 184 | *((u64 *)(krb5_hdr + 8)) = cpu_to_be64(seq_send); | |
116 | if (krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff, | 185 | |
117 | seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, | 186 | if (ctx->initiate) { |
118 | ptr + 8)) | 187 | cksumkey = ctx->initiator_sign; |
188 | cksum_usage = KG_USAGE_INITIATOR_SIGN; | ||
189 | } else { | ||
190 | cksumkey = ctx->acceptor_sign; | ||
191 | cksum_usage = KG_USAGE_ACCEPTOR_SIGN; | ||
192 | } | ||
193 | |||
194 | if (make_checksum_v2(ctx, krb5_hdr, GSS_KRB5_TOK_HDR_LEN, | ||
195 | text, 0, cksumkey, cksum_usage, &cksumobj)) | ||
119 | return GSS_S_FAILURE; | 196 | return GSS_S_FAILURE; |
120 | 197 | ||
198 | memcpy(krb5_hdr + GSS_KRB5_TOK_HDR_LEN, cksumobj.data, cksumobj.len); | ||
199 | |||
200 | now = get_seconds(); | ||
201 | |||
121 | return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; | 202 | return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; |
122 | } | 203 | } |
204 | |||
205 | u32 | ||
206 | gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, | ||
207 | struct xdr_netobj *token) | ||
208 | { | ||
209 | struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; | ||
210 | |||
211 | switch (ctx->enctype) { | ||
212 | default: | ||
213 | BUG(); | ||
214 | case ENCTYPE_DES_CBC_RAW: | ||
215 | case ENCTYPE_DES3_CBC_RAW: | ||
216 | case ENCTYPE_ARCFOUR_HMAC: | ||
217 | return gss_get_mic_v1(ctx, text, token); | ||
218 | case ENCTYPE_AES128_CTS_HMAC_SHA1_96: | ||
219 | case ENCTYPE_AES256_CTS_HMAC_SHA1_96: | ||
220 | return gss_get_mic_v2(ctx, text, token); | ||
221 | } | ||
222 | } | ||
223 | |||
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c index 6331cd6866ec..415c013ba382 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c +++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c | |||
@@ -39,14 +39,51 @@ | |||
39 | # define RPCDBG_FACILITY RPCDBG_AUTH | 39 | # define RPCDBG_FACILITY RPCDBG_AUTH |
40 | #endif | 40 | #endif |
41 | 41 | ||
42 | static s32 | ||
43 | krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum, | ||
44 | unsigned char *cksum, unsigned char *buf) | ||
45 | { | ||
46 | struct crypto_blkcipher *cipher; | ||
47 | unsigned char plain[8]; | ||
48 | s32 code; | ||
49 | |||
50 | dprintk("RPC: %s:\n", __func__); | ||
51 | cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0, | ||
52 | CRYPTO_ALG_ASYNC); | ||
53 | if (IS_ERR(cipher)) | ||
54 | return PTR_ERR(cipher); | ||
55 | |||
56 | plain[0] = (unsigned char) ((seqnum >> 24) & 0xff); | ||
57 | plain[1] = (unsigned char) ((seqnum >> 16) & 0xff); | ||
58 | plain[2] = (unsigned char) ((seqnum >> 8) & 0xff); | ||
59 | plain[3] = (unsigned char) ((seqnum >> 0) & 0xff); | ||
60 | plain[4] = direction; | ||
61 | plain[5] = direction; | ||
62 | plain[6] = direction; | ||
63 | plain[7] = direction; | ||
64 | |||
65 | code = krb5_rc4_setup_seq_key(kctx, cipher, cksum); | ||
66 | if (code) | ||
67 | goto out; | ||
68 | |||
69 | code = krb5_encrypt(cipher, cksum, plain, buf, 8); | ||
70 | out: | ||
71 | crypto_free_blkcipher(cipher); | ||
72 | return code; | ||
73 | } | ||
42 | s32 | 74 | s32 |
43 | krb5_make_seq_num(struct crypto_blkcipher *key, | 75 | krb5_make_seq_num(struct krb5_ctx *kctx, |
76 | struct crypto_blkcipher *key, | ||
44 | int direction, | 77 | int direction, |
45 | u32 seqnum, | 78 | u32 seqnum, |
46 | unsigned char *cksum, unsigned char *buf) | 79 | unsigned char *cksum, unsigned char *buf) |
47 | { | 80 | { |
48 | unsigned char plain[8]; | 81 | unsigned char plain[8]; |
49 | 82 | ||
83 | if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) | ||
84 | return krb5_make_rc4_seq_num(kctx, direction, seqnum, | ||
85 | cksum, buf); | ||
86 | |||
50 | plain[0] = (unsigned char) (seqnum & 0xff); | 87 | plain[0] = (unsigned char) (seqnum & 0xff); |
51 | plain[1] = (unsigned char) ((seqnum >> 8) & 0xff); | 88 | plain[1] = (unsigned char) ((seqnum >> 8) & 0xff); |
52 | plain[2] = (unsigned char) ((seqnum >> 16) & 0xff); | 89 | plain[2] = (unsigned char) ((seqnum >> 16) & 0xff); |
@@ -60,17 +97,59 @@ krb5_make_seq_num(struct crypto_blkcipher *key, | |||
60 | return krb5_encrypt(key, cksum, plain, buf, 8); | 97 | return krb5_encrypt(key, cksum, plain, buf, 8); |
61 | } | 98 | } |
62 | 99 | ||
100 | static s32 | ||
101 | krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, | ||
102 | unsigned char *buf, int *direction, s32 *seqnum) | ||
103 | { | ||
104 | struct crypto_blkcipher *cipher; | ||
105 | unsigned char plain[8]; | ||
106 | s32 code; | ||
107 | |||
108 | dprintk("RPC: %s:\n", __func__); | ||
109 | cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0, | ||
110 | CRYPTO_ALG_ASYNC); | ||
111 | if (IS_ERR(cipher)) | ||
112 | return PTR_ERR(cipher); | ||
113 | |||
114 | code = krb5_rc4_setup_seq_key(kctx, cipher, cksum); | ||
115 | if (code) | ||
116 | goto out; | ||
117 | |||
118 | code = krb5_decrypt(cipher, cksum, buf, plain, 8); | ||
119 | if (code) | ||
120 | goto out; | ||
121 | |||
122 | if ((plain[4] != plain[5]) || (plain[4] != plain[6]) | ||
123 | || (plain[4] != plain[7])) { | ||
124 | code = (s32)KG_BAD_SEQ; | ||
125 | goto out; | ||
126 | } | ||
127 | |||
128 | *direction = plain[4]; | ||
129 | |||
130 | *seqnum = ((plain[0] << 24) | (plain[1] << 16) | | ||
131 | (plain[2] << 8) | (plain[3])); | ||
132 | out: | ||
133 | crypto_free_blkcipher(cipher); | ||
134 | return code; | ||
135 | } | ||
136 | |||
63 | s32 | 137 | s32 |
64 | krb5_get_seq_num(struct crypto_blkcipher *key, | 138 | krb5_get_seq_num(struct krb5_ctx *kctx, |
65 | unsigned char *cksum, | 139 | unsigned char *cksum, |
66 | unsigned char *buf, | 140 | unsigned char *buf, |
67 | int *direction, u32 *seqnum) | 141 | int *direction, u32 *seqnum) |
68 | { | 142 | { |
69 | s32 code; | 143 | s32 code; |
70 | unsigned char plain[8]; | 144 | unsigned char plain[8]; |
145 | struct crypto_blkcipher *key = kctx->seq; | ||
71 | 146 | ||
72 | dprintk("RPC: krb5_get_seq_num:\n"); | 147 | dprintk("RPC: krb5_get_seq_num:\n"); |
73 | 148 | ||
149 | if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) | ||
150 | return krb5_get_rc4_seq_num(kctx, cksum, buf, | ||
151 | direction, seqnum); | ||
152 | |||
74 | if ((code = krb5_decrypt(key, cksum, buf, plain, 8))) | 153 | if ((code = krb5_decrypt(key, cksum, buf, plain, 8))) |
75 | return code; | 154 | return code; |
76 | 155 | ||
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index ce6c247edad0..6cd930f3678f 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/k5unseal.c | 4 | * Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/k5unseal.c |
5 | * | 5 | * |
6 | * Copyright (c) 2000 The Regents of the University of Michigan. | 6 | * Copyright (c) 2000-2008 The Regents of the University of Michigan. |
7 | * All rights reserved. | 7 | * All rights reserved. |
8 | * | 8 | * |
9 | * Andy Adamson <andros@umich.edu> | 9 | * Andy Adamson <andros@umich.edu> |
@@ -70,20 +70,21 @@ | |||
70 | /* read_token is a mic token, and message_buffer is the data that the mic was | 70 | /* read_token is a mic token, and message_buffer is the data that the mic was |
71 | * supposedly taken over. */ | 71 | * supposedly taken over. */ |
72 | 72 | ||
73 | u32 | 73 | static u32 |
74 | gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, | 74 | gss_verify_mic_v1(struct krb5_ctx *ctx, |
75 | struct xdr_buf *message_buffer, struct xdr_netobj *read_token) | 75 | struct xdr_buf *message_buffer, struct xdr_netobj *read_token) |
76 | { | 76 | { |
77 | struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; | ||
78 | int signalg; | 77 | int signalg; |
79 | int sealalg; | 78 | int sealalg; |
80 | char cksumdata[16]; | 79 | char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; |
81 | struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; | 80 | struct xdr_netobj md5cksum = {.len = sizeof(cksumdata), |
81 | .data = cksumdata}; | ||
82 | s32 now; | 82 | s32 now; |
83 | int direction; | 83 | int direction; |
84 | u32 seqnum; | 84 | u32 seqnum; |
85 | unsigned char *ptr = (unsigned char *)read_token->data; | 85 | unsigned char *ptr = (unsigned char *)read_token->data; |
86 | int bodysize; | 86 | int bodysize; |
87 | u8 *cksumkey; | ||
87 | 88 | ||
88 | dprintk("RPC: krb5_read_token\n"); | 89 | dprintk("RPC: krb5_read_token\n"); |
89 | 90 | ||
@@ -98,7 +99,7 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, | |||
98 | /* XXX sanity-check bodysize?? */ | 99 | /* XXX sanity-check bodysize?? */ |
99 | 100 | ||
100 | signalg = ptr[2] + (ptr[3] << 8); | 101 | signalg = ptr[2] + (ptr[3] << 8); |
101 | if (signalg != SGN_ALG_DES_MAC_MD5) | 102 | if (signalg != ctx->gk5e->signalg) |
102 | return GSS_S_DEFECTIVE_TOKEN; | 103 | return GSS_S_DEFECTIVE_TOKEN; |
103 | 104 | ||
104 | sealalg = ptr[4] + (ptr[5] << 8); | 105 | sealalg = ptr[4] + (ptr[5] << 8); |
@@ -108,13 +109,17 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, | |||
108 | if ((ptr[6] != 0xff) || (ptr[7] != 0xff)) | 109 | if ((ptr[6] != 0xff) || (ptr[7] != 0xff)) |
109 | return GSS_S_DEFECTIVE_TOKEN; | 110 | return GSS_S_DEFECTIVE_TOKEN; |
110 | 111 | ||
111 | if (make_checksum("md5", ptr, 8, message_buffer, 0, &md5cksum)) | 112 | if (ctx->gk5e->keyed_cksum) |
112 | return GSS_S_FAILURE; | 113 | cksumkey = ctx->cksum; |
114 | else | ||
115 | cksumkey = NULL; | ||
113 | 116 | ||
114 | if (krb5_encrypt(ctx->seq, NULL, md5cksum.data, md5cksum.data, 16)) | 117 | if (make_checksum(ctx, ptr, 8, message_buffer, 0, |
118 | cksumkey, KG_USAGE_SIGN, &md5cksum)) | ||
115 | return GSS_S_FAILURE; | 119 | return GSS_S_FAILURE; |
116 | 120 | ||
117 | if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8)) | 121 | if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN, |
122 | ctx->gk5e->cksumlength)) | ||
118 | return GSS_S_BAD_SIG; | 123 | return GSS_S_BAD_SIG; |
119 | 124 | ||
120 | /* it got through unscathed. Make sure the context is unexpired */ | 125 | /* it got through unscathed. Make sure the context is unexpired */ |
@@ -126,7 +131,8 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, | |||
126 | 131 | ||
127 | /* do sequencing checks */ | 132 | /* do sequencing checks */ |
128 | 133 | ||
129 | if (krb5_get_seq_num(ctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8, &direction, &seqnum)) | 134 | if (krb5_get_seq_num(ctx, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8, |
135 | &direction, &seqnum)) | ||
130 | return GSS_S_FAILURE; | 136 | return GSS_S_FAILURE; |
131 | 137 | ||
132 | if ((ctx->initiate && direction != 0xff) || | 138 | if ((ctx->initiate && direction != 0xff) || |
@@ -135,3 +141,86 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, | |||
135 | 141 | ||
136 | return GSS_S_COMPLETE; | 142 | return GSS_S_COMPLETE; |
137 | } | 143 | } |
144 | |||
145 | static u32 | ||
146 | gss_verify_mic_v2(struct krb5_ctx *ctx, | ||
147 | struct xdr_buf *message_buffer, struct xdr_netobj *read_token) | ||
148 | { | ||
149 | char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; | ||
150 | struct xdr_netobj cksumobj = {.len = sizeof(cksumdata), | ||
151 | .data = cksumdata}; | ||
152 | s32 now; | ||
153 | u64 seqnum; | ||
154 | u8 *ptr = read_token->data; | ||
155 | u8 *cksumkey; | ||
156 | u8 flags; | ||
157 | int i; | ||
158 | unsigned int cksum_usage; | ||
159 | |||
160 | dprintk("RPC: %s\n", __func__); | ||
161 | |||
162 | if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_MIC) | ||
163 | return GSS_S_DEFECTIVE_TOKEN; | ||
164 | |||
165 | flags = ptr[2]; | ||
166 | if ((!ctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) || | ||
167 | (ctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR))) | ||
168 | return GSS_S_BAD_SIG; | ||
169 | |||
170 | if (flags & KG2_TOKEN_FLAG_SEALED) { | ||
171 | dprintk("%s: token has unexpected sealed flag\n", __func__); | ||
172 | return GSS_S_FAILURE; | ||
173 | } | ||
174 | |||
175 | for (i = 3; i < 8; i++) | ||
176 | if (ptr[i] != 0xff) | ||
177 | return GSS_S_DEFECTIVE_TOKEN; | ||
178 | |||
179 | if (ctx->initiate) { | ||
180 | cksumkey = ctx->acceptor_sign; | ||
181 | cksum_usage = KG_USAGE_ACCEPTOR_SIGN; | ||
182 | } else { | ||
183 | cksumkey = ctx->initiator_sign; | ||
184 | cksum_usage = KG_USAGE_INITIATOR_SIGN; | ||
185 | } | ||
186 | |||
187 | if (make_checksum_v2(ctx, ptr, GSS_KRB5_TOK_HDR_LEN, message_buffer, 0, | ||
188 | cksumkey, cksum_usage, &cksumobj)) | ||
189 | return GSS_S_FAILURE; | ||
190 | |||
191 | if (memcmp(cksumobj.data, ptr + GSS_KRB5_TOK_HDR_LEN, | ||
192 | ctx->gk5e->cksumlength)) | ||
193 | return GSS_S_BAD_SIG; | ||
194 | |||
195 | /* it got through unscathed. Make sure the context is unexpired */ | ||
196 | now = get_seconds(); | ||
197 | if (now > ctx->endtime) | ||
198 | return GSS_S_CONTEXT_EXPIRED; | ||
199 | |||
200 | /* do sequencing checks */ | ||
201 | |||
202 | seqnum = be64_to_cpup((__be64 *)ptr + 8); | ||
203 | |||
204 | return GSS_S_COMPLETE; | ||
205 | } | ||
206 | |||
207 | u32 | ||
208 | gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, | ||
209 | struct xdr_buf *message_buffer, | ||
210 | struct xdr_netobj *read_token) | ||
211 | { | ||
212 | struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; | ||
213 | |||
214 | switch (ctx->enctype) { | ||
215 | default: | ||
216 | BUG(); | ||
217 | case ENCTYPE_DES_CBC_RAW: | ||
218 | case ENCTYPE_DES3_CBC_RAW: | ||
219 | case ENCTYPE_ARCFOUR_HMAC: | ||
220 | return gss_verify_mic_v1(ctx, message_buffer, read_token); | ||
221 | case ENCTYPE_AES128_CTS_HMAC_SHA1_96: | ||
222 | case ENCTYPE_AES256_CTS_HMAC_SHA1_96: | ||
223 | return gss_verify_mic_v2(ctx, message_buffer, read_token); | ||
224 | } | ||
225 | } | ||
226 | |||
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index a6e905637e03..2763e3e48db4 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c | |||
@@ -1,3 +1,33 @@ | |||
1 | /* | ||
2 | * COPYRIGHT (c) 2008 | ||
3 | * The Regents of the University of Michigan | ||
4 | * ALL RIGHTS RESERVED | ||
5 | * | ||
6 | * Permission is granted to use, copy, create derivative works | ||
7 | * and redistribute this software and such derivative works | ||
8 | * for any purpose, so long as the name of The University of | ||
9 | * Michigan is not used in any advertising or publicity | ||
10 | * pertaining to the use of distribution of this software | ||
11 | * without specific, written prior authorization. If the | ||
12 | * above copyright notice or any other identification of the | ||
13 | * University of Michigan is included in any copy of any | ||
14 | * portion of this software, then the disclaimer below must | ||
15 | * also be included. | ||
16 | * | ||
17 | * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION | ||
18 | * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY | ||
19 | * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF | ||
20 | * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING | ||
21 | * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF | ||
22 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE | ||
23 | * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE | ||
24 | * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR | ||
25 | * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING | ||
26 | * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN | ||
27 | * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF | ||
28 | * SUCH DAMAGES. | ||
29 | */ | ||
30 | |||
1 | #include <linux/types.h> | 31 | #include <linux/types.h> |
2 | #include <linux/jiffies.h> | 32 | #include <linux/jiffies.h> |
3 | #include <linux/sunrpc/gss_krb5.h> | 33 | #include <linux/sunrpc/gss_krb5.h> |
@@ -12,10 +42,7 @@ | |||
12 | static inline int | 42 | static inline int |
13 | gss_krb5_padding(int blocksize, int length) | 43 | gss_krb5_padding(int blocksize, int length) |
14 | { | 44 | { |
15 | /* Most of the code is block-size independent but currently we | 45 | return blocksize - (length % blocksize); |
16 | * use only 8: */ | ||
17 | BUG_ON(blocksize != 8); | ||
18 | return 8 - (length & 7); | ||
19 | } | 46 | } |
20 | 47 | ||
21 | static inline void | 48 | static inline void |
@@ -86,8 +113,8 @@ out: | |||
86 | return 0; | 113 | return 0; |
87 | } | 114 | } |
88 | 115 | ||
89 | static void | 116 | void |
90 | make_confounder(char *p, u32 conflen) | 117 | gss_krb5_make_confounder(char *p, u32 conflen) |
91 | { | 118 | { |
92 | static u64 i = 0; | 119 | static u64 i = 0; |
93 | u64 *q = (u64 *)p; | 120 | u64 *q = (u64 *)p; |
@@ -127,69 +154,73 @@ make_confounder(char *p, u32 conflen) | |||
127 | 154 | ||
128 | /* XXX factor out common code with seal/unseal. */ | 155 | /* XXX factor out common code with seal/unseal. */ |
129 | 156 | ||
130 | u32 | 157 | static u32 |
131 | gss_wrap_kerberos(struct gss_ctx *ctx, int offset, | 158 | gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, |
132 | struct xdr_buf *buf, struct page **pages) | 159 | struct xdr_buf *buf, struct page **pages) |
133 | { | 160 | { |
134 | struct krb5_ctx *kctx = ctx->internal_ctx_id; | 161 | char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; |
135 | char cksumdata[16]; | 162 | struct xdr_netobj md5cksum = {.len = sizeof(cksumdata), |
136 | struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; | 163 | .data = cksumdata}; |
137 | int blocksize = 0, plainlen; | 164 | int blocksize = 0, plainlen; |
138 | unsigned char *ptr, *msg_start; | 165 | unsigned char *ptr, *msg_start; |
139 | s32 now; | 166 | s32 now; |
140 | int headlen; | 167 | int headlen; |
141 | struct page **tmp_pages; | 168 | struct page **tmp_pages; |
142 | u32 seq_send; | 169 | u32 seq_send; |
170 | u8 *cksumkey; | ||
171 | u32 conflen = kctx->gk5e->conflen; | ||
143 | 172 | ||
144 | dprintk("RPC: gss_wrap_kerberos\n"); | 173 | dprintk("RPC: %s\n", __func__); |
145 | 174 | ||
146 | now = get_seconds(); | 175 | now = get_seconds(); |
147 | 176 | ||
148 | blocksize = crypto_blkcipher_blocksize(kctx->enc); | 177 | blocksize = crypto_blkcipher_blocksize(kctx->enc); |
149 | gss_krb5_add_padding(buf, offset, blocksize); | 178 | gss_krb5_add_padding(buf, offset, blocksize); |
150 | BUG_ON((buf->len - offset) % blocksize); | 179 | BUG_ON((buf->len - offset) % blocksize); |
151 | plainlen = blocksize + buf->len - offset; | 180 | plainlen = conflen + buf->len - offset; |
152 | 181 | ||
153 | headlen = g_token_size(&kctx->mech_used, 24 + plainlen) - | 182 | headlen = g_token_size(&kctx->mech_used, |
154 | (buf->len - offset); | 183 | GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) - |
184 | (buf->len - offset); | ||
155 | 185 | ||
156 | ptr = buf->head[0].iov_base + offset; | 186 | ptr = buf->head[0].iov_base + offset; |
157 | /* shift data to make room for header. */ | 187 | /* shift data to make room for header. */ |
188 | xdr_extend_head(buf, offset, headlen); | ||
189 | |||
158 | /* XXX Would be cleverer to encrypt while copying. */ | 190 | /* XXX Would be cleverer to encrypt while copying. */ |
159 | /* XXX bounds checking, slack, etc. */ | ||
160 | memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset); | ||
161 | buf->head[0].iov_len += headlen; | ||
162 | buf->len += headlen; | ||
163 | BUG_ON((buf->len - offset - headlen) % blocksize); | 191 | BUG_ON((buf->len - offset - headlen) % blocksize); |
164 | 192 | ||
165 | g_make_token_header(&kctx->mech_used, | 193 | g_make_token_header(&kctx->mech_used, |
166 | GSS_KRB5_TOK_HDR_LEN + 8 + plainlen, &ptr); | 194 | GSS_KRB5_TOK_HDR_LEN + |
195 | kctx->gk5e->cksumlength + plainlen, &ptr); | ||
167 | 196 | ||
168 | 197 | ||
169 | /* ptr now at header described in rfc 1964, section 1.2.1: */ | 198 | /* ptr now at header described in rfc 1964, section 1.2.1: */ |
170 | ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff); | 199 | ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff); |
171 | ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff); | 200 | ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff); |
172 | 201 | ||
173 | msg_start = ptr + 24; | 202 | msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength; |
174 | 203 | ||
175 | *(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5); | 204 | *(__be16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg); |
176 | memset(ptr + 4, 0xff, 4); | 205 | memset(ptr + 4, 0xff, 4); |
177 | *(__be16 *)(ptr + 4) = htons(SEAL_ALG_DES); | 206 | *(__be16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg); |
178 | 207 | ||
179 | make_confounder(msg_start, blocksize); | 208 | gss_krb5_make_confounder(msg_start, conflen); |
209 | |||
210 | if (kctx->gk5e->keyed_cksum) | ||
211 | cksumkey = kctx->cksum; | ||
212 | else | ||
213 | cksumkey = NULL; | ||
180 | 214 | ||
181 | /* XXXJBF: UGH!: */ | 215 | /* XXXJBF: UGH!: */ |
182 | tmp_pages = buf->pages; | 216 | tmp_pages = buf->pages; |
183 | buf->pages = pages; | 217 | buf->pages = pages; |
184 | if (make_checksum("md5", ptr, 8, buf, | 218 | if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen, |
185 | offset + headlen - blocksize, &md5cksum)) | 219 | cksumkey, KG_USAGE_SEAL, &md5cksum)) |
186 | return GSS_S_FAILURE; | 220 | return GSS_S_FAILURE; |
187 | buf->pages = tmp_pages; | 221 | buf->pages = tmp_pages; |
188 | 222 | ||
189 | if (krb5_encrypt(kctx->seq, NULL, md5cksum.data, | 223 | memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len); |
190 | md5cksum.data, md5cksum.len)) | ||
191 | return GSS_S_FAILURE; | ||
192 | memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8); | ||
193 | 224 | ||
194 | spin_lock(&krb5_seq_lock); | 225 | spin_lock(&krb5_seq_lock); |
195 | seq_send = kctx->seq_send++; | 226 | seq_send = kctx->seq_send++; |
@@ -197,25 +228,42 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset, | |||
197 | 228 | ||
198 | /* XXX would probably be more efficient to compute checksum | 229 | /* XXX would probably be more efficient to compute checksum |
199 | * and encrypt at the same time: */ | 230 | * and encrypt at the same time: */ |
200 | if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff, | 231 | if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff, |
201 | seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))) | 232 | seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))) |
202 | return GSS_S_FAILURE; | 233 | return GSS_S_FAILURE; |
203 | 234 | ||
204 | if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize, | 235 | if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) { |
205 | pages)) | 236 | struct crypto_blkcipher *cipher; |
206 | return GSS_S_FAILURE; | 237 | int err; |
238 | cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0, | ||
239 | CRYPTO_ALG_ASYNC); | ||
240 | if (IS_ERR(cipher)) | ||
241 | return GSS_S_FAILURE; | ||
242 | |||
243 | krb5_rc4_setup_enc_key(kctx, cipher, seq_send); | ||
244 | |||
245 | err = gss_encrypt_xdr_buf(cipher, buf, | ||
246 | offset + headlen - conflen, pages); | ||
247 | crypto_free_blkcipher(cipher); | ||
248 | if (err) | ||
249 | return GSS_S_FAILURE; | ||
250 | } else { | ||
251 | if (gss_encrypt_xdr_buf(kctx->enc, buf, | ||
252 | offset + headlen - conflen, pages)) | ||
253 | return GSS_S_FAILURE; | ||
254 | } | ||
207 | 255 | ||
208 | return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; | 256 | return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; |
209 | } | 257 | } |
210 | 258 | ||
211 | u32 | 259 | static u32 |
212 | gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) | 260 | gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) |
213 | { | 261 | { |
214 | struct krb5_ctx *kctx = ctx->internal_ctx_id; | ||
215 | int signalg; | 262 | int signalg; |
216 | int sealalg; | 263 | int sealalg; |
217 | char cksumdata[16]; | 264 | char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; |
218 | struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; | 265 | struct xdr_netobj md5cksum = {.len = sizeof(cksumdata), |
266 | .data = cksumdata}; | ||
219 | s32 now; | 267 | s32 now; |
220 | int direction; | 268 | int direction; |
221 | s32 seqnum; | 269 | s32 seqnum; |
@@ -224,6 +272,9 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) | |||
224 | void *data_start, *orig_start; | 272 | void *data_start, *orig_start; |
225 | int data_len; | 273 | int data_len; |
226 | int blocksize; | 274 | int blocksize; |
275 | u32 conflen = kctx->gk5e->conflen; | ||
276 | int crypt_offset; | ||
277 | u8 *cksumkey; | ||
227 | 278 | ||
228 | dprintk("RPC: gss_unwrap_kerberos\n"); | 279 | dprintk("RPC: gss_unwrap_kerberos\n"); |
229 | 280 | ||
@@ -241,29 +292,65 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) | |||
241 | /* get the sign and seal algorithms */ | 292 | /* get the sign and seal algorithms */ |
242 | 293 | ||
243 | signalg = ptr[2] + (ptr[3] << 8); | 294 | signalg = ptr[2] + (ptr[3] << 8); |
244 | if (signalg != SGN_ALG_DES_MAC_MD5) | 295 | if (signalg != kctx->gk5e->signalg) |
245 | return GSS_S_DEFECTIVE_TOKEN; | 296 | return GSS_S_DEFECTIVE_TOKEN; |
246 | 297 | ||
247 | sealalg = ptr[4] + (ptr[5] << 8); | 298 | sealalg = ptr[4] + (ptr[5] << 8); |
248 | if (sealalg != SEAL_ALG_DES) | 299 | if (sealalg != kctx->gk5e->sealalg) |
249 | return GSS_S_DEFECTIVE_TOKEN; | 300 | return GSS_S_DEFECTIVE_TOKEN; |
250 | 301 | ||
251 | if ((ptr[6] != 0xff) || (ptr[7] != 0xff)) | 302 | if ((ptr[6] != 0xff) || (ptr[7] != 0xff)) |
252 | return GSS_S_DEFECTIVE_TOKEN; | 303 | return GSS_S_DEFECTIVE_TOKEN; |
253 | 304 | ||
254 | if (gss_decrypt_xdr_buf(kctx->enc, buf, | 305 | /* |
255 | ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base)) | 306 | * Data starts after token header and checksum. ptr points |
256 | return GSS_S_DEFECTIVE_TOKEN; | 307 | * to the beginning of the token header |
308 | */ | ||
309 | crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) - | ||
310 | (unsigned char *)buf->head[0].iov_base; | ||
311 | |||
312 | /* | ||
313 | * Need plaintext seqnum to derive encryption key for arcfour-hmac | ||
314 | */ | ||
315 | if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN, | ||
316 | ptr + 8, &direction, &seqnum)) | ||
317 | return GSS_S_BAD_SIG; | ||
257 | 318 | ||
258 | if (make_checksum("md5", ptr, 8, buf, | 319 | if ((kctx->initiate && direction != 0xff) || |
259 | ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base, &md5cksum)) | 320 | (!kctx->initiate && direction != 0)) |
260 | return GSS_S_FAILURE; | 321 | return GSS_S_BAD_SIG; |
322 | |||
323 | if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) { | ||
324 | struct crypto_blkcipher *cipher; | ||
325 | int err; | ||
326 | |||
327 | cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0, | ||
328 | CRYPTO_ALG_ASYNC); | ||
329 | if (IS_ERR(cipher)) | ||
330 | return GSS_S_FAILURE; | ||
331 | |||
332 | krb5_rc4_setup_enc_key(kctx, cipher, seqnum); | ||
261 | 333 | ||
262 | if (krb5_encrypt(kctx->seq, NULL, md5cksum.data, | 334 | err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset); |
263 | md5cksum.data, md5cksum.len)) | 335 | crypto_free_blkcipher(cipher); |
336 | if (err) | ||
337 | return GSS_S_DEFECTIVE_TOKEN; | ||
338 | } else { | ||
339 | if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset)) | ||
340 | return GSS_S_DEFECTIVE_TOKEN; | ||
341 | } | ||
342 | |||
343 | if (kctx->gk5e->keyed_cksum) | ||
344 | cksumkey = kctx->cksum; | ||
345 | else | ||
346 | cksumkey = NULL; | ||
347 | |||
348 | if (make_checksum(kctx, ptr, 8, buf, crypt_offset, | ||
349 | cksumkey, KG_USAGE_SEAL, &md5cksum)) | ||
264 | return GSS_S_FAILURE; | 350 | return GSS_S_FAILURE; |
265 | 351 | ||
266 | if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8)) | 352 | if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN, |
353 | kctx->gk5e->cksumlength)) | ||
267 | return GSS_S_BAD_SIG; | 354 | return GSS_S_BAD_SIG; |
268 | 355 | ||
269 | /* it got through unscathed. Make sure the context is unexpired */ | 356 | /* it got through unscathed. Make sure the context is unexpired */ |
@@ -275,19 +362,12 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) | |||
275 | 362 | ||
276 | /* do sequencing checks */ | 363 | /* do sequencing checks */ |
277 | 364 | ||
278 | if (krb5_get_seq_num(kctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8, | ||
279 | &direction, &seqnum)) | ||
280 | return GSS_S_BAD_SIG; | ||
281 | |||
282 | if ((kctx->initiate && direction != 0xff) || | ||
283 | (!kctx->initiate && direction != 0)) | ||
284 | return GSS_S_BAD_SIG; | ||
285 | |||
286 | /* Copy the data back to the right position. XXX: Would probably be | 365 | /* Copy the data back to the right position. XXX: Would probably be |
287 | * better to copy and encrypt at the same time. */ | 366 | * better to copy and encrypt at the same time. */ |
288 | 367 | ||
289 | blocksize = crypto_blkcipher_blocksize(kctx->enc); | 368 | blocksize = crypto_blkcipher_blocksize(kctx->enc); |
290 | data_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8 + blocksize; | 369 | data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) + |
370 | conflen; | ||
291 | orig_start = buf->head[0].iov_base + offset; | 371 | orig_start = buf->head[0].iov_base + offset; |
292 | data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start; | 372 | data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start; |
293 | memmove(orig_start, data_start, data_len); | 373 | memmove(orig_start, data_start, data_len); |
@@ -299,3 +379,209 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) | |||
299 | 379 | ||
300 | return GSS_S_COMPLETE; | 380 | return GSS_S_COMPLETE; |
301 | } | 381 | } |
382 | |||
383 | /* | ||
384 | * We cannot currently handle tokens with rotated data. We need a | ||
385 | * generalized routine to rotate the data in place. It is anticipated | ||
386 | * that we won't encounter rotated data in the general case. | ||
387 | */ | ||
388 | static u32 | ||
389 | rotate_left(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, u16 rrc) | ||
390 | { | ||
391 | unsigned int realrrc = rrc % (buf->len - offset - GSS_KRB5_TOK_HDR_LEN); | ||
392 | |||
393 | if (realrrc == 0) | ||
394 | return 0; | ||
395 | |||
396 | dprintk("%s: cannot process token with rotated data: " | ||
397 | "rrc %u, realrrc %u\n", __func__, rrc, realrrc); | ||
398 | return 1; | ||
399 | } | ||
400 | |||
401 | static u32 | ||
402 | gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset, | ||
403 | struct xdr_buf *buf, struct page **pages) | ||
404 | { | ||
405 | int blocksize; | ||
406 | u8 *ptr, *plainhdr; | ||
407 | s32 now; | ||
408 | u8 flags = 0x00; | ||
409 | __be16 *be16ptr, ec = 0; | ||
410 | __be64 *be64ptr; | ||
411 | u32 err; | ||
412 | |||
413 | dprintk("RPC: %s\n", __func__); | ||
414 | |||
415 | if (kctx->gk5e->encrypt_v2 == NULL) | ||
416 | return GSS_S_FAILURE; | ||
417 | |||
418 | /* make room for gss token header */ | ||
419 | if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN)) | ||
420 | return GSS_S_FAILURE; | ||
421 | |||
422 | /* construct gss token header */ | ||
423 | ptr = plainhdr = buf->head[0].iov_base + offset; | ||
424 | *ptr++ = (unsigned char) ((KG2_TOK_WRAP>>8) & 0xff); | ||
425 | *ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff); | ||
426 | |||
427 | if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0) | ||
428 | flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR; | ||
429 | if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0) | ||
430 | flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY; | ||
431 | /* We always do confidentiality in wrap tokens */ | ||
432 | flags |= KG2_TOKEN_FLAG_SEALED; | ||
433 | |||
434 | *ptr++ = flags; | ||
435 | *ptr++ = 0xff; | ||
436 | be16ptr = (__be16 *)ptr; | ||
437 | |||
438 | blocksize = crypto_blkcipher_blocksize(kctx->acceptor_enc); | ||
439 | *be16ptr++ = cpu_to_be16(ec); | ||
440 | /* "inner" token header always uses 0 for RRC */ | ||
441 | *be16ptr++ = cpu_to_be16(0); | ||
442 | |||
443 | be64ptr = (__be64 *)be16ptr; | ||
444 | spin_lock(&krb5_seq_lock); | ||
445 | *be64ptr = cpu_to_be64(kctx->seq_send64++); | ||
446 | spin_unlock(&krb5_seq_lock); | ||
447 | |||
448 | err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, ec, pages); | ||
449 | if (err) | ||
450 | return err; | ||
451 | |||
452 | now = get_seconds(); | ||
453 | return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; | ||
454 | } | ||
455 | |||
456 | static u32 | ||
457 | gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) | ||
458 | { | ||
459 | s32 now; | ||
460 | u64 seqnum; | ||
461 | u8 *ptr; | ||
462 | u8 flags = 0x00; | ||
463 | u16 ec, rrc; | ||
464 | int err; | ||
465 | u32 headskip, tailskip; | ||
466 | u8 decrypted_hdr[GSS_KRB5_TOK_HDR_LEN]; | ||
467 | unsigned int movelen; | ||
468 | |||
469 | |||
470 | dprintk("RPC: %s\n", __func__); | ||
471 | |||
472 | if (kctx->gk5e->decrypt_v2 == NULL) | ||
473 | return GSS_S_FAILURE; | ||
474 | |||
475 | ptr = buf->head[0].iov_base + offset; | ||
476 | |||
477 | if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP) | ||
478 | return GSS_S_DEFECTIVE_TOKEN; | ||
479 | |||
480 | flags = ptr[2]; | ||
481 | if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) || | ||
482 | (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR))) | ||
483 | return GSS_S_BAD_SIG; | ||
484 | |||
485 | if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) { | ||
486 | dprintk("%s: token missing expected sealed flag\n", __func__); | ||
487 | return GSS_S_DEFECTIVE_TOKEN; | ||
488 | } | ||
489 | |||
490 | if (ptr[3] != 0xff) | ||
491 | return GSS_S_DEFECTIVE_TOKEN; | ||
492 | |||
493 | ec = be16_to_cpup((__be16 *)(ptr + 4)); | ||
494 | rrc = be16_to_cpup((__be16 *)(ptr + 6)); | ||
495 | |||
496 | seqnum = be64_to_cpup((__be64 *)(ptr + 8)); | ||
497 | |||
498 | if (rrc != 0) { | ||
499 | err = rotate_left(kctx, offset, buf, rrc); | ||
500 | if (err) | ||
501 | return GSS_S_FAILURE; | ||
502 | } | ||
503 | |||
504 | err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf, | ||
505 | &headskip, &tailskip); | ||
506 | if (err) | ||
507 | return GSS_S_FAILURE; | ||
508 | |||
509 | /* | ||
510 | * Retrieve the decrypted gss token header and verify | ||
511 | * it against the original | ||
512 | */ | ||
513 | err = read_bytes_from_xdr_buf(buf, | ||
514 | buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip, | ||
515 | decrypted_hdr, GSS_KRB5_TOK_HDR_LEN); | ||
516 | if (err) { | ||
517 | dprintk("%s: error %u getting decrypted_hdr\n", __func__, err); | ||
518 | return GSS_S_FAILURE; | ||
519 | } | ||
520 | if (memcmp(ptr, decrypted_hdr, 6) | ||
521 | || memcmp(ptr + 8, decrypted_hdr + 8, 8)) { | ||
522 | dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__); | ||
523 | return GSS_S_FAILURE; | ||
524 | } | ||
525 | |||
526 | /* do sequencing checks */ | ||
527 | |||
528 | /* it got through unscathed. Make sure the context is unexpired */ | ||
529 | now = get_seconds(); | ||
530 | if (now > kctx->endtime) | ||
531 | return GSS_S_CONTEXT_EXPIRED; | ||
532 | |||
533 | /* | ||
534 | * Move the head data back to the right position in xdr_buf. | ||
535 | * We ignore any "ec" data since it might be in the head or | ||
536 | * the tail, and we really don't need to deal with it. | ||
537 | * Note that buf->head[0].iov_len may indicate the available | ||
538 | * head buffer space rather than that actually occupied. | ||
539 | */ | ||
540 | movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len); | ||
541 | movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip; | ||
542 | BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen > | ||
543 | buf->head[0].iov_len); | ||
544 | memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen); | ||
545 | buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip; | ||
546 | buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip; | ||
547 | |||
548 | return GSS_S_COMPLETE; | ||
549 | } | ||
550 | |||
551 | u32 | ||
552 | gss_wrap_kerberos(struct gss_ctx *gctx, int offset, | ||
553 | struct xdr_buf *buf, struct page **pages) | ||
554 | { | ||
555 | struct krb5_ctx *kctx = gctx->internal_ctx_id; | ||
556 | |||
557 | switch (kctx->enctype) { | ||
558 | default: | ||
559 | BUG(); | ||
560 | case ENCTYPE_DES_CBC_RAW: | ||
561 | case ENCTYPE_DES3_CBC_RAW: | ||
562 | case ENCTYPE_ARCFOUR_HMAC: | ||
563 | return gss_wrap_kerberos_v1(kctx, offset, buf, pages); | ||
564 | case ENCTYPE_AES128_CTS_HMAC_SHA1_96: | ||
565 | case ENCTYPE_AES256_CTS_HMAC_SHA1_96: | ||
566 | return gss_wrap_kerberos_v2(kctx, offset, buf, pages); | ||
567 | } | ||
568 | } | ||
569 | |||
570 | u32 | ||
571 | gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf) | ||
572 | { | ||
573 | struct krb5_ctx *kctx = gctx->internal_ctx_id; | ||
574 | |||
575 | switch (kctx->enctype) { | ||
576 | default: | ||
577 | BUG(); | ||
578 | case ENCTYPE_DES_CBC_RAW: | ||
579 | case ENCTYPE_DES3_CBC_RAW: | ||
580 | case ENCTYPE_ARCFOUR_HMAC: | ||
581 | return gss_unwrap_kerberos_v1(kctx, offset, buf); | ||
582 | case ENCTYPE_AES128_CTS_HMAC_SHA1_96: | ||
583 | case ENCTYPE_AES256_CTS_HMAC_SHA1_96: | ||
584 | return gss_unwrap_kerberos_v2(kctx, offset, buf); | ||
585 | } | ||
586 | } | ||
587 | |||
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c index 76e4c6f4ac3c..2689de39dc78 100644 --- a/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/net/sunrpc/auth_gss/gss_mech_switch.c | |||
@@ -249,14 +249,15 @@ EXPORT_SYMBOL_GPL(gss_mech_put); | |||
249 | int | 249 | int |
250 | gss_import_sec_context(const void *input_token, size_t bufsize, | 250 | gss_import_sec_context(const void *input_token, size_t bufsize, |
251 | struct gss_api_mech *mech, | 251 | struct gss_api_mech *mech, |
252 | struct gss_ctx **ctx_id) | 252 | struct gss_ctx **ctx_id, |
253 | gfp_t gfp_mask) | ||
253 | { | 254 | { |
254 | if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL))) | 255 | if (!(*ctx_id = kzalloc(sizeof(**ctx_id), gfp_mask))) |
255 | return -ENOMEM; | 256 | return -ENOMEM; |
256 | (*ctx_id)->mech_type = gss_mech_get(mech); | 257 | (*ctx_id)->mech_type = gss_mech_get(mech); |
257 | 258 | ||
258 | return mech->gm_ops | 259 | return mech->gm_ops |
259 | ->gss_import_sec_context(input_token, bufsize, *ctx_id); | 260 | ->gss_import_sec_context(input_token, bufsize, *ctx_id, gfp_mask); |
260 | } | 261 | } |
261 | 262 | ||
262 | /* gss_get_mic: compute a mic over message and return mic_token. */ | 263 | /* gss_get_mic: compute a mic over message and return mic_token. */ |
@@ -285,6 +286,20 @@ gss_verify_mic(struct gss_ctx *context_handle, | |||
285 | mic_token); | 286 | mic_token); |
286 | } | 287 | } |
287 | 288 | ||
289 | /* | ||
290 | * This function is called from both the client and server code. | ||
291 | * Each makes guarantees about how much "slack" space is available | ||
292 | * for the underlying function in "buf"'s head and tail while | ||
293 | * performing the wrap. | ||
294 | * | ||
295 | * The client and server code allocate RPC_MAX_AUTH_SIZE extra | ||
296 | * space in both the head and tail which is available for use by | ||
297 | * the wrap function. | ||
298 | * | ||
299 | * Underlying functions should verify they do not use more than | ||
300 | * RPC_MAX_AUTH_SIZE of extra space in either the head or tail | ||
301 | * when performing the wrap. | ||
302 | */ | ||
288 | u32 | 303 | u32 |
289 | gss_wrap(struct gss_ctx *ctx_id, | 304 | gss_wrap(struct gss_ctx *ctx_id, |
290 | int offset, | 305 | int offset, |
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c index 035e1dd6af1b..dc3f1f5ed865 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_mech.c +++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c | |||
@@ -84,13 +84,14 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res) | |||
84 | 84 | ||
85 | static int | 85 | static int |
86 | gss_import_sec_context_spkm3(const void *p, size_t len, | 86 | gss_import_sec_context_spkm3(const void *p, size_t len, |
87 | struct gss_ctx *ctx_id) | 87 | struct gss_ctx *ctx_id, |
88 | gfp_t gfp_mask) | ||
88 | { | 89 | { |
89 | const void *end = (const void *)((const char *)p + len); | 90 | const void *end = (const void *)((const char *)p + len); |
90 | struct spkm3_ctx *ctx; | 91 | struct spkm3_ctx *ctx; |
91 | int version; | 92 | int version; |
92 | 93 | ||
93 | if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) | 94 | if (!(ctx = kzalloc(sizeof(*ctx), gfp_mask))) |
94 | goto out_err; | 95 | goto out_err; |
95 | 96 | ||
96 | p = simple_get_bytes(p, end, &version, sizeof(version)); | 97 | p = simple_get_bytes(p, end, &version, sizeof(version)); |
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index b81e790ef9f4..cc385b3a59c2 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c | |||
@@ -494,7 +494,7 @@ static int rsc_parse(struct cache_detail *cd, | |||
494 | len = qword_get(&mesg, buf, mlen); | 494 | len = qword_get(&mesg, buf, mlen); |
495 | if (len < 0) | 495 | if (len < 0) |
496 | goto out; | 496 | goto out; |
497 | status = gss_import_sec_context(buf, len, gm, &rsci.mechctx); | 497 | status = gss_import_sec_context(buf, len, gm, &rsci.mechctx, GFP_KERNEL); |
498 | if (status) | 498 | if (status) |
499 | goto out; | 499 | goto out; |
500 | 500 | ||
@@ -1315,6 +1315,14 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp) | |||
1315 | inpages = resbuf->pages; | 1315 | inpages = resbuf->pages; |
1316 | /* XXX: Would be better to write some xdr helper functions for | 1316 | /* XXX: Would be better to write some xdr helper functions for |
1317 | * nfs{2,3,4}xdr.c that place the data right, instead of copying: */ | 1317 | * nfs{2,3,4}xdr.c that place the data right, instead of copying: */ |
1318 | |||
1319 | /* | ||
1320 | * If there is currently tail data, make sure there is | ||
1321 | * room for the head, tail, and 2 * RPC_MAX_AUTH_SIZE in | ||
1322 | * the page, and move the current tail data such that | ||
1323 | * there is RPC_MAX_AUTH_SIZE slack space available in | ||
1324 | * both the head and tail. | ||
1325 | */ | ||
1318 | if (resbuf->tail[0].iov_base) { | 1326 | if (resbuf->tail[0].iov_base) { |
1319 | BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base | 1327 | BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base |
1320 | + PAGE_SIZE); | 1328 | + PAGE_SIZE); |
@@ -1327,6 +1335,13 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp) | |||
1327 | resbuf->tail[0].iov_len); | 1335 | resbuf->tail[0].iov_len); |
1328 | resbuf->tail[0].iov_base += RPC_MAX_AUTH_SIZE; | 1336 | resbuf->tail[0].iov_base += RPC_MAX_AUTH_SIZE; |
1329 | } | 1337 | } |
1338 | /* | ||
1339 | * If there is no current tail data, make sure there is | ||
1340 | * room for the head data, and 2 * RPC_MAX_AUTH_SIZE in the | ||
1341 | * allotted page, and set up tail information such that there | ||
1342 | * is RPC_MAX_AUTH_SIZE slack space available in both the | ||
1343 | * head and tail. | ||
1344 | */ | ||
1330 | if (resbuf->tail[0].iov_base == NULL) { | 1345 | if (resbuf->tail[0].iov_base == NULL) { |
1331 | if (resbuf->head[0].iov_len + 2*RPC_MAX_AUTH_SIZE > PAGE_SIZE) | 1346 | if (resbuf->head[0].iov_len + 2*RPC_MAX_AUTH_SIZE > PAGE_SIZE) |
1332 | return -ENOMEM; | 1347 | return -ENOMEM; |
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 39bddba53ba1..58de76c8540c 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -28,11 +28,13 @@ | |||
28 | #include <linux/workqueue.h> | 28 | #include <linux/workqueue.h> |
29 | #include <linux/mutex.h> | 29 | #include <linux/mutex.h> |
30 | #include <linux/pagemap.h> | 30 | #include <linux/pagemap.h> |
31 | #include <linux/smp_lock.h> | ||
31 | #include <asm/ioctls.h> | 32 | #include <asm/ioctls.h> |
32 | #include <linux/sunrpc/types.h> | 33 | #include <linux/sunrpc/types.h> |
33 | #include <linux/sunrpc/cache.h> | 34 | #include <linux/sunrpc/cache.h> |
34 | #include <linux/sunrpc/stats.h> | 35 | #include <linux/sunrpc/stats.h> |
35 | #include <linux/sunrpc/rpc_pipe_fs.h> | 36 | #include <linux/sunrpc/rpc_pipe_fs.h> |
37 | #include <linux/smp_lock.h> | ||
36 | 38 | ||
37 | #define RPCDBG_FACILITY RPCDBG_CACHE | 39 | #define RPCDBG_FACILITY RPCDBG_CACHE |
38 | 40 | ||
@@ -49,11 +51,17 @@ static void cache_init(struct cache_head *h) | |||
49 | h->last_refresh = now; | 51 | h->last_refresh = now; |
50 | } | 52 | } |
51 | 53 | ||
54 | static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h) | ||
55 | { | ||
56 | return (h->expiry_time < get_seconds()) || | ||
57 | (detail->flush_time > h->last_refresh); | ||
58 | } | ||
59 | |||
52 | struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, | 60 | struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, |
53 | struct cache_head *key, int hash) | 61 | struct cache_head *key, int hash) |
54 | { | 62 | { |
55 | struct cache_head **head, **hp; | 63 | struct cache_head **head, **hp; |
56 | struct cache_head *new = NULL; | 64 | struct cache_head *new = NULL, *freeme = NULL; |
57 | 65 | ||
58 | head = &detail->hash_table[hash]; | 66 | head = &detail->hash_table[hash]; |
59 | 67 | ||
@@ -62,6 +70,9 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, | |||
62 | for (hp=head; *hp != NULL ; hp = &(*hp)->next) { | 70 | for (hp=head; *hp != NULL ; hp = &(*hp)->next) { |
63 | struct cache_head *tmp = *hp; | 71 | struct cache_head *tmp = *hp; |
64 | if (detail->match(tmp, key)) { | 72 | if (detail->match(tmp, key)) { |
73 | if (cache_is_expired(detail, tmp)) | ||
74 | /* This entry is expired, we will discard it. */ | ||
75 | break; | ||
65 | cache_get(tmp); | 76 | cache_get(tmp); |
66 | read_unlock(&detail->hash_lock); | 77 | read_unlock(&detail->hash_lock); |
67 | return tmp; | 78 | return tmp; |
@@ -86,6 +97,13 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, | |||
86 | for (hp=head; *hp != NULL ; hp = &(*hp)->next) { | 97 | for (hp=head; *hp != NULL ; hp = &(*hp)->next) { |
87 | struct cache_head *tmp = *hp; | 98 | struct cache_head *tmp = *hp; |
88 | if (detail->match(tmp, key)) { | 99 | if (detail->match(tmp, key)) { |
100 | if (cache_is_expired(detail, tmp)) { | ||
101 | *hp = tmp->next; | ||
102 | tmp->next = NULL; | ||
103 | detail->entries --; | ||
104 | freeme = tmp; | ||
105 | break; | ||
106 | } | ||
89 | cache_get(tmp); | 107 | cache_get(tmp); |
90 | write_unlock(&detail->hash_lock); | 108 | write_unlock(&detail->hash_lock); |
91 | cache_put(new, detail); | 109 | cache_put(new, detail); |
@@ -98,6 +116,8 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, | |||
98 | cache_get(new); | 116 | cache_get(new); |
99 | write_unlock(&detail->hash_lock); | 117 | write_unlock(&detail->hash_lock); |
100 | 118 | ||
119 | if (freeme) | ||
120 | cache_put(freeme, detail); | ||
101 | return new; | 121 | return new; |
102 | } | 122 | } |
103 | EXPORT_SYMBOL_GPL(sunrpc_cache_lookup); | 123 | EXPORT_SYMBOL_GPL(sunrpc_cache_lookup); |
@@ -183,10 +203,7 @@ static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h) | |||
183 | 203 | ||
184 | static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h) | 204 | static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h) |
185 | { | 205 | { |
186 | if (!test_bit(CACHE_VALID, &h->flags) || | 206 | if (!test_bit(CACHE_VALID, &h->flags)) |
187 | h->expiry_time < get_seconds()) | ||
188 | return -EAGAIN; | ||
189 | else if (detail->flush_time > h->last_refresh) | ||
190 | return -EAGAIN; | 207 | return -EAGAIN; |
191 | else { | 208 | else { |
192 | /* entry is valid */ | 209 | /* entry is valid */ |
@@ -397,31 +414,27 @@ static int cache_clean(void) | |||
397 | /* Ok, now to clean this strand */ | 414 | /* Ok, now to clean this strand */ |
398 | 415 | ||
399 | cp = & current_detail->hash_table[current_index]; | 416 | cp = & current_detail->hash_table[current_index]; |
400 | ch = *cp; | 417 | for (ch = *cp ; ch ; cp = & ch->next, ch = *cp) { |
401 | for (; ch; cp= & ch->next, ch= *cp) { | ||
402 | if (current_detail->nextcheck > ch->expiry_time) | 418 | if (current_detail->nextcheck > ch->expiry_time) |
403 | current_detail->nextcheck = ch->expiry_time+1; | 419 | current_detail->nextcheck = ch->expiry_time+1; |
404 | if (ch->expiry_time >= get_seconds() && | 420 | if (!cache_is_expired(current_detail, ch)) |
405 | ch->last_refresh >= current_detail->flush_time) | ||
406 | continue; | 421 | continue; |
407 | if (test_and_clear_bit(CACHE_PENDING, &ch->flags)) | ||
408 | cache_dequeue(current_detail, ch); | ||
409 | 422 | ||
410 | if (atomic_read(&ch->ref.refcount) == 1) | ||
411 | break; | ||
412 | } | ||
413 | if (ch) { | ||
414 | *cp = ch->next; | 423 | *cp = ch->next; |
415 | ch->next = NULL; | 424 | ch->next = NULL; |
416 | current_detail->entries--; | 425 | current_detail->entries--; |
417 | rv = 1; | 426 | rv = 1; |
427 | break; | ||
418 | } | 428 | } |
429 | |||
419 | write_unlock(¤t_detail->hash_lock); | 430 | write_unlock(¤t_detail->hash_lock); |
420 | d = current_detail; | 431 | d = current_detail; |
421 | if (!ch) | 432 | if (!ch) |
422 | current_index ++; | 433 | current_index ++; |
423 | spin_unlock(&cache_list_lock); | 434 | spin_unlock(&cache_list_lock); |
424 | if (ch) { | 435 | if (ch) { |
436 | if (test_and_clear_bit(CACHE_PENDING, &ch->flags)) | ||
437 | cache_dequeue(current_detail, ch); | ||
425 | cache_revisit_request(ch); | 438 | cache_revisit_request(ch); |
426 | cache_put(ch, d); | 439 | cache_put(ch, d); |
427 | } | 440 | } |
@@ -1233,8 +1246,10 @@ static int content_open(struct inode *inode, struct file *file, | |||
1233 | if (!cd || !try_module_get(cd->owner)) | 1246 | if (!cd || !try_module_get(cd->owner)) |
1234 | return -EACCES; | 1247 | return -EACCES; |
1235 | han = __seq_open_private(file, &cache_content_op, sizeof(*han)); | 1248 | han = __seq_open_private(file, &cache_content_op, sizeof(*han)); |
1236 | if (han == NULL) | 1249 | if (han == NULL) { |
1250 | module_put(cd->owner); | ||
1237 | return -ENOMEM; | 1251 | return -ENOMEM; |
1252 | } | ||
1238 | 1253 | ||
1239 | han->cd = cd; | 1254 | han->cd = cd; |
1240 | return 0; | 1255 | return 0; |
@@ -1331,12 +1346,18 @@ static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait) | |||
1331 | return cache_poll(filp, wait, cd); | 1346 | return cache_poll(filp, wait, cd); |
1332 | } | 1347 | } |
1333 | 1348 | ||
1334 | static int cache_ioctl_procfs(struct inode *inode, struct file *filp, | 1349 | static long cache_ioctl_procfs(struct file *filp, |
1335 | unsigned int cmd, unsigned long arg) | 1350 | unsigned int cmd, unsigned long arg) |
1336 | { | 1351 | { |
1352 | long ret; | ||
1353 | struct inode *inode = filp->f_path.dentry->d_inode; | ||
1337 | struct cache_detail *cd = PDE(inode)->data; | 1354 | struct cache_detail *cd = PDE(inode)->data; |
1338 | 1355 | ||
1339 | return cache_ioctl(inode, filp, cmd, arg, cd); | 1356 | lock_kernel(); |
1357 | ret = cache_ioctl(inode, filp, cmd, arg, cd); | ||
1358 | unlock_kernel(); | ||
1359 | |||
1360 | return ret; | ||
1340 | } | 1361 | } |
1341 | 1362 | ||
1342 | static int cache_open_procfs(struct inode *inode, struct file *filp) | 1363 | static int cache_open_procfs(struct inode *inode, struct file *filp) |
@@ -1359,7 +1380,7 @@ static const struct file_operations cache_file_operations_procfs = { | |||
1359 | .read = cache_read_procfs, | 1380 | .read = cache_read_procfs, |
1360 | .write = cache_write_procfs, | 1381 | .write = cache_write_procfs, |
1361 | .poll = cache_poll_procfs, | 1382 | .poll = cache_poll_procfs, |
1362 | .ioctl = cache_ioctl_procfs, /* for FIONREAD */ | 1383 | .unlocked_ioctl = cache_ioctl_procfs, /* for FIONREAD */ |
1363 | .open = cache_open_procfs, | 1384 | .open = cache_open_procfs, |
1364 | .release = cache_release_procfs, | 1385 | .release = cache_release_procfs, |
1365 | }; | 1386 | }; |
@@ -1525,12 +1546,18 @@ static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait) | |||
1525 | return cache_poll(filp, wait, cd); | 1546 | return cache_poll(filp, wait, cd); |
1526 | } | 1547 | } |
1527 | 1548 | ||
1528 | static int cache_ioctl_pipefs(struct inode *inode, struct file *filp, | 1549 | static long cache_ioctl_pipefs(struct file *filp, |
1529 | unsigned int cmd, unsigned long arg) | 1550 | unsigned int cmd, unsigned long arg) |
1530 | { | 1551 | { |
1552 | struct inode *inode = filp->f_dentry->d_inode; | ||
1531 | struct cache_detail *cd = RPC_I(inode)->private; | 1553 | struct cache_detail *cd = RPC_I(inode)->private; |
1554 | long ret; | ||
1532 | 1555 | ||
1533 | return cache_ioctl(inode, filp, cmd, arg, cd); | 1556 | lock_kernel(); |
1557 | ret = cache_ioctl(inode, filp, cmd, arg, cd); | ||
1558 | unlock_kernel(); | ||
1559 | |||
1560 | return ret; | ||
1534 | } | 1561 | } |
1535 | 1562 | ||
1536 | static int cache_open_pipefs(struct inode *inode, struct file *filp) | 1563 | static int cache_open_pipefs(struct inode *inode, struct file *filp) |
@@ -1553,7 +1580,7 @@ const struct file_operations cache_file_operations_pipefs = { | |||
1553 | .read = cache_read_pipefs, | 1580 | .read = cache_read_pipefs, |
1554 | .write = cache_write_pipefs, | 1581 | .write = cache_write_pipefs, |
1555 | .poll = cache_poll_pipefs, | 1582 | .poll = cache_poll_pipefs, |
1556 | .ioctl = cache_ioctl_pipefs, /* for FIONREAD */ | 1583 | .unlocked_ioctl = cache_ioctl_pipefs, /* for FIONREAD */ |
1557 | .open = cache_open_pipefs, | 1584 | .open = cache_open_pipefs, |
1558 | .release = cache_release_pipefs, | 1585 | .release = cache_release_pipefs, |
1559 | }; | 1586 | }; |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 19c9983d5360..756fc324db9e 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -556,26 +556,16 @@ static const struct rpc_call_ops rpc_default_ops = { | |||
556 | */ | 556 | */ |
557 | struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data) | 557 | struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data) |
558 | { | 558 | { |
559 | struct rpc_task *task, *ret; | 559 | struct rpc_task *task; |
560 | 560 | ||
561 | task = rpc_new_task(task_setup_data); | 561 | task = rpc_new_task(task_setup_data); |
562 | if (task == NULL) { | 562 | if (IS_ERR(task)) |
563 | rpc_release_calldata(task_setup_data->callback_ops, | ||
564 | task_setup_data->callback_data); | ||
565 | ret = ERR_PTR(-ENOMEM); | ||
566 | goto out; | 563 | goto out; |
567 | } | ||
568 | 564 | ||
569 | if (task->tk_status != 0) { | ||
570 | ret = ERR_PTR(task->tk_status); | ||
571 | rpc_put_task(task); | ||
572 | goto out; | ||
573 | } | ||
574 | atomic_inc(&task->tk_count); | 565 | atomic_inc(&task->tk_count); |
575 | rpc_execute(task); | 566 | rpc_execute(task); |
576 | ret = task; | ||
577 | out: | 567 | out: |
578 | return ret; | 568 | return task; |
579 | } | 569 | } |
580 | EXPORT_SYMBOL_GPL(rpc_run_task); | 570 | EXPORT_SYMBOL_GPL(rpc_run_task); |
581 | 571 | ||
@@ -657,9 +647,8 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, | |||
657 | * Create an rpc_task to send the data | 647 | * Create an rpc_task to send the data |
658 | */ | 648 | */ |
659 | task = rpc_new_task(&task_setup_data); | 649 | task = rpc_new_task(&task_setup_data); |
660 | if (!task) { | 650 | if (IS_ERR(task)) { |
661 | xprt_free_bc_request(req); | 651 | xprt_free_bc_request(req); |
662 | task = ERR_PTR(-ENOMEM); | ||
663 | goto out; | 652 | goto out; |
664 | } | 653 | } |
665 | task->tk_rqstp = req; | 654 | task->tk_rqstp = req; |
@@ -1518,7 +1507,6 @@ call_refreshresult(struct rpc_task *task) | |||
1518 | task->tk_action = call_refresh; | 1507 | task->tk_action = call_refresh; |
1519 | if (status != -ETIMEDOUT) | 1508 | if (status != -ETIMEDOUT) |
1520 | rpc_delay(task, 3*HZ); | 1509 | rpc_delay(task, 3*HZ); |
1521 | return; | ||
1522 | } | 1510 | } |
1523 | 1511 | ||
1524 | static __be32 * | 1512 | static __be32 * |
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 20e30c6f8355..95ccbcf45d3e 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/workqueue.h> | 27 | #include <linux/workqueue.h> |
28 | #include <linux/sunrpc/rpc_pipe_fs.h> | 28 | #include <linux/sunrpc/rpc_pipe_fs.h> |
29 | #include <linux/sunrpc/cache.h> | 29 | #include <linux/sunrpc/cache.h> |
30 | #include <linux/smp_lock.h> | ||
30 | 31 | ||
31 | static struct vfsmount *rpc_mount __read_mostly; | 32 | static struct vfsmount *rpc_mount __read_mostly; |
32 | static int rpc_mount_count; | 33 | static int rpc_mount_count; |
@@ -309,8 +310,7 @@ rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait) | |||
309 | } | 310 | } |
310 | 311 | ||
311 | static int | 312 | static int |
312 | rpc_pipe_ioctl(struct inode *ino, struct file *filp, | 313 | rpc_pipe_ioctl_unlocked(struct file *filp, unsigned int cmd, unsigned long arg) |
313 | unsigned int cmd, unsigned long arg) | ||
314 | { | 314 | { |
315 | struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode); | 315 | struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode); |
316 | int len; | 316 | int len; |
@@ -331,13 +331,25 @@ rpc_pipe_ioctl(struct inode *ino, struct file *filp, | |||
331 | } | 331 | } |
332 | } | 332 | } |
333 | 333 | ||
334 | static long | ||
335 | rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | ||
336 | { | ||
337 | long ret; | ||
338 | |||
339 | lock_kernel(); | ||
340 | ret = rpc_pipe_ioctl_unlocked(filp, cmd, arg); | ||
341 | unlock_kernel(); | ||
342 | |||
343 | return ret; | ||
344 | } | ||
345 | |||
334 | static const struct file_operations rpc_pipe_fops = { | 346 | static const struct file_operations rpc_pipe_fops = { |
335 | .owner = THIS_MODULE, | 347 | .owner = THIS_MODULE, |
336 | .llseek = no_llseek, | 348 | .llseek = no_llseek, |
337 | .read = rpc_pipe_read, | 349 | .read = rpc_pipe_read, |
338 | .write = rpc_pipe_write, | 350 | .write = rpc_pipe_write, |
339 | .poll = rpc_pipe_poll, | 351 | .poll = rpc_pipe_poll, |
340 | .ioctl = rpc_pipe_ioctl, | 352 | .unlocked_ioctl = rpc_pipe_ioctl, |
341 | .open = rpc_pipe_open, | 353 | .open = rpc_pipe_open, |
342 | .release = rpc_pipe_release, | 354 | .release = rpc_pipe_release, |
343 | }; | 355 | }; |
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 121105355f60..dac219a56ae1 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c | |||
@@ -783,7 +783,7 @@ static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p, | |||
783 | port = ntohl(*p); | 783 | port = ntohl(*p); |
784 | dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid, | 784 | dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid, |
785 | task->tk_msg.rpc_proc->p_name, port); | 785 | task->tk_msg.rpc_proc->p_name, port); |
786 | if (unlikely(port > USHORT_MAX)) | 786 | if (unlikely(port > USHRT_MAX)) |
787 | return -EIO; | 787 | return -EIO; |
788 | 788 | ||
789 | rpcb->r_port = port; | 789 | rpcb->r_port = port; |
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index aae6907fd546..4a843b883b89 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -25,7 +25,6 @@ | |||
25 | 25 | ||
26 | #ifdef RPC_DEBUG | 26 | #ifdef RPC_DEBUG |
27 | #define RPCDBG_FACILITY RPCDBG_SCHED | 27 | #define RPCDBG_FACILITY RPCDBG_SCHED |
28 | #define RPC_TASK_MAGIC_ID 0xf00baa | ||
29 | #endif | 28 | #endif |
30 | 29 | ||
31 | /* | 30 | /* |
@@ -237,7 +236,6 @@ static void rpc_task_set_debuginfo(struct rpc_task *task) | |||
237 | { | 236 | { |
238 | static atomic_t rpc_pid; | 237 | static atomic_t rpc_pid; |
239 | 238 | ||
240 | task->tk_magic = RPC_TASK_MAGIC_ID; | ||
241 | task->tk_pid = atomic_inc_return(&rpc_pid); | 239 | task->tk_pid = atomic_inc_return(&rpc_pid); |
242 | } | 240 | } |
243 | #else | 241 | #else |
@@ -360,9 +358,6 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task | |||
360 | dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n", | 358 | dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n", |
361 | task->tk_pid, jiffies); | 359 | task->tk_pid, jiffies); |
362 | 360 | ||
363 | #ifdef RPC_DEBUG | ||
364 | BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); | ||
365 | #endif | ||
366 | /* Has the task been executed yet? If not, we cannot wake it up! */ | 361 | /* Has the task been executed yet? If not, we cannot wake it up! */ |
367 | if (!RPC_IS_ACTIVATED(task)) { | 362 | if (!RPC_IS_ACTIVATED(task)) { |
368 | printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task); | 363 | printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task); |
@@ -834,7 +829,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta | |||
834 | } | 829 | } |
835 | 830 | ||
836 | /* starting timestamp */ | 831 | /* starting timestamp */ |
837 | task->tk_start = jiffies; | 832 | task->tk_start = ktime_get(); |
838 | 833 | ||
839 | dprintk("RPC: new task initialized, procpid %u\n", | 834 | dprintk("RPC: new task initialized, procpid %u\n", |
840 | task_pid_nr(current)); | 835 | task_pid_nr(current)); |
@@ -856,16 +851,23 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data) | |||
856 | 851 | ||
857 | if (task == NULL) { | 852 | if (task == NULL) { |
858 | task = rpc_alloc_task(); | 853 | task = rpc_alloc_task(); |
859 | if (task == NULL) | 854 | if (task == NULL) { |
860 | goto out; | 855 | rpc_release_calldata(setup_data->callback_ops, |
856 | setup_data->callback_data); | ||
857 | return ERR_PTR(-ENOMEM); | ||
858 | } | ||
861 | flags = RPC_TASK_DYNAMIC; | 859 | flags = RPC_TASK_DYNAMIC; |
862 | } | 860 | } |
863 | 861 | ||
864 | rpc_init_task(task, setup_data); | 862 | rpc_init_task(task, setup_data); |
863 | if (task->tk_status < 0) { | ||
864 | int err = task->tk_status; | ||
865 | rpc_put_task(task); | ||
866 | return ERR_PTR(err); | ||
867 | } | ||
865 | 868 | ||
866 | task->tk_flags |= flags; | 869 | task->tk_flags |= flags; |
867 | dprintk("RPC: allocated task %p\n", task); | 870 | dprintk("RPC: allocated task %p\n", task); |
868 | out: | ||
869 | return task; | 871 | return task; |
870 | } | 872 | } |
871 | 873 | ||
@@ -909,9 +911,6 @@ EXPORT_SYMBOL_GPL(rpc_put_task); | |||
909 | 911 | ||
910 | static void rpc_release_task(struct rpc_task *task) | 912 | static void rpc_release_task(struct rpc_task *task) |
911 | { | 913 | { |
912 | #ifdef RPC_DEBUG | ||
913 | BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); | ||
914 | #endif | ||
915 | dprintk("RPC: %5u release task\n", task->tk_pid); | 914 | dprintk("RPC: %5u release task\n", task->tk_pid); |
916 | 915 | ||
917 | if (!list_empty(&task->tk_task)) { | 916 | if (!list_empty(&task->tk_task)) { |
@@ -923,9 +922,6 @@ static void rpc_release_task(struct rpc_task *task) | |||
923 | } | 922 | } |
924 | BUG_ON (RPC_IS_QUEUED(task)); | 923 | BUG_ON (RPC_IS_QUEUED(task)); |
925 | 924 | ||
926 | #ifdef RPC_DEBUG | ||
927 | task->tk_magic = 0; | ||
928 | #endif | ||
929 | /* Wake up anyone who is waiting for task completion */ | 925 | /* Wake up anyone who is waiting for task completion */ |
930 | rpc_mark_complete_task(task); | 926 | rpc_mark_complete_task(task); |
931 | 927 | ||
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c index 5785d2037f45..ea1046f3f9a3 100644 --- a/net/sunrpc/stats.c +++ b/net/sunrpc/stats.c | |||
@@ -144,7 +144,7 @@ void rpc_count_iostats(struct rpc_task *task) | |||
144 | struct rpc_rqst *req = task->tk_rqstp; | 144 | struct rpc_rqst *req = task->tk_rqstp; |
145 | struct rpc_iostats *stats; | 145 | struct rpc_iostats *stats; |
146 | struct rpc_iostats *op_metrics; | 146 | struct rpc_iostats *op_metrics; |
147 | long rtt, execute, queue; | 147 | ktime_t delta; |
148 | 148 | ||
149 | if (!task->tk_client || !task->tk_client->cl_metrics || !req) | 149 | if (!task->tk_client || !task->tk_client->cl_metrics || !req) |
150 | return; | 150 | return; |
@@ -156,23 +156,16 @@ void rpc_count_iostats(struct rpc_task *task) | |||
156 | op_metrics->om_ntrans += req->rq_ntrans; | 156 | op_metrics->om_ntrans += req->rq_ntrans; |
157 | op_metrics->om_timeouts += task->tk_timeouts; | 157 | op_metrics->om_timeouts += task->tk_timeouts; |
158 | 158 | ||
159 | op_metrics->om_bytes_sent += task->tk_bytes_sent; | 159 | op_metrics->om_bytes_sent += req->rq_xmit_bytes_sent; |
160 | op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd; | 160 | op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd; |
161 | 161 | ||
162 | queue = (long)req->rq_xtime - task->tk_start; | 162 | delta = ktime_sub(req->rq_xtime, task->tk_start); |
163 | if (queue < 0) | 163 | op_metrics->om_queue = ktime_add(op_metrics->om_queue, delta); |
164 | queue = -queue; | ||
165 | op_metrics->om_queue += queue; | ||
166 | 164 | ||
167 | rtt = task->tk_rtt; | 165 | op_metrics->om_rtt = ktime_add(op_metrics->om_rtt, req->rq_rtt); |
168 | if (rtt < 0) | ||
169 | rtt = -rtt; | ||
170 | op_metrics->om_rtt += rtt; | ||
171 | 166 | ||
172 | execute = (long)jiffies - task->tk_start; | 167 | delta = ktime_sub(ktime_get(), task->tk_start); |
173 | if (execute < 0) | 168 | op_metrics->om_execute = ktime_add(op_metrics->om_execute, delta); |
174 | execute = -execute; | ||
175 | op_metrics->om_execute += execute; | ||
176 | } | 169 | } |
177 | 170 | ||
178 | static void _print_name(struct seq_file *seq, unsigned int op, | 171 | static void _print_name(struct seq_file *seq, unsigned int op, |
@@ -186,8 +179,6 @@ static void _print_name(struct seq_file *seq, unsigned int op, | |||
186 | seq_printf(seq, "\t%12u: ", op); | 179 | seq_printf(seq, "\t%12u: ", op); |
187 | } | 180 | } |
188 | 181 | ||
189 | #define MILLISECS_PER_JIFFY (1000 / HZ) | ||
190 | |||
191 | void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) | 182 | void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) |
192 | { | 183 | { |
193 | struct rpc_iostats *stats = clnt->cl_metrics; | 184 | struct rpc_iostats *stats = clnt->cl_metrics; |
@@ -214,9 +205,9 @@ void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) | |||
214 | metrics->om_timeouts, | 205 | metrics->om_timeouts, |
215 | metrics->om_bytes_sent, | 206 | metrics->om_bytes_sent, |
216 | metrics->om_bytes_recv, | 207 | metrics->om_bytes_recv, |
217 | metrics->om_queue * MILLISECS_PER_JIFFY, | 208 | ktime_to_ms(metrics->om_queue), |
218 | metrics->om_rtt * MILLISECS_PER_JIFFY, | 209 | ktime_to_ms(metrics->om_rtt), |
219 | metrics->om_execute * MILLISECS_PER_JIFFY); | 210 | ktime_to_ms(metrics->om_execute)); |
220 | } | 211 | } |
221 | } | 212 | } |
222 | EXPORT_SYMBOL_GPL(rpc_print_iostats); | 213 | EXPORT_SYMBOL_GPL(rpc_print_iostats); |
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 061b2e0f9118..cbc084939dd8 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
@@ -744,8 +744,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
744 | if (rqstp->rq_deferred) { | 744 | if (rqstp->rq_deferred) { |
745 | svc_xprt_received(xprt); | 745 | svc_xprt_received(xprt); |
746 | len = svc_deferred_recv(rqstp); | 746 | len = svc_deferred_recv(rqstp); |
747 | } else | 747 | } else { |
748 | len = xprt->xpt_ops->xpo_recvfrom(rqstp); | 748 | len = xprt->xpt_ops->xpo_recvfrom(rqstp); |
749 | svc_xprt_received(xprt); | ||
750 | } | ||
749 | dprintk("svc: got len=%d\n", len); | 751 | dprintk("svc: got len=%d\n", len); |
750 | } | 752 | } |
751 | 753 | ||
@@ -893,12 +895,12 @@ void svc_delete_xprt(struct svc_xprt *xprt) | |||
893 | */ | 895 | */ |
894 | if (test_bit(XPT_TEMP, &xprt->xpt_flags)) | 896 | if (test_bit(XPT_TEMP, &xprt->xpt_flags)) |
895 | serv->sv_tmpcnt--; | 897 | serv->sv_tmpcnt--; |
898 | spin_unlock_bh(&serv->sv_lock); | ||
896 | 899 | ||
897 | while ((dr = svc_deferred_dequeue(xprt)) != NULL) | 900 | while ((dr = svc_deferred_dequeue(xprt)) != NULL) |
898 | kfree(dr); | 901 | kfree(dr); |
899 | 902 | ||
900 | svc_xprt_put(xprt); | 903 | svc_xprt_put(xprt); |
901 | spin_unlock_bh(&serv->sv_lock); | ||
902 | } | 904 | } |
903 | 905 | ||
904 | void svc_close_xprt(struct svc_xprt *xprt) | 906 | void svc_close_xprt(struct svc_xprt *xprt) |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index ce0d5b35c2ac..7e534dd09077 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -150,7 +150,6 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh) | |||
150 | } | 150 | } |
151 | break; | 151 | break; |
152 | } | 152 | } |
153 | return; | ||
154 | } | 153 | } |
155 | 154 | ||
156 | /* | 155 | /* |
@@ -547,7 +546,6 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) | |||
547 | dprintk("svc: recvfrom returned error %d\n", -err); | 546 | dprintk("svc: recvfrom returned error %d\n", -err); |
548 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); | 547 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); |
549 | } | 548 | } |
550 | svc_xprt_received(&svsk->sk_xprt); | ||
551 | return -EAGAIN; | 549 | return -EAGAIN; |
552 | } | 550 | } |
553 | len = svc_addr_len(svc_addr(rqstp)); | 551 | len = svc_addr_len(svc_addr(rqstp)); |
@@ -562,11 +560,6 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) | |||
562 | svsk->sk_sk->sk_stamp = skb->tstamp; | 560 | svsk->sk_sk->sk_stamp = skb->tstamp; |
563 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */ | 561 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */ |
564 | 562 | ||
565 | /* | ||
566 | * Maybe more packets - kick another thread ASAP. | ||
567 | */ | ||
568 | svc_xprt_received(&svsk->sk_xprt); | ||
569 | |||
570 | len = skb->len - sizeof(struct udphdr); | 563 | len = skb->len - sizeof(struct udphdr); |
571 | rqstp->rq_arg.len = len; | 564 | rqstp->rq_arg.len = len; |
572 | 565 | ||
@@ -917,7 +910,6 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) | |||
917 | if (len < want) { | 910 | if (len < want) { |
918 | dprintk("svc: short recvfrom while reading record " | 911 | dprintk("svc: short recvfrom while reading record " |
919 | "length (%d of %d)\n", len, want); | 912 | "length (%d of %d)\n", len, want); |
920 | svc_xprt_received(&svsk->sk_xprt); | ||
921 | goto err_again; /* record header not complete */ | 913 | goto err_again; /* record header not complete */ |
922 | } | 914 | } |
923 | 915 | ||
@@ -953,7 +945,6 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) | |||
953 | if (len < svsk->sk_reclen) { | 945 | if (len < svsk->sk_reclen) { |
954 | dprintk("svc: incomplete TCP record (%d of %d)\n", | 946 | dprintk("svc: incomplete TCP record (%d of %d)\n", |
955 | len, svsk->sk_reclen); | 947 | len, svsk->sk_reclen); |
956 | svc_xprt_received(&svsk->sk_xprt); | ||
957 | goto err_again; /* record not complete */ | 948 | goto err_again; /* record not complete */ |
958 | } | 949 | } |
959 | len = svsk->sk_reclen; | 950 | len = svsk->sk_reclen; |
@@ -961,14 +952,11 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) | |||
961 | 952 | ||
962 | return len; | 953 | return len; |
963 | error: | 954 | error: |
964 | if (len == -EAGAIN) { | 955 | if (len == -EAGAIN) |
965 | dprintk("RPC: TCP recv_record got EAGAIN\n"); | 956 | dprintk("RPC: TCP recv_record got EAGAIN\n"); |
966 | svc_xprt_received(&svsk->sk_xprt); | ||
967 | } | ||
968 | return len; | 957 | return len; |
969 | err_delete: | 958 | err_delete: |
970 | set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); | 959 | set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); |
971 | svc_xprt_received(&svsk->sk_xprt); | ||
972 | err_again: | 960 | err_again: |
973 | return -EAGAIN; | 961 | return -EAGAIN; |
974 | } | 962 | } |
@@ -1110,7 +1098,6 @@ out: | |||
1110 | svsk->sk_tcplen = 0; | 1098 | svsk->sk_tcplen = 0; |
1111 | 1099 | ||
1112 | svc_xprt_copy_addrs(rqstp, &svsk->sk_xprt); | 1100 | svc_xprt_copy_addrs(rqstp, &svsk->sk_xprt); |
1113 | svc_xprt_received(&svsk->sk_xprt); | ||
1114 | if (serv->sv_stats) | 1101 | if (serv->sv_stats) |
1115 | serv->sv_stats->nettcpcnt++; | 1102 | serv->sv_stats->nettcpcnt++; |
1116 | 1103 | ||
@@ -1119,7 +1106,6 @@ out: | |||
1119 | err_again: | 1106 | err_again: |
1120 | if (len == -EAGAIN) { | 1107 | if (len == -EAGAIN) { |
1121 | dprintk("RPC: TCP recvfrom got EAGAIN\n"); | 1108 | dprintk("RPC: TCP recvfrom got EAGAIN\n"); |
1122 | svc_xprt_received(&svsk->sk_xprt); | ||
1123 | return len; | 1109 | return len; |
1124 | } | 1110 | } |
1125 | error: | 1111 | error: |
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 2763fde88499..a1f82a87d34d 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c | |||
@@ -762,6 +762,7 @@ int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, un | |||
762 | __write_bytes_to_xdr_buf(&subbuf, obj, len); | 762 | __write_bytes_to_xdr_buf(&subbuf, obj, len); |
763 | return 0; | 763 | return 0; |
764 | } | 764 | } |
765 | EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf); | ||
765 | 766 | ||
766 | int | 767 | int |
767 | xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj) | 768 | xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj) |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 699ade68aac1..dcd0132396ba 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <linux/interrupt.h> | 43 | #include <linux/interrupt.h> |
44 | #include <linux/workqueue.h> | 44 | #include <linux/workqueue.h> |
45 | #include <linux/net.h> | 45 | #include <linux/net.h> |
46 | #include <linux/ktime.h> | ||
46 | 47 | ||
47 | #include <linux/sunrpc/clnt.h> | 48 | #include <linux/sunrpc/clnt.h> |
48 | #include <linux/sunrpc/metrics.h> | 49 | #include <linux/sunrpc/metrics.h> |
@@ -62,7 +63,6 @@ | |||
62 | * Local functions | 63 | * Local functions |
63 | */ | 64 | */ |
64 | static void xprt_request_init(struct rpc_task *, struct rpc_xprt *); | 65 | static void xprt_request_init(struct rpc_task *, struct rpc_xprt *); |
65 | static inline void do_xprt_reserve(struct rpc_task *); | ||
66 | static void xprt_connect_status(struct rpc_task *task); | 66 | static void xprt_connect_status(struct rpc_task *task); |
67 | static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); | 67 | static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); |
68 | 68 | ||
@@ -166,7 +166,6 @@ EXPORT_SYMBOL_GPL(xprt_unregister_transport); | |||
166 | int xprt_load_transport(const char *transport_name) | 166 | int xprt_load_transport(const char *transport_name) |
167 | { | 167 | { |
168 | struct xprt_class *t; | 168 | struct xprt_class *t; |
169 | char module_name[sizeof t->name + 5]; | ||
170 | int result; | 169 | int result; |
171 | 170 | ||
172 | result = 0; | 171 | result = 0; |
@@ -178,9 +177,7 @@ int xprt_load_transport(const char *transport_name) | |||
178 | } | 177 | } |
179 | } | 178 | } |
180 | spin_unlock(&xprt_list_lock); | 179 | spin_unlock(&xprt_list_lock); |
181 | strcpy(module_name, "xprt"); | 180 | result = request_module("xprt%s", transport_name); |
182 | strncat(module_name, transport_name, sizeof t->name); | ||
183 | result = request_module(module_name); | ||
184 | out: | 181 | out: |
185 | return result; | 182 | return result; |
186 | } | 183 | } |
@@ -711,12 +708,16 @@ void xprt_connect(struct rpc_task *task) | |||
711 | if (task->tk_rqstp) | 708 | if (task->tk_rqstp) |
712 | task->tk_rqstp->rq_bytes_sent = 0; | 709 | task->tk_rqstp->rq_bytes_sent = 0; |
713 | 710 | ||
714 | task->tk_timeout = xprt->connect_timeout; | 711 | task->tk_timeout = task->tk_rqstp->rq_timeout; |
715 | rpc_sleep_on(&xprt->pending, task, xprt_connect_status); | 712 | rpc_sleep_on(&xprt->pending, task, xprt_connect_status); |
713 | |||
714 | if (test_bit(XPRT_CLOSING, &xprt->state)) | ||
715 | return; | ||
716 | if (xprt_test_and_set_connecting(xprt)) | ||
717 | return; | ||
716 | xprt->stat.connect_start = jiffies; | 718 | xprt->stat.connect_start = jiffies; |
717 | xprt->ops->connect(task); | 719 | xprt->ops->connect(task); |
718 | } | 720 | } |
719 | return; | ||
720 | } | 721 | } |
721 | 722 | ||
722 | static void xprt_connect_status(struct rpc_task *task) | 723 | static void xprt_connect_status(struct rpc_task *task) |
@@ -771,25 +772,19 @@ struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid) | |||
771 | } | 772 | } |
772 | EXPORT_SYMBOL_GPL(xprt_lookup_rqst); | 773 | EXPORT_SYMBOL_GPL(xprt_lookup_rqst); |
773 | 774 | ||
774 | /** | 775 | static void xprt_update_rtt(struct rpc_task *task) |
775 | * xprt_update_rtt - update an RPC client's RTT state after receiving a reply | ||
776 | * @task: RPC request that recently completed | ||
777 | * | ||
778 | */ | ||
779 | void xprt_update_rtt(struct rpc_task *task) | ||
780 | { | 776 | { |
781 | struct rpc_rqst *req = task->tk_rqstp; | 777 | struct rpc_rqst *req = task->tk_rqstp; |
782 | struct rpc_rtt *rtt = task->tk_client->cl_rtt; | 778 | struct rpc_rtt *rtt = task->tk_client->cl_rtt; |
783 | unsigned timer = task->tk_msg.rpc_proc->p_timer; | 779 | unsigned timer = task->tk_msg.rpc_proc->p_timer; |
780 | long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt)); | ||
784 | 781 | ||
785 | if (timer) { | 782 | if (timer) { |
786 | if (req->rq_ntrans == 1) | 783 | if (req->rq_ntrans == 1) |
787 | rpc_update_rtt(rtt, timer, | 784 | rpc_update_rtt(rtt, timer, m); |
788 | (long)jiffies - req->rq_xtime); | ||
789 | rpc_set_timeo(rtt, timer, req->rq_ntrans - 1); | 785 | rpc_set_timeo(rtt, timer, req->rq_ntrans - 1); |
790 | } | 786 | } |
791 | } | 787 | } |
792 | EXPORT_SYMBOL_GPL(xprt_update_rtt); | ||
793 | 788 | ||
794 | /** | 789 | /** |
795 | * xprt_complete_rqst - called when reply processing is complete | 790 | * xprt_complete_rqst - called when reply processing is complete |
@@ -807,7 +802,9 @@ void xprt_complete_rqst(struct rpc_task *task, int copied) | |||
807 | task->tk_pid, ntohl(req->rq_xid), copied); | 802 | task->tk_pid, ntohl(req->rq_xid), copied); |
808 | 803 | ||
809 | xprt->stat.recvs++; | 804 | xprt->stat.recvs++; |
810 | task->tk_rtt = (long)jiffies - req->rq_xtime; | 805 | req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime); |
806 | if (xprt->ops->timer != NULL) | ||
807 | xprt_update_rtt(task); | ||
811 | 808 | ||
812 | list_del_init(&req->rq_list); | 809 | list_del_init(&req->rq_list); |
813 | req->rq_private_buf.len = copied; | 810 | req->rq_private_buf.len = copied; |
@@ -906,7 +903,7 @@ void xprt_transmit(struct rpc_task *task) | |||
906 | return; | 903 | return; |
907 | 904 | ||
908 | req->rq_connect_cookie = xprt->connect_cookie; | 905 | req->rq_connect_cookie = xprt->connect_cookie; |
909 | req->rq_xtime = jiffies; | 906 | req->rq_xtime = ktime_get(); |
910 | status = xprt->ops->send_request(task); | 907 | status = xprt->ops->send_request(task); |
911 | if (status != 0) { | 908 | if (status != 0) { |
912 | task->tk_status = status; | 909 | task->tk_status = status; |
@@ -935,7 +932,7 @@ void xprt_transmit(struct rpc_task *task) | |||
935 | spin_unlock_bh(&xprt->transport_lock); | 932 | spin_unlock_bh(&xprt->transport_lock); |
936 | } | 933 | } |
937 | 934 | ||
938 | static inline void do_xprt_reserve(struct rpc_task *task) | 935 | static void xprt_alloc_slot(struct rpc_task *task) |
939 | { | 936 | { |
940 | struct rpc_xprt *xprt = task->tk_xprt; | 937 | struct rpc_xprt *xprt = task->tk_xprt; |
941 | 938 | ||
@@ -955,6 +952,16 @@ static inline void do_xprt_reserve(struct rpc_task *task) | |||
955 | rpc_sleep_on(&xprt->backlog, task, NULL); | 952 | rpc_sleep_on(&xprt->backlog, task, NULL); |
956 | } | 953 | } |
957 | 954 | ||
955 | static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) | ||
956 | { | ||
957 | memset(req, 0, sizeof(*req)); /* mark unused */ | ||
958 | |||
959 | spin_lock(&xprt->reserve_lock); | ||
960 | list_add(&req->rq_list, &xprt->free); | ||
961 | rpc_wake_up_next(&xprt->backlog); | ||
962 | spin_unlock(&xprt->reserve_lock); | ||
963 | } | ||
964 | |||
958 | /** | 965 | /** |
959 | * xprt_reserve - allocate an RPC request slot | 966 | * xprt_reserve - allocate an RPC request slot |
960 | * @task: RPC task requesting a slot allocation | 967 | * @task: RPC task requesting a slot allocation |
@@ -968,7 +975,7 @@ void xprt_reserve(struct rpc_task *task) | |||
968 | 975 | ||
969 | task->tk_status = -EIO; | 976 | task->tk_status = -EIO; |
970 | spin_lock(&xprt->reserve_lock); | 977 | spin_lock(&xprt->reserve_lock); |
971 | do_xprt_reserve(task); | 978 | xprt_alloc_slot(task); |
972 | spin_unlock(&xprt->reserve_lock); | 979 | spin_unlock(&xprt->reserve_lock); |
973 | } | 980 | } |
974 | 981 | ||
@@ -1006,14 +1013,10 @@ void xprt_release(struct rpc_task *task) | |||
1006 | { | 1013 | { |
1007 | struct rpc_xprt *xprt; | 1014 | struct rpc_xprt *xprt; |
1008 | struct rpc_rqst *req; | 1015 | struct rpc_rqst *req; |
1009 | int is_bc_request; | ||
1010 | 1016 | ||
1011 | if (!(req = task->tk_rqstp)) | 1017 | if (!(req = task->tk_rqstp)) |
1012 | return; | 1018 | return; |
1013 | 1019 | ||
1014 | /* Preallocated backchannel request? */ | ||
1015 | is_bc_request = bc_prealloc(req); | ||
1016 | |||
1017 | xprt = req->rq_xprt; | 1020 | xprt = req->rq_xprt; |
1018 | rpc_count_iostats(task); | 1021 | rpc_count_iostats(task); |
1019 | spin_lock_bh(&xprt->transport_lock); | 1022 | spin_lock_bh(&xprt->transport_lock); |
@@ -1027,21 +1030,16 @@ void xprt_release(struct rpc_task *task) | |||
1027 | mod_timer(&xprt->timer, | 1030 | mod_timer(&xprt->timer, |
1028 | xprt->last_used + xprt->idle_timeout); | 1031 | xprt->last_used + xprt->idle_timeout); |
1029 | spin_unlock_bh(&xprt->transport_lock); | 1032 | spin_unlock_bh(&xprt->transport_lock); |
1030 | if (!bc_prealloc(req)) | 1033 | if (req->rq_buffer) |
1031 | xprt->ops->buf_free(req->rq_buffer); | 1034 | xprt->ops->buf_free(req->rq_buffer); |
1032 | task->tk_rqstp = NULL; | 1035 | task->tk_rqstp = NULL; |
1033 | if (req->rq_release_snd_buf) | 1036 | if (req->rq_release_snd_buf) |
1034 | req->rq_release_snd_buf(req); | 1037 | req->rq_release_snd_buf(req); |
1035 | 1038 | ||
1036 | dprintk("RPC: %5u release request %p\n", task->tk_pid, req); | 1039 | dprintk("RPC: %5u release request %p\n", task->tk_pid, req); |
1037 | if (likely(!is_bc_request)) { | 1040 | if (likely(!bc_prealloc(req))) |
1038 | memset(req, 0, sizeof(*req)); /* mark unused */ | 1041 | xprt_free_slot(xprt, req); |
1039 | 1042 | else | |
1040 | spin_lock(&xprt->reserve_lock); | ||
1041 | list_add(&req->rq_list, &xprt->free); | ||
1042 | rpc_wake_up_next(&xprt->backlog); | ||
1043 | spin_unlock(&xprt->reserve_lock); | ||
1044 | } else | ||
1045 | xprt_free_bc_request(req); | 1043 | xprt_free_bc_request(req); |
1046 | } | 1044 | } |
1047 | 1045 | ||
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index f92e37eb413c..0194de814933 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | |||
@@ -566,7 +566,6 @@ static int rdma_read_complete(struct svc_rqst *rqstp, | |||
566 | ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base, | 566 | ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base, |
567 | rqstp->rq_arg.head[0].iov_len); | 567 | rqstp->rq_arg.head[0].iov_len); |
568 | 568 | ||
569 | svc_xprt_received(rqstp->rq_xprt); | ||
570 | return ret; | 569 | return ret; |
571 | } | 570 | } |
572 | 571 | ||
@@ -665,7 +664,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) | |||
665 | rqstp->rq_arg.head[0].iov_len); | 664 | rqstp->rq_arg.head[0].iov_len); |
666 | rqstp->rq_prot = IPPROTO_MAX; | 665 | rqstp->rq_prot = IPPROTO_MAX; |
667 | svc_xprt_copy_addrs(rqstp, xprt); | 666 | svc_xprt_copy_addrs(rqstp, xprt); |
668 | svc_xprt_received(xprt); | ||
669 | return ret; | 667 | return ret; |
670 | 668 | ||
671 | close_out: | 669 | close_out: |
@@ -678,6 +676,5 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) | |||
678 | */ | 676 | */ |
679 | set_bit(XPT_CLOSE, &xprt->xpt_flags); | 677 | set_bit(XPT_CLOSE, &xprt->xpt_flags); |
680 | defer: | 678 | defer: |
681 | svc_xprt_received(xprt); | ||
682 | return 0; | 679 | return 0; |
683 | } | 680 | } |
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 187257b1d880..a85e866a77f7 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c | |||
@@ -305,7 +305,6 @@ xprt_setup_rdma(struct xprt_create *args) | |||
305 | /* 60 second timeout, no retries */ | 305 | /* 60 second timeout, no retries */ |
306 | xprt->timeout = &xprt_rdma_default_timeout; | 306 | xprt->timeout = &xprt_rdma_default_timeout; |
307 | xprt->bind_timeout = (60U * HZ); | 307 | xprt->bind_timeout = (60U * HZ); |
308 | xprt->connect_timeout = (60U * HZ); | ||
309 | xprt->reestablish_timeout = (5U * HZ); | 308 | xprt->reestablish_timeout = (5U * HZ); |
310 | xprt->idle_timeout = (5U * 60 * HZ); | 309 | xprt->idle_timeout = (5U * 60 * HZ); |
311 | 310 | ||
@@ -449,21 +448,19 @@ xprt_rdma_connect(struct rpc_task *task) | |||
449 | struct rpc_xprt *xprt = (struct rpc_xprt *)task->tk_xprt; | 448 | struct rpc_xprt *xprt = (struct rpc_xprt *)task->tk_xprt; |
450 | struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); | 449 | struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); |
451 | 450 | ||
452 | if (!xprt_test_and_set_connecting(xprt)) { | 451 | if (r_xprt->rx_ep.rep_connected != 0) { |
453 | if (r_xprt->rx_ep.rep_connected != 0) { | 452 | /* Reconnect */ |
454 | /* Reconnect */ | 453 | schedule_delayed_work(&r_xprt->rdma_connect, |
455 | schedule_delayed_work(&r_xprt->rdma_connect, | 454 | xprt->reestablish_timeout); |
456 | xprt->reestablish_timeout); | 455 | xprt->reestablish_timeout <<= 1; |
457 | xprt->reestablish_timeout <<= 1; | 456 | if (xprt->reestablish_timeout > (30 * HZ)) |
458 | if (xprt->reestablish_timeout > (30 * HZ)) | 457 | xprt->reestablish_timeout = (30 * HZ); |
459 | xprt->reestablish_timeout = (30 * HZ); | 458 | else if (xprt->reestablish_timeout < (5 * HZ)) |
460 | else if (xprt->reestablish_timeout < (5 * HZ)) | 459 | xprt->reestablish_timeout = (5 * HZ); |
461 | xprt->reestablish_timeout = (5 * HZ); | 460 | } else { |
462 | } else { | 461 | schedule_delayed_work(&r_xprt->rdma_connect, 0); |
463 | schedule_delayed_work(&r_xprt->rdma_connect, 0); | 462 | if (!RPC_IS_ASYNC(task)) |
464 | if (!RPC_IS_ASYNC(task)) | 463 | flush_scheduled_work(); |
465 | flush_scheduled_work(); | ||
466 | } | ||
467 | } | 464 | } |
468 | } | 465 | } |
469 | 466 | ||
@@ -677,7 +674,7 @@ xprt_rdma_send_request(struct rpc_task *task) | |||
677 | if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req)) | 674 | if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req)) |
678 | goto drop_connection; | 675 | goto drop_connection; |
679 | 676 | ||
680 | task->tk_bytes_sent += rqst->rq_snd_buf.len; | 677 | rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len; |
681 | rqst->rq_bytes_sent = 0; | 678 | rqst->rq_bytes_sent = 0; |
682 | return 0; | 679 | return 0; |
683 | 680 | ||
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 9847c30b5001..2a9675136c68 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -138,20 +138,6 @@ static ctl_table sunrpc_table[] = { | |||
138 | #endif | 138 | #endif |
139 | 139 | ||
140 | /* | 140 | /* |
141 | * Time out for an RPC UDP socket connect. UDP socket connects are | ||
142 | * synchronous, but we set a timeout anyway in case of resource | ||
143 | * exhaustion on the local host. | ||
144 | */ | ||
145 | #define XS_UDP_CONN_TO (5U * HZ) | ||
146 | |||
147 | /* | ||
148 | * Wait duration for an RPC TCP connection to be established. Solaris | ||
149 | * NFS over TCP uses 60 seconds, for example, which is in line with how | ||
150 | * long a server takes to reboot. | ||
151 | */ | ||
152 | #define XS_TCP_CONN_TO (60U * HZ) | ||
153 | |||
154 | /* | ||
155 | * Wait duration for a reply from the RPC portmapper. | 141 | * Wait duration for a reply from the RPC portmapper. |
156 | */ | 142 | */ |
157 | #define XS_BIND_TO (60U * HZ) | 143 | #define XS_BIND_TO (60U * HZ) |
@@ -542,7 +528,7 @@ static int xs_udp_send_request(struct rpc_task *task) | |||
542 | xdr->len - req->rq_bytes_sent, status); | 528 | xdr->len - req->rq_bytes_sent, status); |
543 | 529 | ||
544 | if (status >= 0) { | 530 | if (status >= 0) { |
545 | task->tk_bytes_sent += status; | 531 | req->rq_xmit_bytes_sent += status; |
546 | if (status >= req->rq_slen) | 532 | if (status >= req->rq_slen) |
547 | return 0; | 533 | return 0; |
548 | /* Still some bytes left; set up for a retry later. */ | 534 | /* Still some bytes left; set up for a retry later. */ |
@@ -638,7 +624,7 @@ static int xs_tcp_send_request(struct rpc_task *task) | |||
638 | /* If we've sent the entire packet, immediately | 624 | /* If we've sent the entire packet, immediately |
639 | * reset the count of bytes sent. */ | 625 | * reset the count of bytes sent. */ |
640 | req->rq_bytes_sent += status; | 626 | req->rq_bytes_sent += status; |
641 | task->tk_bytes_sent += status; | 627 | req->rq_xmit_bytes_sent += status; |
642 | if (likely(req->rq_bytes_sent >= req->rq_slen)) { | 628 | if (likely(req->rq_bytes_sent >= req->rq_slen)) { |
643 | req->rq_bytes_sent = 0; | 629 | req->rq_bytes_sent = 0; |
644 | return 0; | 630 | return 0; |
@@ -858,7 +844,6 @@ static void xs_udp_data_ready(struct sock *sk, int len) | |||
858 | dst_confirm(skb_dst(skb)); | 844 | dst_confirm(skb_dst(skb)); |
859 | 845 | ||
860 | xprt_adjust_cwnd(task, copied); | 846 | xprt_adjust_cwnd(task, copied); |
861 | xprt_update_rtt(task); | ||
862 | xprt_complete_rqst(task, copied); | 847 | xprt_complete_rqst(task, copied); |
863 | 848 | ||
864 | out_unlock: | 849 | out_unlock: |
@@ -1050,8 +1035,6 @@ static inline void xs_tcp_read_common(struct rpc_xprt *xprt, | |||
1050 | if (transport->tcp_flags & TCP_RCV_LAST_FRAG) | 1035 | if (transport->tcp_flags & TCP_RCV_LAST_FRAG) |
1051 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; | 1036 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; |
1052 | } | 1037 | } |
1053 | |||
1054 | return; | ||
1055 | } | 1038 | } |
1056 | 1039 | ||
1057 | /* | 1040 | /* |
@@ -2016,9 +1999,6 @@ static void xs_connect(struct rpc_task *task) | |||
2016 | struct rpc_xprt *xprt = task->tk_xprt; | 1999 | struct rpc_xprt *xprt = task->tk_xprt; |
2017 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | 2000 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
2018 | 2001 | ||
2019 | if (xprt_test_and_set_connecting(xprt)) | ||
2020 | return; | ||
2021 | |||
2022 | if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) { | 2002 | if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) { |
2023 | dprintk("RPC: xs_connect delayed xprt %p for %lu " | 2003 | dprintk("RPC: xs_connect delayed xprt %p for %lu " |
2024 | "seconds\n", | 2004 | "seconds\n", |
@@ -2038,16 +2018,6 @@ static void xs_connect(struct rpc_task *task) | |||
2038 | } | 2018 | } |
2039 | } | 2019 | } |
2040 | 2020 | ||
2041 | static void xs_tcp_connect(struct rpc_task *task) | ||
2042 | { | ||
2043 | struct rpc_xprt *xprt = task->tk_xprt; | ||
2044 | |||
2045 | /* Exit if we need to wait for socket shutdown to complete */ | ||
2046 | if (test_bit(XPRT_CLOSING, &xprt->state)) | ||
2047 | return; | ||
2048 | xs_connect(task); | ||
2049 | } | ||
2050 | |||
2051 | /** | 2021 | /** |
2052 | * xs_udp_print_stats - display UDP socket-specifc stats | 2022 | * xs_udp_print_stats - display UDP socket-specifc stats |
2053 | * @xprt: rpc_xprt struct containing statistics | 2023 | * @xprt: rpc_xprt struct containing statistics |
@@ -2210,7 +2180,6 @@ static int bc_send_request(struct rpc_task *task) | |||
2210 | 2180 | ||
2211 | static void bc_close(struct rpc_xprt *xprt) | 2181 | static void bc_close(struct rpc_xprt *xprt) |
2212 | { | 2182 | { |
2213 | return; | ||
2214 | } | 2183 | } |
2215 | 2184 | ||
2216 | /* | 2185 | /* |
@@ -2220,7 +2189,6 @@ static void bc_close(struct rpc_xprt *xprt) | |||
2220 | 2189 | ||
2221 | static void bc_destroy(struct rpc_xprt *xprt) | 2190 | static void bc_destroy(struct rpc_xprt *xprt) |
2222 | { | 2191 | { |
2223 | return; | ||
2224 | } | 2192 | } |
2225 | 2193 | ||
2226 | static struct rpc_xprt_ops xs_udp_ops = { | 2194 | static struct rpc_xprt_ops xs_udp_ops = { |
@@ -2246,7 +2214,7 @@ static struct rpc_xprt_ops xs_tcp_ops = { | |||
2246 | .release_xprt = xs_tcp_release_xprt, | 2214 | .release_xprt = xs_tcp_release_xprt, |
2247 | .rpcbind = rpcb_getport_async, | 2215 | .rpcbind = rpcb_getport_async, |
2248 | .set_port = xs_set_port, | 2216 | .set_port = xs_set_port, |
2249 | .connect = xs_tcp_connect, | 2217 | .connect = xs_connect, |
2250 | .buf_alloc = rpc_malloc, | 2218 | .buf_alloc = rpc_malloc, |
2251 | .buf_free = rpc_free, | 2219 | .buf_free = rpc_free, |
2252 | .send_request = xs_tcp_send_request, | 2220 | .send_request = xs_tcp_send_request, |
@@ -2325,6 +2293,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args) | |||
2325 | struct sockaddr *addr = args->dstaddr; | 2293 | struct sockaddr *addr = args->dstaddr; |
2326 | struct rpc_xprt *xprt; | 2294 | struct rpc_xprt *xprt; |
2327 | struct sock_xprt *transport; | 2295 | struct sock_xprt *transport; |
2296 | struct rpc_xprt *ret; | ||
2328 | 2297 | ||
2329 | xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries); | 2298 | xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries); |
2330 | if (IS_ERR(xprt)) | 2299 | if (IS_ERR(xprt)) |
@@ -2337,7 +2306,6 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args) | |||
2337 | xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); | 2306 | xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); |
2338 | 2307 | ||
2339 | xprt->bind_timeout = XS_BIND_TO; | 2308 | xprt->bind_timeout = XS_BIND_TO; |
2340 | xprt->connect_timeout = XS_UDP_CONN_TO; | ||
2341 | xprt->reestablish_timeout = XS_UDP_REEST_TO; | 2309 | xprt->reestablish_timeout = XS_UDP_REEST_TO; |
2342 | xprt->idle_timeout = XS_IDLE_DISC_TO; | 2310 | xprt->idle_timeout = XS_IDLE_DISC_TO; |
2343 | 2311 | ||
@@ -2363,8 +2331,8 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args) | |||
2363 | xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6); | 2331 | xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6); |
2364 | break; | 2332 | break; |
2365 | default: | 2333 | default: |
2366 | kfree(xprt); | 2334 | ret = ERR_PTR(-EAFNOSUPPORT); |
2367 | return ERR_PTR(-EAFNOSUPPORT); | 2335 | goto out_err; |
2368 | } | 2336 | } |
2369 | 2337 | ||
2370 | if (xprt_bound(xprt)) | 2338 | if (xprt_bound(xprt)) |
@@ -2379,10 +2347,11 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args) | |||
2379 | 2347 | ||
2380 | if (try_module_get(THIS_MODULE)) | 2348 | if (try_module_get(THIS_MODULE)) |
2381 | return xprt; | 2349 | return xprt; |
2382 | 2350 | ret = ERR_PTR(-EINVAL); | |
2351 | out_err: | ||
2383 | kfree(xprt->slot); | 2352 | kfree(xprt->slot); |
2384 | kfree(xprt); | 2353 | kfree(xprt); |
2385 | return ERR_PTR(-EINVAL); | 2354 | return ret; |
2386 | } | 2355 | } |
2387 | 2356 | ||
2388 | static const struct rpc_timeout xs_tcp_default_timeout = { | 2357 | static const struct rpc_timeout xs_tcp_default_timeout = { |
@@ -2401,6 +2370,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) | |||
2401 | struct sockaddr *addr = args->dstaddr; | 2370 | struct sockaddr *addr = args->dstaddr; |
2402 | struct rpc_xprt *xprt; | 2371 | struct rpc_xprt *xprt; |
2403 | struct sock_xprt *transport; | 2372 | struct sock_xprt *transport; |
2373 | struct rpc_xprt *ret; | ||
2404 | 2374 | ||
2405 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); | 2375 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); |
2406 | if (IS_ERR(xprt)) | 2376 | if (IS_ERR(xprt)) |
@@ -2412,7 +2382,6 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) | |||
2412 | xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; | 2382 | xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; |
2413 | 2383 | ||
2414 | xprt->bind_timeout = XS_BIND_TO; | 2384 | xprt->bind_timeout = XS_BIND_TO; |
2415 | xprt->connect_timeout = XS_TCP_CONN_TO; | ||
2416 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; | 2385 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; |
2417 | xprt->idle_timeout = XS_IDLE_DISC_TO; | 2386 | xprt->idle_timeout = XS_IDLE_DISC_TO; |
2418 | 2387 | ||
@@ -2437,8 +2406,8 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) | |||
2437 | xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6); | 2406 | xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6); |
2438 | break; | 2407 | break; |
2439 | default: | 2408 | default: |
2440 | kfree(xprt); | 2409 | ret = ERR_PTR(-EAFNOSUPPORT); |
2441 | return ERR_PTR(-EAFNOSUPPORT); | 2410 | goto out_err; |
2442 | } | 2411 | } |
2443 | 2412 | ||
2444 | if (xprt_bound(xprt)) | 2413 | if (xprt_bound(xprt)) |
@@ -2454,10 +2423,11 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) | |||
2454 | 2423 | ||
2455 | if (try_module_get(THIS_MODULE)) | 2424 | if (try_module_get(THIS_MODULE)) |
2456 | return xprt; | 2425 | return xprt; |
2457 | 2426 | ret = ERR_PTR(-EINVAL); | |
2427 | out_err: | ||
2458 | kfree(xprt->slot); | 2428 | kfree(xprt->slot); |
2459 | kfree(xprt); | 2429 | kfree(xprt); |
2460 | return ERR_PTR(-EINVAL); | 2430 | return ret; |
2461 | } | 2431 | } |
2462 | 2432 | ||
2463 | /** | 2433 | /** |
@@ -2471,9 +2441,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
2471 | struct rpc_xprt *xprt; | 2441 | struct rpc_xprt *xprt; |
2472 | struct sock_xprt *transport; | 2442 | struct sock_xprt *transport; |
2473 | struct svc_sock *bc_sock; | 2443 | struct svc_sock *bc_sock; |
2474 | 2444 | struct rpc_xprt *ret; | |
2475 | if (!args->bc_xprt) | ||
2476 | ERR_PTR(-EINVAL); | ||
2477 | 2445 | ||
2478 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); | 2446 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); |
2479 | if (IS_ERR(xprt)) | 2447 | if (IS_ERR(xprt)) |
@@ -2488,7 +2456,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
2488 | /* backchannel */ | 2456 | /* backchannel */ |
2489 | xprt_set_bound(xprt); | 2457 | xprt_set_bound(xprt); |
2490 | xprt->bind_timeout = 0; | 2458 | xprt->bind_timeout = 0; |
2491 | xprt->connect_timeout = 0; | ||
2492 | xprt->reestablish_timeout = 0; | 2459 | xprt->reestablish_timeout = 0; |
2493 | xprt->idle_timeout = 0; | 2460 | xprt->idle_timeout = 0; |
2494 | 2461 | ||
@@ -2514,8 +2481,8 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
2514 | RPCBIND_NETID_TCP6); | 2481 | RPCBIND_NETID_TCP6); |
2515 | break; | 2482 | break; |
2516 | default: | 2483 | default: |
2517 | kfree(xprt); | 2484 | ret = ERR_PTR(-EAFNOSUPPORT); |
2518 | return ERR_PTR(-EAFNOSUPPORT); | 2485 | goto out_err; |
2519 | } | 2486 | } |
2520 | 2487 | ||
2521 | if (xprt_bound(xprt)) | 2488 | if (xprt_bound(xprt)) |
@@ -2537,9 +2504,11 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
2537 | 2504 | ||
2538 | if (try_module_get(THIS_MODULE)) | 2505 | if (try_module_get(THIS_MODULE)) |
2539 | return xprt; | 2506 | return xprt; |
2507 | ret = ERR_PTR(-EINVAL); | ||
2508 | out_err: | ||
2540 | kfree(xprt->slot); | 2509 | kfree(xprt->slot); |
2541 | kfree(xprt); | 2510 | kfree(xprt); |
2542 | return ERR_PTR(-EINVAL); | 2511 | return ret; |
2543 | } | 2512 | } |
2544 | 2513 | ||
2545 | static struct xprt_class xs_udp_transport = { | 2514 | static struct xprt_class xs_udp_transport = { |
diff --git a/net/sysctl_net.c b/net/sysctl_net.c index 53196009160a..ca84212cfbfe 100644 --- a/net/sysctl_net.c +++ b/net/sysctl_net.c | |||
@@ -82,7 +82,6 @@ static int __net_init sysctl_net_init(struct net *net) | |||
82 | static void __net_exit sysctl_net_exit(struct net *net) | 82 | static void __net_exit sysctl_net_exit(struct net *net) |
83 | { | 83 | { |
84 | WARN_ON(!list_empty(&net->sysctls.list)); | 84 | WARN_ON(!list_empty(&net->sysctls.list)); |
85 | return; | ||
86 | } | 85 | } |
87 | 86 | ||
88 | static struct pernet_operations sysctl_pernet_ops = { | 87 | static struct pernet_operations sysctl_pernet_ops = { |
diff --git a/net/tipc/addr.c b/net/tipc/addr.c index e5207a11edf6..c048543ffbeb 100644 --- a/net/tipc/addr.c +++ b/net/tipc/addr.c | |||
@@ -92,3 +92,35 @@ int tipc_addr_node_valid(u32 addr) | |||
92 | return (tipc_addr_domain_valid(addr) && tipc_node(addr)); | 92 | return (tipc_addr_domain_valid(addr) && tipc_node(addr)); |
93 | } | 93 | } |
94 | 94 | ||
95 | int tipc_in_scope(u32 domain, u32 addr) | ||
96 | { | ||
97 | if (!domain || (domain == addr)) | ||
98 | return 1; | ||
99 | if (domain == (addr & 0xfffff000u)) /* domain <Z.C.0> */ | ||
100 | return 1; | ||
101 | if (domain == (addr & 0xff000000u)) /* domain <Z.0.0> */ | ||
102 | return 1; | ||
103 | return 0; | ||
104 | } | ||
105 | |||
106 | /** | ||
107 | * tipc_addr_scope - convert message lookup domain to a 2-bit scope value | ||
108 | */ | ||
109 | |||
110 | int tipc_addr_scope(u32 domain) | ||
111 | { | ||
112 | if (likely(!domain)) | ||
113 | return TIPC_ZONE_SCOPE; | ||
114 | if (tipc_node(domain)) | ||
115 | return TIPC_NODE_SCOPE; | ||
116 | if (tipc_cluster(domain)) | ||
117 | return TIPC_CLUSTER_SCOPE; | ||
118 | return TIPC_ZONE_SCOPE; | ||
119 | } | ||
120 | |||
121 | char *tipc_addr_string_fill(char *string, u32 addr) | ||
122 | { | ||
123 | snprintf(string, 16, "<%u.%u.%u>", | ||
124 | tipc_zone(addr), tipc_cluster(addr), tipc_node(addr)); | ||
125 | return string; | ||
126 | } | ||
diff --git a/net/tipc/addr.h b/net/tipc/addr.h index 3ba67e6ce03e..c1cc5724d8cc 100644 --- a/net/tipc/addr.h +++ b/net/tipc/addr.h | |||
@@ -67,32 +67,6 @@ static inline int may_route(u32 addr) | |||
67 | return(addr ^ tipc_own_addr) >> 11; | 67 | return(addr ^ tipc_own_addr) >> 11; |
68 | } | 68 | } |
69 | 69 | ||
70 | static inline int in_scope(u32 domain, u32 addr) | ||
71 | { | ||
72 | if (!domain || (domain == addr)) | ||
73 | return 1; | ||
74 | if (domain == (addr & 0xfffff000u)) /* domain <Z.C.0> */ | ||
75 | return 1; | ||
76 | if (domain == (addr & 0xff000000u)) /* domain <Z.0.0> */ | ||
77 | return 1; | ||
78 | return 0; | ||
79 | } | ||
80 | |||
81 | /** | ||
82 | * addr_scope - convert message lookup domain to equivalent 2-bit scope value | ||
83 | */ | ||
84 | |||
85 | static inline int addr_scope(u32 domain) | ||
86 | { | ||
87 | if (likely(!domain)) | ||
88 | return TIPC_ZONE_SCOPE; | ||
89 | if (tipc_node(domain)) | ||
90 | return TIPC_NODE_SCOPE; | ||
91 | if (tipc_cluster(domain)) | ||
92 | return TIPC_CLUSTER_SCOPE; | ||
93 | return TIPC_ZONE_SCOPE; | ||
94 | } | ||
95 | |||
96 | /** | 70 | /** |
97 | * addr_domain - convert 2-bit scope value to equivalent message lookup domain | 71 | * addr_domain - convert 2-bit scope value to equivalent message lookup domain |
98 | * | 72 | * |
@@ -110,14 +84,9 @@ static inline int addr_domain(int sc) | |||
110 | return tipc_addr(tipc_zone(tipc_own_addr), 0, 0); | 84 | return tipc_addr(tipc_zone(tipc_own_addr), 0, 0); |
111 | } | 85 | } |
112 | 86 | ||
113 | static inline char *addr_string_fill(char *string, u32 addr) | ||
114 | { | ||
115 | snprintf(string, 16, "<%u.%u.%u>", | ||
116 | tipc_zone(addr), tipc_cluster(addr), tipc_node(addr)); | ||
117 | return string; | ||
118 | } | ||
119 | |||
120 | int tipc_addr_domain_valid(u32); | 87 | int tipc_addr_domain_valid(u32); |
121 | int tipc_addr_node_valid(u32 addr); | 88 | int tipc_addr_node_valid(u32 addr); |
122 | 89 | int tipc_in_scope(u32 domain, u32 addr); | |
90 | int tipc_addr_scope(u32 domain); | ||
91 | char *tipc_addr_string_fill(char *string, u32 addr); | ||
123 | #endif | 92 | #endif |
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 90a051912c03..a008c6689305 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c | |||
@@ -119,7 +119,7 @@ static struct bclink *bclink = NULL; | |||
119 | static struct link *bcl = NULL; | 119 | static struct link *bcl = NULL; |
120 | static DEFINE_SPINLOCK(bc_lock); | 120 | static DEFINE_SPINLOCK(bc_lock); |
121 | 121 | ||
122 | const char tipc_bclink_name[] = "multicast-link"; | 122 | const char tipc_bclink_name[] = "broadcast-link"; |
123 | 123 | ||
124 | 124 | ||
125 | static u32 buf_seqno(struct sk_buff *buf) | 125 | static u32 buf_seqno(struct sk_buff *buf) |
@@ -275,7 +275,7 @@ static void bclink_send_nack(struct tipc_node *n_ptr) | |||
275 | buf = buf_acquire(INT_H_SIZE); | 275 | buf = buf_acquire(INT_H_SIZE); |
276 | if (buf) { | 276 | if (buf) { |
277 | msg = buf_msg(buf); | 277 | msg = buf_msg(buf); |
278 | msg_init(msg, BCAST_PROTOCOL, STATE_MSG, | 278 | tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, |
279 | INT_H_SIZE, n_ptr->addr); | 279 | INT_H_SIZE, n_ptr->addr); |
280 | msg_set_mc_netid(msg, tipc_net_id); | 280 | msg_set_mc_netid(msg, tipc_net_id); |
281 | msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in)); | 281 | msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in)); |
@@ -822,3 +822,113 @@ void tipc_bclink_stop(void) | |||
822 | spin_unlock_bh(&bc_lock); | 822 | spin_unlock_bh(&bc_lock); |
823 | } | 823 | } |
824 | 824 | ||
825 | |||
826 | /** | ||
827 | * tipc_nmap_add - add a node to a node map | ||
828 | */ | ||
829 | |||
830 | void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node) | ||
831 | { | ||
832 | int n = tipc_node(node); | ||
833 | int w = n / WSIZE; | ||
834 | u32 mask = (1 << (n % WSIZE)); | ||
835 | |||
836 | if ((nm_ptr->map[w] & mask) == 0) { | ||
837 | nm_ptr->count++; | ||
838 | nm_ptr->map[w] |= mask; | ||
839 | } | ||
840 | } | ||
841 | |||
842 | /** | ||
843 | * tipc_nmap_remove - remove a node from a node map | ||
844 | */ | ||
845 | |||
846 | void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node) | ||
847 | { | ||
848 | int n = tipc_node(node); | ||
849 | int w = n / WSIZE; | ||
850 | u32 mask = (1 << (n % WSIZE)); | ||
851 | |||
852 | if ((nm_ptr->map[w] & mask) != 0) { | ||
853 | nm_ptr->map[w] &= ~mask; | ||
854 | nm_ptr->count--; | ||
855 | } | ||
856 | } | ||
857 | |||
858 | /** | ||
859 | * tipc_nmap_diff - find differences between node maps | ||
860 | * @nm_a: input node map A | ||
861 | * @nm_b: input node map B | ||
862 | * @nm_diff: output node map A-B (i.e. nodes of A that are not in B) | ||
863 | */ | ||
864 | |||
865 | void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b, | ||
866 | struct tipc_node_map *nm_diff) | ||
867 | { | ||
868 | int stop = ARRAY_SIZE(nm_a->map); | ||
869 | int w; | ||
870 | int b; | ||
871 | u32 map; | ||
872 | |||
873 | memset(nm_diff, 0, sizeof(*nm_diff)); | ||
874 | for (w = 0; w < stop; w++) { | ||
875 | map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]); | ||
876 | nm_diff->map[w] = map; | ||
877 | if (map != 0) { | ||
878 | for (b = 0 ; b < WSIZE; b++) { | ||
879 | if (map & (1 << b)) | ||
880 | nm_diff->count++; | ||
881 | } | ||
882 | } | ||
883 | } | ||
884 | } | ||
885 | |||
886 | /** | ||
887 | * tipc_port_list_add - add a port to a port list, ensuring no duplicates | ||
888 | */ | ||
889 | |||
890 | void tipc_port_list_add(struct port_list *pl_ptr, u32 port) | ||
891 | { | ||
892 | struct port_list *item = pl_ptr; | ||
893 | int i; | ||
894 | int item_sz = PLSIZE; | ||
895 | int cnt = pl_ptr->count; | ||
896 | |||
897 | for (; ; cnt -= item_sz, item = item->next) { | ||
898 | if (cnt < PLSIZE) | ||
899 | item_sz = cnt; | ||
900 | for (i = 0; i < item_sz; i++) | ||
901 | if (item->ports[i] == port) | ||
902 | return; | ||
903 | if (i < PLSIZE) { | ||
904 | item->ports[i] = port; | ||
905 | pl_ptr->count++; | ||
906 | return; | ||
907 | } | ||
908 | if (!item->next) { | ||
909 | item->next = kmalloc(sizeof(*item), GFP_ATOMIC); | ||
910 | if (!item->next) { | ||
911 | warn("Incomplete multicast delivery, no memory\n"); | ||
912 | return; | ||
913 | } | ||
914 | item->next->next = NULL; | ||
915 | } | ||
916 | } | ||
917 | } | ||
918 | |||
919 | /** | ||
920 | * tipc_port_list_free - free dynamically created entries in port_list chain | ||
921 | * | ||
922 | */ | ||
923 | |||
924 | void tipc_port_list_free(struct port_list *pl_ptr) | ||
925 | { | ||
926 | struct port_list *item; | ||
927 | struct port_list *next; | ||
928 | |||
929 | for (item = pl_ptr->next; item; item = next) { | ||
930 | next = item->next; | ||
931 | kfree(item); | ||
932 | } | ||
933 | } | ||
934 | |||
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h index 4c1771e95c99..e8c2b81658c7 100644 --- a/net/tipc/bcast.h +++ b/net/tipc/bcast.h | |||
@@ -72,41 +72,11 @@ struct tipc_node; | |||
72 | 72 | ||
73 | extern const char tipc_bclink_name[]; | 73 | extern const char tipc_bclink_name[]; |
74 | 74 | ||
75 | void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node); | ||
76 | void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node); | ||
75 | 77 | ||
76 | /** | 78 | /** |
77 | * nmap_add - add a node to a node map | 79 | * tipc_nmap_equal - test for equality of node maps |
78 | */ | ||
79 | |||
80 | static inline void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node) | ||
81 | { | ||
82 | int n = tipc_node(node); | ||
83 | int w = n / WSIZE; | ||
84 | u32 mask = (1 << (n % WSIZE)); | ||
85 | |||
86 | if ((nm_ptr->map[w] & mask) == 0) { | ||
87 | nm_ptr->count++; | ||
88 | nm_ptr->map[w] |= mask; | ||
89 | } | ||
90 | } | ||
91 | |||
92 | /** | ||
93 | * nmap_remove - remove a node from a node map | ||
94 | */ | ||
95 | |||
96 | static inline void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node) | ||
97 | { | ||
98 | int n = tipc_node(node); | ||
99 | int w = n / WSIZE; | ||
100 | u32 mask = (1 << (n % WSIZE)); | ||
101 | |||
102 | if ((nm_ptr->map[w] & mask) != 0) { | ||
103 | nm_ptr->map[w] &= ~mask; | ||
104 | nm_ptr->count--; | ||
105 | } | ||
106 | } | ||
107 | |||
108 | /** | ||
109 | * nmap_equal - test for equality of node maps | ||
110 | */ | 80 | */ |
111 | 81 | ||
112 | static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b) | 82 | static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b) |
@@ -114,84 +84,11 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_m | |||
114 | return !memcmp(nm_a, nm_b, sizeof(*nm_a)); | 84 | return !memcmp(nm_a, nm_b, sizeof(*nm_a)); |
115 | } | 85 | } |
116 | 86 | ||
117 | /** | 87 | void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b, |
118 | * nmap_diff - find differences between node maps | 88 | struct tipc_node_map *nm_diff); |
119 | * @nm_a: input node map A | ||
120 | * @nm_b: input node map B | ||
121 | * @nm_diff: output node map A-B (i.e. nodes of A that are not in B) | ||
122 | */ | ||
123 | |||
124 | static inline void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b, | ||
125 | struct tipc_node_map *nm_diff) | ||
126 | { | ||
127 | int stop = ARRAY_SIZE(nm_a->map); | ||
128 | int w; | ||
129 | int b; | ||
130 | u32 map; | ||
131 | |||
132 | memset(nm_diff, 0, sizeof(*nm_diff)); | ||
133 | for (w = 0; w < stop; w++) { | ||
134 | map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]); | ||
135 | nm_diff->map[w] = map; | ||
136 | if (map != 0) { | ||
137 | for (b = 0 ; b < WSIZE; b++) { | ||
138 | if (map & (1 << b)) | ||
139 | nm_diff->count++; | ||
140 | } | ||
141 | } | ||
142 | } | ||
143 | } | ||
144 | |||
145 | /** | ||
146 | * port_list_add - add a port to a port list, ensuring no duplicates | ||
147 | */ | ||
148 | |||
149 | static inline void tipc_port_list_add(struct port_list *pl_ptr, u32 port) | ||
150 | { | ||
151 | struct port_list *item = pl_ptr; | ||
152 | int i; | ||
153 | int item_sz = PLSIZE; | ||
154 | int cnt = pl_ptr->count; | ||
155 | |||
156 | for (; ; cnt -= item_sz, item = item->next) { | ||
157 | if (cnt < PLSIZE) | ||
158 | item_sz = cnt; | ||
159 | for (i = 0; i < item_sz; i++) | ||
160 | if (item->ports[i] == port) | ||
161 | return; | ||
162 | if (i < PLSIZE) { | ||
163 | item->ports[i] = port; | ||
164 | pl_ptr->count++; | ||
165 | return; | ||
166 | } | ||
167 | if (!item->next) { | ||
168 | item->next = kmalloc(sizeof(*item), GFP_ATOMIC); | ||
169 | if (!item->next) { | ||
170 | warn("Incomplete multicast delivery, no memory\n"); | ||
171 | return; | ||
172 | } | ||
173 | item->next->next = NULL; | ||
174 | } | ||
175 | } | ||
176 | } | ||
177 | |||
178 | /** | ||
179 | * port_list_free - free dynamically created entries in port_list chain | ||
180 | * | ||
181 | * Note: First item is on stack, so it doesn't need to be released | ||
182 | */ | ||
183 | |||
184 | static inline void tipc_port_list_free(struct port_list *pl_ptr) | ||
185 | { | ||
186 | struct port_list *item; | ||
187 | struct port_list *next; | ||
188 | |||
189 | for (item = pl_ptr->next; item; item = next) { | ||
190 | next = item->next; | ||
191 | kfree(item); | ||
192 | } | ||
193 | } | ||
194 | 89 | ||
90 | void tipc_port_list_add(struct port_list *pl_ptr, u32 port); | ||
91 | void tipc_port_list_free(struct port_list *pl_ptr); | ||
195 | 92 | ||
196 | int tipc_bclink_init(void); | 93 | int tipc_bclink_init(void); |
197 | void tipc_bclink_stop(void); | 94 | void tipc_bclink_stop(void); |
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 78091375ca12..52ae17b2583e 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c | |||
@@ -467,6 +467,18 @@ int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr) | |||
467 | return res; | 467 | return res; |
468 | } | 468 | } |
469 | 469 | ||
470 | /** | ||
471 | * tipc_bearer_congested - determines if bearer is currently congested | ||
472 | */ | ||
473 | |||
474 | int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr) | ||
475 | { | ||
476 | if (unlikely(b_ptr->publ.blocked)) | ||
477 | return 1; | ||
478 | if (likely(list_empty(&b_ptr->cong_links))) | ||
479 | return 0; | ||
480 | return !tipc_bearer_resolve_congestion(b_ptr, l_ptr); | ||
481 | } | ||
470 | 482 | ||
471 | /** | 483 | /** |
472 | * tipc_enable_bearer - enable bearer with the given name | 484 | * tipc_enable_bearer - enable bearer with the given name |
@@ -493,7 +505,7 @@ int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority) | |||
493 | return -EINVAL; | 505 | return -EINVAL; |
494 | } | 506 | } |
495 | if (!tipc_addr_domain_valid(bcast_scope) || | 507 | if (!tipc_addr_domain_valid(bcast_scope) || |
496 | !in_scope(bcast_scope, tipc_own_addr)) { | 508 | !tipc_in_scope(bcast_scope, tipc_own_addr)) { |
497 | warn("Bearer <%s> rejected, illegal broadcast scope\n", name); | 509 | warn("Bearer <%s> rejected, illegal broadcast scope\n", name); |
498 | return -EINVAL; | 510 | return -EINVAL; |
499 | } | 511 | } |
@@ -571,7 +583,7 @@ restart: | |||
571 | spin_lock_init(&b_ptr->publ.lock); | 583 | spin_lock_init(&b_ptr->publ.lock); |
572 | write_unlock_bh(&tipc_net_lock); | 584 | write_unlock_bh(&tipc_net_lock); |
573 | info("Enabled bearer <%s>, discovery domain %s, priority %u\n", | 585 | info("Enabled bearer <%s>, discovery domain %s, priority %u\n", |
574 | name, addr_string_fill(addr_string, bcast_scope), priority); | 586 | name, tipc_addr_string_fill(addr_string, bcast_scope), priority); |
575 | return 0; | 587 | return 0; |
576 | failed: | 588 | failed: |
577 | write_unlock_bh(&tipc_net_lock); | 589 | write_unlock_bh(&tipc_net_lock); |
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index 000228e93f9e..a850b389663e 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h | |||
@@ -125,6 +125,7 @@ void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest); | |||
125 | void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr); | 125 | void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr); |
126 | struct bearer *tipc_bearer_find_interface(const char *if_name); | 126 | struct bearer *tipc_bearer_find_interface(const char *if_name); |
127 | int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr); | 127 | int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr); |
128 | int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr); | ||
128 | int tipc_bearer_init(void); | 129 | int tipc_bearer_init(void); |
129 | void tipc_bearer_stop(void); | 130 | void tipc_bearer_stop(void); |
130 | void tipc_bearer_lock_push(struct bearer *b_ptr); | 131 | void tipc_bearer_lock_push(struct bearer *b_ptr); |
@@ -154,17 +155,4 @@ static inline int tipc_bearer_send(struct bearer *b_ptr, struct sk_buff *buf, | |||
154 | return !b_ptr->media->send_msg(buf, &b_ptr->publ, dest); | 155 | return !b_ptr->media->send_msg(buf, &b_ptr->publ, dest); |
155 | } | 156 | } |
156 | 157 | ||
157 | /** | 158 | #endif /* _TIPC_BEARER_H */ |
158 | * tipc_bearer_congested - determines if bearer is currently congested | ||
159 | */ | ||
160 | |||
161 | static inline int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr) | ||
162 | { | ||
163 | if (unlikely(b_ptr->publ.blocked)) | ||
164 | return 1; | ||
165 | if (likely(list_empty(&b_ptr->cong_links))) | ||
166 | return 0; | ||
167 | return !tipc_bearer_resolve_congestion(b_ptr, l_ptr); | ||
168 | } | ||
169 | |||
170 | #endif | ||
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c index a7eac00cd363..e68f705381bc 100644 --- a/net/tipc/cluster.c +++ b/net/tipc/cluster.c | |||
@@ -238,7 +238,7 @@ static struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest) | |||
238 | if (buf) { | 238 | if (buf) { |
239 | msg = buf_msg(buf); | 239 | msg = buf_msg(buf); |
240 | memset((char *)msg, 0, size); | 240 | memset((char *)msg, 0, size); |
241 | msg_init(msg, ROUTE_DISTRIBUTOR, 0, INT_H_SIZE, dest); | 241 | tipc_msg_init(msg, ROUTE_DISTRIBUTOR, 0, INT_H_SIZE, dest); |
242 | } | 242 | } |
243 | return buf; | 243 | return buf; |
244 | } | 244 | } |
diff --git a/net/tipc/config.c b/net/tipc/config.c index ca3544d030c7..961d1b097146 100644 --- a/net/tipc/config.c +++ b/net/tipc/config.c | |||
@@ -56,9 +56,6 @@ struct subscr_data { | |||
56 | struct manager { | 56 | struct manager { |
57 | u32 user_ref; | 57 | u32 user_ref; |
58 | u32 port_ref; | 58 | u32 port_ref; |
59 | u32 subscr_ref; | ||
60 | u32 link_subscriptions; | ||
61 | struct list_head link_subscribers; | ||
62 | }; | 59 | }; |
63 | 60 | ||
64 | static struct manager mng = { 0}; | 61 | static struct manager mng = { 0}; |
@@ -70,12 +67,6 @@ static int req_tlv_space; /* request message TLV area size */ | |||
70 | static int rep_headroom; /* reply message headroom to use */ | 67 | static int rep_headroom; /* reply message headroom to use */ |
71 | 68 | ||
72 | 69 | ||
73 | void tipc_cfg_link_event(u32 addr, char *name, int up) | ||
74 | { | ||
75 | /* TIPC DOESN'T HANDLE LINK EVENT SUBSCRIPTIONS AT THE MOMENT */ | ||
76 | } | ||
77 | |||
78 | |||
79 | struct sk_buff *tipc_cfg_reply_alloc(int payload_size) | 70 | struct sk_buff *tipc_cfg_reply_alloc(int payload_size) |
80 | { | 71 | { |
81 | struct sk_buff *buf; | 72 | struct sk_buff *buf; |
@@ -130,12 +121,24 @@ struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string) | |||
130 | } | 121 | } |
131 | 122 | ||
132 | 123 | ||
133 | |||
134 | |||
135 | #if 0 | 124 | #if 0 |
136 | 125 | ||
137 | /* Now obsolete code for handling commands not yet implemented the new way */ | 126 | /* Now obsolete code for handling commands not yet implemented the new way */ |
138 | 127 | ||
128 | /* | ||
129 | * Some of this code assumed that the manager structure contains two added | ||
130 | * fields: | ||
131 | * u32 link_subscriptions; | ||
132 | * struct list_head link_subscribers; | ||
133 | * which are currently not present. These fields may need to be re-introduced | ||
134 | * if and when support for link subscriptions is added. | ||
135 | */ | ||
136 | |||
137 | void tipc_cfg_link_event(u32 addr, char *name, int up) | ||
138 | { | ||
139 | /* TIPC DOESN'T HANDLE LINK EVENT SUBSCRIPTIONS AT THE MOMENT */ | ||
140 | } | ||
141 | |||
139 | int tipc_cfg_cmd(const struct tipc_cmd_msg * msg, | 142 | int tipc_cfg_cmd(const struct tipc_cmd_msg * msg, |
140 | char *data, | 143 | char *data, |
141 | u32 sz, | 144 | u32 sz, |
@@ -243,13 +246,48 @@ static void cfg_cmd_event(struct tipc_cmd_msg *msg, | |||
243 | default: | 246 | default: |
244 | rv = tipc_cfg_cmd(msg, data, sz, (u32 *)&msg_sect[1].iov_len, orig); | 247 | rv = tipc_cfg_cmd(msg, data, sz, (u32 *)&msg_sect[1].iov_len, orig); |
245 | } | 248 | } |
246 | exit: | 249 | exit: |
247 | rmsg.result_len = htonl(msg_sect[1].iov_len); | 250 | rmsg.result_len = htonl(msg_sect[1].iov_len); |
248 | rmsg.retval = htonl(rv); | 251 | rmsg.retval = htonl(rv); |
249 | tipc_cfg_respond(msg_sect, 2u, orig); | 252 | tipc_cfg_respond(msg_sect, 2u, orig); |
250 | } | 253 | } |
251 | #endif | 254 | #endif |
252 | 255 | ||
256 | #define MAX_STATS_INFO 2000 | ||
257 | |||
258 | static struct sk_buff *tipc_show_stats(void) | ||
259 | { | ||
260 | struct sk_buff *buf; | ||
261 | struct tlv_desc *rep_tlv; | ||
262 | struct print_buf pb; | ||
263 | int str_len; | ||
264 | u32 value; | ||
265 | |||
266 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) | ||
267 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | ||
268 | |||
269 | value = ntohl(*(u32 *)TLV_DATA(req_tlv_area)); | ||
270 | if (value != 0) | ||
271 | return tipc_cfg_reply_error_string("unsupported argument"); | ||
272 | |||
273 | buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_STATS_INFO)); | ||
274 | if (buf == NULL) | ||
275 | return NULL; | ||
276 | |||
277 | rep_tlv = (struct tlv_desc *)buf->data; | ||
278 | tipc_printbuf_init(&pb, (char *)TLV_DATA(rep_tlv), MAX_STATS_INFO); | ||
279 | |||
280 | tipc_printf(&pb, "TIPC version " TIPC_MOD_VER "\n"); | ||
281 | |||
282 | /* Use additional tipc_printf()'s to return more info ... */ | ||
283 | |||
284 | str_len = tipc_printbuf_validate(&pb); | ||
285 | skb_put(buf, TLV_SPACE(str_len)); | ||
286 | TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); | ||
287 | |||
288 | return buf; | ||
289 | } | ||
290 | |||
253 | static struct sk_buff *cfg_enable_bearer(void) | 291 | static struct sk_buff *cfg_enable_bearer(void) |
254 | { | 292 | { |
255 | struct tipc_bearer_config *args; | 293 | struct tipc_bearer_config *args; |
@@ -533,6 +571,9 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area | |||
533 | case TIPC_CMD_DUMP_LOG: | 571 | case TIPC_CMD_DUMP_LOG: |
534 | rep_tlv_buf = tipc_log_dump(); | 572 | rep_tlv_buf = tipc_log_dump(); |
535 | break; | 573 | break; |
574 | case TIPC_CMD_SHOW_STATS: | ||
575 | rep_tlv_buf = tipc_show_stats(); | ||
576 | break; | ||
536 | case TIPC_CMD_SET_LINK_TOL: | 577 | case TIPC_CMD_SET_LINK_TOL: |
537 | case TIPC_CMD_SET_LINK_PRI: | 578 | case TIPC_CMD_SET_LINK_PRI: |
538 | case TIPC_CMD_SET_LINK_WINDOW: | 579 | case TIPC_CMD_SET_LINK_WINDOW: |
@@ -667,9 +708,6 @@ int tipc_cfg_init(void) | |||
667 | struct tipc_name_seq seq; | 708 | struct tipc_name_seq seq; |
668 | int res; | 709 | int res; |
669 | 710 | ||
670 | memset(&mng, 0, sizeof(mng)); | ||
671 | INIT_LIST_HEAD(&mng.link_subscribers); | ||
672 | |||
673 | res = tipc_attach(&mng.user_ref, NULL, NULL); | 711 | res = tipc_attach(&mng.user_ref, NULL, NULL); |
674 | if (res) | 712 | if (res) |
675 | goto failed; | 713 | goto failed; |
diff --git a/net/tipc/core.c b/net/tipc/core.c index 4e84c8431f32..696468117985 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c | |||
@@ -49,8 +49,6 @@ | |||
49 | #include "config.h" | 49 | #include "config.h" |
50 | 50 | ||
51 | 51 | ||
52 | #define TIPC_MOD_VER "2.0.0" | ||
53 | |||
54 | #ifndef CONFIG_TIPC_ZONES | 52 | #ifndef CONFIG_TIPC_ZONES |
55 | #define CONFIG_TIPC_ZONES 3 | 53 | #define CONFIG_TIPC_ZONES 3 |
56 | #endif | 54 | #endif |
@@ -104,6 +102,30 @@ int tipc_get_mode(void) | |||
104 | } | 102 | } |
105 | 103 | ||
106 | /** | 104 | /** |
105 | * buf_acquire - creates a TIPC message buffer | ||
106 | * @size: message size (including TIPC header) | ||
107 | * | ||
108 | * Returns a new buffer with data pointers set to the specified size. | ||
109 | * | ||
110 | * NOTE: Headroom is reserved to allow prepending of a data link header. | ||
111 | * There may also be unrequested tailroom present at the buffer's end. | ||
112 | */ | ||
113 | |||
114 | struct sk_buff *buf_acquire(u32 size) | ||
115 | { | ||
116 | struct sk_buff *skb; | ||
117 | unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u; | ||
118 | |||
119 | skb = alloc_skb_fclone(buf_size, GFP_ATOMIC); | ||
120 | if (skb) { | ||
121 | skb_reserve(skb, BUF_HEADROOM); | ||
122 | skb_put(skb, size); | ||
123 | skb->next = NULL; | ||
124 | } | ||
125 | return skb; | ||
126 | } | ||
127 | |||
128 | /** | ||
107 | * tipc_core_stop_net - shut down TIPC networking sub-systems | 129 | * tipc_core_stop_net - shut down TIPC networking sub-systems |
108 | */ | 130 | */ |
109 | 131 | ||
diff --git a/net/tipc/core.h b/net/tipc/core.h index c58a1d16563a..188799017abd 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h | |||
@@ -59,6 +59,9 @@ | |||
59 | #include <linux/slab.h> | 59 | #include <linux/slab.h> |
60 | #include <linux/vmalloc.h> | 60 | #include <linux/vmalloc.h> |
61 | 61 | ||
62 | |||
63 | #define TIPC_MOD_VER "2.0.0" | ||
64 | |||
62 | /* | 65 | /* |
63 | * TIPC sanity test macros | 66 | * TIPC sanity test macros |
64 | */ | 67 | */ |
@@ -325,29 +328,7 @@ static inline struct tipc_msg *buf_msg(struct sk_buff *skb) | |||
325 | return (struct tipc_msg *)skb->data; | 328 | return (struct tipc_msg *)skb->data; |
326 | } | 329 | } |
327 | 330 | ||
328 | /** | 331 | extern struct sk_buff *buf_acquire(u32 size); |
329 | * buf_acquire - creates a TIPC message buffer | ||
330 | * @size: message size (including TIPC header) | ||
331 | * | ||
332 | * Returns a new buffer with data pointers set to the specified size. | ||
333 | * | ||
334 | * NOTE: Headroom is reserved to allow prepending of a data link header. | ||
335 | * There may also be unrequested tailroom present at the buffer's end. | ||
336 | */ | ||
337 | |||
338 | static inline struct sk_buff *buf_acquire(u32 size) | ||
339 | { | ||
340 | struct sk_buff *skb; | ||
341 | unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u; | ||
342 | |||
343 | skb = alloc_skb_fclone(buf_size, GFP_ATOMIC); | ||
344 | if (skb) { | ||
345 | skb_reserve(skb, BUF_HEADROOM); | ||
346 | skb_put(skb, size); | ||
347 | skb->next = NULL; | ||
348 | } | ||
349 | return skb; | ||
350 | } | ||
351 | 332 | ||
352 | /** | 333 | /** |
353 | * buf_discard - frees a TIPC message buffer | 334 | * buf_discard - frees a TIPC message buffer |
diff --git a/net/tipc/discover.c b/net/tipc/discover.c index 74b7d1e28aec..fc1fcf5e6b53 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c | |||
@@ -120,7 +120,7 @@ static struct sk_buff *tipc_disc_init_msg(u32 type, | |||
120 | 120 | ||
121 | if (buf) { | 121 | if (buf) { |
122 | msg = buf_msg(buf); | 122 | msg = buf_msg(buf); |
123 | msg_init(msg, LINK_CONFIG, type, DSC_H_SIZE, dest_domain); | 123 | tipc_msg_init(msg, LINK_CONFIG, type, DSC_H_SIZE, dest_domain); |
124 | msg_set_non_seq(msg, 1); | 124 | msg_set_non_seq(msg, 1); |
125 | msg_set_req_links(msg, req_links); | 125 | msg_set_req_links(msg, req_links); |
126 | msg_set_dest_domain(msg, dest_domain); | 126 | msg_set_dest_domain(msg, dest_domain); |
@@ -144,7 +144,7 @@ static void disc_dupl_alert(struct bearer *b_ptr, u32 node_addr, | |||
144 | char media_addr_str[64]; | 144 | char media_addr_str[64]; |
145 | struct print_buf pb; | 145 | struct print_buf pb; |
146 | 146 | ||
147 | addr_string_fill(node_addr_str, node_addr); | 147 | tipc_addr_string_fill(node_addr_str, node_addr); |
148 | tipc_printbuf_init(&pb, media_addr_str, sizeof(media_addr_str)); | 148 | tipc_printbuf_init(&pb, media_addr_str, sizeof(media_addr_str)); |
149 | tipc_media_addr_printf(&pb, media_addr); | 149 | tipc_media_addr_printf(&pb, media_addr); |
150 | tipc_printbuf_validate(&pb); | 150 | tipc_printbuf_validate(&pb); |
@@ -183,7 +183,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr) | |||
183 | disc_dupl_alert(b_ptr, tipc_own_addr, &media_addr); | 183 | disc_dupl_alert(b_ptr, tipc_own_addr, &media_addr); |
184 | return; | 184 | return; |
185 | } | 185 | } |
186 | if (!in_scope(dest, tipc_own_addr)) | 186 | if (!tipc_in_scope(dest, tipc_own_addr)) |
187 | return; | 187 | return; |
188 | if (is_slave(tipc_own_addr) && is_slave(orig)) | 188 | if (is_slave(tipc_own_addr) && is_slave(orig)) |
189 | return; | 189 | return; |
@@ -224,7 +224,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr) | |||
224 | memcpy(addr, &media_addr, sizeof(*addr)); | 224 | memcpy(addr, &media_addr, sizeof(*addr)); |
225 | tipc_link_reset(link); | 225 | tipc_link_reset(link); |
226 | } | 226 | } |
227 | link_fully_up = (link->state == WORKING_WORKING); | 227 | link_fully_up = link_working_working(link); |
228 | spin_unlock_bh(&n_ptr->lock); | 228 | spin_unlock_bh(&n_ptr->lock); |
229 | if ((type == DSC_RESP_MSG) || link_fully_up) | 229 | if ((type == DSC_RESP_MSG) || link_fully_up) |
230 | return; | 230 | return; |
diff --git a/net/tipc/link.c b/net/tipc/link.c index c76e82e5f982..a3616b99529b 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -202,41 +202,6 @@ static unsigned int align(unsigned int i) | |||
202 | return (i + 3) & ~3u; | 202 | return (i + 3) & ~3u; |
203 | } | 203 | } |
204 | 204 | ||
205 | static int link_working_working(struct link *l_ptr) | ||
206 | { | ||
207 | return (l_ptr->state == WORKING_WORKING); | ||
208 | } | ||
209 | |||
210 | static int link_working_unknown(struct link *l_ptr) | ||
211 | { | ||
212 | return (l_ptr->state == WORKING_UNKNOWN); | ||
213 | } | ||
214 | |||
215 | static int link_reset_unknown(struct link *l_ptr) | ||
216 | { | ||
217 | return (l_ptr->state == RESET_UNKNOWN); | ||
218 | } | ||
219 | |||
220 | static int link_reset_reset(struct link *l_ptr) | ||
221 | { | ||
222 | return (l_ptr->state == RESET_RESET); | ||
223 | } | ||
224 | |||
225 | static int link_blocked(struct link *l_ptr) | ||
226 | { | ||
227 | return (l_ptr->exp_msg_count || l_ptr->blocked); | ||
228 | } | ||
229 | |||
230 | static int link_congested(struct link *l_ptr) | ||
231 | { | ||
232 | return (l_ptr->out_queue_size >= l_ptr->queue_limit[0]); | ||
233 | } | ||
234 | |||
235 | static u32 link_max_pkt(struct link *l_ptr) | ||
236 | { | ||
237 | return l_ptr->max_pkt; | ||
238 | } | ||
239 | |||
240 | static void link_init_max_pkt(struct link *l_ptr) | 205 | static void link_init_max_pkt(struct link *l_ptr) |
241 | { | 206 | { |
242 | u32 max_pkt; | 207 | u32 max_pkt; |
@@ -468,7 +433,7 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer, | |||
468 | 433 | ||
469 | l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg; | 434 | l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg; |
470 | msg = l_ptr->pmsg; | 435 | msg = l_ptr->pmsg; |
471 | msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr); | 436 | tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr); |
472 | msg_set_size(msg, sizeof(l_ptr->proto_msg)); | 437 | msg_set_size(msg, sizeof(l_ptr->proto_msg)); |
473 | msg_set_session(msg, (tipc_random & 0xffff)); | 438 | msg_set_session(msg, (tipc_random & 0xffff)); |
474 | msg_set_bearer_id(msg, b_ptr->identity); | 439 | msg_set_bearer_id(msg, b_ptr->identity); |
@@ -561,9 +526,8 @@ static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz) | |||
561 | goto exit; | 526 | goto exit; |
562 | if (!list_empty(&p_ptr->wait_list)) | 527 | if (!list_empty(&p_ptr->wait_list)) |
563 | goto exit; | 528 | goto exit; |
564 | p_ptr->congested_link = l_ptr; | ||
565 | p_ptr->publ.congested = 1; | 529 | p_ptr->publ.congested = 1; |
566 | p_ptr->waiting_pkts = 1 + ((sz - 1) / link_max_pkt(l_ptr)); | 530 | p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt); |
567 | list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports); | 531 | list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports); |
568 | l_ptr->stats.link_congs++; | 532 | l_ptr->stats.link_congs++; |
569 | exit: | 533 | exit: |
@@ -592,7 +556,6 @@ void tipc_link_wakeup_ports(struct link *l_ptr, int all) | |||
592 | if (win <= 0) | 556 | if (win <= 0) |
593 | break; | 557 | break; |
594 | list_del_init(&p_ptr->wait_list); | 558 | list_del_init(&p_ptr->wait_list); |
595 | p_ptr->congested_link = NULL; | ||
596 | spin_lock_bh(p_ptr->publ.lock); | 559 | spin_lock_bh(p_ptr->publ.lock); |
597 | p_ptr->publ.congested = 0; | 560 | p_ptr->publ.congested = 0; |
598 | p_ptr->wakeup(&p_ptr->publ); | 561 | p_ptr->wakeup(&p_ptr->publ); |
@@ -1017,7 +980,7 @@ static int link_bundle_buf(struct link *l_ptr, | |||
1017 | return 0; | 980 | return 0; |
1018 | if (skb_tailroom(bundler) < (pad + size)) | 981 | if (skb_tailroom(bundler) < (pad + size)) |
1019 | return 0; | 982 | return 0; |
1020 | if (link_max_pkt(l_ptr) < (to_pos + size)) | 983 | if (l_ptr->max_pkt < (to_pos + size)) |
1021 | return 0; | 984 | return 0; |
1022 | 985 | ||
1023 | skb_put(bundler, pad + size); | 986 | skb_put(bundler, pad + size); |
@@ -1062,9 +1025,9 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf) | |||
1062 | u32 size = msg_size(msg); | 1025 | u32 size = msg_size(msg); |
1063 | u32 dsz = msg_data_sz(msg); | 1026 | u32 dsz = msg_data_sz(msg); |
1064 | u32 queue_size = l_ptr->out_queue_size; | 1027 | u32 queue_size = l_ptr->out_queue_size; |
1065 | u32 imp = msg_tot_importance(msg); | 1028 | u32 imp = tipc_msg_tot_importance(msg); |
1066 | u32 queue_limit = l_ptr->queue_limit[imp]; | 1029 | u32 queue_limit = l_ptr->queue_limit[imp]; |
1067 | u32 max_packet = link_max_pkt(l_ptr); | 1030 | u32 max_packet = l_ptr->max_pkt; |
1068 | 1031 | ||
1069 | msg_set_prevnode(msg, tipc_own_addr); /* If routed message */ | 1032 | msg_set_prevnode(msg, tipc_own_addr); /* If routed message */ |
1070 | 1033 | ||
@@ -1127,7 +1090,7 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf) | |||
1127 | struct tipc_msg bundler_hdr; | 1090 | struct tipc_msg bundler_hdr; |
1128 | 1091 | ||
1129 | if (bundler) { | 1092 | if (bundler) { |
1130 | msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG, | 1093 | tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG, |
1131 | INT_H_SIZE, l_ptr->addr); | 1094 | INT_H_SIZE, l_ptr->addr); |
1132 | skb_copy_to_linear_data(bundler, &bundler_hdr, | 1095 | skb_copy_to_linear_data(bundler, &bundler_hdr, |
1133 | INT_H_SIZE); | 1096 | INT_H_SIZE); |
@@ -1195,7 +1158,7 @@ static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf, | |||
1195 | int res = msg_data_sz(msg); | 1158 | int res = msg_data_sz(msg); |
1196 | 1159 | ||
1197 | if (likely(!link_congested(l_ptr))) { | 1160 | if (likely(!link_congested(l_ptr))) { |
1198 | if (likely(msg_size(msg) <= link_max_pkt(l_ptr))) { | 1161 | if (likely(msg_size(msg) <= l_ptr->max_pkt)) { |
1199 | if (likely(list_empty(&l_ptr->b_ptr->cong_links))) { | 1162 | if (likely(list_empty(&l_ptr->b_ptr->cong_links))) { |
1200 | link_add_to_outqueue(l_ptr, buf, msg); | 1163 | link_add_to_outqueue(l_ptr, buf, msg); |
1201 | if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, | 1164 | if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, |
@@ -1212,7 +1175,7 @@ static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf, | |||
1212 | } | 1175 | } |
1213 | } | 1176 | } |
1214 | else | 1177 | else |
1215 | *used_max_pkt = link_max_pkt(l_ptr); | 1178 | *used_max_pkt = l_ptr->max_pkt; |
1216 | } | 1179 | } |
1217 | return tipc_link_send_buf(l_ptr, buf); /* All other cases */ | 1180 | return tipc_link_send_buf(l_ptr, buf); /* All other cases */ |
1218 | } | 1181 | } |
@@ -1280,7 +1243,7 @@ again: | |||
1280 | * (Must not hold any locks while building message.) | 1243 | * (Must not hold any locks while building message.) |
1281 | */ | 1244 | */ |
1282 | 1245 | ||
1283 | res = msg_build(hdr, msg_sect, num_sect, sender->publ.max_pkt, | 1246 | res = tipc_msg_build(hdr, msg_sect, num_sect, sender->publ.max_pkt, |
1284 | !sender->user_port, &buf); | 1247 | !sender->user_port, &buf); |
1285 | 1248 | ||
1286 | read_lock_bh(&tipc_net_lock); | 1249 | read_lock_bh(&tipc_net_lock); |
@@ -1319,7 +1282,7 @@ exit: | |||
1319 | * then re-try fast path or fragment the message | 1282 | * then re-try fast path or fragment the message |
1320 | */ | 1283 | */ |
1321 | 1284 | ||
1322 | sender->publ.max_pkt = link_max_pkt(l_ptr); | 1285 | sender->publ.max_pkt = l_ptr->max_pkt; |
1323 | tipc_node_unlock(node); | 1286 | tipc_node_unlock(node); |
1324 | read_unlock_bh(&tipc_net_lock); | 1287 | read_unlock_bh(&tipc_net_lock); |
1325 | 1288 | ||
@@ -1391,7 +1354,7 @@ again: | |||
1391 | /* Prepare reusable fragment header: */ | 1354 | /* Prepare reusable fragment header: */ |
1392 | 1355 | ||
1393 | msg_dbg(hdr, ">FRAGMENTING>"); | 1356 | msg_dbg(hdr, ">FRAGMENTING>"); |
1394 | msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, | 1357 | tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, |
1395 | INT_H_SIZE, msg_destnode(hdr)); | 1358 | INT_H_SIZE, msg_destnode(hdr)); |
1396 | msg_set_link_selector(&fragm_hdr, sender->publ.ref); | 1359 | msg_set_link_selector(&fragm_hdr, sender->publ.ref); |
1397 | msg_set_size(&fragm_hdr, max_pkt); | 1360 | msg_set_size(&fragm_hdr, max_pkt); |
@@ -1482,8 +1445,8 @@ error: | |||
1482 | tipc_node_unlock(node); | 1445 | tipc_node_unlock(node); |
1483 | goto reject; | 1446 | goto reject; |
1484 | } | 1447 | } |
1485 | if (link_max_pkt(l_ptr) < max_pkt) { | 1448 | if (l_ptr->max_pkt < max_pkt) { |
1486 | sender->publ.max_pkt = link_max_pkt(l_ptr); | 1449 | sender->publ.max_pkt = l_ptr->max_pkt; |
1487 | tipc_node_unlock(node); | 1450 | tipc_node_unlock(node); |
1488 | for (; buf_chain; buf_chain = buf) { | 1451 | for (; buf_chain; buf_chain = buf) { |
1489 | buf = buf_chain->next; | 1452 | buf = buf_chain->next; |
@@ -1650,7 +1613,7 @@ static void link_reset_all(unsigned long addr) | |||
1650 | tipc_node_lock(n_ptr); | 1613 | tipc_node_lock(n_ptr); |
1651 | 1614 | ||
1652 | warn("Resetting all links to %s\n", | 1615 | warn("Resetting all links to %s\n", |
1653 | addr_string_fill(addr_string, n_ptr->addr)); | 1616 | tipc_addr_string_fill(addr_string, n_ptr->addr)); |
1654 | 1617 | ||
1655 | for (i = 0; i < MAX_BEARERS; i++) { | 1618 | for (i = 0; i < MAX_BEARERS; i++) { |
1656 | if (n_ptr->links[i]) { | 1619 | if (n_ptr->links[i]) { |
@@ -1692,7 +1655,7 @@ static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf) | |||
1692 | n_ptr = l_ptr->owner->next; | 1655 | n_ptr = l_ptr->owner->next; |
1693 | tipc_node_lock(n_ptr); | 1656 | tipc_node_lock(n_ptr); |
1694 | 1657 | ||
1695 | addr_string_fill(addr_string, n_ptr->addr); | 1658 | tipc_addr_string_fill(addr_string, n_ptr->addr); |
1696 | tipc_printf(TIPC_OUTPUT, "Multicast link info for %s\n", addr_string); | 1659 | tipc_printf(TIPC_OUTPUT, "Multicast link info for %s\n", addr_string); |
1697 | tipc_printf(TIPC_OUTPUT, "Supported: %d, ", n_ptr->bclink.supported); | 1660 | tipc_printf(TIPC_OUTPUT, "Supported: %d, ", n_ptr->bclink.supported); |
1698 | tipc_printf(TIPC_OUTPUT, "Acked: %u\n", n_ptr->bclink.acked); | 1661 | tipc_printf(TIPC_OUTPUT, "Acked: %u\n", n_ptr->bclink.acked); |
@@ -2435,7 +2398,7 @@ void tipc_link_changeover(struct link *l_ptr) | |||
2435 | return; | 2398 | return; |
2436 | } | 2399 | } |
2437 | 2400 | ||
2438 | msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, | 2401 | tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, |
2439 | ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr); | 2402 | ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr); |
2440 | msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); | 2403 | msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); |
2441 | msg_set_msgcnt(&tunnel_hdr, msgcount); | 2404 | msg_set_msgcnt(&tunnel_hdr, msgcount); |
@@ -2490,7 +2453,7 @@ void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel) | |||
2490 | struct sk_buff *iter; | 2453 | struct sk_buff *iter; |
2491 | struct tipc_msg tunnel_hdr; | 2454 | struct tipc_msg tunnel_hdr; |
2492 | 2455 | ||
2493 | msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, | 2456 | tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, |
2494 | DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr); | 2457 | DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr); |
2495 | msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size); | 2458 | msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size); |
2496 | msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); | 2459 | msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); |
@@ -2681,7 +2644,7 @@ int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf) | |||
2681 | u32 dsz = msg_data_sz(inmsg); | 2644 | u32 dsz = msg_data_sz(inmsg); |
2682 | unchar *crs = buf->data; | 2645 | unchar *crs = buf->data; |
2683 | u32 rest = insize; | 2646 | u32 rest = insize; |
2684 | u32 pack_sz = link_max_pkt(l_ptr); | 2647 | u32 pack_sz = l_ptr->max_pkt; |
2685 | u32 fragm_sz = pack_sz - INT_H_SIZE; | 2648 | u32 fragm_sz = pack_sz - INT_H_SIZE; |
2686 | u32 fragm_no = 1; | 2649 | u32 fragm_no = 1; |
2687 | u32 destaddr; | 2650 | u32 destaddr; |
@@ -2696,7 +2659,7 @@ int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf) | |||
2696 | 2659 | ||
2697 | /* Prepare reusable fragment header: */ | 2660 | /* Prepare reusable fragment header: */ |
2698 | 2661 | ||
2699 | msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, | 2662 | tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, |
2700 | INT_H_SIZE, destaddr); | 2663 | INT_H_SIZE, destaddr); |
2701 | msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg)); | 2664 | msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg)); |
2702 | msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++)); | 2665 | msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++)); |
@@ -3127,7 +3090,7 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size) | |||
3127 | tipc_printf(&pb, "Link <%s>\n" | 3090 | tipc_printf(&pb, "Link <%s>\n" |
3128 | " %s MTU:%u Priority:%u Tolerance:%u ms" | 3091 | " %s MTU:%u Priority:%u Tolerance:%u ms" |
3129 | " Window:%u packets\n", | 3092 | " Window:%u packets\n", |
3130 | l_ptr->name, status, link_max_pkt(l_ptr), | 3093 | l_ptr->name, status, l_ptr->max_pkt, |
3131 | l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]); | 3094 | l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]); |
3132 | tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n", | 3095 | tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n", |
3133 | l_ptr->next_in_no - l_ptr->stats.recv_info, | 3096 | l_ptr->next_in_no - l_ptr->stats.recv_info, |
@@ -3272,7 +3235,7 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector) | |||
3272 | tipc_node_lock(n_ptr); | 3235 | tipc_node_lock(n_ptr); |
3273 | l_ptr = n_ptr->active_links[selector & 1]; | 3236 | l_ptr = n_ptr->active_links[selector & 1]; |
3274 | if (l_ptr) | 3237 | if (l_ptr) |
3275 | res = link_max_pkt(l_ptr); | 3238 | res = l_ptr->max_pkt; |
3276 | tipc_node_unlock(n_ptr); | 3239 | tipc_node_unlock(n_ptr); |
3277 | } | 3240 | } |
3278 | read_unlock_bh(&tipc_net_lock); | 3241 | read_unlock_bh(&tipc_net_lock); |
@@ -3330,9 +3293,7 @@ static void link_print(struct link *l_ptr, struct print_buf *buf, | |||
3330 | if (l_ptr->next_out) | 3293 | if (l_ptr->next_out) |
3331 | tipc_printf(buf, "%u..", | 3294 | tipc_printf(buf, "%u..", |
3332 | msg_seqno(buf_msg(l_ptr->next_out))); | 3295 | msg_seqno(buf_msg(l_ptr->next_out))); |
3333 | tipc_printf(buf, "%u]", | 3296 | tipc_printf(buf, "%u]", msg_seqno(buf_msg(l_ptr->last_out))); |
3334 | msg_seqno(buf_msg | ||
3335 | (l_ptr->last_out)), l_ptr->out_queue_size); | ||
3336 | if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) - | 3297 | if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) - |
3337 | msg_seqno(buf_msg(l_ptr->first_out))) | 3298 | msg_seqno(buf_msg(l_ptr->first_out))) |
3338 | != (l_ptr->out_queue_size - 1)) || | 3299 | != (l_ptr->out_queue_size - 1)) || |
diff --git a/net/tipc/link.h b/net/tipc/link.h index 6a51e38ad25c..2e5385c47d30 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h | |||
@@ -292,4 +292,39 @@ static inline u32 lesser(u32 left, u32 right) | |||
292 | return less_eq(left, right) ? left : right; | 292 | return less_eq(left, right) ? left : right; |
293 | } | 293 | } |
294 | 294 | ||
295 | |||
296 | /* | ||
297 | * Link status checking routines | ||
298 | */ | ||
299 | |||
300 | static inline int link_working_working(struct link *l_ptr) | ||
301 | { | ||
302 | return (l_ptr->state == WORKING_WORKING); | ||
303 | } | ||
304 | |||
305 | static inline int link_working_unknown(struct link *l_ptr) | ||
306 | { | ||
307 | return (l_ptr->state == WORKING_UNKNOWN); | ||
308 | } | ||
309 | |||
310 | static inline int link_reset_unknown(struct link *l_ptr) | ||
311 | { | ||
312 | return (l_ptr->state == RESET_UNKNOWN); | ||
313 | } | ||
314 | |||
315 | static inline int link_reset_reset(struct link *l_ptr) | ||
316 | { | ||
317 | return (l_ptr->state == RESET_RESET); | ||
318 | } | ||
319 | |||
320 | static inline int link_blocked(struct link *l_ptr) | ||
321 | { | ||
322 | return (l_ptr->exp_msg_count || l_ptr->blocked); | ||
323 | } | ||
324 | |||
325 | static inline int link_congested(struct link *l_ptr) | ||
326 | { | ||
327 | return (l_ptr->out_queue_size >= l_ptr->queue_limit[0]); | ||
328 | } | ||
329 | |||
295 | #endif | 330 | #endif |
diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 73dcd00d674e..381063817b41 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c | |||
@@ -40,6 +40,100 @@ | |||
40 | #include "msg.h" | 40 | #include "msg.h" |
41 | #include "bearer.h" | 41 | #include "bearer.h" |
42 | 42 | ||
43 | u32 tipc_msg_tot_importance(struct tipc_msg *m) | ||
44 | { | ||
45 | if (likely(msg_isdata(m))) { | ||
46 | if (likely(msg_orignode(m) == tipc_own_addr)) | ||
47 | return msg_importance(m); | ||
48 | return msg_importance(m) + 4; | ||
49 | } | ||
50 | if ((msg_user(m) == MSG_FRAGMENTER) && | ||
51 | (msg_type(m) == FIRST_FRAGMENT)) | ||
52 | return msg_importance(msg_get_wrapped(m)); | ||
53 | return msg_importance(m); | ||
54 | } | ||
55 | |||
56 | |||
57 | void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, | ||
58 | u32 hsize, u32 destnode) | ||
59 | { | ||
60 | memset(m, 0, hsize); | ||
61 | msg_set_version(m); | ||
62 | msg_set_user(m, user); | ||
63 | msg_set_hdr_sz(m, hsize); | ||
64 | msg_set_size(m, hsize); | ||
65 | msg_set_prevnode(m, tipc_own_addr); | ||
66 | msg_set_type(m, type); | ||
67 | if (!msg_short(m)) { | ||
68 | msg_set_orignode(m, tipc_own_addr); | ||
69 | msg_set_destnode(m, destnode); | ||
70 | } | ||
71 | } | ||
72 | |||
73 | /** | ||
74 | * tipc_msg_calc_data_size - determine total data size for message | ||
75 | */ | ||
76 | |||
77 | int tipc_msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect) | ||
78 | { | ||
79 | int dsz = 0; | ||
80 | int i; | ||
81 | |||
82 | for (i = 0; i < num_sect; i++) | ||
83 | dsz += msg_sect[i].iov_len; | ||
84 | return dsz; | ||
85 | } | ||
86 | |||
87 | /** | ||
88 | * tipc_msg_build - create message using specified header and data | ||
89 | * | ||
90 | * Note: Caller must not hold any locks in case copy_from_user() is interrupted! | ||
91 | * | ||
92 | * Returns message data size or errno | ||
93 | */ | ||
94 | |||
95 | int tipc_msg_build(struct tipc_msg *hdr, | ||
96 | struct iovec const *msg_sect, u32 num_sect, | ||
97 | int max_size, int usrmem, struct sk_buff** buf) | ||
98 | { | ||
99 | int dsz, sz, hsz, pos, res, cnt; | ||
100 | |||
101 | dsz = tipc_msg_calc_data_size(msg_sect, num_sect); | ||
102 | if (unlikely(dsz > TIPC_MAX_USER_MSG_SIZE)) { | ||
103 | *buf = NULL; | ||
104 | return -EINVAL; | ||
105 | } | ||
106 | |||
107 | pos = hsz = msg_hdr_sz(hdr); | ||
108 | sz = hsz + dsz; | ||
109 | msg_set_size(hdr, sz); | ||
110 | if (unlikely(sz > max_size)) { | ||
111 | *buf = NULL; | ||
112 | return dsz; | ||
113 | } | ||
114 | |||
115 | *buf = buf_acquire(sz); | ||
116 | if (!(*buf)) | ||
117 | return -ENOMEM; | ||
118 | skb_copy_to_linear_data(*buf, hdr, hsz); | ||
119 | for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) { | ||
120 | if (likely(usrmem)) | ||
121 | res = !copy_from_user((*buf)->data + pos, | ||
122 | msg_sect[cnt].iov_base, | ||
123 | msg_sect[cnt].iov_len); | ||
124 | else | ||
125 | skb_copy_to_linear_data_offset(*buf, pos, | ||
126 | msg_sect[cnt].iov_base, | ||
127 | msg_sect[cnt].iov_len); | ||
128 | pos += msg_sect[cnt].iov_len; | ||
129 | } | ||
130 | if (likely(res)) | ||
131 | return dsz; | ||
132 | |||
133 | buf_discard(*buf); | ||
134 | *buf = NULL; | ||
135 | return -EFAULT; | ||
136 | } | ||
43 | 137 | ||
44 | #ifdef CONFIG_TIPC_DEBUG | 138 | #ifdef CONFIG_TIPC_DEBUG |
45 | 139 | ||
diff --git a/net/tipc/msg.h b/net/tipc/msg.h index 7ee6ae238147..995d2da35b01 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h | |||
@@ -708,100 +708,13 @@ static inline void msg_set_dataoctet(struct tipc_msg *m, u32 pos) | |||
708 | #define DSC_REQ_MSG 0 | 708 | #define DSC_REQ_MSG 0 |
709 | #define DSC_RESP_MSG 1 | 709 | #define DSC_RESP_MSG 1 |
710 | 710 | ||
711 | static inline u32 msg_tot_importance(struct tipc_msg *m) | 711 | u32 tipc_msg_tot_importance(struct tipc_msg *m); |
712 | { | 712 | void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, |
713 | if (likely(msg_isdata(m))) { | 713 | u32 hsize, u32 destnode); |
714 | if (likely(msg_orignode(m) == tipc_own_addr)) | 714 | int tipc_msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect); |
715 | return msg_importance(m); | 715 | int tipc_msg_build(struct tipc_msg *hdr, |
716 | return msg_importance(m) + 4; | ||
717 | } | ||
718 | if ((msg_user(m) == MSG_FRAGMENTER) && | ||
719 | (msg_type(m) == FIRST_FRAGMENT)) | ||
720 | return msg_importance(msg_get_wrapped(m)); | ||
721 | return msg_importance(m); | ||
722 | } | ||
723 | |||
724 | |||
725 | static inline void msg_init(struct tipc_msg *m, u32 user, u32 type, | ||
726 | u32 hsize, u32 destnode) | ||
727 | { | ||
728 | memset(m, 0, hsize); | ||
729 | msg_set_version(m); | ||
730 | msg_set_user(m, user); | ||
731 | msg_set_hdr_sz(m, hsize); | ||
732 | msg_set_size(m, hsize); | ||
733 | msg_set_prevnode(m, tipc_own_addr); | ||
734 | msg_set_type(m, type); | ||
735 | if (!msg_short(m)) { | ||
736 | msg_set_orignode(m, tipc_own_addr); | ||
737 | msg_set_destnode(m, destnode); | ||
738 | } | ||
739 | } | ||
740 | |||
741 | /** | ||
742 | * msg_calc_data_size - determine total data size for message | ||
743 | */ | ||
744 | |||
745 | static inline int msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect) | ||
746 | { | ||
747 | int dsz = 0; | ||
748 | int i; | ||
749 | |||
750 | for (i = 0; i < num_sect; i++) | ||
751 | dsz += msg_sect[i].iov_len; | ||
752 | return dsz; | ||
753 | } | ||
754 | |||
755 | /** | ||
756 | * msg_build - create message using specified header and data | ||
757 | * | ||
758 | * Note: Caller must not hold any locks in case copy_from_user() is interrupted! | ||
759 | * | ||
760 | * Returns message data size or errno | ||
761 | */ | ||
762 | |||
763 | static inline int msg_build(struct tipc_msg *hdr, | ||
764 | struct iovec const *msg_sect, u32 num_sect, | 716 | struct iovec const *msg_sect, u32 num_sect, |
765 | int max_size, int usrmem, struct sk_buff** buf) | 717 | int max_size, int usrmem, struct sk_buff** buf); |
766 | { | ||
767 | int dsz, sz, hsz, pos, res, cnt; | ||
768 | |||
769 | dsz = msg_calc_data_size(msg_sect, num_sect); | ||
770 | if (unlikely(dsz > TIPC_MAX_USER_MSG_SIZE)) { | ||
771 | *buf = NULL; | ||
772 | return -EINVAL; | ||
773 | } | ||
774 | |||
775 | pos = hsz = msg_hdr_sz(hdr); | ||
776 | sz = hsz + dsz; | ||
777 | msg_set_size(hdr, sz); | ||
778 | if (unlikely(sz > max_size)) { | ||
779 | *buf = NULL; | ||
780 | return dsz; | ||
781 | } | ||
782 | |||
783 | *buf = buf_acquire(sz); | ||
784 | if (!(*buf)) | ||
785 | return -ENOMEM; | ||
786 | skb_copy_to_linear_data(*buf, hdr, hsz); | ||
787 | for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) { | ||
788 | if (likely(usrmem)) | ||
789 | res = !copy_from_user((*buf)->data + pos, | ||
790 | msg_sect[cnt].iov_base, | ||
791 | msg_sect[cnt].iov_len); | ||
792 | else | ||
793 | skb_copy_to_linear_data_offset(*buf, pos, | ||
794 | msg_sect[cnt].iov_base, | ||
795 | msg_sect[cnt].iov_len); | ||
796 | pos += msg_sect[cnt].iov_len; | ||
797 | } | ||
798 | if (likely(res)) | ||
799 | return dsz; | ||
800 | |||
801 | buf_discard(*buf); | ||
802 | *buf = NULL; | ||
803 | return -EFAULT; | ||
804 | } | ||
805 | 718 | ||
806 | static inline void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a) | 719 | static inline void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a) |
807 | { | 720 | { |
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index 10a69894e2fd..6ac3c543250b 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c | |||
@@ -103,7 +103,7 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest) | |||
103 | 103 | ||
104 | if (buf != NULL) { | 104 | if (buf != NULL) { |
105 | msg = buf_msg(buf); | 105 | msg = buf_msg(buf); |
106 | msg_init(msg, NAME_DISTRIBUTOR, type, LONG_H_SIZE, dest); | 106 | tipc_msg_init(msg, NAME_DISTRIBUTOR, type, LONG_H_SIZE, dest); |
107 | msg_set_size(msg, LONG_H_SIZE + size); | 107 | msg_set_size(msg, LONG_H_SIZE + size); |
108 | } | 108 | } |
109 | return buf; | 109 | return buf; |
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index acab41a48d67..8ba79620db3f 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c | |||
@@ -627,7 +627,7 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) | |||
627 | struct name_seq *seq; | 627 | struct name_seq *seq; |
628 | u32 ref; | 628 | u32 ref; |
629 | 629 | ||
630 | if (!in_scope(*destnode, tipc_own_addr)) | 630 | if (!tipc_in_scope(*destnode, tipc_own_addr)) |
631 | return 0; | 631 | return 0; |
632 | 632 | ||
633 | read_lock_bh(&tipc_nametbl_lock); | 633 | read_lock_bh(&tipc_nametbl_lock); |
diff --git a/net/tipc/net.c b/net/tipc/net.c index d7cd1e064a80..f61b7694138b 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c | |||
@@ -219,7 +219,7 @@ void tipc_net_route_msg(struct sk_buff *buf) | |||
219 | 219 | ||
220 | /* Handle message for this node */ | 220 | /* Handle message for this node */ |
221 | dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg); | 221 | dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg); |
222 | if (in_scope(dnode, tipc_own_addr)) { | 222 | if (tipc_in_scope(dnode, tipc_own_addr)) { |
223 | if (msg_isdata(msg)) { | 223 | if (msg_isdata(msg)) { |
224 | if (msg_mcast(msg)) | 224 | if (msg_mcast(msg)) |
225 | tipc_port_recv_mcast(buf, NULL); | 225 | tipc_port_recv_mcast(buf, NULL); |
@@ -277,7 +277,7 @@ int tipc_net_start(u32 addr) | |||
277 | 277 | ||
278 | info("Started in network mode\n"); | 278 | info("Started in network mode\n"); |
279 | info("Own node address %s, network identity %u\n", | 279 | info("Own node address %s, network identity %u\n", |
280 | addr_string_fill(addr_string, tipc_own_addr), tipc_net_id); | 280 | tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id); |
281 | return 0; | 281 | return 0; |
282 | } | 282 | } |
283 | 283 | ||
diff --git a/net/tipc/node.c b/net/tipc/node.c index 17cc394f424f..b634942caba5 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -268,7 +268,7 @@ struct tipc_node *tipc_node_attach_link(struct link *l_ptr) | |||
268 | 268 | ||
269 | if (n_ptr->link_cnt >= 2) { | 269 | if (n_ptr->link_cnt >= 2) { |
270 | err("Attempt to create third link to %s\n", | 270 | err("Attempt to create third link to %s\n", |
271 | addr_string_fill(addr_string, n_ptr->addr)); | 271 | tipc_addr_string_fill(addr_string, n_ptr->addr)); |
272 | return NULL; | 272 | return NULL; |
273 | } | 273 | } |
274 | 274 | ||
@@ -280,7 +280,7 @@ struct tipc_node *tipc_node_attach_link(struct link *l_ptr) | |||
280 | } | 280 | } |
281 | err("Attempt to establish second link on <%s> to %s\n", | 281 | err("Attempt to establish second link on <%s> to %s\n", |
282 | l_ptr->b_ptr->publ.name, | 282 | l_ptr->b_ptr->publ.name, |
283 | addr_string_fill(addr_string, l_ptr->addr)); | 283 | tipc_addr_string_fill(addr_string, l_ptr->addr)); |
284 | } | 284 | } |
285 | return NULL; | 285 | return NULL; |
286 | } | 286 | } |
@@ -439,7 +439,7 @@ static void node_lost_contact(struct tipc_node *n_ptr) | |||
439 | return; | 439 | return; |
440 | 440 | ||
441 | info("Lost contact with %s\n", | 441 | info("Lost contact with %s\n", |
442 | addr_string_fill(addr_string, n_ptr->addr)); | 442 | tipc_addr_string_fill(addr_string, n_ptr->addr)); |
443 | 443 | ||
444 | /* Abort link changeover */ | 444 | /* Abort link changeover */ |
445 | for (i = 0; i < MAX_BEARERS; i++) { | 445 | for (i = 0; i < MAX_BEARERS; i++) { |
@@ -602,7 +602,7 @@ u32 tipc_available_nodes(const u32 domain) | |||
602 | 602 | ||
603 | read_lock_bh(&tipc_net_lock); | 603 | read_lock_bh(&tipc_net_lock); |
604 | for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) { | 604 | for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) { |
605 | if (!in_scope(domain, n_ptr->addr)) | 605 | if (!tipc_in_scope(domain, n_ptr->addr)) |
606 | continue; | 606 | continue; |
607 | if (tipc_node_is_up(n_ptr)) | 607 | if (tipc_node_is_up(n_ptr)) |
608 | cnt++; | 608 | cnt++; |
@@ -651,7 +651,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) | |||
651 | /* Add TLVs for all nodes in scope */ | 651 | /* Add TLVs for all nodes in scope */ |
652 | 652 | ||
653 | for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) { | 653 | for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) { |
654 | if (!in_scope(domain, n_ptr->addr)) | 654 | if (!tipc_in_scope(domain, n_ptr->addr)) |
655 | continue; | 655 | continue; |
656 | node_info.addr = htonl(n_ptr->addr); | 656 | node_info.addr = htonl(n_ptr->addr); |
657 | node_info.up = htonl(tipc_node_is_up(n_ptr)); | 657 | node_info.up = htonl(tipc_node_is_up(n_ptr)); |
@@ -711,7 +711,7 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) | |||
711 | for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) { | 711 | for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) { |
712 | u32 i; | 712 | u32 i; |
713 | 713 | ||
714 | if (!in_scope(domain, n_ptr->addr)) | 714 | if (!tipc_in_scope(domain, n_ptr->addr)) |
715 | continue; | 715 | continue; |
716 | tipc_node_lock(n_ptr); | 716 | tipc_node_lock(n_ptr); |
717 | for (i = 0; i < MAX_BEARERS; i++) { | 717 | for (i = 0; i < MAX_BEARERS; i++) { |
diff --git a/net/tipc/port.c b/net/tipc/port.c index e70d27ea6578..0737680e9266 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c | |||
@@ -116,7 +116,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain, | |||
116 | msg_set_namelower(hdr, seq->lower); | 116 | msg_set_namelower(hdr, seq->lower); |
117 | msg_set_nameupper(hdr, seq->upper); | 117 | msg_set_nameupper(hdr, seq->upper); |
118 | msg_set_hdr_sz(hdr, MCAST_H_SIZE); | 118 | msg_set_hdr_sz(hdr, MCAST_H_SIZE); |
119 | res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE, | 119 | res = tipc_msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE, |
120 | !oport->user_port, &buf); | 120 | !oport->user_port, &buf); |
121 | if (unlikely(!buf)) | 121 | if (unlikely(!buf)) |
122 | return res; | 122 | return res; |
@@ -241,13 +241,12 @@ struct tipc_port *tipc_createport_raw(void *usr_handle, | |||
241 | p_ptr->publ.max_pkt = MAX_PKT_DEFAULT; | 241 | p_ptr->publ.max_pkt = MAX_PKT_DEFAULT; |
242 | p_ptr->publ.ref = ref; | 242 | p_ptr->publ.ref = ref; |
243 | msg = &p_ptr->publ.phdr; | 243 | msg = &p_ptr->publ.phdr; |
244 | msg_init(msg, importance, TIPC_NAMED_MSG, LONG_H_SIZE, 0); | 244 | tipc_msg_init(msg, importance, TIPC_NAMED_MSG, LONG_H_SIZE, 0); |
245 | msg_set_origport(msg, ref); | 245 | msg_set_origport(msg, ref); |
246 | p_ptr->last_in_seqno = 41; | 246 | p_ptr->last_in_seqno = 41; |
247 | p_ptr->sent = 1; | 247 | p_ptr->sent = 1; |
248 | INIT_LIST_HEAD(&p_ptr->wait_list); | 248 | INIT_LIST_HEAD(&p_ptr->wait_list); |
249 | INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list); | 249 | INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list); |
250 | p_ptr->congested_link = NULL; | ||
251 | p_ptr->dispatcher = dispatcher; | 250 | p_ptr->dispatcher = dispatcher; |
252 | p_ptr->wakeup = wakeup; | 251 | p_ptr->wakeup = wakeup; |
253 | p_ptr->user_port = NULL; | 252 | p_ptr->user_port = NULL; |
@@ -396,7 +395,7 @@ static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode, | |||
396 | buf = buf_acquire(LONG_H_SIZE); | 395 | buf = buf_acquire(LONG_H_SIZE); |
397 | if (buf) { | 396 | if (buf) { |
398 | msg = buf_msg(buf); | 397 | msg = buf_msg(buf); |
399 | msg_init(msg, usr, type, LONG_H_SIZE, destnode); | 398 | tipc_msg_init(msg, usr, type, LONG_H_SIZE, destnode); |
400 | msg_set_errcode(msg, err); | 399 | msg_set_errcode(msg, err); |
401 | msg_set_destport(msg, destport); | 400 | msg_set_destport(msg, destport); |
402 | msg_set_origport(msg, origport); | 401 | msg_set_origport(msg, origport); |
@@ -440,7 +439,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err) | |||
440 | return data_sz; | 439 | return data_sz; |
441 | } | 440 | } |
442 | rmsg = buf_msg(rbuf); | 441 | rmsg = buf_msg(rbuf); |
443 | msg_init(rmsg, imp, msg_type(msg), hdr_sz, msg_orignode(msg)); | 442 | tipc_msg_init(rmsg, imp, msg_type(msg), hdr_sz, msg_orignode(msg)); |
444 | msg_set_errcode(rmsg, err); | 443 | msg_set_errcode(rmsg, err); |
445 | msg_set_destport(rmsg, msg_origport(msg)); | 444 | msg_set_destport(rmsg, msg_origport(msg)); |
446 | msg_set_origport(rmsg, msg_destport(msg)); | 445 | msg_set_origport(rmsg, msg_destport(msg)); |
@@ -481,7 +480,7 @@ int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr, | |||
481 | struct sk_buff *buf; | 480 | struct sk_buff *buf; |
482 | int res; | 481 | int res; |
483 | 482 | ||
484 | res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE, | 483 | res = tipc_msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE, |
485 | !p_ptr->user_port, &buf); | 484 | !p_ptr->user_port, &buf); |
486 | if (!buf) | 485 | if (!buf) |
487 | return res; | 486 | return res; |
@@ -1344,7 +1343,7 @@ int tipc_port_recv_sections(struct port *sender, unsigned int num_sect, | |||
1344 | struct sk_buff *buf; | 1343 | struct sk_buff *buf; |
1345 | int res; | 1344 | int res; |
1346 | 1345 | ||
1347 | res = msg_build(&sender->publ.phdr, msg_sect, num_sect, | 1346 | res = tipc_msg_build(&sender->publ.phdr, msg_sect, num_sect, |
1348 | MAX_MSG_SIZE, !sender->user_port, &buf); | 1347 | MAX_MSG_SIZE, !sender->user_port, &buf); |
1349 | if (likely(buf)) | 1348 | if (likely(buf)) |
1350 | tipc_port_recv_msg(buf); | 1349 | tipc_port_recv_msg(buf); |
@@ -1384,7 +1383,7 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect) | |||
1384 | if (port_unreliable(p_ptr)) { | 1383 | if (port_unreliable(p_ptr)) { |
1385 | p_ptr->publ.congested = 0; | 1384 | p_ptr->publ.congested = 0; |
1386 | /* Just calculate msg length and return */ | 1385 | /* Just calculate msg length and return */ |
1387 | return msg_calc_data_size(msg_sect, num_sect); | 1386 | return tipc_msg_calc_data_size(msg_sect, num_sect); |
1388 | } | 1387 | } |
1389 | return -ELINKCONG; | 1388 | return -ELINKCONG; |
1390 | } | 1389 | } |
@@ -1453,7 +1452,7 @@ int tipc_forward2name(u32 ref, | |||
1453 | struct port *p_ptr; | 1452 | struct port *p_ptr; |
1454 | struct tipc_msg *msg; | 1453 | struct tipc_msg *msg; |
1455 | u32 destnode = domain; | 1454 | u32 destnode = domain; |
1456 | u32 destport = 0; | 1455 | u32 destport; |
1457 | int res; | 1456 | int res; |
1458 | 1457 | ||
1459 | p_ptr = tipc_port_deref(ref); | 1458 | p_ptr = tipc_port_deref(ref); |
@@ -1467,7 +1466,7 @@ int tipc_forward2name(u32 ref, | |||
1467 | msg_set_hdr_sz(msg, LONG_H_SIZE); | 1466 | msg_set_hdr_sz(msg, LONG_H_SIZE); |
1468 | msg_set_nametype(msg, name->type); | 1467 | msg_set_nametype(msg, name->type); |
1469 | msg_set_nameinst(msg, name->instance); | 1468 | msg_set_nameinst(msg, name->instance); |
1470 | msg_set_lookup_scope(msg, addr_scope(domain)); | 1469 | msg_set_lookup_scope(msg, tipc_addr_scope(domain)); |
1471 | if (importance <= TIPC_CRITICAL_IMPORTANCE) | 1470 | if (importance <= TIPC_CRITICAL_IMPORTANCE) |
1472 | msg_set_importance(msg,importance); | 1471 | msg_set_importance(msg,importance); |
1473 | destport = tipc_nametbl_translate(name->type, name->instance, &destnode); | 1472 | destport = tipc_nametbl_translate(name->type, name->instance, &destnode); |
@@ -1484,7 +1483,7 @@ int tipc_forward2name(u32 ref, | |||
1484 | return res; | 1483 | return res; |
1485 | if (port_unreliable(p_ptr)) { | 1484 | if (port_unreliable(p_ptr)) { |
1486 | /* Just calculate msg length and return */ | 1485 | /* Just calculate msg length and return */ |
1487 | return msg_calc_data_size(msg_sect, num_sect); | 1486 | return tipc_msg_calc_data_size(msg_sect, num_sect); |
1488 | } | 1487 | } |
1489 | return -ELINKCONG; | 1488 | return -ELINKCONG; |
1490 | } | 1489 | } |
@@ -1525,7 +1524,7 @@ int tipc_forward_buf2name(u32 ref, | |||
1525 | struct port *p_ptr; | 1524 | struct port *p_ptr; |
1526 | struct tipc_msg *msg; | 1525 | struct tipc_msg *msg; |
1527 | u32 destnode = domain; | 1526 | u32 destnode = domain; |
1528 | u32 destport = 0; | 1527 | u32 destport; |
1529 | int res; | 1528 | int res; |
1530 | 1529 | ||
1531 | p_ptr = (struct port *)tipc_ref_deref(ref); | 1530 | p_ptr = (struct port *)tipc_ref_deref(ref); |
@@ -1540,7 +1539,7 @@ int tipc_forward_buf2name(u32 ref, | |||
1540 | msg_set_origport(msg, orig->ref); | 1539 | msg_set_origport(msg, orig->ref); |
1541 | msg_set_nametype(msg, name->type); | 1540 | msg_set_nametype(msg, name->type); |
1542 | msg_set_nameinst(msg, name->instance); | 1541 | msg_set_nameinst(msg, name->instance); |
1543 | msg_set_lookup_scope(msg, addr_scope(domain)); | 1542 | msg_set_lookup_scope(msg, tipc_addr_scope(domain)); |
1544 | msg_set_hdr_sz(msg, LONG_H_SIZE); | 1543 | msg_set_hdr_sz(msg, LONG_H_SIZE); |
1545 | msg_set_size(msg, LONG_H_SIZE + dsz); | 1544 | msg_set_size(msg, LONG_H_SIZE + dsz); |
1546 | destport = tipc_nametbl_translate(name->type, name->instance, &destnode); | 1545 | destport = tipc_nametbl_translate(name->type, name->instance, &destnode); |
@@ -1620,7 +1619,7 @@ int tipc_forward2port(u32 ref, | |||
1620 | return res; | 1619 | return res; |
1621 | if (port_unreliable(p_ptr)) { | 1620 | if (port_unreliable(p_ptr)) { |
1622 | /* Just calculate msg length and return */ | 1621 | /* Just calculate msg length and return */ |
1623 | return msg_calc_data_size(msg_sect, num_sect); | 1622 | return tipc_msg_calc_data_size(msg_sect, num_sect); |
1624 | } | 1623 | } |
1625 | return -ELINKCONG; | 1624 | return -ELINKCONG; |
1626 | } | 1625 | } |
diff --git a/net/tipc/port.h b/net/tipc/port.h index ff31ee4a1dc3..8d1652aab298 100644 --- a/net/tipc/port.h +++ b/net/tipc/port.h | |||
@@ -75,7 +75,6 @@ struct user_port { | |||
75 | * @wakeup: ptr to routine to call when port is no longer congested | 75 | * @wakeup: ptr to routine to call when port is no longer congested |
76 | * @user_port: ptr to user port associated with port (if any) | 76 | * @user_port: ptr to user port associated with port (if any) |
77 | * @wait_list: adjacent ports in list of ports waiting on link congestion | 77 | * @wait_list: adjacent ports in list of ports waiting on link congestion |
78 | * @congested_link: ptr to congested link port is waiting on | ||
79 | * @waiting_pkts: | 78 | * @waiting_pkts: |
80 | * @sent: | 79 | * @sent: |
81 | * @acked: | 80 | * @acked: |
@@ -95,7 +94,6 @@ struct port { | |||
95 | void (*wakeup)(struct tipc_port *); | 94 | void (*wakeup)(struct tipc_port *); |
96 | struct user_port *user_port; | 95 | struct user_port *user_port; |
97 | struct list_head wait_list; | 96 | struct list_head wait_list; |
98 | struct link *congested_link; | ||
99 | u32 waiting_pkts; | 97 | u32 waiting_pkts; |
100 | u32 sent; | 98 | u32 sent; |
101 | u32 acked; | 99 | u32 acked; |
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c index e978c7136c97..2609e445fe7d 100644 --- a/net/wimax/op-rfkill.c +++ b/net/wimax/op-rfkill.c | |||
@@ -43,7 +43,7 @@ | |||
43 | * wimax_rfkill() Kernel calling wimax_rfkill() | 43 | * wimax_rfkill() Kernel calling wimax_rfkill() |
44 | * __wimax_rf_toggle_radio() | 44 | * __wimax_rf_toggle_radio() |
45 | * | 45 | * |
46 | * wimax_rfkill_set_radio_block() RF-Kill subsytem calling | 46 | * wimax_rfkill_set_radio_block() RF-Kill subsystem calling |
47 | * __wimax_rf_toggle_radio() | 47 | * __wimax_rf_toggle_radio() |
48 | * | 48 | * |
49 | * __wimax_rf_toggle_radio() | 49 | * __wimax_rf_toggle_radio() |
diff --git a/net/wimax/stack.c b/net/wimax/stack.c index 62b1a6662209..ee99e7dfcdba 100644 --- a/net/wimax/stack.c +++ b/net/wimax/stack.c | |||
@@ -320,7 +320,6 @@ void __wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state) | |||
320 | out: | 320 | out: |
321 | d_fnend(3, dev, "(wimax_dev %p new_state %u [old %u]) = void\n", | 321 | d_fnend(3, dev, "(wimax_dev %p new_state %u [old %u]) = void\n", |
322 | wimax_dev, new_state, old_state); | 322 | wimax_dev, new_state, old_state); |
323 | return; | ||
324 | } | 323 | } |
325 | 324 | ||
326 | 325 | ||
@@ -362,7 +361,6 @@ void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state) | |||
362 | if (wimax_dev->state > __WIMAX_ST_NULL) | 361 | if (wimax_dev->state > __WIMAX_ST_NULL) |
363 | __wimax_state_change(wimax_dev, new_state); | 362 | __wimax_state_change(wimax_dev, new_state); |
364 | mutex_unlock(&wimax_dev->mutex); | 363 | mutex_unlock(&wimax_dev->mutex); |
365 | return; | ||
366 | } | 364 | } |
367 | EXPORT_SYMBOL_GPL(wimax_state_change); | 365 | EXPORT_SYMBOL_GPL(wimax_state_change); |
368 | 366 | ||
diff --git a/net/wireless/chan.c b/net/wireless/chan.c index bf1737fc9a7e..b01a6f6397d7 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c | |||
@@ -10,38 +10,6 @@ | |||
10 | #include "core.h" | 10 | #include "core.h" |
11 | 11 | ||
12 | struct ieee80211_channel * | 12 | struct ieee80211_channel * |
13 | rdev_fixed_channel(struct cfg80211_registered_device *rdev, | ||
14 | struct wireless_dev *for_wdev) | ||
15 | { | ||
16 | struct wireless_dev *wdev; | ||
17 | struct ieee80211_channel *result = NULL; | ||
18 | |||
19 | WARN_ON(!mutex_is_locked(&rdev->devlist_mtx)); | ||
20 | |||
21 | list_for_each_entry(wdev, &rdev->netdev_list, list) { | ||
22 | if (wdev == for_wdev) | ||
23 | continue; | ||
24 | |||
25 | /* | ||
26 | * Lock manually to tell lockdep about allowed | ||
27 | * nesting here if for_wdev->mtx is held already. | ||
28 | * This is ok as it's all under the rdev devlist | ||
29 | * mutex and as such can only be done once at any | ||
30 | * given time. | ||
31 | */ | ||
32 | mutex_lock_nested(&wdev->mtx, SINGLE_DEPTH_NESTING); | ||
33 | if (wdev->current_bss) | ||
34 | result = wdev->current_bss->pub.channel; | ||
35 | wdev_unlock(wdev); | ||
36 | |||
37 | if (result) | ||
38 | break; | ||
39 | } | ||
40 | |||
41 | return result; | ||
42 | } | ||
43 | |||
44 | struct ieee80211_channel * | ||
45 | rdev_freq_to_chan(struct cfg80211_registered_device *rdev, | 13 | rdev_freq_to_chan(struct cfg80211_registered_device *rdev, |
46 | int freq, enum nl80211_channel_type channel_type) | 14 | int freq, enum nl80211_channel_type channel_type) |
47 | { | 15 | { |
@@ -75,15 +43,22 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev, | |||
75 | return chan; | 43 | return chan; |
76 | } | 44 | } |
77 | 45 | ||
78 | int rdev_set_freq(struct cfg80211_registered_device *rdev, | 46 | int cfg80211_set_freq(struct cfg80211_registered_device *rdev, |
79 | struct wireless_dev *for_wdev, | 47 | struct wireless_dev *wdev, int freq, |
80 | int freq, enum nl80211_channel_type channel_type) | 48 | enum nl80211_channel_type channel_type) |
81 | { | 49 | { |
82 | struct ieee80211_channel *chan; | 50 | struct ieee80211_channel *chan; |
83 | int result; | 51 | int result; |
84 | 52 | ||
85 | if (rdev_fixed_channel(rdev, for_wdev)) | 53 | if (wdev && wdev->iftype == NL80211_IFTYPE_MONITOR) |
86 | return -EBUSY; | 54 | wdev = NULL; |
55 | |||
56 | if (wdev) { | ||
57 | ASSERT_WDEV_LOCK(wdev); | ||
58 | |||
59 | if (!netif_running(wdev->netdev)) | ||
60 | return -ENETDOWN; | ||
61 | } | ||
87 | 62 | ||
88 | if (!rdev->ops->set_channel) | 63 | if (!rdev->ops->set_channel) |
89 | return -EOPNOTSUPP; | 64 | return -EOPNOTSUPP; |
@@ -92,11 +67,14 @@ int rdev_set_freq(struct cfg80211_registered_device *rdev, | |||
92 | if (!chan) | 67 | if (!chan) |
93 | return -EINVAL; | 68 | return -EINVAL; |
94 | 69 | ||
95 | result = rdev->ops->set_channel(&rdev->wiphy, chan, channel_type); | 70 | result = rdev->ops->set_channel(&rdev->wiphy, |
71 | wdev ? wdev->netdev : NULL, | ||
72 | chan, channel_type); | ||
96 | if (result) | 73 | if (result) |
97 | return result; | 74 | return result; |
98 | 75 | ||
99 | rdev->channel = chan; | 76 | if (wdev) |
77 | wdev->channel = chan; | ||
100 | 78 | ||
101 | return 0; | 79 | return 0; |
102 | } | 80 | } |
diff --git a/net/wireless/core.h b/net/wireless/core.h index b2234b436ead..ae930acf75e9 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -70,9 +70,6 @@ struct cfg80211_registered_device { | |||
70 | struct work_struct conn_work; | 70 | struct work_struct conn_work; |
71 | struct work_struct event_work; | 71 | struct work_struct event_work; |
72 | 72 | ||
73 | /* current channel */ | ||
74 | struct ieee80211_channel *channel; | ||
75 | |||
76 | /* must be last because of the way we do wiphy_priv(), | 73 | /* must be last because of the way we do wiphy_priv(), |
77 | * and it should at least be aligned to NETDEV_ALIGN */ | 74 | * and it should at least be aligned to NETDEV_ALIGN */ |
78 | struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN))); | 75 | struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN))); |
@@ -388,14 +385,11 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, | |||
388 | void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev); | 385 | void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev); |
389 | 386 | ||
390 | struct ieee80211_channel * | 387 | struct ieee80211_channel * |
391 | rdev_fixed_channel(struct cfg80211_registered_device *rdev, | ||
392 | struct wireless_dev *for_wdev); | ||
393 | struct ieee80211_channel * | ||
394 | rdev_freq_to_chan(struct cfg80211_registered_device *rdev, | 388 | rdev_freq_to_chan(struct cfg80211_registered_device *rdev, |
395 | int freq, enum nl80211_channel_type channel_type); | 389 | int freq, enum nl80211_channel_type channel_type); |
396 | int rdev_set_freq(struct cfg80211_registered_device *rdev, | 390 | int cfg80211_set_freq(struct cfg80211_registered_device *rdev, |
397 | struct wireless_dev *for_wdev, | 391 | struct wireless_dev *wdev, int freq, |
398 | int freq, enum nl80211_channel_type channel_type); | 392 | enum nl80211_channel_type channel_type); |
399 | 393 | ||
400 | u16 cfg80211_calculate_bitrate(struct rate_info *rate); | 394 | u16 cfg80211_calculate_bitrate(struct rate_info *rate); |
401 | 395 | ||
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index 6a5acf750174..adcabba02e20 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c | |||
@@ -81,15 +81,10 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, | |||
81 | struct cfg80211_cached_keys *connkeys) | 81 | struct cfg80211_cached_keys *connkeys) |
82 | { | 82 | { |
83 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 83 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
84 | struct ieee80211_channel *chan; | ||
85 | int err; | 84 | int err; |
86 | 85 | ||
87 | ASSERT_WDEV_LOCK(wdev); | 86 | ASSERT_WDEV_LOCK(wdev); |
88 | 87 | ||
89 | chan = rdev_fixed_channel(rdev, wdev); | ||
90 | if (chan && chan != params->channel) | ||
91 | return -EBUSY; | ||
92 | |||
93 | if (wdev->ssid_len) | 88 | if (wdev->ssid_len) |
94 | return -EALREADY; | 89 | return -EALREADY; |
95 | 90 | ||
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 01da83ddcff7..db71150b8040 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -589,6 +589,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, | |||
589 | i++; | 589 | i++; |
590 | NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS); | 590 | NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS); |
591 | } | 591 | } |
592 | CMD(set_channel, SET_CHANNEL); | ||
592 | 593 | ||
593 | #undef CMD | 594 | #undef CMD |
594 | 595 | ||
@@ -689,10 +690,90 @@ static int parse_txq_params(struct nlattr *tb[], | |||
689 | return 0; | 690 | return 0; |
690 | } | 691 | } |
691 | 692 | ||
693 | static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev) | ||
694 | { | ||
695 | /* | ||
696 | * You can only set the channel explicitly for AP, mesh | ||
697 | * and WDS type interfaces; all others have their channel | ||
698 | * managed via their respective "establish a connection" | ||
699 | * command (connect, join, ...) | ||
700 | * | ||
701 | * Monitors are special as they are normally slaved to | ||
702 | * whatever else is going on, so they behave as though | ||
703 | * you tried setting the wiphy channel itself. | ||
704 | */ | ||
705 | return !wdev || | ||
706 | wdev->iftype == NL80211_IFTYPE_AP || | ||
707 | wdev->iftype == NL80211_IFTYPE_WDS || | ||
708 | wdev->iftype == NL80211_IFTYPE_MESH_POINT || | ||
709 | wdev->iftype == NL80211_IFTYPE_MONITOR; | ||
710 | } | ||
711 | |||
712 | static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, | ||
713 | struct wireless_dev *wdev, | ||
714 | struct genl_info *info) | ||
715 | { | ||
716 | enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; | ||
717 | u32 freq; | ||
718 | int result; | ||
719 | |||
720 | if (!info->attrs[NL80211_ATTR_WIPHY_FREQ]) | ||
721 | return -EINVAL; | ||
722 | |||
723 | if (!nl80211_can_set_dev_channel(wdev)) | ||
724 | return -EOPNOTSUPP; | ||
725 | |||
726 | if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { | ||
727 | channel_type = nla_get_u32(info->attrs[ | ||
728 | NL80211_ATTR_WIPHY_CHANNEL_TYPE]); | ||
729 | if (channel_type != NL80211_CHAN_NO_HT && | ||
730 | channel_type != NL80211_CHAN_HT20 && | ||
731 | channel_type != NL80211_CHAN_HT40PLUS && | ||
732 | channel_type != NL80211_CHAN_HT40MINUS) | ||
733 | return -EINVAL; | ||
734 | } | ||
735 | |||
736 | freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); | ||
737 | |||
738 | mutex_lock(&rdev->devlist_mtx); | ||
739 | if (wdev) { | ||
740 | wdev_lock(wdev); | ||
741 | result = cfg80211_set_freq(rdev, wdev, freq, channel_type); | ||
742 | wdev_unlock(wdev); | ||
743 | } else { | ||
744 | result = cfg80211_set_freq(rdev, NULL, freq, channel_type); | ||
745 | } | ||
746 | mutex_unlock(&rdev->devlist_mtx); | ||
747 | |||
748 | return result; | ||
749 | } | ||
750 | |||
751 | static int nl80211_set_channel(struct sk_buff *skb, struct genl_info *info) | ||
752 | { | ||
753 | struct cfg80211_registered_device *rdev; | ||
754 | struct net_device *netdev; | ||
755 | int result; | ||
756 | |||
757 | rtnl_lock(); | ||
758 | |||
759 | result = get_rdev_dev_by_info_ifindex(info, &rdev, &netdev); | ||
760 | if (result) | ||
761 | goto unlock; | ||
762 | |||
763 | result = __nl80211_set_channel(rdev, netdev->ieee80211_ptr, info); | ||
764 | |||
765 | unlock: | ||
766 | rtnl_unlock(); | ||
767 | |||
768 | return result; | ||
769 | } | ||
770 | |||
692 | static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) | 771 | static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) |
693 | { | 772 | { |
694 | struct cfg80211_registered_device *rdev; | 773 | struct cfg80211_registered_device *rdev; |
695 | int result = 0, rem_txq_params = 0; | 774 | struct net_device *netdev = NULL; |
775 | struct wireless_dev *wdev; | ||
776 | int result, rem_txq_params = 0; | ||
696 | struct nlattr *nl_txq_params; | 777 | struct nlattr *nl_txq_params; |
697 | u32 changed; | 778 | u32 changed; |
698 | u8 retry_short = 0, retry_long = 0; | 779 | u8 retry_short = 0, retry_long = 0; |
@@ -701,16 +782,50 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) | |||
701 | 782 | ||
702 | rtnl_lock(); | 783 | rtnl_lock(); |
703 | 784 | ||
785 | /* | ||
786 | * Try to find the wiphy and netdev. Normally this | ||
787 | * function shouldn't need the netdev, but this is | ||
788 | * done for backward compatibility -- previously | ||
789 | * setting the channel was done per wiphy, but now | ||
790 | * it is per netdev. Previous userland like hostapd | ||
791 | * also passed a netdev to set_wiphy, so that it is | ||
792 | * possible to let that go to the right netdev! | ||
793 | */ | ||
704 | mutex_lock(&cfg80211_mutex); | 794 | mutex_lock(&cfg80211_mutex); |
705 | 795 | ||
706 | rdev = __cfg80211_rdev_from_info(info); | 796 | if (info->attrs[NL80211_ATTR_IFINDEX]) { |
707 | if (IS_ERR(rdev)) { | 797 | int ifindex = nla_get_u32(info->attrs[NL80211_ATTR_IFINDEX]); |
708 | mutex_unlock(&cfg80211_mutex); | 798 | |
709 | result = PTR_ERR(rdev); | 799 | netdev = dev_get_by_index(genl_info_net(info), ifindex); |
710 | goto unlock; | 800 | if (netdev && netdev->ieee80211_ptr) { |
801 | rdev = wiphy_to_dev(netdev->ieee80211_ptr->wiphy); | ||
802 | mutex_lock(&rdev->mtx); | ||
803 | } else | ||
804 | netdev = NULL; | ||
711 | } | 805 | } |
712 | 806 | ||
713 | mutex_lock(&rdev->mtx); | 807 | if (!netdev) { |
808 | rdev = __cfg80211_rdev_from_info(info); | ||
809 | if (IS_ERR(rdev)) { | ||
810 | mutex_unlock(&cfg80211_mutex); | ||
811 | result = PTR_ERR(rdev); | ||
812 | goto unlock; | ||
813 | } | ||
814 | wdev = NULL; | ||
815 | netdev = NULL; | ||
816 | result = 0; | ||
817 | |||
818 | mutex_lock(&rdev->mtx); | ||
819 | } else if (netif_running(netdev) && | ||
820 | nl80211_can_set_dev_channel(netdev->ieee80211_ptr)) | ||
821 | wdev = netdev->ieee80211_ptr; | ||
822 | else | ||
823 | wdev = NULL; | ||
824 | |||
825 | /* | ||
826 | * end workaround code, by now the rdev is available | ||
827 | * and locked, and wdev may or may not be NULL. | ||
828 | */ | ||
714 | 829 | ||
715 | if (info->attrs[NL80211_ATTR_WIPHY_NAME]) | 830 | if (info->attrs[NL80211_ATTR_WIPHY_NAME]) |
716 | result = cfg80211_dev_rename( | 831 | result = cfg80211_dev_rename( |
@@ -749,26 +864,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) | |||
749 | } | 864 | } |
750 | 865 | ||
751 | if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { | 866 | if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { |
752 | enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; | 867 | result = __nl80211_set_channel(rdev, wdev, info); |
753 | u32 freq; | ||
754 | |||
755 | result = -EINVAL; | ||
756 | |||
757 | if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { | ||
758 | channel_type = nla_get_u32(info->attrs[ | ||
759 | NL80211_ATTR_WIPHY_CHANNEL_TYPE]); | ||
760 | if (channel_type != NL80211_CHAN_NO_HT && | ||
761 | channel_type != NL80211_CHAN_HT20 && | ||
762 | channel_type != NL80211_CHAN_HT40PLUS && | ||
763 | channel_type != NL80211_CHAN_HT40MINUS) | ||
764 | goto bad_res; | ||
765 | } | ||
766 | |||
767 | freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); | ||
768 | |||
769 | mutex_lock(&rdev->devlist_mtx); | ||
770 | result = rdev_set_freq(rdev, NULL, freq, channel_type); | ||
771 | mutex_unlock(&rdev->devlist_mtx); | ||
772 | if (result) | 868 | if (result) |
773 | goto bad_res; | 869 | goto bad_res; |
774 | } | 870 | } |
@@ -865,6 +961,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) | |||
865 | 961 | ||
866 | bad_res: | 962 | bad_res: |
867 | mutex_unlock(&rdev->mtx); | 963 | mutex_unlock(&rdev->mtx); |
964 | if (netdev) | ||
965 | dev_put(netdev); | ||
868 | unlock: | 966 | unlock: |
869 | rtnl_unlock(); | 967 | rtnl_unlock(); |
870 | return result; | 968 | return result; |
@@ -3562,9 +3660,8 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) | |||
3562 | { | 3660 | { |
3563 | struct cfg80211_registered_device *rdev; | 3661 | struct cfg80211_registered_device *rdev; |
3564 | struct net_device *dev; | 3662 | struct net_device *dev; |
3565 | struct wireless_dev *wdev; | ||
3566 | struct cfg80211_crypto_settings crypto; | 3663 | struct cfg80211_crypto_settings crypto; |
3567 | struct ieee80211_channel *chan, *fixedchan; | 3664 | struct ieee80211_channel *chan; |
3568 | const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL; | 3665 | const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL; |
3569 | int err, ssid_len, ie_len = 0; | 3666 | int err, ssid_len, ie_len = 0; |
3570 | bool use_mfp = false; | 3667 | bool use_mfp = false; |
@@ -3607,16 +3704,6 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) | |||
3607 | goto out; | 3704 | goto out; |
3608 | } | 3705 | } |
3609 | 3706 | ||
3610 | mutex_lock(&rdev->devlist_mtx); | ||
3611 | wdev = dev->ieee80211_ptr; | ||
3612 | fixedchan = rdev_fixed_channel(rdev, wdev); | ||
3613 | if (fixedchan && chan != fixedchan) { | ||
3614 | err = -EBUSY; | ||
3615 | mutex_unlock(&rdev->devlist_mtx); | ||
3616 | goto out; | ||
3617 | } | ||
3618 | mutex_unlock(&rdev->devlist_mtx); | ||
3619 | |||
3620 | ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); | 3707 | ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); |
3621 | ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); | 3708 | ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); |
3622 | 3709 | ||
@@ -4356,9 +4443,10 @@ static int nl80211_remain_on_channel(struct sk_buff *skb, | |||
4356 | if (channel_type != NL80211_CHAN_NO_HT && | 4443 | if (channel_type != NL80211_CHAN_NO_HT && |
4357 | channel_type != NL80211_CHAN_HT20 && | 4444 | channel_type != NL80211_CHAN_HT20 && |
4358 | channel_type != NL80211_CHAN_HT40PLUS && | 4445 | channel_type != NL80211_CHAN_HT40PLUS && |
4359 | channel_type != NL80211_CHAN_HT40MINUS) | 4446 | channel_type != NL80211_CHAN_HT40MINUS) { |
4360 | err = -EINVAL; | 4447 | err = -EINVAL; |
4361 | goto out; | 4448 | goto out; |
4449 | } | ||
4362 | } | 4450 | } |
4363 | 4451 | ||
4364 | freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); | 4452 | freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); |
@@ -4630,9 +4718,10 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info) | |||
4630 | if (channel_type != NL80211_CHAN_NO_HT && | 4718 | if (channel_type != NL80211_CHAN_NO_HT && |
4631 | channel_type != NL80211_CHAN_HT20 && | 4719 | channel_type != NL80211_CHAN_HT20 && |
4632 | channel_type != NL80211_CHAN_HT40PLUS && | 4720 | channel_type != NL80211_CHAN_HT40PLUS && |
4633 | channel_type != NL80211_CHAN_HT40MINUS) | 4721 | channel_type != NL80211_CHAN_HT40MINUS) { |
4634 | err = -EINVAL; | 4722 | err = -EINVAL; |
4635 | goto out; | 4723 | goto out; |
4724 | } | ||
4636 | } | 4725 | } |
4637 | 4726 | ||
4638 | freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); | 4727 | freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); |
@@ -5186,6 +5275,12 @@ static struct genl_ops nl80211_ops[] = { | |||
5186 | .policy = nl80211_policy, | 5275 | .policy = nl80211_policy, |
5187 | .flags = GENL_ADMIN_PERM, | 5276 | .flags = GENL_ADMIN_PERM, |
5188 | }, | 5277 | }, |
5278 | { | ||
5279 | .cmd = NL80211_CMD_SET_CHANNEL, | ||
5280 | .doit = nl80211_set_channel, | ||
5281 | .policy = nl80211_policy, | ||
5282 | .flags = GENL_ADMIN_PERM, | ||
5283 | }, | ||
5189 | }; | 5284 | }; |
5190 | 5285 | ||
5191 | static struct genl_multicast_group nl80211_mlme_mcgrp = { | 5286 | static struct genl_multicast_group nl80211_mlme_mcgrp = { |
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index a026c6d56bd3..58401d246bda 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
@@ -515,7 +515,7 @@ cfg80211_inform_bss(struct wiphy *wiphy, | |||
515 | 515 | ||
516 | privsz = wiphy->bss_priv_size; | 516 | privsz = wiphy->bss_priv_size; |
517 | 517 | ||
518 | if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC && | 518 | if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC && |
519 | (signal < 0 || signal > 100))) | 519 | (signal < 0 || signal > 100))) |
520 | return NULL; | 520 | return NULL; |
521 | 521 | ||
@@ -571,7 +571,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy, | |||
571 | u.probe_resp.variable); | 571 | u.probe_resp.variable); |
572 | size_t privsz = wiphy->bss_priv_size; | 572 | size_t privsz = wiphy->bss_priv_size; |
573 | 573 | ||
574 | if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC && | 574 | if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC && |
575 | (signal < 0 || signal > 100))) | 575 | (signal < 0 || signal > 100))) |
576 | return NULL; | 576 | return NULL; |
577 | 577 | ||
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 8ddf5ae0dd03..72222f0074db 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -741,7 +741,6 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev, | |||
741 | const u8 *prev_bssid) | 741 | const u8 *prev_bssid) |
742 | { | 742 | { |
743 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 743 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
744 | struct ieee80211_channel *chan; | ||
745 | struct cfg80211_bss *bss = NULL; | 744 | struct cfg80211_bss *bss = NULL; |
746 | int err; | 745 | int err; |
747 | 746 | ||
@@ -750,10 +749,6 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev, | |||
750 | if (wdev->sme_state != CFG80211_SME_IDLE) | 749 | if (wdev->sme_state != CFG80211_SME_IDLE) |
751 | return -EALREADY; | 750 | return -EALREADY; |
752 | 751 | ||
753 | chan = rdev_fixed_channel(rdev, wdev); | ||
754 | if (chan && chan != connect->channel) | ||
755 | return -EBUSY; | ||
756 | |||
757 | if (WARN_ON(wdev->connect_keys)) { | 752 | if (WARN_ON(wdev->connect_keys)) { |
758 | kfree(wdev->connect_keys); | 753 | kfree(wdev->connect_keys); |
759 | wdev->connect_keys = NULL; | 754 | wdev->connect_keys = NULL; |
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index a60a2773b497..96342993cf93 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c | |||
@@ -782,16 +782,22 @@ int cfg80211_wext_siwfreq(struct net_device *dev, | |||
782 | return cfg80211_mgd_wext_siwfreq(dev, info, wextfreq, extra); | 782 | return cfg80211_mgd_wext_siwfreq(dev, info, wextfreq, extra); |
783 | case NL80211_IFTYPE_ADHOC: | 783 | case NL80211_IFTYPE_ADHOC: |
784 | return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra); | 784 | return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra); |
785 | default: | 785 | case NL80211_IFTYPE_MONITOR: |
786 | case NL80211_IFTYPE_WDS: | ||
787 | case NL80211_IFTYPE_MESH_POINT: | ||
786 | freq = cfg80211_wext_freq(wdev->wiphy, wextfreq); | 788 | freq = cfg80211_wext_freq(wdev->wiphy, wextfreq); |
787 | if (freq < 0) | 789 | if (freq < 0) |
788 | return freq; | 790 | return freq; |
789 | if (freq == 0) | 791 | if (freq == 0) |
790 | return -EINVAL; | 792 | return -EINVAL; |
793 | wdev_lock(wdev); | ||
791 | mutex_lock(&rdev->devlist_mtx); | 794 | mutex_lock(&rdev->devlist_mtx); |
792 | err = rdev_set_freq(rdev, NULL, freq, NL80211_CHAN_NO_HT); | 795 | err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT); |
793 | mutex_unlock(&rdev->devlist_mtx); | 796 | mutex_unlock(&rdev->devlist_mtx); |
797 | wdev_unlock(wdev); | ||
794 | return err; | 798 | return err; |
799 | default: | ||
800 | return -EOPNOTSUPP; | ||
795 | } | 801 | } |
796 | } | 802 | } |
797 | EXPORT_SYMBOL_GPL(cfg80211_wext_siwfreq); | 803 | EXPORT_SYMBOL_GPL(cfg80211_wext_siwfreq); |
@@ -801,7 +807,6 @@ int cfg80211_wext_giwfreq(struct net_device *dev, | |||
801 | struct iw_freq *freq, char *extra) | 807 | struct iw_freq *freq, char *extra) |
802 | { | 808 | { |
803 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 809 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
804 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
805 | 810 | ||
806 | switch (wdev->iftype) { | 811 | switch (wdev->iftype) { |
807 | case NL80211_IFTYPE_STATION: | 812 | case NL80211_IFTYPE_STATION: |
@@ -809,9 +814,9 @@ int cfg80211_wext_giwfreq(struct net_device *dev, | |||
809 | case NL80211_IFTYPE_ADHOC: | 814 | case NL80211_IFTYPE_ADHOC: |
810 | return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra); | 815 | return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra); |
811 | default: | 816 | default: |
812 | if (!rdev->channel) | 817 | if (!wdev->channel) |
813 | return -EINVAL; | 818 | return -EINVAL; |
814 | freq->m = rdev->channel->center_freq; | 819 | freq->m = wdev->channel->center_freq; |
815 | freq->e = 6; | 820 | freq->e = 6; |
816 | return 0; | 821 | return 0; |
817 | } | 822 | } |
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index d5c6140f4cb8..9818198add8a 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c | |||
@@ -108,7 +108,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev, | |||
108 | 108 | ||
109 | /* SSID is not set, we just want to switch channel */ | 109 | /* SSID is not set, we just want to switch channel */ |
110 | if (chan && !wdev->wext.connect.ssid_len) { | 110 | if (chan && !wdev->wext.connect.ssid_len) { |
111 | err = rdev_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT); | 111 | err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT); |
112 | goto out; | 112 | goto out; |
113 | } | 113 | } |
114 | 114 | ||
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 296e65e01064..5e86d4e97dce 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c | |||
@@ -453,7 +453,6 @@ static int x25_setsockopt(struct socket *sock, int level, int optname, | |||
453 | struct sock *sk = sock->sk; | 453 | struct sock *sk = sock->sk; |
454 | int rc = -ENOPROTOOPT; | 454 | int rc = -ENOPROTOOPT; |
455 | 455 | ||
456 | lock_kernel(); | ||
457 | if (level != SOL_X25 || optname != X25_QBITINCL) | 456 | if (level != SOL_X25 || optname != X25_QBITINCL) |
458 | goto out; | 457 | goto out; |
459 | 458 | ||
@@ -465,10 +464,12 @@ static int x25_setsockopt(struct socket *sock, int level, int optname, | |||
465 | if (get_user(opt, (int __user *)optval)) | 464 | if (get_user(opt, (int __user *)optval)) |
466 | goto out; | 465 | goto out; |
467 | 466 | ||
468 | x25_sk(sk)->qbitincl = !!opt; | 467 | if (opt) |
468 | set_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags); | ||
469 | else | ||
470 | clear_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags); | ||
469 | rc = 0; | 471 | rc = 0; |
470 | out: | 472 | out: |
471 | unlock_kernel(); | ||
472 | return rc; | 473 | return rc; |
473 | } | 474 | } |
474 | 475 | ||
@@ -478,7 +479,6 @@ static int x25_getsockopt(struct socket *sock, int level, int optname, | |||
478 | struct sock *sk = sock->sk; | 479 | struct sock *sk = sock->sk; |
479 | int val, len, rc = -ENOPROTOOPT; | 480 | int val, len, rc = -ENOPROTOOPT; |
480 | 481 | ||
481 | lock_kernel(); | ||
482 | if (level != SOL_X25 || optname != X25_QBITINCL) | 482 | if (level != SOL_X25 || optname != X25_QBITINCL) |
483 | goto out; | 483 | goto out; |
484 | 484 | ||
@@ -496,10 +496,9 @@ static int x25_getsockopt(struct socket *sock, int level, int optname, | |||
496 | if (put_user(len, optlen)) | 496 | if (put_user(len, optlen)) |
497 | goto out; | 497 | goto out; |
498 | 498 | ||
499 | val = x25_sk(sk)->qbitincl; | 499 | val = test_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags); |
500 | rc = copy_to_user(optval, &val, len) ? -EFAULT : 0; | 500 | rc = copy_to_user(optval, &val, len) ? -EFAULT : 0; |
501 | out: | 501 | out: |
502 | unlock_kernel(); | ||
503 | return rc; | 502 | return rc; |
504 | } | 503 | } |
505 | 504 | ||
@@ -583,7 +582,7 @@ static int x25_create(struct net *net, struct socket *sock, int protocol, | |||
583 | x25->t2 = sysctl_x25_ack_holdback_timeout; | 582 | x25->t2 = sysctl_x25_ack_holdback_timeout; |
584 | x25->state = X25_STATE_0; | 583 | x25->state = X25_STATE_0; |
585 | x25->cudmatchlength = 0; | 584 | x25->cudmatchlength = 0; |
586 | x25->accptapprv = X25_DENY_ACCPT_APPRV; /* normally no cud */ | 585 | set_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); /* normally no cud */ |
587 | /* on call accept */ | 586 | /* on call accept */ |
588 | 587 | ||
589 | x25->facilities.winsize_in = X25_DEFAULT_WINDOW_SIZE; | 588 | x25->facilities.winsize_in = X25_DEFAULT_WINDOW_SIZE; |
@@ -632,12 +631,12 @@ static struct sock *x25_make_new(struct sock *osk) | |||
632 | x25->t22 = ox25->t22; | 631 | x25->t22 = ox25->t22; |
633 | x25->t23 = ox25->t23; | 632 | x25->t23 = ox25->t23; |
634 | x25->t2 = ox25->t2; | 633 | x25->t2 = ox25->t2; |
634 | x25->flags = ox25->flags; | ||
635 | x25->facilities = ox25->facilities; | 635 | x25->facilities = ox25->facilities; |
636 | x25->qbitincl = ox25->qbitincl; | ||
637 | x25->dte_facilities = ox25->dte_facilities; | 636 | x25->dte_facilities = ox25->dte_facilities; |
638 | x25->cudmatchlength = ox25->cudmatchlength; | 637 | x25->cudmatchlength = ox25->cudmatchlength; |
639 | x25->accptapprv = ox25->accptapprv; | ||
640 | 638 | ||
639 | clear_bit(X25_INTERRUPT_FLAG, &x25->flags); | ||
641 | x25_init_timers(sk); | 640 | x25_init_timers(sk); |
642 | out: | 641 | out: |
643 | return sk; | 642 | return sk; |
@@ -1053,8 +1052,8 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb, | |||
1053 | makex25->vc_facil_mask &= ~X25_MASK_CALLING_AE; | 1052 | makex25->vc_facil_mask &= ~X25_MASK_CALLING_AE; |
1054 | makex25->cudmatchlength = x25_sk(sk)->cudmatchlength; | 1053 | makex25->cudmatchlength = x25_sk(sk)->cudmatchlength; |
1055 | 1054 | ||
1056 | /* Normally all calls are accepted immediatly */ | 1055 | /* Normally all calls are accepted immediately */ |
1057 | if(makex25->accptapprv & X25_DENY_ACCPT_APPRV) { | 1056 | if (test_bit(X25_ACCPT_APPRV_FLAG, &makex25->flags)) { |
1058 | x25_write_internal(make, X25_CALL_ACCEPTED); | 1057 | x25_write_internal(make, X25_CALL_ACCEPTED); |
1059 | makex25->state = X25_STATE_3; | 1058 | makex25->state = X25_STATE_3; |
1060 | } | 1059 | } |
@@ -1186,7 +1185,7 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1186 | * If the Q BIT Include socket option is in force, the first | 1185 | * If the Q BIT Include socket option is in force, the first |
1187 | * byte of the user data is the logical value of the Q Bit. | 1186 | * byte of the user data is the logical value of the Q Bit. |
1188 | */ | 1187 | */ |
1189 | if (x25->qbitincl) { | 1188 | if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { |
1190 | qbit = skb->data[0]; | 1189 | qbit = skb->data[0]; |
1191 | skb_pull(skb, 1); | 1190 | skb_pull(skb, 1); |
1192 | } | 1191 | } |
@@ -1242,7 +1241,7 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1242 | len = rc; | 1241 | len = rc; |
1243 | if (rc < 0) | 1242 | if (rc < 0) |
1244 | kfree_skb(skb); | 1243 | kfree_skb(skb); |
1245 | else if (x25->qbitincl) | 1244 | else if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) |
1246 | len++; | 1245 | len++; |
1247 | } | 1246 | } |
1248 | 1247 | ||
@@ -1307,7 +1306,7 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1307 | /* | 1306 | /* |
1308 | * No Q bit information on Interrupt data. | 1307 | * No Q bit information on Interrupt data. |
1309 | */ | 1308 | */ |
1310 | if (x25->qbitincl) { | 1309 | if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { |
1311 | asmptr = skb_push(skb, 1); | 1310 | asmptr = skb_push(skb, 1); |
1312 | *asmptr = 0x00; | 1311 | *asmptr = 0x00; |
1313 | } | 1312 | } |
@@ -1325,7 +1324,7 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1325 | skb_pull(skb, x25->neighbour->extended ? | 1324 | skb_pull(skb, x25->neighbour->extended ? |
1326 | X25_EXT_MIN_LEN : X25_STD_MIN_LEN); | 1325 | X25_EXT_MIN_LEN : X25_STD_MIN_LEN); |
1327 | 1326 | ||
1328 | if (x25->qbitincl) { | 1327 | if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { |
1329 | asmptr = skb_push(skb, 1); | 1328 | asmptr = skb_push(skb, 1); |
1330 | *asmptr = qbit; | 1329 | *asmptr = qbit; |
1331 | } | 1330 | } |
@@ -1576,7 +1575,7 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
1576 | rc = -EINVAL; | 1575 | rc = -EINVAL; |
1577 | if (sk->sk_state != TCP_CLOSE) | 1576 | if (sk->sk_state != TCP_CLOSE) |
1578 | break; | 1577 | break; |
1579 | x25->accptapprv = X25_ALLOW_ACCPT_APPRV; | 1578 | clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); |
1580 | rc = 0; | 1579 | rc = 0; |
1581 | break; | 1580 | break; |
1582 | } | 1581 | } |
@@ -1585,7 +1584,8 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
1585 | rc = -EINVAL; | 1584 | rc = -EINVAL; |
1586 | if (sk->sk_state != TCP_ESTABLISHED) | 1585 | if (sk->sk_state != TCP_ESTABLISHED) |
1587 | break; | 1586 | break; |
1588 | if (x25->accptapprv) /* must call accptapprv above */ | 1587 | /* must call accptapprv above */ |
1588 | if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags)) | ||
1589 | break; | 1589 | break; |
1590 | x25_write_internal(sk, X25_CALL_ACCEPTED); | 1590 | x25_write_internal(sk, X25_CALL_ACCEPTED); |
1591 | x25->state = X25_STATE_3; | 1591 | x25->state = X25_STATE_3; |
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index 372ac226e648..63178961efac 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c | |||
@@ -273,7 +273,7 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
273 | break; | 273 | break; |
274 | 274 | ||
275 | case X25_INTERRUPT_CONFIRMATION: | 275 | case X25_INTERRUPT_CONFIRMATION: |
276 | x25->intflag = 0; | 276 | clear_bit(X25_INTERRUPT_FLAG, &x25->flags); |
277 | break; | 277 | break; |
278 | 278 | ||
279 | case X25_INTERRUPT: | 279 | case X25_INTERRUPT: |
diff --git a/net/x25/x25_out.c b/net/x25/x25_out.c index 52351a26b6fc..d00649fb251d 100644 --- a/net/x25/x25_out.c +++ b/net/x25/x25_out.c | |||
@@ -148,8 +148,9 @@ void x25_kick(struct sock *sk) | |||
148 | /* | 148 | /* |
149 | * Transmit interrupt data. | 149 | * Transmit interrupt data. |
150 | */ | 150 | */ |
151 | if (!x25->intflag && skb_peek(&x25->interrupt_out_queue) != NULL) { | 151 | if (skb_peek(&x25->interrupt_out_queue) != NULL && |
152 | x25->intflag = 1; | 152 | !test_and_set_bit(X25_INTERRUPT_FLAG, &x25->flags)) { |
153 | |||
153 | skb = skb_dequeue(&x25->interrupt_out_queue); | 154 | skb = skb_dequeue(&x25->interrupt_out_queue); |
154 | x25_transmit_link(skb, x25->neighbour); | 155 | x25_transmit_link(skb, x25->neighbour); |
155 | } | 156 | } |
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h index 1396572d2ade..8e69533d2313 100644 --- a/net/xfrm/xfrm_hash.h +++ b/net/xfrm/xfrm_hash.h | |||
@@ -55,7 +55,7 @@ static inline unsigned __xfrm_src_hash(xfrm_address_t *daddr, | |||
55 | case AF_INET6: | 55 | case AF_INET6: |
56 | h ^= __xfrm6_daddr_saddr_hash(daddr, saddr); | 56 | h ^= __xfrm6_daddr_saddr_hash(daddr, saddr); |
57 | break; | 57 | break; |
58 | }; | 58 | } |
59 | return (h ^ (h >> 16)) & hmask; | 59 | return (h ^ (h >> 16)) & hmask; |
60 | } | 60 | } |
61 | 61 | ||
@@ -102,7 +102,7 @@ static inline unsigned int __sel_hash(struct xfrm_selector *sel, unsigned short | |||
102 | 102 | ||
103 | h = __xfrm6_daddr_saddr_hash(daddr, saddr); | 103 | h = __xfrm6_daddr_saddr_hash(daddr, saddr); |
104 | break; | 104 | break; |
105 | }; | 105 | } |
106 | h ^= (h >> 16); | 106 | h ^= (h >> 16); |
107 | return h & hmask; | 107 | return h & hmask; |
108 | } | 108 | } |
@@ -119,7 +119,7 @@ static inline unsigned int __addr_hash(xfrm_address_t *daddr, xfrm_address_t *sa | |||
119 | case AF_INET6: | 119 | case AF_INET6: |
120 | h = __xfrm6_daddr_saddr_hash(daddr, saddr); | 120 | h = __xfrm6_daddr_saddr_hash(daddr, saddr); |
121 | break; | 121 | break; |
122 | }; | 122 | } |
123 | h ^= (h >> 16); | 123 | h ^= (h >> 16); |
124 | return h & hmask; | 124 | return h & hmask; |
125 | } | 125 | } |
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 6a329158bdfa..a3cca0a94346 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c | |||
@@ -95,13 +95,13 @@ resume: | |||
95 | goto error_nolock; | 95 | goto error_nolock; |
96 | } | 96 | } |
97 | 97 | ||
98 | dst = dst_pop(dst); | 98 | dst = skb_dst_pop(skb); |
99 | if (!dst) { | 99 | if (!dst) { |
100 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR); | 100 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR); |
101 | err = -EHOSTUNREACH; | 101 | err = -EHOSTUNREACH; |
102 | goto error_nolock; | 102 | goto error_nolock; |
103 | } | 103 | } |
104 | skb_dst_set(skb, dst); | 104 | skb_dst_set_noref(skb, dst); |
105 | x = dst->xfrm; | 105 | x = dst->xfrm; |
106 | } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)); | 106 | } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)); |
107 | 107 | ||
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 31f4ba43b48f..4bf27d901333 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1805,7 +1805,7 @@ restart: | |||
1805 | /* EREMOTE tells the caller to generate | 1805 | /* EREMOTE tells the caller to generate |
1806 | * a one-shot blackhole route. */ | 1806 | * a one-shot blackhole route. */ |
1807 | dst_release(dst); | 1807 | dst_release(dst); |
1808 | xfrm_pols_put(pols, num_pols); | 1808 | xfrm_pols_put(pols, drop_pols); |
1809 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); | 1809 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); |
1810 | return -EREMOTE; | 1810 | return -EREMOTE; |
1811 | } | 1811 | } |
@@ -2153,6 +2153,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family) | |||
2153 | return 0; | 2153 | return 0; |
2154 | } | 2154 | } |
2155 | 2155 | ||
2156 | skb_dst_force(skb); | ||
2156 | dst = skb_dst(skb); | 2157 | dst = skb_dst(skb); |
2157 | 2158 | ||
2158 | res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0; | 2159 | res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0; |
@@ -2209,7 +2210,6 @@ EXPORT_SYMBOL(xfrm_dst_ifdown); | |||
2209 | static void xfrm_link_failure(struct sk_buff *skb) | 2210 | static void xfrm_link_failure(struct sk_buff *skb) |
2210 | { | 2211 | { |
2211 | /* Impossible. Such dst must be popped before reaches point of failure. */ | 2212 | /* Impossible. Such dst must be popped before reaches point of failure. */ |
2212 | return; | ||
2213 | } | 2213 | } |
2214 | 2214 | ||
2215 | static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst) | 2215 | static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst) |
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index a267fbdda525..ba59983aaffe 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -1783,7 +1783,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1783 | 1783 | ||
1784 | } else { | 1784 | } else { |
1785 | // reset the timers here? | 1785 | // reset the timers here? |
1786 | printk("Dont know what to do with soft policy expire\n"); | 1786 | WARN(1, "Dont know what to do with soft policy expire\n"); |
1787 | } | 1787 | } |
1788 | km_policy_expired(xp, p->dir, up->hard, current->pid); | 1788 | km_policy_expired(xp, p->dir, up->hard, current->pid); |
1789 | 1789 | ||
@@ -1883,7 +1883,7 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1883 | return 0; | 1883 | return 0; |
1884 | 1884 | ||
1885 | bad_policy: | 1885 | bad_policy: |
1886 | printk("BAD policy passed\n"); | 1886 | WARN(1, "BAD policy passed\n"); |
1887 | free_state: | 1887 | free_state: |
1888 | kfree(x); | 1888 | kfree(x); |
1889 | nomem: | 1889 | nomem: |
@@ -2385,8 +2385,9 @@ static int xfrm_send_state_notify(struct xfrm_state *x, struct km_event *c) | |||
2385 | case XFRM_MSG_FLUSHSA: | 2385 | case XFRM_MSG_FLUSHSA: |
2386 | return xfrm_notify_sa_flush(c); | 2386 | return xfrm_notify_sa_flush(c); |
2387 | default: | 2387 | default: |
2388 | printk("xfrm_user: Unknown SA event %d\n", c->event); | 2388 | printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n", |
2389 | break; | 2389 | c->event); |
2390 | break; | ||
2390 | } | 2391 | } |
2391 | 2392 | ||
2392 | return 0; | 2393 | return 0; |
@@ -2676,7 +2677,8 @@ static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_ev | |||
2676 | case XFRM_MSG_POLEXPIRE: | 2677 | case XFRM_MSG_POLEXPIRE: |
2677 | return xfrm_exp_policy_notify(xp, dir, c); | 2678 | return xfrm_exp_policy_notify(xp, dir, c); |
2678 | default: | 2679 | default: |
2679 | printk("xfrm_user: Unknown Policy event %d\n", c->event); | 2680 | printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n", |
2681 | c->event); | ||
2680 | } | 2682 | } |
2681 | 2683 | ||
2682 | return 0; | 2684 | return 0; |