Diffstat (limited to 'net')
121 files changed, 342 insertions, 279 deletions
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index d1314cf18adf..d940c49d168a 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -54,7 +54,7 @@ static const char name_conf[] = "config";
 
 /*
 * Structures for interfacing with the /proc filesystem.
- * VLAN creates its own directory /proc/net/vlan with the folowing
+ * VLAN creates its own directory /proc/net/vlan with the following
 * entries:
 * config device status/configuration
 * <device> entry for each device
diff --git a/net/9p/client.c b/net/9p/client.c
index 2ccbf04d37df..48b8e084e710 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -178,7 +178,7 @@ free_and_return:
 * @tag: numeric id for transaction
 *
 * this is a simple array lookup, but will grow the
- * request_slots as necessary to accomodate transaction
+ * request_slots as necessary to accommodate transaction
 * ids which did not previously have a slot.
 *
 * this code relies on the client spinlock to manage locks, its
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
index 9172ab78fcb0..d47880e971dd 100644
--- a/net/9p/trans_common.c
+++ b/net/9p/trans_common.c
@@ -36,7 +36,7 @@ p9_release_req_pages(struct trans_rpage_info *rpinfo)
 EXPORT_SYMBOL(p9_release_req_pages);
 
 /**
- * p9_nr_pages - Return number of pages needed to accomodate the payload.
+ * p9_nr_pages - Return number of pages needed to accommodate the payload.
 */
 int
 p9_nr_pages(struct p9_req_t *req)
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(p9_nr_pages);
 * @req: Request to be sent to server.
 * @pdata_off: data offset into the first page after translation (gup).
 * @pdata_len: Total length of the IO. gup may not return requested # of pages.
- * @nr_pages: number of pages to accomodate the payload
+ * @nr_pages: number of pages to accommodate the payload
 * @rw: Indicates if the pages are for read or write.
 */
 int
diff --git a/net/9p/util.c b/net/9p/util.c
index b84619b5ba22..da6af81e59d9 100644
--- a/net/9p/util.c
+++ b/net/9p/util.c
@@ -67,7 +67,7 @@ EXPORT_SYMBOL(p9_idpool_create);
 
 /**
 * p9_idpool_destroy - create a new per-connection id pool
- * @p: idpool to destory
+ * @p: idpool to destroy
 */
 
 void p9_idpool_destroy(struct p9_idpool *p)
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index fce2eae8d476..2252c2085dac 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -509,7 +509,7 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
 write_lock_irq(&devs_lock);
 net_dev = br2684_find_dev(&be.ifspec);
 if (net_dev == NULL) {
- pr_err("tried to attach to non-existant device\n");
+ pr_err("tried to attach to non-existent device\n");
 err = -ENXIO;
 goto error;
 }
diff --git a/net/atm/lec.h b/net/atm/lec.h
index 9d14d196cc1d..dfc071966463 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -35,7 +35,7 @@ struct lecdatahdr_8025 {
 * Operations that LANE2 capable device can do. Two first functions
 * are used to make the device do things. See spec 3.1.3 and 3.1.4.
 *
- * The third function is intented for the MPOA component sitting on
+ * The third function is intended for the MPOA component sitting on
 * top of the LANE device. The MPOA component assigns it's own function
 * to (*associate_indicator)() and the LANE device will use that
 * function to tell about TLVs it sees floating through.
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 9ed26140a269..824e1f6e50f2 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -474,7 +474,7 @@ void interface_rx(struct net_device *soft_iface,
 goto dropped;
 skb->protocol = eth_type_trans(skb, soft_iface);
 
- /* should not be neccesary anymore as we use skb_pull_rcsum()
+ /* should not be necessary anymore as we use skb_pull_rcsum()
 * TODO: please verify this and remove this TODO
 * -- Dec 21st 2009, Simon Wunderlich */
 
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 2216620ff296..c83f618282f7 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1883,7 +1883,7 @@ static void hci_tx_task(unsigned long arg)
 read_unlock(&hci_task_lock);
 }
 
-/* ----- HCI RX task (incoming data proccessing) ----- */
+/* ----- HCI RX task (incoming data processing) ----- */
 
 /* ACL data packet */
 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index f77308e63e58..299fe56a9668 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -679,7 +679,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
 
 if (opt == BT_FLUSHABLE_OFF) {
 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- /* proceed futher only when we have l2cap_conn and
+ /* proceed further only when we have l2cap_conn and
 No Flush support in the LM */
 if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
 err = -EINVAL;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 88485cc74dc3..cc4d3c5ab1c6 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -169,7 +169,7 @@ void br_fdb_flush(struct net_bridge *br)
 spin_unlock_bh(&br->hash_lock);
 }
 
-/* Flush all entries refering to a specific port.
+/* Flush all entries referring to a specific port.
 * if do_all is set also flush static entries
 */
 void br_fdb_delete_by_port(struct net_bridge *br,
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index cb43312b846e..3d9fca0e3370 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -106,7 +106,7 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
 /*
 * Legacy ioctl's through SIOCDEVPRIVATE
 * This interface is deprecated because it was too difficult to
- * to do the translation for 32/64bit ioctl compatability.
+ * to do the translation for 32/64bit ioctl compatibility.
 */
 static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 8184c031d028..37a4034dfc29 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -852,7 +852,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
 sock->state = SS_CONNECTING;
 sk->sk_state = CAIF_CONNECTING;
 
- /* Check priority value comming from socket */
+ /* Check priority value coming from socket */
 /* if priority value is out of range it will be ajusted */
 if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
 cf_sk->conn_req.priority = CAIF_PRIO_MAX;
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 871a0ad51025..57b1aed79014 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -387,7 +387,7 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
 }
 
 /*
- * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions
+ * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
 */
 static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
 {
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 3b91d651fe08..50af02737a3d 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -917,7 +917,7 @@ EXPORT_SYMBOL(ceph_osdc_set_request_linger);
 /*
 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
 * (as needed), and set the request r_osd appropriately. If there is
- * no up osd, set r_osd to NULL. Move the request to the appropiate list
+ * no up osd, set r_osd to NULL. Move the request to the appropriate list
 * (unsent, homeless) or leave on in-flight lru.
 *
 * Return 0 if unchanged, 1 if changed, or negative on error.
diff --git a/net/core/dev.c b/net/core/dev.c
index 3da9fb06d47a..956d3b006e8b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2091,7 +2091,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 u32 features;
 
 /*
- * If device doesnt need skb->dst, release it right now while
+ * If device doesn't need skb->dst, release it right now while
 * its hot in this cpu cache
 */
 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
@@ -2151,7 +2151,7 @@ gso:
 nskb->next = NULL;
 
 /*
- * If device doesnt need nskb->dst, release it right now while
+ * If device doesn't need nskb->dst, release it right now while
 * its hot in this cpu cache
 */
 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
@@ -2970,8 +2970,8 @@ EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
 * a compare and 2 stores extra right now if we dont have it on
 * but have CONFIG_NET_CLS_ACT
- * NOTE: This doesnt stop any functionality; if you dont have
- * the ingress scheduler, you just cant add policies on ingress.
+ * NOTE: This doesn't stop any functionality; if you dont have
+ * the ingress scheduler, you just can't add policies on ingress.
 *
 */
 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
@@ -3800,7 +3800,7 @@ static void net_rx_action(struct softirq_action *h)
 * with netpoll's poll_napi(). Only the entity which
 * obtains the lock and sees NAPI_STATE_SCHED set will
 * actually make the ->poll() call. Therefore we avoid
- * accidently calling ->poll() when NAPI is not scheduled.
+ * accidentally calling ->poll() when NAPI is not scheduled.
 */
 work = 0;
 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
@@ -6336,7 +6336,7 @@ static void __net_exit default_device_exit(struct net *net)
 if (dev->rtnl_link_ops)
 continue;
 
- /* Push remaing network devices to init_net */
+ /* Push remaining network devices to init_net */
 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
 err = dev_change_net_namespace(dev, &init_net, fb_name);
 if (err) {
diff --git a/net/core/filter.c b/net/core/filter.c
index 232b1873bb28..afb8afb066bb 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -425,7 +425,7 @@ EXPORT_SYMBOL(sk_run_filter);
 * As we dont want to clear mem[] array for each packet going through
 * sk_run_filter(), we check that filter loaded by user never try to read
 * a cell if not previously written, and we check all branches to be sure
- * a malicious user doesnt try to abuse us.
+ * a malicious user doesn't try to abuse us.
 */
 static int check_load_and_stores(struct sock_filter *filter, int flen)
 {
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 01a1101b5936..a7b342131869 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -129,7 +129,7 @@ static void linkwatch_schedule_work(int urgent)
 if (!cancel_delayed_work(&linkwatch_work))
 return;
 
- /* Otherwise we reschedule it again for immediate exection. */
+ /* Otherwise we reschedule it again for immediate execution. */
 schedule_delayed_work(&linkwatch_work, 0);
 }
 
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 49f7ea5b4c75..d7c4bb4b1820 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -196,7 +196,7 @@ EXPORT_SYMBOL_GPL(__rtnl_register);
 * as failure of this function is very unlikely, it can only happen due
 * to lack of memory when allocating the chain to store all message
 * handlers for a protocol. Meant for use in init functions where lack
- * of memory implies no sense in continueing.
+ * of memory implies no sense in continuing.
 */
 void rtnl_register(int protocol, int msgtype,
 rtnl_doit_func doit, rtnl_dumpit_func dumpit)
@@ -1440,7 +1440,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
 errout:
 if (err < 0 && modified && net_ratelimit())
 printk(KERN_WARNING "A link change request failed with "
- "some changes comitted already. Interface %s may "
+ "some changes committed already. Interface %s may "
 "have been left with an inconsistent configuration, "
 "please check.\n", dev->name);
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 801dd08908f9..7ebeed0a877c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2267,7 +2267,7 @@ EXPORT_SYMBOL(skb_prepare_seq_read);
 * of bytes already consumed and the next call to
 * skb_seq_read() will return the remaining part of the block.
 *
- * Note 1: The size of each block of data returned can be arbitary,
+ * Note 1: The size of each block of data returned can be arbitrary,
 * this limitation is the cost for zerocopy seqeuental
 * reads of potentially non linear data.
 *
diff --git a/net/core/sock.c b/net/core/sock.c
index 7dfed792434d..6e819780c232 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -215,7 +215,7 @@ __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
 
-/* Maximal space eaten by iovec or ancilliary data plus some space */
+/* Maximal space eaten by iovec or ancillary data plus some space */
 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
 EXPORT_SYMBOL(sysctl_optmem_max);
 
@@ -1175,7 +1175,7 @@ static void __sk_free(struct sock *sk)
 void sk_free(struct sock *sk)
 {
 /*
- * We substract one from sk_wmem_alloc and can know if
+ * We subtract one from sk_wmem_alloc and can know if
 * some packets are still in some tx queue.
 * If not null, sock_wfree() will call __sk_free(sk) later
 */
@@ -1185,10 +1185,10 @@ void sk_free(struct sock *sk)
 EXPORT_SYMBOL(sk_free);
 
 /*
- * Last sock_put should drop referrence to sk->sk_net. It has already
- * been dropped in sk_change_net. Taking referrence to stopping namespace
+ * Last sock_put should drop reference to sk->sk_net. It has already
+ * been dropped in sk_change_net. Taking reference to stopping namespace
 * is not an option.
- * Take referrence to a socket to remove it from hash _alive_ and after that
+ * Take reference to a socket to remove it from hash _alive_ and after that
 * destroy it in the context of init_net.
 */
 void sk_release_kernel(struct sock *sk)
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 784d30210543..136d41cbcd02 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -143,7 +143,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 }
 
 /**
- * dccp_determine_ccmps - Find out about CCID-specfic packet-size limits
+ * dccp_determine_ccmps - Find out about CCID-specific packet-size limits
 * We only consider the HC-sender CCID for setting the CCMPS (RFC 4340, 14.),
 * since the RX CCID is restricted to feedback packets (Acks), which are small
 * in comparison with the data traffic. A value of 0 means "no current CCMPS".
diff --git a/net/dsa/mv88e6131.c b/net/dsa/mv88e6131.c
index bb2b41bc854e..3da418894efc 100644
--- a/net/dsa/mv88e6131.c
+++ b/net/dsa/mv88e6131.c
@@ -14,6 +14,13 @@
 #include "dsa_priv.h"
 #include "mv88e6xxx.h"
 
+ /*
+ * Switch product IDs
+ */
+ #define ID_6085 0x04a0
+ #define ID_6095 0x0950
+ #define ID_6131 0x1060
+
 static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr)
 {
 int ret;
@@ -21,9 +28,11 @@ static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr)
 ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
 if (ret >= 0) {
 ret &= 0xfff0;
- if (ret == 0x0950)
+ if (ret == ID_6085)
+ return "Marvell 88E6085";
+ if (ret == ID_6095)
 return "Marvell 88E6095/88E6095F";
- if (ret == 0x1060)
+ if (ret == ID_6131)
 return "Marvell 88E6131";
 }
 
@@ -124,7 +133,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
 * Ignore removed tag data on doubly tagged packets, disable
 * flow control messages, force flow control priority to the
 * highest, and send all special multicast frames to the CPU
- * port at the higest priority.
+ * port at the highest priority.
 */
 REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
 
@@ -164,6 +173,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
 
 static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
 {
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
 int addr = REG_PORT(p);
 u16 val;
 
@@ -171,10 +181,13 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
 * MAC Forcing register: don't force link, speed, duplex
 * or flow control state to any particular values on physical
 * ports, but force the CPU port and all DSA ports to 1000 Mb/s
- * full duplex.
+ * (100 Mb/s on 6085) full duplex.
 */
 if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
- REG_WRITE(addr, 0x01, 0x003e);
+ if (ps->id == ID_6085)
+ REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */
+ else
+ REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */
 else
 REG_WRITE(addr, 0x01, 0x0003);
 
@@ -286,6 +299,8 @@ static int mv88e6131_setup(struct dsa_switch *ds)
 mv88e6xxx_ppu_state_init(ds);
 mutex_init(&ps->stats_mutex);
 
+ ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;
+
 ret = mv88e6131_switch_reset(ds);
 if (ret < 0)
 return ret;
diff --git a/net/dsa/mv88e6xxx.h b/net/dsa/mv88e6xxx.h
index eb0e0aaa9f1b..61156ca26a0d 100644
--- a/net/dsa/mv88e6xxx.h
+++ b/net/dsa/mv88e6xxx.h
@@ -39,6 +39,8 @@ struct mv88e6xxx_priv_state {
 * Hold this mutex over snapshot + dump sequences.
 */
 struct mutex stats_mutex;
+
+ int id; /* switch product id */
 };
 
 struct mv88e6xxx_hw_stat {
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 094e150c6260..a0af7ea87870 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -112,7 +112,7 @@ int cipso_v4_rbm_strictvalid = 1;
 /* The maximum number of category ranges permitted in the ranged category tag
 * (tag #5). You may note that the IETF draft states that the maximum number
 * of category ranges is 7, but if the low end of the last category range is
- * zero then it is possibile to fit 8 category ranges because the zero should
+ * zero then it is possible to fit 8 category ranges because the zero should
 * be omitted. */
 #define CIPSO_V4_TAG_RNG_CAT_MAX 8
 
@@ -438,7 +438,7 @@ cache_add_failure:
 *
 * Description:
 * Search the DOI definition list for a DOI definition with a DOI value that
- * matches @doi. The caller is responsibile for calling rcu_read_[un]lock().
+ * matches @doi. The caller is responsible for calling rcu_read_[un]lock().
 * Returns a pointer to the DOI definition on success and NULL on failure.
 */
 static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi)
@@ -1293,7 +1293,7 @@ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def,
 return ret_val;
 
 /* This will send packets using the "optimized" format when
- * possibile as specified in section 3.4.2.6 of the
+ * possible as specified in section 3.4.2.6 of the
 * CIPSO draft. */
 if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10)
 tag_len = 14;
@@ -1752,7 +1752,7 @@ validate_return:
 }
 
 /**
- * cipso_v4_error - Send the correct reponse for a bad packet
+ * cipso_v4_error - Send the correct response for a bad packet
 * @skb: the packet
 * @error: the error code
 * @gateway: CIPSO gateway flag
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index b92c86f6e9b3..e9013d6c1f51 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -12,7 +12,7 @@
 *
 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
 *
- * This work is based on the LPC-trie which is originally descibed in:
+ * This work is based on the LPC-trie which is originally described in:
 *
 * An experimental study of compression methods for dynamic tries
 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index a91dc1611081..e5f8a71d3a2a 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -704,7 +704,7 @@ static void icmp_unreach(struct sk_buff *skb)
 */
 
 /*
- * Check the other end isnt violating RFC 1122. Some routers send
+ * Check the other end isn't violating RFC 1122. Some routers send
 * bogus responses to broadcast frames. If you see this message
 * first check your netmask matches at both ends, if it does then
 * get the other vendor to fix their kit.
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 67f241b97649..459c011b1d4a 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -603,7 +603,7 @@ slow_path:
 /* IF: it doesn't fit, use 'mtu' - the data space left */
 if (len > mtu)
 len = mtu;
- /* IF: we are not sending upto and including the packet end
+ /* IF: we are not sending up to and including the packet end
 then align the next start on an eight byte boundary */
 if (len < left) {
 len &= ~7;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 2b097752426b..cbff2ecccf3d 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1444,7 +1444,7 @@ static int __init ip_auto_config(void)
 root_server_addr = addr;
 
 /*
- * Use defaults whereever applicable.
+ * Use defaults wherever applicable.
 */
 if (ic_defaults() < 0)
 return -1;
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index f3c0b549b8e1..4614babdc45f 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -221,9 +221,10 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
 return csum;
 }
 
- static int nf_ip_route(struct dst_entry **dst, struct flowi *fl)
+ static int nf_ip_route(struct net *net, struct dst_entry **dst,
+ struct flowi *fl, bool strict __always_unused)
 {
- struct rtable *rt = ip_route_output_key(&init_net, &fl->u.ip4);
+ struct rtable *rt = ip_route_output_key(net, &fl->u.ip4);
 if (IS_ERR(rt))
 return PTR_ERR(rt);
 *dst = &rt->dst;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 4b5d457c2d76..89bc7e66d598 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -76,7 +76,7 @@ static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
 }
 
 /*
- * Unfortunatly, _b and _mask are not aligned to an int (or long int)
+ * Unfortunately, _b and _mask are not aligned to an int (or long int)
 * Some arches dont care, unrolling the loop is a win on them.
 * For other arches, we only have a 16bit alignement.
 */
@@ -1874,7 +1874,7 @@ static int __init arp_tables_init(void)
 if (ret < 0)
 goto err1;
 
- /* Noone else will be downing sem now, so we won't sleep */
+ /* No one else will be downing sem now, so we won't sleep */
 ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
 if (ret < 0)
 goto err2;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index ffcea0d1678e..704915028009 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -2233,7 +2233,7 @@ static int __init ip_tables_init(void)
 if (ret < 0)
 goto err1;
 
- /* Noone else will be downing sem now, so we won't sleep */
+ /* No one else will be downing sem now, so we won't sleep */
 ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
 if (ret < 0)
 goto err2;
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 21bcf471b25a..9c71b2755ce3 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -521,7 +521,7 @@ int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
 }
 EXPORT_SYMBOL(nf_nat_protocol_register);
 
-/* Noone stores the protocol anywhere; simply delete it. */
+/* No one stores the protocol anywhere; simply delete it. */
 void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
 {
 spin_lock_bh(&nf_nat_lock);
@@ -532,7 +532,7 @@ void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
 }
 EXPORT_SYMBOL(nf_nat_protocol_unregister);
 
-/* Noone using conntrack by the time this called. */
+/* No one using conntrack by the time this called. */
 static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
 {
 struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 2d3c72e5bbbf..bceaec42c37d 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -622,7 +622,7 @@ do_confirm:
 static void raw_close(struct sock *sk, long timeout)
 {
 /*
- * Raw sockets may have direct kernel refereneces. Kill them.
+ * Raw sockets may have direct kernel references. Kill them.
 */
 ip_ra_control(sk, 0, NULL);
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 4b0c81180804..c1acf69858fd 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -821,7 +821,7 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
 }
 
 /*
- * Pertubation of rt_genid by a small quantity [1..256]
+ * Perturbation of rt_genid by a small quantity [1..256]
 * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
 * many times (2^24) without giving recent rt_genid.
 * Jenkins hash is strong enough that litle changes of rt_genid are OK.
@@ -1191,7 +1191,7 @@ restart:
 #endif
 /*
 * Since lookup is lockfree, we must make sure
- * previous writes to rt are comitted to memory
+ * previous writes to rt are committed to memory
 * before making rt visible to other CPUS.
 */
 rcu_assign_pointer(rt_hash_table[hash].chain, rt);
@@ -1891,6 +1891,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 #ifdef CONFIG_IP_ROUTE_CLASSID
 rth->dst.tclassid = itag;
 #endif
+ rth->rt_route_iif = dev->ifindex;
 rth->rt_iif = dev->ifindex;
 rth->dst.dev = init_net.loopback_dev;
 dev_hold(rth->dst.dev);
@@ -2026,6 +2027,7 @@ static int __mkroute_input(struct sk_buff *skb,
 rth->rt_key_src = saddr;
 rth->rt_src = saddr;
 rth->rt_gateway = daddr;
+ rth->rt_route_iif = in_dev->dev->ifindex;
 rth->rt_iif = in_dev->dev->ifindex;
 rth->dst.dev = (out_dev)->dev;
 dev_hold(rth->dst.dev);
@@ -2202,6 +2204,7 @@ local_input:
 #ifdef CONFIG_IP_ROUTE_CLASSID
 rth->dst.tclassid = itag;
 #endif
+ rth->rt_route_iif = dev->ifindex;
 rth->rt_iif = dev->ifindex;
 rth->dst.dev = net->loopback_dev;
 dev_hold(rth->dst.dev);
@@ -2401,7 +2404,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
 rth->rt_mark = oldflp4->flowi4_mark;
 rth->rt_dst = fl4->daddr;
 rth->rt_src = fl4->saddr;
- rth->rt_iif = 0;
+ rth->rt_route_iif = 0;
+ rth->rt_iif = oldflp4->flowi4_oif ? : dev_out->ifindex;
 /* get references to the devices that are to be hold by the routing
 cache entry */
 rth->dst.dev = dev_out;
@@ -2716,6 +2720,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
 rt->rt_key_dst = ort->rt_key_dst;
 rt->rt_key_src = ort->rt_key_src;
 rt->rt_tos = ort->rt_tos;
+ rt->rt_route_iif = ort->rt_route_iif;
 rt->rt_iif = ort->rt_iif;
 rt->rt_oif = ort->rt_oif;
 rt->rt_mark = ort->rt_mark;
@@ -2725,7 +2730,6 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
 rt->rt_type = ort->rt_type;
 rt->rt_dst = ort->rt_dst;
 rt->rt_src = ort->rt_src;
- rt->rt_iif = ort->rt_iif;
 rt->rt_gateway = ort->rt_gateway;
 rt->rt_spec_dst = ort->rt_spec_dst;
 rt->peer = ort->peer;
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 656d431c99ad..72f7218b03f5 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -12,7 +12,7 @@
 * within cong_avoid.
 * o Error correcting in remote HZ, therefore remote HZ will be keeped
 * on checking and updating.
- * o Handling calculation of One-Way-Delay (OWD) within rtt_sample, sicne
+ * o Handling calculation of One-Way-Delay (OWD) within rtt_sample, since
 * OWD have a similar meaning as RTT. Also correct the buggy formular.
 * o Handle reaction for Early Congestion Indication (ECI) within
 * pkts_acked, as mentioned within pseudo code.
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8b0d0167e44a..17388c7f49c4 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -73,7 +73,7 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
 tcp_advance_send_head(sk, skb);
 tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
 
- /* Don't override Nagle indefinately with F-RTO */
+ /* Don't override Nagle indefinitely with F-RTO */
 if (tp->frto_counter == 2)
 tp->frto_counter = 3;
 
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index dc7f43179c9a..05c3b6f0e8e1 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -20,7 +20,7 @@
 #define TCP_YEAH_DELTA 3 //log minimum fraction of cwnd to be removed on loss
 #define TCP_YEAH_EPSILON 1 //log maximum fraction to be removed on early decongestion
 #define TCP_YEAH_PHY 8 //lin maximum delta from base
-#define TCP_YEAH_RHO 16 //lin minumum number of consecutive rtt to consider competition on loss
+#define TCP_YEAH_RHO 16 //lin minimum number of consecutive rtt to consider competition on loss
 #define TCP_YEAH_ZETA 50 //lin minimum number of state switchs to reset reno_count
 
 #define TCP_SCALABLE_AI_CNT 100U
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 588f47af5faf..f87a8eb76f3b 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -189,7 +189,7 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
 * @sk: socket struct in question
 * @snum: port number to look up
 * @saddr_comp: AF-dependent comparison of bound local IP addresses
- * @hash2_nulladdr: AF-dependant hash value in secondary hash chains,
+ * @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 * with NULL address
 */
 int udp_lib_get_port(struct sock *sk, unsigned short snum,
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 13e0e7f659ff..d20a05e970d8 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -74,6 +74,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
 rt->rt_key_dst = fl4->daddr;
 rt->rt_key_src = fl4->saddr;
 rt->rt_tos = fl4->flowi4_tos;
+ rt->rt_route_iif = fl4->flowi4_iif;
 rt->rt_iif = fl4->flowi4_iif;
 rt->rt_oif = fl4->flowi4_oif;
 rt->rt_mark = fl4->flowi4_mark;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3daaf3c7703c..1493534116df 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1084,7 +1084,7 @@ static int ipv6_get_saddr_eval(struct net *net,
 case IPV6_SADDR_RULE_PRIVACY:
 {
 /* Rule 7: Prefer public address
- * Note: prefer temprary address if use_tempaddr >= 2
+ * Note: prefer temporary address if use_tempaddr >= 2
 */
 int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
 !!(dst->prefs & IPV6_PREFER_SRC_TMP) :
@@ -1968,7 +1968,7 @@ ok:
 * to the stored lifetime since we'll
 * be updating the timestamp below,
 * else we'll set it back to the
- * minumum.
+ * minimum.
 */
 if (prefered_lft != ifp->prefered_lft) {
 valid_lft = stored_lft;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 4b13d5d8890e..afcc7099f96d 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -1113,7 +1113,7 @@ static int __init inet6_init(void)
 /*
 * ipngwg API draft makes clear that the correct semantics
 * for TCP and UDP is to consider one TCP and UDP instance
- * in a host availiable by both INET and INET6 APIs and
+ * in a host available by both INET and INET6 APIs and
 * able to communicate via both network protocols.
 */
 
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 18208876aa8a..46cf7bea6769 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -779,7 +779,7 @@ slow_path:
 /* IF: it doesn't fit, use 'mtu' - the data space left */
 if (len > mtu)
 len = mtu;
- /* IF: we are not sending upto and including the packet end
+ /* IF: we are not sending up to and including the packet end
 then align the next start on an eight byte boundary */
 if (len < left) {
 len &= ~7;
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 39aaca2b4fd2..28bc1f644b7b 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -90,9 +90,18 @@ static int nf_ip6_reroute(struct sk_buff *skb,
 return 0;
 }
 
- static int nf_ip6_route(struct dst_entry **dst, struct flowi *fl)
+ static int nf_ip6_route(struct net *net, struct dst_entry **dst,
+ struct flowi *fl, bool strict)
 {
- *dst = ip6_route_output(&init_net, NULL, &fl->u.ip6);
+ static const struct ipv6_pinfo fake_pinfo;
+ static const struct inet_sock fake_sk = {
+ /* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
+ .sk.sk_bound_dev_if = 1,
+ .pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
+ };
+ const void *sk = strict ? &fake_sk : NULL;
+
+ *dst = ip6_route_output(net, sk, &fl->u.ip6);
 return (*dst)->error;
 }
 
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 0b2af9b85cec..5a1c6f27ffaf 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -2248,7 +2248,7 @@ static int __init ip6_tables_init(void)
 if (ret < 0)
 goto err1;
 
- /* Noone else will be downing sem now, so we won't sleep */
+ /* No one else will be downing sem now, so we won't sleep */
 ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
 if (ret < 0)
 goto err2;
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index 97c5b21b9674..cdd6d045e42e 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -71,7 +71,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
 if (reasm == NULL)
 return NF_STOLEN;
 
- /* error occured or not fragmented */
+ /* error occurred or not fragmented */
 if (reasm == skb)
 return NF_ACCEPT;
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 56fa12538d45..4f49e5dd41bb 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1622,6 +1622,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 opt_skb = skb_clone(skb, GFP_ATOMIC);
 
 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+ sock_rps_save_rxhash(sk, skb->rxhash);
 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
 goto reset;
 if (opt_skb)
@@ -1649,7 +1650,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 __kfree_skb(opt_skb);
 return 0;
 }
- }
+ } else
+ sock_rps_save_rxhash(sk, skb->rxhash);
 
 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
 goto reset;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d7037c006e13..15c37746845e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -505,6 +505,9 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 int rc;
 int is_udplite = IS_UDPLITE(sk);
 
+ if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
+ sock_rps_save_rxhash(sk, skb->rxhash);
+
 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 goto drop;
 
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 783c5f367d29..005b424494a0 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -165,7 +165,7 @@ struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
 
 irlap_apply_default_connection_parameters(self);
 
- self->N3 = 3; /* # connections attemts to try before giving up */
+ self->N3 = 3; /* # connections attempts to try before giving up */
 
 self->state = LAP_NDM;
 
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index d434c8880745..bb47021c9a55 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -708,7 +708,7 @@ static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event,
 
 self->frame_sent = TRUE;
 }
- /* Readjust our timer to accomodate devices
+ /* Readjust our timer to accommodate devices
 * doing faster or slower discovery than us...
 * Jean II */
 irlap_start_query_timer(self, info->S, info->s);
@@ -931,7 +931,7 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event,
 irlap_send_rr_frame(self, CMD_FRAME);
 
 /* The timer is set to half the normal timer to quickly
- * detect a failure to negociate the new connection
+ * detect a failure to negotiate the new connection
 * parameters. IrLAP 6.11.3.2, note 3.
 * Note that currently we don't process this failure
 * properly, as we should do a quick disconnect.
@@ -1052,7 +1052,7 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
 return -EPROTO;
 }
 
- /* Substract space used by this skb */
+ /* Subtract space used by this skb */
 self->bytes_left -= skb->len;
 #else /* CONFIG_IRDA_DYNAMIC_WINDOW */
 /* Window has been adjusted for the max packet
@@ -1808,7 +1808,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
 
 return -EPROTO; /* Try again later */
 }
- /* Substract space used by this skb */
+ /* Subtract space used by this skb */
 self->bytes_left -= skb->len;
 #else /* CONFIG_IRDA_DYNAMIC_WINDOW */
 /* Window has been adjusted for the max packet
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c index 688222cbf55b..8c004161a843 100644 --- a/net/irda/irlap_frame.c +++ b/net/irda/irlap_frame.c | |||
| @@ -848,7 +848,7 @@ void irlap_send_data_primary_poll(struct irlap_cb *self, struct sk_buff *skb) | |||
| 848 | * though IrLAP is currently sending the *last* frame of the | 848 | * though IrLAP is currently sending the *last* frame of the |
| 849 | * tx-window, the driver most likely has only just started | 849 | * tx-window, the driver most likely has only just started |
| 850 | * sending the *first* frame of the same tx-window. | 850 | * sending the *first* frame of the same tx-window. |
| 851 | * I.e. we are always at the very begining of or Tx window. | 851 | * I.e. we are always at the very beginning of or Tx window. |
| 852 | * Now, we are supposed to set the final timer from the end | 852 | * Now, we are supposed to set the final timer from the end |
| 853 | * of our tx-window to let the other peer reply. So, we need | 853 | * of our tx-window to let the other peer reply. So, we need |
| 854 | * to add extra time to compensate for the fact that we | 854 | * to add extra time to compensate for the fact that we |
diff --git a/net/irda/irlmp_event.c b/net/irda/irlmp_event.c index c1fb5db81042..9505a7d06f1a 100644 --- a/net/irda/irlmp_event.c +++ b/net/irda/irlmp_event.c | |||
| @@ -498,7 +498,7 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event, | |||
| 498 | switch (event) { | 498 | switch (event) { |
| 499 | #ifdef CONFIG_IRDA_ULTRA | 499 | #ifdef CONFIG_IRDA_ULTRA |
| 500 | case LM_UDATA_INDICATION: | 500 | case LM_UDATA_INDICATION: |
| 501 | /* This is most bizzare. Those packets are aka unreliable | 501 | /* This is most bizarre. Those packets are aka unreliable |
| 502 | * connected, aka IrLPT or SOCK_DGRAM/IRDAPROTO_UNITDATA. | 502 | * connected, aka IrLPT or SOCK_DGRAM/IRDAPROTO_UNITDATA. |
| 503 | * Why do we pass them as Ultra ??? Jean II */ | 503 | * Why do we pass them as Ultra ??? Jean II */ |
| 504 | irlmp_connless_data_indication(self, skb); | 504 | irlmp_connless_data_indication(self, skb); |
diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h index 0d82ff5aeff1..979ecb2435a7 100644 --- a/net/irda/irnet/irnet.h +++ b/net/irda/irnet/irnet.h | |||
| @@ -73,7 +73,7 @@ | |||
| 73 | * Infinite thanks to those brave souls for providing the infrastructure | 73 | * Infinite thanks to those brave souls for providing the infrastructure |
| 74 | * upon which IrNET is built. | 74 | * upon which IrNET is built. |
| 75 | * | 75 | * |
| 76 | * Thanks to all my collegues in HP for helping me. In particular, | 76 | * Thanks to all my colleagues in HP for helping me. In particular, |
| 77 | * thanks to Salil Pradhan and Bill Serra for W2k testing... | 77 | * thanks to Salil Pradhan and Bill Serra for W2k testing... |
| 78 | * Thanks to Luiz Magalhaes for irnetd and much testing... | 78 | * Thanks to Luiz Magalhaes for irnetd and much testing... |
| 79 | * | 79 | * |
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c index 849aaf0dabb5..9715e6e5900b 100644 --- a/net/irda/irqueue.c +++ b/net/irda/irqueue.c | |||
| @@ -40,7 +40,7 @@ | |||
| 40 | * o the hash function for ints is pathetic (but could be changed) | 40 | * o the hash function for ints is pathetic (but could be changed) |
| 41 | * o locking is sometime suspicious (especially during enumeration) | 41 | * o locking is sometime suspicious (especially during enumeration) |
| 42 | * o most users have only a few elements (== overhead) | 42 | * o most users have only a few elements (== overhead) |
| 43 | * o most users never use seach, so don't benefit from hashing | 43 | * o most users never use search, so don't benefit from hashing |
| 44 | * Problem already fixed : | 44 | * Problem already fixed : |
| 45 | * o not 64 bit compliant (most users do hashv = (int) self) | 45 | * o not 64 bit compliant (most users do hashv = (int) self) |
| 46 | * o hashbin_remove() is broken => use hashbin_remove_this() | 46 | * o hashbin_remove() is broken => use hashbin_remove_this() |
diff --git a/net/irda/irttp.c b/net/irda/irttp.c index f6054f9ccbe3..9d9af4606970 100644 --- a/net/irda/irttp.c +++ b/net/irda/irttp.c | |||
| @@ -1193,7 +1193,7 @@ EXPORT_SYMBOL(irttp_connect_request); | |||
| 1193 | /* | 1193 | /* |
| 1194 | * Function irttp_connect_confirm (handle, qos, skb) | 1194 | * Function irttp_connect_confirm (handle, qos, skb) |
| 1195 | * | 1195 | * |
| 1196 | * Sevice user confirms TSAP connection with peer. | 1196 | * Service user confirms TSAP connection with peer. |
| 1197 | * | 1197 | * |
| 1198 | */ | 1198 | */ |
| 1199 | static void irttp_connect_confirm(void *instance, void *sap, | 1199 | static void irttp_connect_confirm(void *instance, void *sap, |
diff --git a/net/irda/qos.c b/net/irda/qos.c index 2b00974e5bae..1b51bcf42394 100644 --- a/net/irda/qos.c +++ b/net/irda/qos.c | |||
| @@ -39,16 +39,16 @@ | |||
| 39 | #include <net/irda/irlap_frame.h> | 39 | #include <net/irda/irlap_frame.h> |
| 40 | 40 | ||
| 41 | /* | 41 | /* |
| 42 | * Maximum values of the baud rate we negociate with the other end. | 42 | * Maximum values of the baud rate we negotiate with the other end. |
| 43 | * Most often, you don't have to change that, because Linux-IrDA will | 43 | * Most often, you don't have to change that, because Linux-IrDA will |
| 44 | * use the maximum offered by the link layer, which usually works fine. | 44 | * use the maximum offered by the link layer, which usually works fine. |
| 45 | * In some very rare cases, you may want to limit it to lower speeds... | 45 | * In some very rare cases, you may want to limit it to lower speeds... |
| 46 | */ | 46 | */ |
| 47 | int sysctl_max_baud_rate = 16000000; | 47 | int sysctl_max_baud_rate = 16000000; |
| 48 | /* | 48 | /* |
| 49 | * Maximum value of the lap disconnect timer we negociate with the other end. | 49 | * Maximum value of the lap disconnect timer we negotiate with the other end. |
| 50 | * Most often, the value below represent the best compromise, but some user | 50 | * Most often, the value below represent the best compromise, but some user |
| 51 | * may want to keep the LAP alive longuer or shorter in case of link failure. | 51 | * may want to keep the LAP alive longer or shorter in case of link failure. |
| 52 | * Remember that the threshold time (early warning) is fixed to 3s... | 52 | * Remember that the threshold time (early warning) is fixed to 3s... |
| 53 | */ | 53 | */ |
| 54 | int sysctl_max_noreply_time = 12; | 54 | int sysctl_max_noreply_time = 12; |
| @@ -411,7 +411,7 @@ static void irlap_adjust_qos_settings(struct qos_info *qos) | |||
| 411 | * Fix tx data size according to user limits - Jean II | 411 | * Fix tx data size according to user limits - Jean II |
| 412 | */ | 412 | */ |
| 413 | if (qos->data_size.value > sysctl_max_tx_data_size) | 413 | if (qos->data_size.value > sysctl_max_tx_data_size) |
| 414 | /* Allow non discrete adjustement to avoid loosing capacity */ | 414 | /* Allow non discrete adjustement to avoid losing capacity */ |
| 415 | qos->data_size.value = sysctl_max_tx_data_size; | 415 | qos->data_size.value = sysctl_max_tx_data_size; |
| 416 | /* | 416 | /* |
| 417 | * Override Tx window if user request it. - Jean II | 417 | * Override Tx window if user request it. - Jean II |
diff --git a/net/irda/timer.c b/net/irda/timer.c index 0335ba0cc593..f418cb2ad49c 100644 --- a/net/irda/timer.c +++ b/net/irda/timer.c | |||
| @@ -59,7 +59,7 @@ void irlap_start_query_timer(struct irlap_cb *self, int S, int s) | |||
| 59 | * slot time, plus add some extra time to properly receive the last | 59 | * slot time, plus add some extra time to properly receive the last |
| 60 | * discovery packet (which is longer due to extra discovery info), | 60 | * discovery packet (which is longer due to extra discovery info), |
| 61 | * to avoid messing with for incomming connections requests and | 61 | * to avoid messing with for incomming connections requests and |
| 62 | * to accomodate devices that perform discovery slower than us. | 62 | * to accommodate devices that perform discovery slower than us. |
| 63 | * Jean II */ | 63 | * Jean II */ |
| 64 | timeout = ((sysctl_slot_timeout * HZ / 1000) * (S - s) | 64 | timeout = ((sysctl_slot_timeout * HZ / 1000) * (S - s) |
| 65 | + XIDEXTRA_TIMEOUT + SMALLBUSY_TIMEOUT); | 65 | + XIDEXTRA_TIMEOUT + SMALLBUSY_TIMEOUT); |
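The hunk above touches the comment explaining how the discovery query timeout is sized; as a rough worked example of that formula (all constant values below are assumptions for illustration, not the kernel's actual defaults):

/* Hypothetical numbers, only to show the shape of the timeout formula above. */
#include <stdio.h>

#define HZ                 100          /* ticks per second (assumed) */
#define SLOT_TIMEOUT_MS    90           /* assumed sysctl_slot_timeout */
#define XIDEXTRA_TIMEOUT   (HZ / 10)    /* assumed extra for the longer last XID frame */
#define SMALLBUSY_TIMEOUT  (HZ / 10)    /* assumed margin for busy media */

int main(void)
{
	int S = 8, s = 2;	/* total discovery slots, slots already elapsed */
	long timeout = ((long)SLOT_TIMEOUT_MS * HZ / 1000) * (S - s)
		       + XIDEXTRA_TIMEOUT + SMALLBUSY_TIMEOUT;

	/* (90 ms -> 9 ticks) * 6 remaining slots + 10 + 10 = 74 ticks */
	printf("query timer: %ld ticks\n", timeout);
	return 0;
}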
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 9637e45744fa..986b2a5e8769 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
| @@ -250,7 +250,7 @@ static struct device *af_iucv_dev; | |||
| 250 | * PRMDATA[0..6] socket data (max 7 bytes); | 250 | * PRMDATA[0..6] socket data (max 7 bytes); |
| 251 | * PRMDATA[7] socket data length value (len is 0xff - PRMDATA[7]) | 251 | * PRMDATA[7] socket data length value (len is 0xff - PRMDATA[7]) |
| 252 | * | 252 | * |
| 253 | * The socket data length is computed by substracting the socket data length | 253 | * The socket data length is computed by subtracting the socket data length |
| 254 | * value from 0xFF. | 254 | * value from 0xFF. |
| 255 | * If the socket data len is greater 7, then PRMDATA can be used for special | 255 | * If the socket data len is greater 7, then PRMDATA can be used for special |
| 256 | * notifications (see iucv_sock_shutdown); and further, | 256 | * notifications (see iucv_sock_shutdown); and further, |
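The PRMDATA layout described in this comment is easy to get backwards, so here is a small standalone sketch of the encoding; prmdata_encode/prmdata_decode are illustrative names, not part of af_iucv:

/* Standalone sketch (not kernel code) of the PRMDATA length encoding
 * described above: up to 7 data bytes live in prmdata[0..6] and
 * prmdata[7] = 0xff - len, so len = 0xff - prmdata[7].
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static void prmdata_encode(uint8_t prmdata[8], const void *buf, size_t len)
{
	assert(len <= 7);
	memcpy(prmdata, buf, len);
	prmdata[7] = 0xff - (uint8_t)len;
}

static size_t prmdata_decode(const uint8_t prmdata[8], void *buf)
{
	size_t len = 0xff - prmdata[7];

	if (len > 7)
		return 0;	/* len > 7 signals a special notification, no plain data */
	memcpy(buf, prmdata, len);
	return len;
}

int main(void)
{
	uint8_t prm[8], out[7];

	prmdata_encode(prm, "hello", 5);
	assert(prm[7] == 0xff - 5);
	assert(prmdata_decode(prm, out) == 5);
	return 0;
}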
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 1ee5dab3cfae..8f156bd86be7 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c | |||
| @@ -735,7 +735,7 @@ static void iucv_cleanup_queue(void) | |||
| 735 | struct iucv_irq_list *p, *n; | 735 | struct iucv_irq_list *p, *n; |
| 736 | 736 | ||
| 737 | /* | 737 | /* |
| 738 | * When a path is severed, the pathid can be reused immediatly | 738 | * When a path is severed, the pathid can be reused immediately |
| 739 | * on a iucv connect or a connection pending interrupt. Remove | 739 | * on a iucv connect or a connection pending interrupt. Remove |
| 740 | * all entries from the task queue that refer to a stale pathid | 740 | * all entries from the task queue that refer to a stale pathid |
| 741 | * (iucv_path_table[ix] == NULL). Only then do the iucv connect | 741 | * (iucv_path_table[ix] == NULL). Only then do the iucv connect |
| @@ -807,7 +807,7 @@ void iucv_unregister(struct iucv_handler *handler, int smp) | |||
| 807 | spin_lock_bh(&iucv_table_lock); | 807 | spin_lock_bh(&iucv_table_lock); |
| 808 | /* Remove handler from the iucv_handler_list. */ | 808 | /* Remove handler from the iucv_handler_list. */ |
| 809 | list_del_init(&handler->list); | 809 | list_del_init(&handler->list); |
| 810 | /* Sever all pathids still refering to the handler. */ | 810 | /* Sever all pathids still referring to the handler. */ |
| 811 | list_for_each_entry_safe(p, n, &handler->paths, list) { | 811 | list_for_each_entry_safe(p, n, &handler->paths, list) { |
| 812 | iucv_sever_pathid(p->pathid, NULL); | 812 | iucv_sever_pathid(p->pathid, NULL); |
| 813 | iucv_path_table[p->pathid] = NULL; | 813 | iucv_path_table[p->pathid] = NULL; |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index a40401701424..c18396c248d7 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
| @@ -97,7 +97,7 @@ struct ieee80211_bss { | |||
| 97 | size_t supp_rates_len; | 97 | size_t supp_rates_len; |
| 98 | 98 | ||
| 99 | /* | 99 | /* |
| 100 | * During assocation, we save an ERP value from a probe response so | 100 | * During association, we save an ERP value from a probe response so |
| 101 | * that we can feed ERP info to the driver when handling the | 101 | * that we can feed ERP info to the driver when handling the |
| 102 | * association completes. these fields probably won't be up-to-date | 102 | * association completes. these fields probably won't be up-to-date |
| 103 | * otherwise, you probably don't want to use them. | 103 | * otherwise, you probably don't want to use them. |
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 8d65b47d9837..336ca9d0c5c4 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c | |||
| @@ -628,7 +628,7 @@ void mesh_path_discard_frame(struct sk_buff *skb, | |||
| 628 | * | 628 | * |
| 629 | * @mpath: mesh path whose queue has to be freed | 629 | * @mpath: mesh path whose queue has to be freed |
| 630 | * | 630 | * |
| 631 | * Locking: the function must be called withing a rcu_read_lock region | 631 | * Locking: the function must be called within a rcu_read_lock region |
| 632 | */ | 632 | */ |
| 633 | void mesh_path_flush_pending(struct mesh_path *mpath) | 633 | void mesh_path_flush_pending(struct mesh_path *mpath) |
| 634 | { | 634 | { |
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index dbdebeda097f..c06aa3ac6b9d 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c | |||
| @@ -259,7 +259,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) | |||
| 259 | } | 259 | } |
| 260 | } | 260 | } |
| 261 | 261 | ||
| 262 | /* try to sample up to half of the availble rates during each interval */ | 262 | /* try to sample up to half of the available rates during each interval */ |
| 263 | mi->sample_count *= 4; | 263 | mi->sample_count *= 4; |
| 264 | 264 | ||
| 265 | cur_prob = 0; | 265 | cur_prob = 0; |
diff --git a/net/mac80211/rc80211_pid.h b/net/mac80211/rc80211_pid.h index 6510f8ee738e..19111c7bf454 100644 --- a/net/mac80211/rc80211_pid.h +++ b/net/mac80211/rc80211_pid.h | |||
| @@ -77,7 +77,7 @@ union rc_pid_event_data { | |||
| 77 | }; | 77 | }; |
| 78 | 78 | ||
| 79 | struct rc_pid_event { | 79 | struct rc_pid_event { |
| 80 | /* The time when the event occured */ | 80 | /* The time when the event occurred */ |
| 81 | unsigned long timestamp; | 81 | unsigned long timestamp; |
| 82 | 82 | ||
| 83 | /* Event ID number */ | 83 | /* Event ID number */ |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index aa5cc37b4921..c5d4530d8284 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
| @@ -381,7 +381,7 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) | |||
| 381 | * specs were sane enough this time around to require padding each A-MSDU | 381 | * specs were sane enough this time around to require padding each A-MSDU |
| 382 | * subframe to a length that is a multiple of four. | 382 | * subframe to a length that is a multiple of four. |
| 383 | * | 383 | * |
| 384 | * Padding like Atheros hardware adds which is inbetween the 802.11 header and | 384 | * Padding like Atheros hardware adds which is between the 802.11 header and |
| 385 | * the payload is not supported, the driver is required to move the 802.11 | 385 | * the payload is not supported, the driver is required to move the 802.11 |
| 386 | * header to be directly in front of the payload in that case. | 386 | * header to be directly in front of the payload in that case. |
| 387 | */ | 387 | */ |
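The padding requirement discussed in that comment is the usual round-up to a 4-byte boundary; a minimal sketch, with an illustrative helper name:

/* Pad an A-MSDU subframe length to the next multiple of four,
 * as required by the comment above (sketch, not mac80211 code).
 */
static inline unsigned int amsdu_subframe_pad(unsigned int len)
{
	return (len + 3) & ~3u;	/* e.g. 67 -> 68, 64 stays 64 */
}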
| @@ -2541,7 +2541,6 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx) | |||
| 2541 | * same TID from the same station | 2541 | * same TID from the same station |
| 2542 | */ | 2542 | */ |
| 2543 | rx->skb = skb; | 2543 | rx->skb = skb; |
| 2544 | rx->flags = 0; | ||
| 2545 | 2544 | ||
| 2546 | CALL_RXH(ieee80211_rx_h_decrypt) | 2545 | CALL_RXH(ieee80211_rx_h_decrypt) |
| 2547 | CALL_RXH(ieee80211_rx_h_check_more_data) | 2546 | CALL_RXH(ieee80211_rx_h_check_more_data) |
| @@ -2612,6 +2611,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) | |||
| 2612 | .sdata = sta->sdata, | 2611 | .sdata = sta->sdata, |
| 2613 | .local = sta->local, | 2612 | .local = sta->local, |
| 2614 | .queue = tid, | 2613 | .queue = tid, |
| 2614 | .flags = 0, | ||
| 2615 | }; | 2615 | }; |
| 2616 | struct tid_ampdu_rx *tid_agg_rx; | 2616 | struct tid_ampdu_rx *tid_agg_rx; |
| 2617 | 2617 | ||
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index d0311a322ddd..13e8c30adf01 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
| @@ -47,9 +47,9 @@ | |||
| 47 | * Station entries are added by mac80211 when you establish a link with a | 47 | * Station entries are added by mac80211 when you establish a link with a |
| 48 | * peer. This means different things for the different type of interfaces | 48 | * peer. This means different things for the different type of interfaces |
| 49 | * we support. For a regular station this mean we add the AP sta when we | 49 | * we support. For a regular station this mean we add the AP sta when we |
| 50 | * receive an assocation response from the AP. For IBSS this occurs when | 50 | * receive an association response from the AP. For IBSS this occurs when |
| 51 | * get to know about a peer on the same IBSS. For WDS we add the sta for | 51 | * get to know about a peer on the same IBSS. For WDS we add the sta for |
| 52 | * the peer imediately upon device open. When using AP mode we add stations | 52 | * the peer immediately upon device open. When using AP mode we add stations |
| 53 | * for each respective station upon request from userspace through nl80211. | 53 | * for each respective station upon request from userspace through nl80211. |
| 54 | * | 54 | * |
| 55 | * In order to remove a STA info structure, various sta_info_destroy_*() | 55 | * In order to remove a STA info structure, various sta_info_destroy_*() |
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 57681149e37f..b2f95966c7f4 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
| @@ -173,7 +173,7 @@ struct sta_ampdu_mlme { | |||
| 173 | /** | 173 | /** |
| 174 | * enum plink_state - state of a mesh peer link finite state machine | 174 | * enum plink_state - state of a mesh peer link finite state machine |
| 175 | * | 175 | * |
| 176 | * @PLINK_LISTEN: initial state, considered the implicit state of non existant | 176 | * @PLINK_LISTEN: initial state, considered the implicit state of non existent |
| 177 | * mesh peer links | 177 | * mesh peer links |
| 178 | * @PLINK_OPN_SNT: mesh plink open frame has been sent to this mesh peer | 178 | * @PLINK_OPN_SNT: mesh plink open frame has been sent to this mesh peer |
| 179 | * @PLINK_OPN_RCVD: mesh plink open frame has been received from this mesh peer | 179 | * @PLINK_OPN_RCVD: mesh plink open frame has been received from this mesh peer |
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index c3f988aa1152..32bff6d86cb2 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
| @@ -652,7 +652,6 @@ comment "Xtables matches" | |||
| 652 | config NETFILTER_XT_MATCH_ADDRTYPE | 652 | config NETFILTER_XT_MATCH_ADDRTYPE |
| 653 | tristate '"addrtype" address type match support' | 653 | tristate '"addrtype" address type match support' |
| 654 | depends on NETFILTER_ADVANCED | 654 | depends on NETFILTER_ADVANCED |
| 655 | depends on (IPV6 || IPV6=n) | ||
| 656 | ---help--- | 655 | ---help--- |
| 657 | This option allows you to match what routing thinks of an address, | 656 | This option allows you to match what routing thinks of an address, |
| 658 | eg. UNICAST, LOCAL, BROADCAST, ... | 657 | eg. UNICAST, LOCAL, BROADCAST, ... |
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c index bca96990218d..a113ff066928 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ip.c +++ b/net/netfilter/ipset/ip_set_bitmap_ip.c | |||
| @@ -338,8 +338,7 @@ bitmap_ip_head(struct ip_set *set, struct sk_buff *skb) | |||
| 338 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)); | 338 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)); |
| 339 | if (map->netmask != 32) | 339 | if (map->netmask != 32) |
| 340 | NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask); | 340 | NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask); |
| 341 | NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, | 341 | NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); |
| 342 | htonl(atomic_read(&set->ref) - 1)); | ||
| 343 | NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, | 342 | NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, |
| 344 | htonl(sizeof(*map) + map->memsize)); | 343 | htonl(sizeof(*map) + map->memsize)); |
| 345 | if (with_timeout(map->timeout)) | 344 | if (with_timeout(map->timeout)) |
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c index 5e790172deff..00a33242e90c 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c | |||
| @@ -434,8 +434,7 @@ bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb) | |||
| 434 | goto nla_put_failure; | 434 | goto nla_put_failure; |
| 435 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip)); | 435 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip)); |
| 436 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)); | 436 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)); |
| 437 | NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, | 437 | NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); |
| 438 | htonl(atomic_read(&set->ref) - 1)); | ||
| 439 | NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, | 438 | NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, |
| 440 | htonl(sizeof(*map) | 439 | htonl(sizeof(*map) |
| 441 | + (map->last_ip - map->first_ip + 1) * map->dsize)); | 440 | + (map->last_ip - map->first_ip + 1) * map->dsize)); |
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c index 165f09b1a9cb..6b38eb8f6ed8 100644 --- a/net/netfilter/ipset/ip_set_bitmap_port.c +++ b/net/netfilter/ipset/ip_set_bitmap_port.c | |||
| @@ -320,8 +320,7 @@ bitmap_port_head(struct ip_set *set, struct sk_buff *skb) | |||
| 320 | goto nla_put_failure; | 320 | goto nla_put_failure; |
| 321 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port)); | 321 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port)); |
| 322 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port)); | 322 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port)); |
| 323 | NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, | 323 | NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); |
| 324 | htonl(atomic_read(&set->ref) - 1)); | ||
| 325 | NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, | 324 | NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, |
| 326 | htonl(sizeof(*map) + map->memsize)); | 325 | htonl(sizeof(*map) + map->memsize)); |
| 327 | if (with_timeout(map->timeout)) | 326 | if (with_timeout(map->timeout)) |
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index d6b48230a540..9152e69a162d 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | 26 | ||
| 27 | static LIST_HEAD(ip_set_type_list); /* all registered set types */ | 27 | static LIST_HEAD(ip_set_type_list); /* all registered set types */ |
| 28 | static DEFINE_MUTEX(ip_set_type_mutex); /* protects ip_set_type_list */ | 28 | static DEFINE_MUTEX(ip_set_type_mutex); /* protects ip_set_type_list */ |
| 29 | static DEFINE_RWLOCK(ip_set_ref_lock); /* protects the set refs */ | ||
| 29 | 30 | ||
| 30 | static struct ip_set **ip_set_list; /* all individual sets */ | 31 | static struct ip_set **ip_set_list; /* all individual sets */ |
| 31 | static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */ | 32 | static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */ |
| @@ -301,13 +302,18 @@ EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6); | |||
| 301 | static inline void | 302 | static inline void |
| 302 | __ip_set_get(ip_set_id_t index) | 303 | __ip_set_get(ip_set_id_t index) |
| 303 | { | 304 | { |
| 304 | atomic_inc(&ip_set_list[index]->ref); | 305 | write_lock_bh(&ip_set_ref_lock); |
| 306 | ip_set_list[index]->ref++; | ||
| 307 | write_unlock_bh(&ip_set_ref_lock); | ||
| 305 | } | 308 | } |
| 306 | 309 | ||
| 307 | static inline void | 310 | static inline void |
| 308 | __ip_set_put(ip_set_id_t index) | 311 | __ip_set_put(ip_set_id_t index) |
| 309 | { | 312 | { |
| 310 | atomic_dec(&ip_set_list[index]->ref); | 313 | write_lock_bh(&ip_set_ref_lock); |
| 314 | BUG_ON(ip_set_list[index]->ref == 0); | ||
| 315 | ip_set_list[index]->ref--; | ||
| 316 | write_unlock_bh(&ip_set_ref_lock); | ||
| 311 | } | 317 | } |
| 312 | 318 | ||
| 313 | /* | 319 | /* |
| @@ -324,7 +330,7 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb, | |||
| 324 | struct ip_set *set = ip_set_list[index]; | 330 | struct ip_set *set = ip_set_list[index]; |
| 325 | int ret = 0; | 331 | int ret = 0; |
| 326 | 332 | ||
| 327 | BUG_ON(set == NULL || atomic_read(&set->ref) == 0); | 333 | BUG_ON(set == NULL); |
| 328 | pr_debug("set %s, index %u\n", set->name, index); | 334 | pr_debug("set %s, index %u\n", set->name, index); |
| 329 | 335 | ||
| 330 | if (dim < set->type->dimension || | 336 | if (dim < set->type->dimension || |
| @@ -356,7 +362,7 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb, | |||
| 356 | struct ip_set *set = ip_set_list[index]; | 362 | struct ip_set *set = ip_set_list[index]; |
| 357 | int ret; | 363 | int ret; |
| 358 | 364 | ||
| 359 | BUG_ON(set == NULL || atomic_read(&set->ref) == 0); | 365 | BUG_ON(set == NULL); |
| 360 | pr_debug("set %s, index %u\n", set->name, index); | 366 | pr_debug("set %s, index %u\n", set->name, index); |
| 361 | 367 | ||
| 362 | if (dim < set->type->dimension || | 368 | if (dim < set->type->dimension || |
| @@ -378,7 +384,7 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb, | |||
| 378 | struct ip_set *set = ip_set_list[index]; | 384 | struct ip_set *set = ip_set_list[index]; |
| 379 | int ret = 0; | 385 | int ret = 0; |
| 380 | 386 | ||
| 381 | BUG_ON(set == NULL || atomic_read(&set->ref) == 0); | 387 | BUG_ON(set == NULL); |
| 382 | pr_debug("set %s, index %u\n", set->name, index); | 388 | pr_debug("set %s, index %u\n", set->name, index); |
| 383 | 389 | ||
| 384 | if (dim < set->type->dimension || | 390 | if (dim < set->type->dimension || |
| @@ -397,7 +403,6 @@ EXPORT_SYMBOL_GPL(ip_set_del); | |||
| 397 | * Find set by name, reference it once. The reference makes sure the | 403 | * Find set by name, reference it once. The reference makes sure the |
| 398 | * thing pointed to, does not go away under our feet. | 404 | * thing pointed to, does not go away under our feet. |
| 399 | * | 405 | * |
| 400 | * The nfnl mutex must already be activated. | ||
| 401 | */ | 406 | */ |
| 402 | ip_set_id_t | 407 | ip_set_id_t |
| 403 | ip_set_get_byname(const char *name, struct ip_set **set) | 408 | ip_set_get_byname(const char *name, struct ip_set **set) |
| @@ -423,15 +428,12 @@ EXPORT_SYMBOL_GPL(ip_set_get_byname); | |||
| 423 | * reference count by 1. The caller shall not assume the index | 428 | * reference count by 1. The caller shall not assume the index |
| 424 | * to be valid, after calling this function. | 429 | * to be valid, after calling this function. |
| 425 | * | 430 | * |
| 426 | * The nfnl mutex must already be activated. | ||
| 427 | */ | 431 | */ |
| 428 | void | 432 | void |
| 429 | ip_set_put_byindex(ip_set_id_t index) | 433 | ip_set_put_byindex(ip_set_id_t index) |
| 430 | { | 434 | { |
| 431 | if (ip_set_list[index] != NULL) { | 435 | if (ip_set_list[index] != NULL) |
| 432 | BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0); | ||
| 433 | __ip_set_put(index); | 436 | __ip_set_put(index); |
| 434 | } | ||
| 435 | } | 437 | } |
| 436 | EXPORT_SYMBOL_GPL(ip_set_put_byindex); | 438 | EXPORT_SYMBOL_GPL(ip_set_put_byindex); |
| 437 | 439 | ||
| @@ -441,7 +443,6 @@ EXPORT_SYMBOL_GPL(ip_set_put_byindex); | |||
| 441 | * can't be destroyed. The set cannot be renamed due to | 443 | * can't be destroyed. The set cannot be renamed due to |
| 442 | * the referencing either. | 444 | * the referencing either. |
| 443 | * | 445 | * |
| 444 | * The nfnl mutex must already be activated. | ||
| 445 | */ | 446 | */ |
| 446 | const char * | 447 | const char * |
| 447 | ip_set_name_byindex(ip_set_id_t index) | 448 | ip_set_name_byindex(ip_set_id_t index) |
| @@ -449,7 +450,7 @@ ip_set_name_byindex(ip_set_id_t index) | |||
| 449 | const struct ip_set *set = ip_set_list[index]; | 450 | const struct ip_set *set = ip_set_list[index]; |
| 450 | 451 | ||
| 451 | BUG_ON(set == NULL); | 452 | BUG_ON(set == NULL); |
| 452 | BUG_ON(atomic_read(&set->ref) == 0); | 453 | BUG_ON(set->ref == 0); |
| 453 | 454 | ||
| 454 | /* Referenced, so it's safe */ | 455 | /* Referenced, so it's safe */ |
| 455 | return set->name; | 456 | return set->name; |
| @@ -515,10 +516,7 @@ void | |||
| 515 | ip_set_nfnl_put(ip_set_id_t index) | 516 | ip_set_nfnl_put(ip_set_id_t index) |
| 516 | { | 517 | { |
| 517 | nfnl_lock(); | 518 | nfnl_lock(); |
| 518 | if (ip_set_list[index] != NULL) { | 519 | ip_set_put_byindex(index); |
| 519 | BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0); | ||
| 520 | __ip_set_put(index); | ||
| 521 | } | ||
| 522 | nfnl_unlock(); | 520 | nfnl_unlock(); |
| 523 | } | 521 | } |
| 524 | EXPORT_SYMBOL_GPL(ip_set_nfnl_put); | 522 | EXPORT_SYMBOL_GPL(ip_set_nfnl_put); |
| @@ -526,7 +524,7 @@ EXPORT_SYMBOL_GPL(ip_set_nfnl_put); | |||
| 526 | /* | 524 | /* |
| 527 | * Communication protocol with userspace over netlink. | 525 | * Communication protocol with userspace over netlink. |
| 528 | * | 526 | * |
| 529 | * We already locked by nfnl_lock. | 527 | * The commands are serialized by the nfnl mutex. |
| 530 | */ | 528 | */ |
| 531 | 529 | ||
| 532 | static inline bool | 530 | static inline bool |
| @@ -657,7 +655,6 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb, | |||
| 657 | return -ENOMEM; | 655 | return -ENOMEM; |
| 658 | rwlock_init(&set->lock); | 656 | rwlock_init(&set->lock); |
| 659 | strlcpy(set->name, name, IPSET_MAXNAMELEN); | 657 | strlcpy(set->name, name, IPSET_MAXNAMELEN); |
| 660 | atomic_set(&set->ref, 0); | ||
| 661 | set->family = family; | 658 | set->family = family; |
| 662 | 659 | ||
| 663 | /* | 660 | /* |
| @@ -690,8 +687,8 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb, | |||
| 690 | 687 | ||
| 691 | /* | 688 | /* |
| 692 | * Here, we have a valid, constructed set and we are protected | 689 | * Here, we have a valid, constructed set and we are protected |
| 693 | * by nfnl_lock. Find the first free index in ip_set_list and | 690 | * by the nfnl mutex. Find the first free index in ip_set_list |
| 694 | * check clashing. | 691 | * and check clashing. |
| 695 | */ | 692 | */ |
| 696 | if ((ret = find_free_id(set->name, &index, &clash)) != 0) { | 693 | if ((ret = find_free_id(set->name, &index, &clash)) != 0) { |
| 697 | /* If this is the same set and requested, ignore error */ | 694 | /* If this is the same set and requested, ignore error */ |
| @@ -751,31 +748,51 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb, | |||
| 751 | const struct nlattr * const attr[]) | 748 | const struct nlattr * const attr[]) |
| 752 | { | 749 | { |
| 753 | ip_set_id_t i; | 750 | ip_set_id_t i; |
| 751 | int ret = 0; | ||
| 754 | 752 | ||
| 755 | if (unlikely(protocol_failed(attr))) | 753 | if (unlikely(protocol_failed(attr))) |
| 756 | return -IPSET_ERR_PROTOCOL; | 754 | return -IPSET_ERR_PROTOCOL; |
| 757 | 755 | ||
| 758 | /* References are protected by the nfnl mutex */ | 756 | /* Commands are serialized and references are |
| 757 | * protected by the ip_set_ref_lock. | ||
| 758 | * External systems (i.e. xt_set) must call | ||
| 759 | * ip_set_put|get_nfnl_* functions, that way we | ||
| 760 | * can safely check references here. | ||
| 761 | * | ||
| 762 | * list:set timer can only decrement the reference | ||
| 763 | * counter, so if it's already zero, we can proceed | ||
| 764 | * without holding the lock. | ||
| 765 | */ | ||
| 766 | read_lock_bh(&ip_set_ref_lock); | ||
| 759 | if (!attr[IPSET_ATTR_SETNAME]) { | 767 | if (!attr[IPSET_ATTR_SETNAME]) { |
| 760 | for (i = 0; i < ip_set_max; i++) { | 768 | for (i = 0; i < ip_set_max; i++) { |
| 761 | if (ip_set_list[i] != NULL && | 769 | if (ip_set_list[i] != NULL && ip_set_list[i]->ref) { |
| 762 | (atomic_read(&ip_set_list[i]->ref))) | 770 | ret = IPSET_ERR_BUSY; |
| 763 | return -IPSET_ERR_BUSY; | 771 | goto out; |
| 772 | } | ||
| 764 | } | 773 | } |
| 774 | read_unlock_bh(&ip_set_ref_lock); | ||
| 765 | for (i = 0; i < ip_set_max; i++) { | 775 | for (i = 0; i < ip_set_max; i++) { |
| 766 | if (ip_set_list[i] != NULL) | 776 | if (ip_set_list[i] != NULL) |
| 767 | ip_set_destroy_set(i); | 777 | ip_set_destroy_set(i); |
| 768 | } | 778 | } |
| 769 | } else { | 779 | } else { |
| 770 | i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME])); | 780 | i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME])); |
| 771 | if (i == IPSET_INVALID_ID) | 781 | if (i == IPSET_INVALID_ID) { |
| 772 | return -ENOENT; | 782 | ret = -ENOENT; |
| 773 | else if (atomic_read(&ip_set_list[i]->ref)) | 783 | goto out; |
| 774 | return -IPSET_ERR_BUSY; | 784 | } else if (ip_set_list[i]->ref) { |
| 785 | ret = -IPSET_ERR_BUSY; | ||
| 786 | goto out; | ||
| 787 | } | ||
| 788 | read_unlock_bh(&ip_set_ref_lock); | ||
| 775 | 789 | ||
| 776 | ip_set_destroy_set(i); | 790 | ip_set_destroy_set(i); |
| 777 | } | 791 | } |
| 778 | return 0; | 792 | return 0; |
| 793 | out: | ||
| 794 | read_unlock_bh(&ip_set_ref_lock); | ||
| 795 | return ret; | ||
| 779 | } | 796 | } |
| 780 | 797 | ||
| 781 | /* Flush sets */ | 798 | /* Flush sets */ |
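A compact sketch of the destroy flow above, under the same assumption the new comment spells out (destroy commands are serialized elsewhere, so a reference count seen as zero under the read lock cannot rise again before teardown); all names here are illustrative:

/* Standalone sketch of "check references, then destroy" (not ipset code). */
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_rwlock_t ref_lock = PTHREAD_RWLOCK_INITIALIZER;

struct set { unsigned int ref; };

static int destroy_all(struct set **list, size_t n)
{
	size_t i;

	pthread_rwlock_rdlock(&ref_lock);
	for (i = 0; i < n; i++) {
		if (list[i] && list[i]->ref) {
			pthread_rwlock_unlock(&ref_lock);
			return -EBUSY;	/* the patch returns -IPSET_ERR_BUSY */
		}
	}
	pthread_rwlock_unlock(&ref_lock);

	for (i = 0; i < n; i++) {	/* nothing references us: tear down */
		free(list[i]);
		list[i] = NULL;
	}
	return 0;
}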
| @@ -834,6 +851,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb, | |||
| 834 | struct ip_set *set; | 851 | struct ip_set *set; |
| 835 | const char *name2; | 852 | const char *name2; |
| 836 | ip_set_id_t i; | 853 | ip_set_id_t i; |
| 854 | int ret = 0; | ||
| 837 | 855 | ||
| 838 | if (unlikely(protocol_failed(attr) || | 856 | if (unlikely(protocol_failed(attr) || |
| 839 | attr[IPSET_ATTR_SETNAME] == NULL || | 857 | attr[IPSET_ATTR_SETNAME] == NULL || |
| @@ -843,25 +861,33 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb, | |||
| 843 | set = find_set(nla_data(attr[IPSET_ATTR_SETNAME])); | 861 | set = find_set(nla_data(attr[IPSET_ATTR_SETNAME])); |
| 844 | if (set == NULL) | 862 | if (set == NULL) |
| 845 | return -ENOENT; | 863 | return -ENOENT; |
| 846 | if (atomic_read(&set->ref) != 0) | 864 | |
| 847 | return -IPSET_ERR_REFERENCED; | 865 | read_lock_bh(&ip_set_ref_lock); |
| 866 | if (set->ref != 0) { | ||
| 867 | ret = -IPSET_ERR_REFERENCED; | ||
| 868 | goto out; | ||
| 869 | } | ||
| 848 | 870 | ||
| 849 | name2 = nla_data(attr[IPSET_ATTR_SETNAME2]); | 871 | name2 = nla_data(attr[IPSET_ATTR_SETNAME2]); |
| 850 | for (i = 0; i < ip_set_max; i++) { | 872 | for (i = 0; i < ip_set_max; i++) { |
| 851 | if (ip_set_list[i] != NULL && | 873 | if (ip_set_list[i] != NULL && |
| 852 | STREQ(ip_set_list[i]->name, name2)) | 874 | STREQ(ip_set_list[i]->name, name2)) { |
| 853 | return -IPSET_ERR_EXIST_SETNAME2; | 875 | ret = -IPSET_ERR_EXIST_SETNAME2; |
| 876 | goto out; | ||
| 877 | } | ||
| 854 | } | 878 | } |
| 855 | strncpy(set->name, name2, IPSET_MAXNAMELEN); | 879 | strncpy(set->name, name2, IPSET_MAXNAMELEN); |
| 856 | 880 | ||
| 857 | return 0; | 881 | out: |
| 882 | read_unlock_bh(&ip_set_ref_lock); | ||
| 883 | return ret; | ||
| 858 | } | 884 | } |
| 859 | 885 | ||
| 860 | /* Swap two sets so that name/index points to the other. | 886 | /* Swap two sets so that name/index points to the other. |
| 861 | * References and set names are also swapped. | 887 | * References and set names are also swapped. |
| 862 | * | 888 | * |
| 863 | * We are protected by the nfnl mutex and references are | 889 | * The commands are serialized by the nfnl mutex and references are |
| 864 | * manipulated only by holding the mutex. The kernel interfaces | 890 | * protected by the ip_set_ref_lock. The kernel interfaces |
| 865 | * do not hold the mutex but the pointer settings are atomic | 891 | * do not hold the mutex but the pointer settings are atomic |
| 866 | * so the ip_set_list always contains valid pointers to the sets. | 892 | * so the ip_set_list always contains valid pointers to the sets. |
| 867 | */ | 893 | */ |
| @@ -874,7 +900,6 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb, | |||
| 874 | struct ip_set *from, *to; | 900 | struct ip_set *from, *to; |
| 875 | ip_set_id_t from_id, to_id; | 901 | ip_set_id_t from_id, to_id; |
| 876 | char from_name[IPSET_MAXNAMELEN]; | 902 | char from_name[IPSET_MAXNAMELEN]; |
| 877 | u32 from_ref; | ||
| 878 | 903 | ||
| 879 | if (unlikely(protocol_failed(attr) || | 904 | if (unlikely(protocol_failed(attr) || |
| 880 | attr[IPSET_ATTR_SETNAME] == NULL || | 905 | attr[IPSET_ATTR_SETNAME] == NULL || |
| @@ -893,23 +918,21 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb, | |||
| 893 | to = ip_set_list[to_id]; | 918 | to = ip_set_list[to_id]; |
| 894 | 919 | ||
| 895 | /* Features must not change. | 920 | /* Features must not change. |
| 896 | * Not an artifical restriction anymore, as we must prevent | 921 | * Not an artificial restriction anymore, as we must prevent |
| 897 | * possible loops created by swapping in setlist type of sets. */ | 922 | * possible loops created by swapping in setlist type of sets. */ |
| 898 | if (!(from->type->features == to->type->features && | 923 | if (!(from->type->features == to->type->features && |
| 899 | from->type->family == to->type->family)) | 924 | from->type->family == to->type->family)) |
| 900 | return -IPSET_ERR_TYPE_MISMATCH; | 925 | return -IPSET_ERR_TYPE_MISMATCH; |
| 901 | 926 | ||
| 902 | /* No magic here: ref munging protected by the nfnl_lock */ | ||
| 903 | strncpy(from_name, from->name, IPSET_MAXNAMELEN); | 927 | strncpy(from_name, from->name, IPSET_MAXNAMELEN); |
| 904 | from_ref = atomic_read(&from->ref); | ||
| 905 | |||
| 906 | strncpy(from->name, to->name, IPSET_MAXNAMELEN); | 928 | strncpy(from->name, to->name, IPSET_MAXNAMELEN); |
| 907 | atomic_set(&from->ref, atomic_read(&to->ref)); | ||
| 908 | strncpy(to->name, from_name, IPSET_MAXNAMELEN); | 929 | strncpy(to->name, from_name, IPSET_MAXNAMELEN); |
| 909 | atomic_set(&to->ref, from_ref); | ||
| 910 | 930 | ||
| 931 | write_lock_bh(&ip_set_ref_lock); | ||
| 932 | swap(from->ref, to->ref); | ||
| 911 | ip_set_list[from_id] = to; | 933 | ip_set_list[from_id] = to; |
| 912 | ip_set_list[to_id] = from; | 934 | ip_set_list[to_id] = from; |
| 935 | write_unlock_bh(&ip_set_ref_lock); | ||
| 913 | 936 | ||
| 914 | return 0; | 937 | return 0; |
| 915 | } | 938 | } |
| @@ -926,7 +949,7 @@ ip_set_dump_done(struct netlink_callback *cb) | |||
| 926 | { | 949 | { |
| 927 | if (cb->args[2]) { | 950 | if (cb->args[2]) { |
| 928 | pr_debug("release set %s\n", ip_set_list[cb->args[1]]->name); | 951 | pr_debug("release set %s\n", ip_set_list[cb->args[1]]->name); |
| 929 | __ip_set_put((ip_set_id_t) cb->args[1]); | 952 | ip_set_put_byindex((ip_set_id_t) cb->args[1]); |
| 930 | } | 953 | } |
| 931 | return 0; | 954 | return 0; |
| 932 | } | 955 | } |
| @@ -1068,7 +1091,7 @@ release_refcount: | |||
| 1068 | /* If there was an error or set is done, release set */ | 1091 | /* If there was an error or set is done, release set */ |
| 1069 | if (ret || !cb->args[2]) { | 1092 | if (ret || !cb->args[2]) { |
| 1070 | pr_debug("release set %s\n", ip_set_list[index]->name); | 1093 | pr_debug("release set %s\n", ip_set_list[index]->name); |
| 1071 | __ip_set_put(index); | 1094 | ip_set_put_byindex(index); |
| 1072 | } | 1095 | } |
| 1073 | 1096 | ||
| 1074 | /* If we dump all sets, continue with dumping last ones */ | 1097 | /* If we dump all sets, continue with dumping last ones */ |
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index a47c32982f06..e9159e99fc4b 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ b/net/netfilter/ipset/ip_set_list_set.c | |||
| @@ -43,14 +43,19 @@ struct list_set { | |||
| 43 | static inline struct set_elem * | 43 | static inline struct set_elem * |
| 44 | list_set_elem(const struct list_set *map, u32 id) | 44 | list_set_elem(const struct list_set *map, u32 id) |
| 45 | { | 45 | { |
| 46 | return (struct set_elem *)((char *)map->members + id * map->dsize); | 46 | return (struct set_elem *)((void *)map->members + id * map->dsize); |
| 47 | } | ||
| 48 | |||
| 49 | static inline struct set_telem * | ||
| 50 | list_set_telem(const struct list_set *map, u32 id) | ||
| 51 | { | ||
| 52 | return (struct set_telem *)((void *)map->members + id * map->dsize); | ||
| 47 | } | 53 | } |
| 48 | 54 | ||
| 49 | static inline bool | 55 | static inline bool |
| 50 | list_set_timeout(const struct list_set *map, u32 id) | 56 | list_set_timeout(const struct list_set *map, u32 id) |
| 51 | { | 57 | { |
| 52 | const struct set_telem *elem = | 58 | const struct set_telem *elem = list_set_telem(map, id); |
| 53 | (const struct set_telem *) list_set_elem(map, id); | ||
| 54 | 59 | ||
| 55 | return ip_set_timeout_test(elem->timeout); | 60 | return ip_set_timeout_test(elem->timeout); |
| 56 | } | 61 | } |
| @@ -58,19 +63,11 @@ list_set_timeout(const struct list_set *map, u32 id) | |||
| 58 | static inline bool | 63 | static inline bool |
| 59 | list_set_expired(const struct list_set *map, u32 id) | 64 | list_set_expired(const struct list_set *map, u32 id) |
| 60 | { | 65 | { |
| 61 | const struct set_telem *elem = | 66 | const struct set_telem *elem = list_set_telem(map, id); |
| 62 | (const struct set_telem *) list_set_elem(map, id); | ||
| 63 | 67 | ||
| 64 | return ip_set_timeout_expired(elem->timeout); | 68 | return ip_set_timeout_expired(elem->timeout); |
| 65 | } | 69 | } |
| 66 | 70 | ||
| 67 | static inline int | ||
| 68 | list_set_exist(const struct set_telem *elem) | ||
| 69 | { | ||
| 70 | return elem->id != IPSET_INVALID_ID && | ||
| 71 | !ip_set_timeout_expired(elem->timeout); | ||
| 72 | } | ||
| 73 | |||
| 74 | /* Set list without and with timeout */ | 71 | /* Set list without and with timeout */ |
| 75 | 72 | ||
| 76 | static int | 73 | static int |
| @@ -146,11 +143,11 @@ list_elem_tadd(struct list_set *map, u32 i, ip_set_id_t id, | |||
| 146 | struct set_telem *e; | 143 | struct set_telem *e; |
| 147 | 144 | ||
| 148 | for (; i < map->size; i++) { | 145 | for (; i < map->size; i++) { |
| 149 | e = (struct set_telem *)list_set_elem(map, i); | 146 | e = list_set_telem(map, i); |
| 150 | swap(e->id, id); | 147 | swap(e->id, id); |
| 148 | swap(e->timeout, timeout); | ||
| 151 | if (e->id == IPSET_INVALID_ID) | 149 | if (e->id == IPSET_INVALID_ID) |
| 152 | break; | 150 | break; |
| 153 | swap(e->timeout, timeout); | ||
| 154 | } | 151 | } |
| 155 | } | 152 | } |
| 156 | 153 | ||
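The reordered swap in list_elem_tadd() matters because the id and the timeout must ripple down the array together, so the slot that finally absorbs the shifted chain also receives its timeout. A userspace sketch of that insert-by-shifting, with illustrative types:

/* Insert (id, timeout) at slot i of a fixed-size table, pushing every
 * following element one slot down until the first empty slot is reached
 * (sketch of the fixed list_elem_tadd() logic, not the ipset code).
 */
#include <stddef.h>
#include <stdint.h>

#define INVALID_ID  UINT16_MAX	/* stands in for IPSET_INVALID_ID */

struct telem {
	uint16_t id;
	uint32_t timeout;
};

static void telem_insert(struct telem *tab, size_t size, size_t i,
			 uint16_t id, uint32_t timeout)
{
	for (; i < size; i++) {
		uint16_t tid = tab[i].id;
		uint32_t tto = tab[i].timeout;

		tab[i].id = id;
		tab[i].timeout = timeout;
		if (tid == INVALID_ID)	/* first empty slot absorbs the tail */
			break;
		id = tid;		/* keep rippling the displaced pair */
		timeout = tto;
	}
}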
| @@ -164,7 +161,7 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id, | |||
| 164 | /* Last element replaced: e.g. add new,before,last */ | 161 | /* Last element replaced: e.g. add new,before,last */ |
| 165 | ip_set_put_byindex(e->id); | 162 | ip_set_put_byindex(e->id); |
| 166 | if (with_timeout(map->timeout)) | 163 | if (with_timeout(map->timeout)) |
| 167 | list_elem_tadd(map, i, id, timeout); | 164 | list_elem_tadd(map, i, id, ip_set_timeout_set(timeout)); |
| 168 | else | 165 | else |
| 169 | list_elem_add(map, i, id); | 166 | list_elem_add(map, i, id); |
| 170 | 167 | ||
| @@ -172,11 +169,11 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id, | |||
| 172 | } | 169 | } |
| 173 | 170 | ||
| 174 | static int | 171 | static int |
| 175 | list_set_del(struct list_set *map, ip_set_id_t id, u32 i) | 172 | list_set_del(struct list_set *map, u32 i) |
| 176 | { | 173 | { |
| 177 | struct set_elem *a = list_set_elem(map, i), *b; | 174 | struct set_elem *a = list_set_elem(map, i), *b; |
| 178 | 175 | ||
| 179 | ip_set_put_byindex(id); | 176 | ip_set_put_byindex(a->id); |
| 180 | 177 | ||
| 181 | for (; i < map->size - 1; i++) { | 178 | for (; i < map->size - 1; i++) { |
| 182 | b = list_set_elem(map, i + 1); | 179 | b = list_set_elem(map, i + 1); |
| @@ -308,11 +305,11 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[], | |||
| 308 | (before == 0 || | 305 | (before == 0 || |
| 309 | (before > 0 && | 306 | (before > 0 && |
| 310 | next_id_eq(map, i, refid)))) | 307 | next_id_eq(map, i, refid)))) |
| 311 | ret = list_set_del(map, id, i); | 308 | ret = list_set_del(map, i); |
| 312 | else if (before < 0 && | 309 | else if (before < 0 && |
| 313 | elem->id == refid && | 310 | elem->id == refid && |
| 314 | next_id_eq(map, i, id)) | 311 | next_id_eq(map, i, id)) |
| 315 | ret = list_set_del(map, id, i + 1); | 312 | ret = list_set_del(map, i + 1); |
| 316 | } | 313 | } |
| 317 | break; | 314 | break; |
| 318 | default: | 315 | default: |
| @@ -369,8 +366,7 @@ list_set_head(struct ip_set *set, struct sk_buff *skb) | |||
| 369 | NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size)); | 366 | NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size)); |
| 370 | if (with_timeout(map->timeout)) | 367 | if (with_timeout(map->timeout)) |
| 371 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)); | 368 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)); |
| 372 | NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, | 369 | NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); |
| 373 | htonl(atomic_read(&set->ref) - 1)); | ||
| 374 | NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, | 370 | NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, |
| 375 | htonl(sizeof(*map) + map->size * map->dsize)); | 371 | htonl(sizeof(*map) + map->size * map->dsize)); |
| 376 | ipset_nest_end(skb, nested); | 372 | ipset_nest_end(skb, nested); |
| @@ -461,16 +457,13 @@ list_set_gc(unsigned long ul_set) | |||
| 461 | struct set_telem *e; | 457 | struct set_telem *e; |
| 462 | u32 i; | 458 | u32 i; |
| 463 | 459 | ||
| 464 | /* We run parallel with other readers (test element) | 460 | write_lock_bh(&set->lock); |
| 465 | * but adding/deleting new entries is locked out */ | 461 | for (i = 0; i < map->size; i++) { |
| 466 | read_lock_bh(&set->lock); | 462 | e = list_set_telem(map, i); |
| 467 | for (i = map->size - 1; i >= 0; i--) { | 463 | if (e->id != IPSET_INVALID_ID && list_set_expired(map, i)) |
| 468 | e = (struct set_telem *) list_set_elem(map, i); | 464 | list_set_del(map, i); |
| 469 | if (e->id != IPSET_INVALID_ID && | ||
| 470 | list_set_expired(map, i)) | ||
| 471 | list_set_del(map, e->id, i); | ||
| 472 | } | 465 | } |
| 473 | read_unlock_bh(&set->lock); | 466 | write_unlock_bh(&set->lock); |
| 474 | 467 | ||
| 475 | map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ; | 468 | map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ; |
| 476 | add_timer(&map->gc); | 469 | add_timer(&map->gc); |
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index f289306cbf12..c97bd45975be 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c | |||
| @@ -595,7 +595,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest) | |||
| 595 | atomic_inc(&dest->inactconns); | 595 | atomic_inc(&dest->inactconns); |
| 596 | } else { | 596 | } else { |
| 597 | /* It is a persistent connection/template, so increase | 597 | /* It is a persistent connection/template, so increase |
| 598 | the peristent connection counter */ | 598 | the persistent connection counter */ |
| 599 | atomic_inc(&dest->persistconns); | 599 | atomic_inc(&dest->persistconns); |
| 600 | } | 600 | } |
| 601 | 601 | ||
| @@ -657,7 +657,7 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp) | |||
| 657 | } | 657 | } |
| 658 | } else { | 658 | } else { |
| 659 | /* It is a persistent connection/template, so decrease | 659 | /* It is a persistent connection/template, so decrease |
| 660 | the peristent connection counter */ | 660 | the persistent connection counter */ |
| 661 | atomic_dec(&dest->persistconns); | 661 | atomic_dec(&dest->persistconns); |
| 662 | } | 662 | } |
| 663 | 663 | ||
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 33733c8872e7..ae47090bf45f 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
| @@ -3120,7 +3120,7 @@ nla_put_failure: | |||
| 3120 | static int ip_vs_genl_dump_daemons(struct sk_buff *skb, | 3120 | static int ip_vs_genl_dump_daemons(struct sk_buff *skb, |
| 3121 | struct netlink_callback *cb) | 3121 | struct netlink_callback *cb) |
| 3122 | { | 3122 | { |
| 3123 | struct net *net = skb_net(skb); | 3123 | struct net *net = skb_sknet(skb); |
| 3124 | struct netns_ipvs *ipvs = net_ipvs(net); | 3124 | struct netns_ipvs *ipvs = net_ipvs(net); |
| 3125 | 3125 | ||
| 3126 | mutex_lock(&__ip_vs_mutex); | 3126 | mutex_lock(&__ip_vs_mutex); |
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c index f276df9896b3..87e40ea77a95 100644 --- a/net/netfilter/ipvs/ip_vs_lblc.c +++ b/net/netfilter/ipvs/ip_vs_lblc.c | |||
| @@ -131,7 +131,7 @@ static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en) | |||
| 131 | { | 131 | { |
| 132 | list_del(&en->list); | 132 | list_del(&en->list); |
| 133 | /* | 133 | /* |
| 134 | * We don't kfree dest because it is refered either by its service | 134 | * We don't kfree dest because it is referred either by its service |
| 135 | * or the trash dest list. | 135 | * or the trash dest list. |
| 136 | */ | 136 | */ |
| 137 | atomic_dec(&en->dest->refcnt); | 137 | atomic_dec(&en->dest->refcnt); |
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c index cb1c9913d38b..90f618ab6dda 100644 --- a/net/netfilter/ipvs/ip_vs_lblcr.c +++ b/net/netfilter/ipvs/ip_vs_lblcr.c | |||
| @@ -152,7 +152,7 @@ static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set) | |||
| 152 | write_lock(&set->lock); | 152 | write_lock(&set->lock); |
| 153 | list_for_each_entry_safe(e, ep, &set->list, list) { | 153 | list_for_each_entry_safe(e, ep, &set->list, list) { |
| 154 | /* | 154 | /* |
| 155 | * We don't kfree dest because it is refered either | 155 | * We don't kfree dest because it is referred either |
| 156 | * by its service or by the trash dest list. | 156 | * by its service or by the trash dest list. |
| 157 | */ | 157 | */ |
| 158 | atomic_dec(&e->dest->refcnt); | 158 | atomic_dec(&e->dest->refcnt); |
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index b027ccc49f43..d12ed53ec95f 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c | |||
| @@ -566,7 +566,7 @@ static struct ipvs_sctp_nextstate | |||
| 566 | * SHUTDOWN sent from the client, waiting for SHUT ACK from the server | 566 | * SHUTDOWN sent from the client, waiting for SHUT ACK from the server |
| 567 | */ | 567 | */ |
| 568 | /* | 568 | /* |
| 569 | * We recieved the data chuck, keep the state unchanged. I assume | 569 | * We received the data chuck, keep the state unchanged. I assume |
| 570 | * that still data chuncks can be received by both the peers in | 570 | * that still data chuncks can be received by both the peers in |
| 571 | * SHUDOWN state | 571 | * SHUDOWN state |
| 572 | */ | 572 | */ |
| @@ -633,7 +633,7 @@ static struct ipvs_sctp_nextstate | |||
| 633 | * SHUTDOWN sent from the server, waiting for SHUTDOWN ACK from client | 633 | * SHUTDOWN sent from the server, waiting for SHUTDOWN ACK from client |
| 634 | */ | 634 | */ |
| 635 | /* | 635 | /* |
| 636 | * We recieved the data chuck, keep the state unchanged. I assume | 636 | * We received the data chuck, keep the state unchanged. I assume |
| 637 | * that still data chuncks can be received by both the peers in | 637 | * that still data chuncks can be received by both the peers in |
| 638 | * SHUDOWN state | 638 | * SHUDOWN state |
| 639 | */ | 639 | */ |
| @@ -701,7 +701,7 @@ static struct ipvs_sctp_nextstate | |||
| 701 | * SHUTDOWN ACK from the client, awaiting for SHUTDOWN COM from server | 701 | * SHUTDOWN ACK from the client, awaiting for SHUTDOWN COM from server |
| 702 | */ | 702 | */ |
| 703 | /* | 703 | /* |
| 704 | * We recieved the data chuck, keep the state unchanged. I assume | 704 | * We received the data chuck, keep the state unchanged. I assume |
| 705 | * that still data chuncks can be received by both the peers in | 705 | * that still data chuncks can be received by both the peers in |
| 706 | * SHUDOWN state | 706 | * SHUDOWN state |
| 707 | */ | 707 | */ |
| @@ -771,7 +771,7 @@ static struct ipvs_sctp_nextstate | |||
| 771 | * SHUTDOWN ACK from the server, awaiting for SHUTDOWN COM from client | 771 | * SHUTDOWN ACK from the server, awaiting for SHUTDOWN COM from client |
| 772 | */ | 772 | */ |
| 773 | /* | 773 | /* |
| 774 | * We recieved the data chuck, keep the state unchanged. I assume | 774 | * We received the data chuck, keep the state unchanged. I assume |
| 775 | * that still data chuncks can be received by both the peers in | 775 | * that still data chuncks can be received by both the peers in |
| 776 | * SHUDOWN state | 776 | * SHUDOWN state |
| 777 | */ | 777 | */ |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 941286ca911d..2e1c11f78419 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
| @@ -453,7 +453,7 @@ __nf_conntrack_confirm(struct sk_buff *skb) | |||
| 453 | REJECT will give spurious warnings here. */ | 453 | REJECT will give spurious warnings here. */ |
| 454 | /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */ | 454 | /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */ |
| 455 | 455 | ||
| 456 | /* No external references means noone else could have | 456 | /* No external references means no one else could have |
| 457 | confirmed us. */ | 457 | confirmed us. */ |
| 458 | NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); | 458 | NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); |
| 459 | pr_debug("Confirming conntrack %p\n", ct); | 459 | pr_debug("Confirming conntrack %p\n", ct); |
| @@ -901,7 +901,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, | |||
| 901 | ret = l3proto->get_l4proto(skb, skb_network_offset(skb), | 901 | ret = l3proto->get_l4proto(skb, skb_network_offset(skb), |
| 902 | &dataoff, &protonum); | 902 | &dataoff, &protonum); |
| 903 | if (ret <= 0) { | 903 | if (ret <= 0) { |
| 904 | pr_debug("not prepared to track yet or error occured\n"); | 904 | pr_debug("not prepared to track yet or error occurred\n"); |
| 905 | NF_CT_STAT_INC_ATOMIC(net, error); | 905 | NF_CT_STAT_INC_ATOMIC(net, error); |
| 906 | NF_CT_STAT_INC_ATOMIC(net, invalid); | 906 | NF_CT_STAT_INC_ATOMIC(net, invalid); |
| 907 | ret = -ret; | 907 | ret = -ret; |
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c index 867882313e49..bcd5ed6b7130 100644 --- a/net/netfilter/nf_conntrack_h323_asn1.c +++ b/net/netfilter/nf_conntrack_h323_asn1.c | |||
| @@ -631,7 +631,7 @@ static int decode_seqof(bitstr_t *bs, const struct field_t *f, | |||
| 631 | CHECK_BOUND(bs, 2); | 631 | CHECK_BOUND(bs, 2); |
| 632 | count = *bs->cur++; | 632 | count = *bs->cur++; |
| 633 | count <<= 8; | 633 | count <<= 8; |
| 634 | count = *bs->cur++; | 634 | count += *bs->cur++; |
| 635 | break; | 635 | break; |
| 636 | case SEMI: | 636 | case SEMI: |
| 637 | BYTE_ALIGN(bs); | 637 | BYTE_ALIGN(bs); |
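The decode_seqof() change above is more than a spelling pass: the old sequence read the high byte of the element count, shifted it, and then overwrote it with the low byte, so any count above 255 collapsed to its low byte. A minimal userspace sketch of the corrected big-endian read, with illustrative names only:

#include <stdint.h>
#include <stdio.h>

/* Read a 16-bit big-endian count the way the fixed decode_seqof() does. */
static unsigned int read_be16_count(const uint8_t *p)
{
	unsigned int count;

	count = *p++;	/* high byte */
	count <<= 8;	/* make room for the low byte */
	count += *p++;	/* accumulate instead of overwriting */
	return count;
}

int main(void)
{
	static const uint8_t buf[] = { 0x01, 0x02 };

	/* Prints 258; the pre-patch "count = *p++" would have yielded 2. */
	printf("%u\n", read_be16_count(buf));
	return 0;
}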
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 533a183e6661..18b2ce5c8ced 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c | |||
| @@ -731,10 +731,10 @@ static int callforward_do_filter(const union nf_inet_addr *src, | |||
| 731 | 731 | ||
| 732 | memset(&fl2, 0, sizeof(fl2)); | 732 | memset(&fl2, 0, sizeof(fl2)); |
| 733 | fl2.daddr = dst->ip; | 733 | fl2.daddr = dst->ip; |
| 734 | if (!afinfo->route((struct dst_entry **)&rt1, | 734 | if (!afinfo->route(&init_net, (struct dst_entry **)&rt1, |
| 735 | flowi4_to_flowi(&fl1))) { | 735 | flowi4_to_flowi(&fl1), false)) { |
| 736 | if (!afinfo->route((struct dst_entry **)&rt2, | 736 | if (!afinfo->route(&init_net, (struct dst_entry **)&rt2, |
| 737 | flowi4_to_flowi(&fl2))) { | 737 | flowi4_to_flowi(&fl2), false)) { |
| 738 | if (rt1->rt_gateway == rt2->rt_gateway && | 738 | if (rt1->rt_gateway == rt2->rt_gateway && |
| 739 | rt1->dst.dev == rt2->dst.dev) | 739 | rt1->dst.dev == rt2->dst.dev) |
| 740 | ret = 1; | 740 | ret = 1; |
| @@ -755,10 +755,10 @@ static int callforward_do_filter(const union nf_inet_addr *src, | |||
| 755 | 755 | ||
| 756 | memset(&fl2, 0, sizeof(fl2)); | 756 | memset(&fl2, 0, sizeof(fl2)); |
| 757 | ipv6_addr_copy(&fl2.daddr, &dst->in6); | 757 | ipv6_addr_copy(&fl2.daddr, &dst->in6); |
| 758 | if (!afinfo->route((struct dst_entry **)&rt1, | 758 | if (!afinfo->route(&init_net, (struct dst_entry **)&rt1, |
| 759 | flowi6_to_flowi(&fl1))) { | 759 | flowi6_to_flowi(&fl1), false)) { |
| 760 | if (!afinfo->route((struct dst_entry **)&rt2, | 760 | if (!afinfo->route(&init_net, (struct dst_entry **)&rt2, |
| 761 | flowi6_to_flowi(&fl2))) { | 761 | flowi6_to_flowi(&fl2), false)) { |
| 762 | if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway, | 762 | if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway, |
| 763 | sizeof(rt1->rt6i_gateway)) && | 763 | sizeof(rt1->rt6i_gateway)) && |
| 764 | rt1->dst.dev == rt2->dst.dev) | 764 | rt1->dst.dev == rt2->dst.dev) |
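Both address-family branches of callforward_do_filter() now hand a network namespace and an extra boolean to the nf_afinfo route hook. Judging purely from these call sites (and the similar ones in xt_TCPMSS and xt_addrtype further down), the hook's prototype would have to look roughly like the sketch below; the authoritative definition lives in include/linux/netfilter.h and should be checked there.

/* Inferred from the updated callers in this patch, not copied from the
 * header.  The final bool appears to request a strict (output-interface
 * bound) lookup: xt_addrtype passes !!dev, the H.323 helper passes false. */
int (*route)(struct net *net, struct dst_entry **dst,
	     struct flowi *fl, bool strict);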
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 9ae57c57c50e..2e664a69d7db 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c | |||
| @@ -98,7 +98,7 @@ static const char * const dccp_state_names[] = { | |||
| 98 | #define sIV CT_DCCP_INVALID | 98 | #define sIV CT_DCCP_INVALID |
| 99 | 99 | ||
| 100 | /* | 100 | /* |
| 101 | * DCCP state transistion table | 101 | * DCCP state transition table |
| 102 | * | 102 | * |
| 103 | * The assumption is the same as for TCP tracking: | 103 | * The assumption is the same as for TCP tracking: |
| 104 | * | 104 | * |
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 6f4ee70f460b..6772b1154654 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c | |||
| @@ -107,9 +107,9 @@ static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = { | |||
| 107 | /* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, | 107 | /* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, |
| 108 | /* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA}, | 108 | /* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA}, |
| 109 | /* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA}, | 109 | /* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA}, |
| 110 | /* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant have Stale cookie*/ | 110 | /* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't have Stale cookie*/ |
| 111 | /* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA},/* 5.2.4 - Big TODO */ | 111 | /* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA},/* 5.2.4 - Big TODO */ |
| 112 | /* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant come in orig dir */ | 112 | /* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in orig dir */ |
| 113 | /* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL} | 113 | /* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL} |
| 114 | }, | 114 | }, |
| 115 | { | 115 | { |
| @@ -121,7 +121,7 @@ static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = { | |||
| 121 | /* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA}, | 121 | /* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA}, |
| 122 | /* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA}, | 122 | /* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA}, |
| 123 | /* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA}, | 123 | /* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA}, |
| 124 | /* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant come in reply dir */ | 124 | /* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in reply dir */ |
| 125 | /* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA}, | 125 | /* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA}, |
| 126 | /* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL} | 126 | /* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL} |
| 127 | } | 127 | } |
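For readers following the table whose comments are touched above: the array is indexed by packet direction, then by chunk-type row, then by the connection's current state, and the cell holds the next state. A short orientation note, assuming the usual lookup helper in this file:

/*
 * Sketch of how sctp_conntracks[][][] is consulted (the real lookup is in
 * sctp_new_state() later in this file):
 *
 *	new_state = sctp_conntracks[dir][chunk_row][cur_state];
 *
 * dir:       0 = original direction, 1 = reply direction
 * chunk_row: the init ... shutdown_comp rows commented above
 * cur_state: the sNO ... sSA columns
 */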
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index bcf47eb518ef..237cc1981b89 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c | |||
| @@ -707,7 +707,7 @@ static const char *ct_sdp_header_search(const char *dptr, const char *limit, | |||
| 707 | } | 707 | } |
| 708 | 708 | ||
| 709 | /* Locate a SDP header (optionally a substring within the header value), | 709 | /* Locate a SDP header (optionally a substring within the header value), |
| 710 | * optionally stopping at the first occurence of the term header, parse | 710 | * optionally stopping at the first occurrence of the term header, parse |
| 711 | * it and return the offset and length of the data we're interested in. | 711 | * it and return the offset and length of the data we're interested in. |
| 712 | */ | 712 | */ |
| 713 | int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr, | 713 | int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr, |
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index 5ab22e2bbd7d..5b466cd1272f 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c | |||
| @@ -134,7 +134,7 @@ static int __nf_queue(struct sk_buff *skb, | |||
| 134 | const struct nf_afinfo *afinfo; | 134 | const struct nf_afinfo *afinfo; |
| 135 | const struct nf_queue_handler *qh; | 135 | const struct nf_queue_handler *qh; |
| 136 | 136 | ||
| 137 | /* QUEUE == DROP if noone is waiting, to be safe. */ | 137 | /* QUEUE == DROP if no one is waiting, to be safe. */ |
| 138 | rcu_read_lock(); | 138 | rcu_read_lock(); |
| 139 | 139 | ||
| 140 | qh = rcu_dereference(queue_handler[pf]); | 140 | qh = rcu_dereference(queue_handler[pf]); |
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 6e6b46cb1db9..9e63b43faeed 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c | |||
| @@ -166,7 +166,7 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb, | |||
| 166 | rcu_read_lock(); | 166 | rcu_read_lock(); |
| 167 | ai = nf_get_afinfo(family); | 167 | ai = nf_get_afinfo(family); |
| 168 | if (ai != NULL) | 168 | if (ai != NULL) |
| 169 | ai->route((struct dst_entry **)&rt, &fl); | 169 | ai->route(&init_net, (struct dst_entry **)&rt, &fl, false); |
| 170 | rcu_read_unlock(); | 170 | rcu_read_unlock(); |
| 171 | 171 | ||
| 172 | if (rt != NULL) { | 172 | if (rt != NULL) { |
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c index 2220b85e9519..b77d383cec78 100644 --- a/net/netfilter/xt_addrtype.c +++ b/net/netfilter/xt_addrtype.c | |||
| @@ -32,11 +32,32 @@ MODULE_ALIAS("ipt_addrtype"); | |||
| 32 | MODULE_ALIAS("ip6t_addrtype"); | 32 | MODULE_ALIAS("ip6t_addrtype"); |
| 33 | 33 | ||
| 34 | #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) | 34 | #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) |
| 35 | static u32 xt_addrtype_rt6_to_type(const struct rt6_info *rt) | 35 | static u32 match_lookup_rt6(struct net *net, const struct net_device *dev, |
| 36 | const struct in6_addr *addr) | ||
| 36 | { | 37 | { |
| 38 | const struct nf_afinfo *afinfo; | ||
| 39 | struct flowi6 flow; | ||
| 40 | struct rt6_info *rt; | ||
| 37 | u32 ret; | 41 | u32 ret; |
| 42 | int route_err; | ||
| 38 | 43 | ||
| 39 | if (!rt) | 44 | memset(&flow, 0, sizeof(flow)); |
| 45 | ipv6_addr_copy(&flow.daddr, addr); | ||
| 46 | if (dev) | ||
| 47 | flow.flowi6_oif = dev->ifindex; | ||
| 48 | |||
| 49 | rcu_read_lock(); | ||
| 50 | |||
| 51 | afinfo = nf_get_afinfo(NFPROTO_IPV6); | ||
| 52 | if (afinfo != NULL) | ||
| 53 | route_err = afinfo->route(net, (struct dst_entry **)&rt, | ||
| 54 | flowi6_to_flowi(&flow), !!dev); | ||
| 55 | else | ||
| 56 | route_err = 1; | ||
| 57 | |||
| 58 | rcu_read_unlock(); | ||
| 59 | |||
| 60 | if (route_err) | ||
| 40 | return XT_ADDRTYPE_UNREACHABLE; | 61 | return XT_ADDRTYPE_UNREACHABLE; |
| 41 | 62 | ||
| 42 | if (rt->rt6i_flags & RTF_REJECT) | 63 | if (rt->rt6i_flags & RTF_REJECT) |
| @@ -48,6 +69,9 @@ static u32 xt_addrtype_rt6_to_type(const struct rt6_info *rt) | |||
| 48 | ret |= XT_ADDRTYPE_LOCAL; | 69 | ret |= XT_ADDRTYPE_LOCAL; |
| 49 | if (rt->rt6i_flags & RTF_ANYCAST) | 70 | if (rt->rt6i_flags & RTF_ANYCAST) |
| 50 | ret |= XT_ADDRTYPE_ANYCAST; | 71 | ret |= XT_ADDRTYPE_ANYCAST; |
| 72 | |||
| 73 | |||
| 74 | dst_release(&rt->dst); | ||
| 51 | return ret; | 75 | return ret; |
| 52 | } | 76 | } |
| 53 | 77 | ||
| @@ -65,18 +89,8 @@ static bool match_type6(struct net *net, const struct net_device *dev, | |||
| 65 | return false; | 89 | return false; |
| 66 | 90 | ||
| 67 | if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST | | 91 | if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST | |
| 68 | XT_ADDRTYPE_UNREACHABLE) & mask) { | 92 | XT_ADDRTYPE_UNREACHABLE) & mask) |
| 69 | struct rt6_info *rt; | 93 | return !!(mask & match_lookup_rt6(net, dev, addr)); |
| 70 | u32 type; | ||
| 71 | int ifindex = dev ? dev->ifindex : 0; | ||
| 72 | |||
| 73 | rt = rt6_lookup(net, addr, NULL, ifindex, !!dev); | ||
| 74 | |||
| 75 | type = xt_addrtype_rt6_to_type(rt); | ||
| 76 | |||
| 77 | dst_release(&rt->dst); | ||
| 78 | return !!(mask & type); | ||
| 79 | } | ||
| 80 | return true; | 94 | return true; |
| 81 | } | 95 | } |
| 82 | 96 | ||
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c index 2c0086a4751e..481a86fdc409 100644 --- a/net/netfilter/xt_conntrack.c +++ b/net/netfilter/xt_conntrack.c | |||
| @@ -195,7 +195,7 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par, | |||
| 195 | return info->match_flags & XT_CONNTRACK_STATE; | 195 | return info->match_flags & XT_CONNTRACK_STATE; |
| 196 | if ((info->match_flags & XT_CONNTRACK_DIRECTION) && | 196 | if ((info->match_flags & XT_CONNTRACK_DIRECTION) && |
| 197 | (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) ^ | 197 | (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) ^ |
| 198 | !!(info->invert_flags & XT_CONNTRACK_DIRECTION)) | 198 | !(info->invert_flags & XT_CONNTRACK_DIRECTION)) |
| 199 | return false; | 199 | return false; |
| 200 | 200 | ||
| 201 | if (info->match_flags & XT_CONNTRACK_ORIGSRC) | 201 | if (info->match_flags & XT_CONNTRACK_ORIGSRC) |
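The xt_conntrack hunk is a one-character behaviour change, not a typo fix: for boolean operands, "A ^ !!B" and "A ^ !B" are complements, so swapping "!!" for "!" inverts which direction the --ctdir test lets through. A standalone truth-table check; my reading that the invert bit corresponds to a rule asking for the reply direction is an assumption, the arithmetic below is not.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* orig: packet travels in the original direction;
	 * inv:  XT_CONNTRACK_DIRECTION is set in invert_flags. */
	for (int orig = 0; orig <= 1; orig++)
		for (int inv = 0; inv <= 1; inv++) {
			bool old_reject = orig ^ !!inv;	/* pre-patch  */
			bool new_reject = orig ^ !inv;	/* post-patch */

			printf("orig=%d inv=%d old_reject=%d new_reject=%d\n",
			       orig, inv, old_reject, new_reject);
		}
	return 0;
}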
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c index d37b7f80fa37..de0d8e4cbfb6 100644 --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c | |||
| @@ -109,7 +109,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry) | |||
| 109 | * | 109 | * |
| 110 | * Description: | 110 | * Description: |
| 111 | * This is the hashing function for the domain hash table, it returns the | 111 | * This is the hashing function for the domain hash table, it returns the |
| 112 | * correct bucket number for the domain. The caller is responsibile for | 112 | * correct bucket number for the domain. The caller is responsible for |
| 113 | * ensuring that the hash table is protected with either a RCU read lock or the | 113 | * ensuring that the hash table is protected with either a RCU read lock or the |
| 114 | * hash table lock. | 114 | * hash table lock. |
| 115 | * | 115 | * |
| @@ -134,7 +134,7 @@ static u32 netlbl_domhsh_hash(const char *key) | |||
| 134 | * | 134 | * |
| 135 | * Description: | 135 | * Description: |
| 136 | * Searches the domain hash table and returns a pointer to the hash table | 136 | * Searches the domain hash table and returns a pointer to the hash table |
| 137 | * entry if found, otherwise NULL is returned. The caller is responsibile for | 137 | * entry if found, otherwise NULL is returned. The caller is responsible for |
| 138 | * ensuring that the hash table is protected with either a RCU read lock or the | 138 | * ensuring that the hash table is protected with either a RCU read lock or the |
| 139 | * hash table lock. | 139 | * hash table lock. |
| 140 | * | 140 | * |
| @@ -165,7 +165,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) | |||
| 165 | * Searches the domain hash table and returns a pointer to the hash table | 165 | * Searches the domain hash table and returns a pointer to the hash table |
| 166 | * entry if an exact match is found, if an exact match is not present in the | 166 | * entry if an exact match is found, if an exact match is not present in the |
| 167 | * hash table then the default entry is returned if valid otherwise NULL is | 167 | * hash table then the default entry is returned if valid otherwise NULL is |
| 168 | * returned. The caller is responsibile ensuring that the hash table is | 168 | * returned. The caller is responsible for ensuring that the hash table is |
| 169 | * protected with either a RCU read lock or the hash table lock. | 169 | * protected with either a RCU read lock or the hash table lock. |
| 170 | * | 170 | * |
| 171 | */ | 171 | */ |
| @@ -193,7 +193,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain) | |||
| 193 | * | 193 | * |
| 194 | * Description: | 194 | * Description: |
| 195 | * Generate an audit record for adding a new NetLabel/LSM mapping entry with | 195 | * Generate an audit record for adding a new NetLabel/LSM mapping entry with |
| 196 | * the given information. Caller is responsibile for holding the necessary | 196 | * the given information. Caller is responsible for holding the necessary |
| 197 | * locks. | 197 | * locks. |
| 198 | * | 198 | * |
| 199 | */ | 199 | */ |
| @@ -605,7 +605,7 @@ int netlbl_domhsh_remove_default(struct netlbl_audit *audit_info) | |||
| 605 | * | 605 | * |
| 606 | * Description: | 606 | * Description: |
| 607 | * Look through the domain hash table searching for an entry to match @domain, | 607 | * Look through the domain hash table searching for an entry to match @domain, |
| 608 | * return a pointer to a copy of the entry or NULL. The caller is responsibile | 608 | * return a pointer to a copy of the entry or NULL. The caller is responsible |
| 609 | * for ensuring that rcu_read_[un]lock() is called. | 609 | * for ensuring that rcu_read_[un]lock() is called. |
| 610 | * | 610 | * |
| 611 | */ | 611 | */ |
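The kernel-doc fixes above all repeat the same contract: the domain hash helpers do no locking of their own, so every caller must hold either the RCU read lock or the hash table lock. A hypothetical read-side caller, sketched only to make that contract concrete (netlbl_domhsh_search() is the helper documented above, everything else is illustrative):

struct netlbl_dom_map *entry;

rcu_read_lock();
entry = netlbl_domhsh_search(domain);
if (entry != NULL) {
	/* use entry only while still inside the RCU read-side section */
}
rcu_read_unlock();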
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c index 998e85e895d0..4f251b19fbcc 100644 --- a/net/netlabel/netlabel_mgmt.c +++ b/net/netlabel/netlabel_mgmt.c | |||
| @@ -259,7 +259,7 @@ add_failure: | |||
| 259 | * | 259 | * |
| 260 | * Description: | 260 | * Description: |
| 261 | * This function is a helper function used by the LISTALL and LISTDEF command | 261 | * This function is a helper function used by the LISTALL and LISTDEF command |
| 262 | * handlers. The caller is responsibile for ensuring that the RCU read lock | 262 | * handlers. The caller is responsible for ensuring that the RCU read lock |
| 263 | * is held. Returns zero on success, negative values on failure. | 263 | * is held. Returns zero on success, negative values on failure. |
| 264 | * | 264 | * |
| 265 | */ | 265 | */ |
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index c47a511f203d..7c4dce8fa5e6 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c | |||
| @@ -355,7 +355,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context) | |||
| 355 | * | 355 | * |
| 356 | * Conceptually, we have two counters: | 356 | * Conceptually, we have two counters: |
| 357 | * - send credits: this tells us how many WRs we're allowed | 357 | * - send credits: this tells us how many WRs we're allowed |
| 358 | * to submit without overruning the reciever's queue. For | 358 | * to submit without overrunning the receiver's queue. For |
| 359 | * each SEND WR we post, we decrement this by one. | 359 | * each SEND WR we post, we decrement this by one. |
| 360 | * | 360 | * |
| 361 | * - posted credits: this tells us how many WRs we recently | 361 | * - posted credits: this tells us how many WRs we recently |
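The comment being fixed here (and its twin in iw_send.c below) describes RDS's credit-based flow control: one send credit is consumed per SEND work request so the sender can never overrun the receiver's posted buffers, while posted credits count freshly posted receive buffers waiting to be advertised back to the peer. A userspace toy of that bookkeeping, with invented names, purely to illustrate the two counters:

#include <stdbool.h>
#include <stdio.h>

struct credits {
	int send;	/* SEND WRs we may still post */
	int posted;	/* receive buffers posted but not yet advertised */
};

/* Consume one send credit per SEND work request. */
static bool try_post_send(struct credits *c)
{
	if (c->send == 0)
		return false;	/* receiver has no buffer ready for us */
	c->send--;
	return true;
}

/* Piggy-back newly posted receive buffers onto the next outgoing message. */
static int advertise_credits(struct credits *c)
{
	int adv = c->posted;

	c->posted = 0;
	return adv;
}

int main(void)
{
	struct credits c = { .send = 2, .posted = 3 };

	printf("send ok: %d, advertised: %d\n",
	       try_post_send(&c), advertise_credits(&c));
	return 0;
}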
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c index 712cf2d1f28e..3a60a15d1b4a 100644 --- a/net/rds/iw_cm.c +++ b/net/rds/iw_cm.c | |||
| @@ -181,7 +181,7 @@ static int rds_iw_init_qp_attrs(struct ib_qp_init_attr *attr, | |||
| 181 | unsigned int send_size, recv_size; | 181 | unsigned int send_size, recv_size; |
| 182 | int ret; | 182 | int ret; |
| 183 | 183 | ||
| 184 | /* The offset of 1 is to accomodate the additional ACK WR. */ | 184 | /* The offset of 1 is to accommodate the additional ACK WR. */ |
| 185 | send_size = min_t(unsigned int, rds_iwdev->max_wrs, rds_iw_sysctl_max_send_wr + 1); | 185 | send_size = min_t(unsigned int, rds_iwdev->max_wrs, rds_iw_sysctl_max_send_wr + 1); |
| 186 | recv_size = min_t(unsigned int, rds_iwdev->max_wrs, rds_iw_sysctl_max_recv_wr + 1); | 186 | recv_size = min_t(unsigned int, rds_iwdev->max_wrs, rds_iw_sysctl_max_recv_wr + 1); |
| 187 | rds_iw_ring_resize(send_ring, send_size - 1); | 187 | rds_iw_ring_resize(send_ring, send_size - 1); |
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c index 59509e9a9e72..6deaa77495e3 100644 --- a/net/rds/iw_rdma.c +++ b/net/rds/iw_rdma.c | |||
| @@ -122,7 +122,7 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd | |||
| 122 | #else | 122 | #else |
| 123 | /* FIXME - needs to compare the local and remote | 123 | /* FIXME - needs to compare the local and remote |
| 124 | * ipaddr/port tuple, but the ipaddr is the only | 124 | * ipaddr/port tuple, but the ipaddr is the only |
| 125 | * available infomation in the rds_sock (as the rest are | 125 | * available information in the rds_sock (as the rest are |
| 126 | * zero'ed. It doesn't appear to be properly populated | 126 | * zero'ed. It doesn't appear to be properly populated |
| 127 | * during connection setup... | 127 | * during connection setup... |
| 128 | */ | 128 | */ |
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c index 6280ea020d4e..545d8ee3efb1 100644 --- a/net/rds/iw_send.c +++ b/net/rds/iw_send.c | |||
| @@ -307,7 +307,7 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context) | |||
| 307 | * | 307 | * |
| 308 | * Conceptually, we have two counters: | 308 | * Conceptually, we have two counters: |
| 309 | * - send credits: this tells us how many WRs we're allowed | 309 | * - send credits: this tells us how many WRs we're allowed |
| 310 | * to submit without overruning the reciever's queue. For | 310 | * to submit without overrunning the receiver's queue. For |
| 311 | * each SEND WR we post, we decrement this by one. | 311 | * each SEND WR we post, we decrement this by one. |
| 312 | * | 312 | * |
| 313 | * - posted credits: this tells us how many WRs we recently | 313 | * - posted credits: this tells us how many WRs we recently |
diff --git a/net/rds/send.c b/net/rds/send.c index 35b9c2e9caf1..d58ae5f9339e 100644 --- a/net/rds/send.c +++ b/net/rds/send.c | |||
| @@ -116,7 +116,7 @@ static void release_in_xmit(struct rds_connection *conn) | |||
| 116 | } | 116 | } |
| 117 | 117 | ||
| 118 | /* | 118 | /* |
| 119 | * We're making the concious trade-off here to only send one message | 119 | * We're making the conscious trade-off here to only send one message |
| 120 | * down the connection at a time. | 120 | * down the connection at a time. |
| 121 | * Pro: | 121 | * Pro: |
| 122 | * - tx queueing is a simple fifo list | 122 | * - tx queueing is a simple fifo list |
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index 08dcd2f29cdc..479cae57d187 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c | |||
| @@ -587,7 +587,7 @@ static int rose_clear_routes(void) | |||
| 587 | 587 | ||
| 588 | /* | 588 | /* |
| 589 | * Check that the device given is a valid AX.25 interface that is "up". | 589 | * Check that the device given is a valid AX.25 interface that is "up". |
| 590 | * called whith RTNL | 590 | * called with RTNL |
| 591 | */ | 591 | */ |
| 592 | static struct net_device *rose_ax25_dev_find(char *devname) | 592 | static struct net_device *rose_ax25_dev_find(char *devname) |
| 593 | { | 593 | { |
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 15873e14cb54..14b42f4ad791 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
| @@ -999,7 +999,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
| 999 | switch (n->nlmsg_type) { | 999 | switch (n->nlmsg_type) { |
| 1000 | case RTM_NEWACTION: | 1000 | case RTM_NEWACTION: |
| 1001 | /* we are going to assume all other flags | 1001 | /* we are going to assume all other flags |
| 1002 | * imply create only if it doesnt exist | 1002 | * imply create only if it doesn't exist |
| 1003 | * Note that CREATE | EXCL implies that | 1003 | * Note that CREATE | EXCL implies that |
| 1004 | * but since we want avoid ambiguity (eg when flags | 1004 | * but since we want to avoid ambiguity (eg when flags |
| 1005 | * is zero) then just set this | 1005 | * is zero) then just set this |
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 50c7c06c019d..7affe9a92757 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c | |||
| @@ -161,7 +161,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, | |||
| 161 | } | 161 | } |
| 162 | if (offset > 0 && offset > skb->len) { | 162 | if (offset > 0 && offset > skb->len) { |
| 163 | pr_info("tc filter pedit" | 163 | pr_info("tc filter pedit" |
| 164 | " offset %d cant exceed pkt length %d\n", | 164 | " offset %d can't exceed pkt length %d\n", |
| 165 | offset, skb->len); | 165 | offset, skb->len); |
| 166 | goto bad; | 166 | goto bad; |
| 167 | } | 167 | } |
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index a4de67eca824..49130e8abff0 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c | |||
| @@ -47,7 +47,7 @@ | |||
| 47 | * on the meta type. Obviously, the length of the data must also | 47 | * on the meta type. Obviously, the length of the data must also |
| 48 | * be provided for non-numeric types. | 48 | * be provided for non-numeric types. |
| 49 | * | 49 | * |
| 50 | * Additionaly, type dependant modifiers such as shift operators | 50 | * Additionally, type dependent modifiers such as shift operators |
| 51 | * or mask may be applied to extend the functionaliy. As of now, | 51 | * or mask may be applied to extend the functionality. As of now, |
| 52 | * the variable length type supports shifting the byte string to | 52 | * the variable length type supports shifting the byte string to |
| 53 | * the right, eating up any number of octets and thus supporting | 53 | * the right, eating up any number of octets and thus supporting |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index e1429a85091f..29b942ce9e82 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
| @@ -183,7 +183,7 @@ static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch) | |||
| 183 | * filters in qdisc and in inner nodes (if higher filter points to the inner | 183 | * filters in qdisc and in inner nodes (if higher filter points to the inner |
| 184 | * node). If we end up with classid MAJOR:0 we enqueue the skb into special | 184 | * node). If we end up with classid MAJOR:0 we enqueue the skb into special |
| 185 | * internal fifo (direct). These packets then go directly thru. If we still | 185 | * internal fifo (direct). These packets then go directly thru. If we still |
| 186 | * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessfull | 186 | * have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful |
| 187 | * then finish and return direct queue. | 187 | * then finish and return direct queue. |
| 188 | */ | 188 | */ |
| 189 | #define HTB_DIRECT ((struct htb_class *)-1L) | 189 | #define HTB_DIRECT ((struct htb_class *)-1L) |
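The comment above (only its spelling changes here) describes htb_classify()'s fallback chain: tc filters first, the MAJOR:0 "direct" class, then the MAJOR:default leaf, and finally the direct queue. A rough sketch of that decision order; every helper name below is invented for illustration, HTB_DIRECT is the sentinel defined just after this comment.

/* Pseudocode-level sketch, not the real htb_classify().  run_filters(),
 * leaf_for_default_classid() and is_leaf() are placeholders. */
static struct htb_class *classify_sketch(struct sk_buff *skb)
{
	struct htb_class *cl = run_filters(skb);	/* qdisc + inner-node filters */

	if (cl == HTB_DIRECT)
		return HTB_DIRECT;			/* classid MAJOR:0, direct fifo */
	if (cl == NULL || !is_leaf(cl))
		cl = leaf_for_default_classid();	/* try MAJOR:default */
	if (cl == NULL || !is_leaf(cl))
		return HTB_DIRECT;			/* still unsuccessful: direct queue */
	return cl;
}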
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index edbbf7ad6623..69c35f6cd13f 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
| @@ -160,7 +160,7 @@ static bool loss_4state(struct netem_sched_data *q) | |||
| 160 | u32 rnd = net_random(); | 160 | u32 rnd = net_random(); |
| 161 | 161 | ||
| 162 | /* | 162 | /* |
| 163 | * Makes a comparision between rnd and the transition | 163 | * Makes a comparison between rnd and the transition |
| 164 | * probabilities outgoing from the current state, then decides the | 164 | * probabilities outgoing from the current state, then decides the |
| 165 | * next state and if the next packet has to be transmitted or lost. | 165 | * next state and if the next packet has to be transmitted or lost. |
| 166 | * The four states correspond to: | 166 | * The four states correspond to: |
| @@ -212,9 +212,9 @@ static bool loss_4state(struct netem_sched_data *q) | |||
| 212 | * Generates losses according to the Gilbert-Elliot loss model or | 212 | * Generates losses according to the Gilbert-Elliot loss model or |
| 213 | * its special cases (Gilbert or Simple Gilbert) | 213 | * its special cases (Gilbert or Simple Gilbert) |
| 214 | * | 214 | * |
| 215 | * Makes a comparision between random number and the transition | 215 | * Makes a comparison between random number and the transition |
| 216 | * probabilities outgoing from the current state, then decides the | 216 | * probabilities outgoing from the current state, then decides the |
| 217 | * next state. A second random number is extracted and the comparision | 217 | * next state. A second random number is extracted and the comparison |
| 218 | * with the loss probability of the current state decides if the next | 218 | * with the loss probability of the current state decides if the next |
| 219 | * packet will be transmitted or lost. | 219 | * packet will be transmitted or lost. |
| 220 | */ | 220 | */ |
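The comment whose spelling is corrected here describes netem's Gilbert-Elliott generator: one random draw is compared against the transition probabilities out of the current state to pick the next state, a second draw against that state's loss probability decides whether the packet is dropped. A userspace toy of the two-state version, with invented names and floating-point probabilities (netem itself uses fixed-point u32 values, and exactly which state the second draw applies to is my reading of the comment):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum ge_state { GOOD, BAD };

struct ge {
	enum ge_state state;
	double p_gb;		/* P(GOOD -> BAD) */
	double p_bg;		/* P(BAD  -> GOOD) */
	double loss_good;	/* loss probability while GOOD */
	double loss_bad;	/* loss probability while BAD  */
};

static bool ge_lose_packet(struct ge *g)
{
	double r1 = drand48();	/* first draw: state transition */

	if (g->state == GOOD && r1 < g->p_gb)
		g->state = BAD;
	else if (g->state == BAD && r1 < g->p_bg)
		g->state = GOOD;

	/* second draw: loss decision in the (possibly new) current state */
	return drand48() < (g->state == GOOD ? g->loss_good : g->loss_bad);
}

int main(void)
{
	struct ge g = { GOOD, 0.05, 0.30, 0.0, 0.7 };
	int lost = 0, n = 100000;

	srand48(1);
	for (int i = 0; i < n; i++)
		lost += ge_lose_packet(&g);
	printf("lost %d of %d packets\n", lost, n);
	return 0;
}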
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 6b04287913cd..0698cad61763 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
| @@ -1593,7 +1593,7 @@ void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc) | |||
| 1593 | struct sctp_chunk *ack; | 1593 | struct sctp_chunk *ack; |
| 1594 | struct sctp_chunk *tmp; | 1594 | struct sctp_chunk *tmp; |
| 1595 | 1595 | ||
| 1596 | /* We can remove all the entries from the queue upto | 1596 | /* We can remove all the entries from the queue up to |
| 1597 | * the "Peer-Sequence-Number". | 1597 | * the "Peer-Sequence-Number". |
| 1598 | */ | 1598 | */ |
| 1599 | list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list, | 1599 | list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list, |
diff --git a/net/sctp/auth.c b/net/sctp/auth.c index ddbbf7c81fa1..865e68fef21c 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c | |||
| @@ -113,7 +113,7 @@ struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp) | |||
| 113 | return new; | 113 | return new; |
| 114 | } | 114 | } |
| 115 | 115 | ||
| 116 | /* Free the shared key stucture */ | 116 | /* Free the shared key structure */ |
| 117 | static void sctp_auth_shkey_free(struct sctp_shared_key *sh_key) | 117 | static void sctp_auth_shkey_free(struct sctp_shared_key *sh_key) |
| 118 | { | 118 | { |
| 119 | BUG_ON(!list_empty(&sh_key->key_list)); | 119 | BUG_ON(!list_empty(&sh_key->key_list)); |
| @@ -122,7 +122,7 @@ static void sctp_auth_shkey_free(struct sctp_shared_key *sh_key) | |||
| 122 | kfree(sh_key); | 122 | kfree(sh_key); |
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | /* Destory the entire key list. This is done during the | 125 | /* Destroy the entire key list. This is done during the |
| 126 | * associon and endpoint free process. | 126 | * association and endpoint free process. |
| 127 | */ | 127 | */ |
| 128 | void sctp_auth_destroy_keys(struct list_head *keys) | 128 | void sctp_auth_destroy_keys(struct list_head *keys) |
| @@ -324,7 +324,7 @@ static struct sctp_auth_bytes *sctp_auth_asoc_create_secret( | |||
| 324 | if (!peer_key_vector || !local_key_vector) | 324 | if (!peer_key_vector || !local_key_vector) |
| 325 | goto out; | 325 | goto out; |
| 326 | 326 | ||
| 327 | /* Figure out the order in wich the key_vectors will be | 327 | /* Figure out the order in which the key_vectors will be |
| 328 | * added to the endpoint shared key. | 328 | * added to the endpoint shared key. |
| 329 | * SCTP-AUTH, Section 6.1: | 329 | * SCTP-AUTH, Section 6.1: |
| 330 | * This is performed by selecting the numerically smaller key | 330 | * This is performed by selecting the numerically smaller key |
diff --git a/net/sctp/input.c b/net/sctp/input.c index 826661be73e7..5436c6921167 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
| @@ -1034,7 +1034,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup( | |||
| 1034 | * association. | 1034 | * association. |
| 1035 | * | 1035 | * |
| 1036 | * This means that any chunks that can help us identify the association need | 1036 | * This means that any chunks that can help us identify the association need |
| 1037 | * to be looked at to find this assocation. | 1037 | * to be looked at to find this association. |
| 1038 | */ | 1038 | */ |
| 1039 | static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb, | 1039 | static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb, |
| 1040 | const union sctp_addr *laddr, | 1040 | const union sctp_addr *laddr, |
diff --git a/net/sctp/output.c b/net/sctp/output.c index 60600d337a3a..b4f3cf06d8da 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
| @@ -510,7 +510,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) | |||
| 510 | sh->checksum = sctp_end_cksum(crc32); | 510 | sh->checksum = sctp_end_cksum(crc32); |
| 511 | } else { | 511 | } else { |
| 512 | if (dst->dev->features & NETIF_F_SCTP_CSUM) { | 512 | if (dst->dev->features & NETIF_F_SCTP_CSUM) { |
| 513 | /* no need to seed psuedo checksum for SCTP */ | 513 | /* no need to seed pseudo checksum for SCTP */ |
| 514 | nskb->ip_summed = CHECKSUM_PARTIAL; | 514 | nskb->ip_summed = CHECKSUM_PARTIAL; |
| 515 | nskb->csum_start = (skb_transport_header(nskb) - | 515 | nskb->csum_start = (skb_transport_header(nskb) - |
| 516 | nskb->head); | 516 | nskb->head); |
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 26dc005113a0..bf92a5b68f8b 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
| @@ -177,13 +177,13 @@ static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn) | |||
| 177 | * 3) If the missing report count for TSN t is to be | 177 | * 3) If the missing report count for TSN t is to be |
| 178 | * incremented according to [RFC2960] and | 178 | * incremented according to [RFC2960] and |
| 179 | * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set, | 179 | * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set, |
| 180 | * then the sender MUST futher execute steps 3.1 and | 180 | * then the sender MUST further execute steps 3.1 and |
| 181 | * 3.2 to determine if the missing report count for | 181 | * 3.2 to determine if the missing report count for |
| 182 | * TSN t SHOULD NOT be incremented. | 182 | * TSN t SHOULD NOT be incremented. |
| 183 | * | 183 | * |
| 184 | * 3.3) If 3.1 and 3.2 do not dictate that the missing | 184 | * 3.3) If 3.1 and 3.2 do not dictate that the missing |
| 185 | * report count for t should not be incremented, then | 185 | * report count for t should not be incremented, then |
| 186 | * the sender SOULD increment missing report count for | 186 | * the sender SHOULD increment missing report count for |
| 187 | * t (according to [RFC2960] and [SCTP_STEWART_2002]). | 187 | * t (according to [RFC2960] and [SCTP_STEWART_2002]). |
| 188 | */ | 188 | */ |
| 189 | static inline int sctp_cacc_skip(struct sctp_transport *primary, | 189 | static inline int sctp_cacc_skip(struct sctp_transport *primary, |
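The SOULD/SHOULD fix sits inside the comment spelling out steps 3.1 to 3.3 for suppressing missing-report increments during a changeover. To see where those steps land in code, sctp_cacc_skip() directly below combines them roughly as sketched here (a paraphrase of the function that follows, not a verbatim quote):

/* Paraphrase of sctp_cacc_skip(): suppress the increment only while a
 * changeover is active and either sub-rule 3.1 or 3.2 says to skip. */
if (primary->cacc.changeover_active &&
    (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
     sctp_cacc_skip_3_2(primary, tsn)))
	return 1;
return 0;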
| @@ -843,7 +843,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) | |||
| 843 | case SCTP_CID_ECN_CWR: | 843 | case SCTP_CID_ECN_CWR: |
| 844 | case SCTP_CID_ASCONF_ACK: | 844 | case SCTP_CID_ASCONF_ACK: |
| 845 | one_packet = 1; | 845 | one_packet = 1; |
| 846 | /* Fall throught */ | 846 | /* Fall through */ |
| 847 | 847 | ||
| 848 | case SCTP_CID_SACK: | 848 | case SCTP_CID_SACK: |
| 849 | case SCTP_CID_HEARTBEAT: | 849 | case SCTP_CID_HEARTBEAT: |
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index b21b218d564f..5f86ee4b54c1 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
| @@ -482,7 +482,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc, | |||
| 482 | * If the timer was a heartbeat, we only increment error counts | 482 | * If the timer was a heartbeat, we only increment error counts |
| 483 | * when we already have an outstanding HEARTBEAT that has not | 483 | * when we already have an outstanding HEARTBEAT that has not |
| 484 | * been acknowledged. | 484 | * been acknowledged. |
| 485 | * Additionaly, some tranport states inhibit error increments. | 485 | * Additionally, some transport states inhibit error increments. |
| 486 | */ | 486 | */ |
| 487 | if (!is_hb) { | 487 | if (!is_hb) { |
| 488 | asoc->overall_error_count++; | 488 | asoc->overall_error_count++; |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 4b4eb7c96bbd..76792083c379 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
| @@ -551,7 +551,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep, | |||
| 551 | * | 551 | * |
| 552 | * This means that if we only want to abort associations | 552 | * This means that if we only want to abort associations |
| 553 | * in an authenticated way (i.e AUTH+ABORT), then we | 553 | * in an authenticated way (i.e AUTH+ABORT), then we |
| 554 | * can't destroy this association just becuase the packet | 554 | * can't destroy this association just because the packet |
| 555 | * was malformed. | 555 | * was malformed. |
| 556 | */ | 556 | */ |
| 557 | if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) | 557 | if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) |
| @@ -1546,7 +1546,7 @@ cleanup: | |||
| 1546 | } | 1546 | } |
| 1547 | 1547 | ||
| 1548 | /* | 1548 | /* |
| 1549 | * Handle simultanous INIT. | 1549 | * Handle simultaneous INIT. |
| 1550 | * This means we started an INIT and then we got an INIT request from | 1550 | * This means we started an INIT and then we got an INIT request from |
| 1551 | * our peer. | 1551 | * our peer. |
| 1552 | * | 1552 | * |
| @@ -2079,7 +2079,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort( | |||
| 2079 | * RFC 2960, Section 3.3.7 | 2079 | * RFC 2960, Section 3.3.7 |
| 2080 | * If an endpoint receives an ABORT with a format error or for an | 2080 | * If an endpoint receives an ABORT with a format error or for an |
| 2081 | * association that doesn't exist, it MUST silently discard it. | 2081 | * association that doesn't exist, it MUST silently discard it. |
| 2082 | * Becasue the length is "invalid", we can't really discard just | 2082 | * Because the length is "invalid", we can't really discard just |
| 2083 | * as we do not know its true length. So, to be safe, discard the | 2083 | * as we do not know its true length. So, to be safe, discard the |
| 2084 | * packet. | 2084 | * packet. |
| 2085 | */ | 2085 | */ |
| @@ -2120,7 +2120,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep, | |||
| 2120 | * RFC 2960, Section 3.3.7 | 2120 | * RFC 2960, Section 3.3.7 |
| 2121 | * If an endpoint receives an ABORT with a format error or for an | 2121 | * If an endpoint receives an ABORT with a format error or for an |
| 2122 | * association that doesn't exist, it MUST silently discard it. | 2122 | * association that doesn't exist, it MUST silently discard it. |
| 2123 | * Becasue the length is "invalid", we can't really discard just | 2123 | * Because the length is "invalid", we can't really discard just |
| 2124 | * as we do not know its true length. So, to be safe, discard the | 2124 | * as we do not know its true length. So, to be safe, discard the |
| 2125 | * packet. | 2125 | * packet. |
| 2126 | */ | 2126 | */ |
| @@ -2381,7 +2381,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep, | |||
| 2381 | * RFC 2960, Section 3.3.7 | 2381 | * RFC 2960, Section 3.3.7 |
| 2382 | * If an endpoint receives an ABORT with a format error or for an | 2382 | * If an endpoint receives an ABORT with a format error or for an |
| 2383 | * association that doesn't exist, it MUST silently discard it. | 2383 | * association that doesn't exist, it MUST silently discard it. |
| 2384 | * Becasue the length is "invalid", we can't really discard just | 2384 | * Because the length is "invalid", we can't really discard just |
| 2385 | * as we do not know its true length. So, to be safe, discard the | 2385 | * as we do not know its true length. So, to be safe, discard the |
| 2386 | * packet. | 2386 | * packet. |
| 2387 | */ | 2387 | */ |
| @@ -2448,7 +2448,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep, | |||
| 2448 | * RFC 2960, Section 3.3.7 | 2448 | * RFC 2960, Section 3.3.7 |
| 2449 | * If an endpoint receives an ABORT with a format error or for an | 2449 | * If an endpoint receives an ABORT with a format error or for an |
| 2450 | * association that doesn't exist, it MUST silently discard it. | 2450 | * association that doesn't exist, it MUST silently discard it. |
| 2451 | * Becasue the length is "invalid", we can't really discard just | 2451 | * Because the length is "invalid", we can't really discard just |
| 2452 | * as we do not know its true length. So, to be safe, discard the | 2452 | * as we do not know its true length. So, to be safe, discard the |
| 2453 | * packet. | 2453 | * packet. |
| 2454 | */ | 2454 | */ |
| @@ -3855,7 +3855,7 @@ gen_shutdown: | |||
| 3855 | } | 3855 | } |
| 3856 | 3856 | ||
| 3857 | /* | 3857 | /* |
| 3858 | * SCTP-AUTH Section 6.3 Receving authenticated chukns | 3858 | * SCTP-AUTH Section 6.3 Receiving authenticated chunks |
| 3859 | * | 3859 | * |
| 3860 | * The receiver MUST use the HMAC algorithm indicated in the HMAC | 3860 | * The receiver MUST use the HMAC algorithm indicated in the HMAC |
| 3861 | * Identifier field. If this algorithm was not specified by the | 3861 | * Identifier field. If this algorithm was not specified by the |
| @@ -4231,7 +4231,7 @@ static sctp_disposition_t sctp_sf_abort_violation( | |||
| 4231 | * | 4231 | * |
| 4232 | * This means that if we only want to abort associations | 4232 | * This means that if we only want to abort associations |
| 4233 | * in an authenticated way (i.e AUTH+ABORT), then we | 4233 | * in an authenticated way (i.e AUTH+ABORT), then we |
| 4234 | * can't destroy this association just becuase the packet | 4234 | * can't destroy this association just because the packet |
| 4235 | * was malformed. | 4235 | * was malformed. |
| 4236 | */ | 4236 | */ |
| 4237 | if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) | 4237 | if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) |
| @@ -4402,9 +4402,9 @@ static sctp_disposition_t sctp_sf_violation_ctsn( | |||
| 4402 | } | 4402 | } |
| 4403 | 4403 | ||
| 4404 | /* Handle protocol violation of an invalid chunk bundling. For example, | 4404 | /* Handle protocol violation of an invalid chunk bundling. For example, |
| 4405 | * when we have an association and we recieve bundled INIT-ACK, or | 4405 | * when we have an association and we receive bundled INIT-ACK, or |
| 4406 | * SHUDOWN-COMPLETE, our peer is clearly violationg the "MUST NOT bundle" | 4406 | * SHUTDOWN-COMPLETE, our peer is clearly violating the "MUST NOT bundle" |
| 4407 | * statement from the specs. Additinally, there might be an attacker | 4407 | * statement from the specs. Additionally, there might be an attacker |
| 4408 | * on the path and we may not want to continue this communication. | 4408 | * on the path and we may not want to continue this communication. |
| 4409 | */ | 4409 | */ |
| 4410 | static sctp_disposition_t sctp_sf_violation_chunk( | 4410 | static sctp_disposition_t sctp_sf_violation_chunk( |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 3951a10605bc..deb82e35a107 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -1193,7 +1193,7 @@ out_free: | |||
| 1193 | * an endpoint that is multi-homed. Much like sctp_bindx() this call | 1193 | * an endpoint that is multi-homed. Much like sctp_bindx() this call |
| 1194 | * allows a caller to specify multiple addresses at which a peer can be | 1194 | * allows a caller to specify multiple addresses at which a peer can be |
| 1195 | * reached. The way the SCTP stack uses the list of addresses to set up | 1195 | * reached. The way the SCTP stack uses the list of addresses to set up |
| 1196 | * the association is implementation dependant. This function only | 1196 | * the association is implementation dependent. This function only |
| 1197 | * specifies that the stack will try to make use of all the addresses in | 1197 | * specifies that the stack will try to make use of all the addresses in |
| 1198 | * the list when needed. | 1198 | * the list when needed. |
| 1199 | * | 1199 | * |
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index aa72e89c3ee1..dff27d5e22fd 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c | |||
| @@ -554,7 +554,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_send_failed( | |||
| 554 | memcpy(&ssf->ssf_info, &chunk->sinfo, sizeof(struct sctp_sndrcvinfo)); | 554 | memcpy(&ssf->ssf_info, &chunk->sinfo, sizeof(struct sctp_sndrcvinfo)); |
| 555 | 555 | ||
| 556 | /* Per TSVWG discussion with Randy. Allow the application to | 556 | /* Per TSVWG discussion with Randy. Allow the application to |
| 557 | * ressemble a fragmented message. | 557 | * reassemble a fragmented message. |
| 558 | */ | 558 | */ |
| 559 | ssf->ssf_info.sinfo_flags = chunk->chunk_hdr->flags; | 559 | ssf->ssf_info.sinfo_flags = chunk->chunk_hdr->flags; |
| 560 | 560 | ||
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index 17678189d054..f2d1de7f2ffb 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c | |||
| @@ -240,7 +240,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) | |||
| 240 | } else { | 240 | } else { |
| 241 | /* | 241 | /* |
| 242 | * If fragment interleave is enabled, we | 242 | * If fragment interleave is enabled, we |
| 243 | * can queue this to the recieve queue instead | 243 | * can queue this to the receive queue instead |
| 244 | * of the lobby. | 244 | * of the lobby. |
| 245 | */ | 245 | */ |
| 246 | if (sctp_sk(sk)->frag_interleave) | 246 | if (sctp_sk(sk)->frag_interleave) |
diff --git a/net/socket.c b/net/socket.c index 5212447c86e7..310d16b1b3c9 100644 --- a/net/socket.c +++ b/net/socket.c | |||
| @@ -2986,7 +2986,7 @@ out: | |||
| 2986 | 2986 | ||
| 2987 | /* Since old style bridge ioctl's endup using SIOCDEVPRIVATE | 2987 | /* Since old style bridge ioctl's end up using SIOCDEVPRIVATE |
| 2988 | * for some operations; this forces use of the newer bridge-utils that | 2988 | * for some operations; this forces use of the newer bridge-utils that |
| 2989 | * use compatiable ioctls | 2989 | * use compatible ioctls |
| 2990 | */ | 2990 | */ |
| 2991 | static int old_bridge_ioctl(compat_ulong_t __user *argp) | 2991 | static int old_bridge_ioctl(compat_ulong_t __user *argp) |
| 2992 | { | 2992 | { |
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 9022f0a6503e..0a9a2ec2e469 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c | |||
| @@ -427,7 +427,7 @@ static int | |||
| 427 | context_derive_keys_rc4(struct krb5_ctx *ctx) | 427 | context_derive_keys_rc4(struct krb5_ctx *ctx) |
| 428 | { | 428 | { |
| 429 | struct crypto_hash *hmac; | 429 | struct crypto_hash *hmac; |
| 430 | static const char sigkeyconstant[] = "signaturekey"; | 430 | char sigkeyconstant[] = "signaturekey"; |
| 431 | int slen = strlen(sigkeyconstant) + 1; /* include null terminator */ | 431 | int slen = strlen(sigkeyconstant) + 1; /* include null terminator */ |
| 432 | struct hash_desc desc; | 432 | struct hash_desc desc; |
| 433 | struct scatterlist sg[1]; | 433 | struct scatterlist sg[1]; |
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index bcdae78fdfc6..8d0f7d3c71c8 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c | |||
| @@ -1101,7 +1101,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp) | |||
| 1101 | 1101 | ||
| 1102 | /* credential is: | 1102 | /* credential is: |
| 1103 | * version(==1), proc(0,1,2,3), seq, service (1,2,3), handle | 1103 | * version(==1), proc(0,1,2,3), seq, service (1,2,3), handle |
| 1104 | * at least 5 u32s, and is preceeded by length, so that makes 6. | 1104 | * at least 5 u32s, and is preceded by length, so that makes 6. |
| 1105 | */ | 1105 | */ |
| 1106 | 1106 | ||
| 1107 | if (argv->iov_len < 5 * 4) | 1107 | if (argv->iov_len < 5 * 4) |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 1e336a06d3e6..bf005d3c65ef 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
| @@ -504,7 +504,7 @@ static int xs_nospace(struct rpc_task *task) | |||
| 504 | * EAGAIN: The socket was blocked, please call again later to | 504 | * EAGAIN: The socket was blocked, please call again later to |
| 505 | * complete the request | 505 | * complete the request |
| 506 | * ENOTCONN: Caller needs to invoke connect logic then call again | 506 | * ENOTCONN: Caller needs to invoke connect logic then call again |
| 507 | * other: Some other error occured, the request was not sent | 507 | * other: Some other error occurred, the request was not sent |
| 508 | */ | 508 | */ |
| 509 | static int xs_udp_send_request(struct rpc_task *task) | 509 | static int xs_udp_send_request(struct rpc_task *task) |
| 510 | { | 510 | { |
| @@ -590,7 +590,7 @@ static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf) | |||
| 590 | * EAGAIN: The socket was blocked, please call again later to | 590 | * EAGAIN: The socket was blocked, please call again later to |
| 591 | * complete the request | 591 | * complete the request |
| 592 | * ENOTCONN: Caller needs to invoke connect logic then call again | 592 | * ENOTCONN: Caller needs to invoke connect logic then call again |
| 593 | * other: Some other error occured, the request was not sent | 593 | * other: Some other error occurred, the request was not sent |
| 594 | * | 594 | * |
| 595 | * XXX: In the case of soft timeouts, should we eventually give up | 595 | * XXX: In the case of soft timeouts, should we eventually give up |
| 596 | * if sendmsg is not able to make progress? | 596 | * if sendmsg is not able to make progress? |
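Both send paths in xprtsock.c document the same return-value contract, and the only change in these two hunks is the occured/occurred spelling. For orientation, a caller honouring that contract would branch roughly as below; the switch is hypothetical, not the RPC core's actual code, and the negative-errno convention is an assumption.

/* Hypothetical caller, following the contract documented above. */
status = xs_udp_send_request(task);
switch (status) {
case 0:
	break;			/* request fully written to the socket */
case -EAGAIN:
	/* socket was blocked: call again later to complete the request */
	break;
case -ENOTCONN:
	/* run the connect logic, then call again */
	break;
default:
	/* some other error occurred, the request was not sent */
	break;
}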
diff --git a/net/tipc/link.c b/net/tipc/link.c index 43639ff1cbec..ebf338f7b14e 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
| @@ -2471,7 +2471,7 @@ exit: | |||
| 2471 | * A pending message being re-assembled must store certain values | 2471 | * A pending message being re-assembled must store certain values |
| 2472 | * to handle subsequent fragments correctly. The following functions | 2472 | * to handle subsequent fragments correctly. The following functions |
| 2473 | * help storing these values in unused, available fields in the | 2473 | * help storing these values in unused, available fields in the |
| 2474 | * pending message. This makes dynamic memory allocation unecessary. | 2474 | * pending message. This makes dynamic memory allocation unnecessary. |
| 2475 | */ | 2475 | */ |
| 2476 | 2476 | ||
| 2477 | static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno) | 2477 | static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno) |
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index c9fa6dfcf287..80025a1b3bfd 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c | |||
| @@ -160,7 +160,7 @@ void tipc_named_withdraw(struct publication *publ) | |||
| 160 | 160 | ||
| 161 | buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0); | 161 | buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0); |
| 162 | if (!buf) { | 162 | if (!buf) { |
| 163 | warn("Withdrawl distribution failure\n"); | 163 | warn("Withdrawal distribution failure\n"); |
| 164 | return; | 164 | return; |
| 165 | } | 165 | } |
| 166 | 166 | ||
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 1663e1a2efdd..3a43a8304768 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
| @@ -207,7 +207,7 @@ static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp) | |||
| 207 | /* | 207 | /* |
| 208 | * This may look like an off by one error but it is a bit more | 208 | * This may look like an off by one error but it is a bit more |
| 209 | * subtle. 108 is the longest valid AF_UNIX path for a binding. | 209 | * subtle. 108 is the longest valid AF_UNIX path for a binding. |
| 210 | * sun_path[108] doesnt as such exist. However in kernel space | 210 | * sun_path[108] doesn't as such exist. However in kernel space |
| 211 | * we are guaranteed that it is a valid memory location in our | 211 | * we are guaranteed that it is a valid memory location in our |
| 212 | * kernel address buffer. | 212 | * kernel address buffer. |
| 213 | */ | 213 | */ |
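The unix_mkname() comment clarified above leans on a size fact worth seeing once: on Linux sun_path is 108 bytes, so sun_path[108] is one past the end of the array, yet still inside the larger buffer the kernel copies the address into. A quick userspace check of those sizes:

#include <stdio.h>
#include <sys/un.h>

int main(void)
{
	struct sockaddr_un sun;

	printf("sizeof(struct sockaddr_un) = %zu\n", sizeof(sun));
	printf("sizeof(sun.sun_path)       = %zu\n", sizeof(sun.sun_path));
	return 0;
}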
diff --git a/net/wanrouter/wanproc.c b/net/wanrouter/wanproc.c index 11f25c7a7a05..f346395314ba 100644 --- a/net/wanrouter/wanproc.c +++ b/net/wanrouter/wanproc.c | |||
| @@ -51,7 +51,7 @@ | |||
| 51 | 51 | ||
| 52 | /* | 52 | /* |
| 53 | * Structures for interfacing with the /proc filesystem. | 53 | * Structures for interfacing with the /proc filesystem. |
| 54 | * Router creates its own directory /proc/net/router with the folowing | 54 | * Router creates its own directory /proc/net/router with the following |
| 55 | * entries: | 55 | * entries: |
| 56 | * config device configuration | 56 | * config device configuration |
| 57 | * status global device statistics | 57 | * status global device statistics |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 3332d5bce317..ab801a1097b2 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
| @@ -809,7 +809,7 @@ static void handle_channel(struct wiphy *wiphy, | |||
| 809 | if (r) { | 809 | if (r) { |
| 810 | /* | 810 | /* |
| 811 | * We will disable all channels that do not match our | 811 | * We will disable all channels that do not match our |
| 812 | * recieved regulatory rule unless the hint is coming | 812 | * received regulatory rule unless the hint is coming |
| 813 | * from a Country IE and the Country IE had no information | 813 | * from a Country IE and the Country IE had no information |
| 814 | * about a band. The IEEE 802.11 spec allows for an AP | 814 | * about a band. The IEEE 802.11 spec allows for an AP |
| 815 | * to send only a subset of the regulatory rules allowed, | 815 | * to send only a subset of the regulatory rules allowed, |
| @@ -838,7 +838,7 @@ static void handle_channel(struct wiphy *wiphy, | |||
| 838 | request_wiphy && request_wiphy == wiphy && | 838 | request_wiphy && request_wiphy == wiphy && |
| 839 | request_wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) { | 839 | request_wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) { |
| 840 | /* | 840 | /* |
| 841 | * This gaurantees the driver's requested regulatory domain | 841 | * This guarantees the driver's requested regulatory domain |
| 842 | * will always be used as a base for further regulatory | 842 | * will always be used as a base for further regulatory |
| 843 | * settings | 843 | * settings |
| 844 | */ | 844 | */ |
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c index 406207515b5e..f77e4e75f914 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c | |||
| @@ -31,7 +31,7 @@ | |||
| 31 | * x25_parse_facilities - Parse facilities from skb into the facilities structs | 31 | * x25_parse_facilities - Parse facilities from skb into the facilities structs |
| 32 | * | 32 | * |
| 33 | * @skb: sk_buff to parse | 33 | * @skb: sk_buff to parse |
| 34 | * @facilities: Regular facilites, updated as facilities are found | 34 | * @facilities: Regular facilities, updated as facilities are found |
| 35 | * @dte_facs: ITU DTE facilities, updated as DTE facilities are found | 35 | * @dte_facs: ITU DTE facilities, updated as DTE facilities are found |
| 36 | * @vc_fac_mask: mask is updated with all facilities found | 36 | * @vc_fac_mask: mask is updated with all facilities found |
| 37 | * | 37 | * |
diff --git a/net/x25/x25_forward.c b/net/x25/x25_forward.c index 25a810793968..c541b622ae16 100644 --- a/net/x25/x25_forward.c +++ b/net/x25/x25_forward.c | |||
| @@ -31,7 +31,7 @@ int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from, | |||
| 31 | goto out_no_route; | 31 | goto out_no_route; |
| 32 | 32 | ||
| 33 | if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) { | 33 | if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) { |
| 34 | /* This shouldnt happen, if it occurs somehow | 34 | /* This shouldn't happen, if it occurs somehow |
| 35 | * do something sensible | 35 | * do something sensible |
| 36 | */ | 36 | */ |
| 37 | goto out_put_route; | 37 | goto out_put_route; |
| @@ -45,7 +45,7 @@ int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from, | |||
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | /* Remote end sending a call request on an already | 47 | /* Remote end sending a call request on an already |
| 48 | * established LCI? It shouldnt happen, just in case.. | 48 | * established LCI? It shouldn't happen, just in case.. |
| 49 | */ | 49 | */ |
| 50 | read_lock_bh(&x25_forward_list_lock); | 50 | read_lock_bh(&x25_forward_list_lock); |
| 51 | list_for_each(entry, &x25_forward_list) { | 51 | list_for_each(entry, &x25_forward_list) { |
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 3d15d3e1b2c4..5d1d60d3ca83 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
| @@ -894,7 +894,7 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net, | |||
| 894 | u32 *f; | 894 | u32 *f; |
| 895 | 895 | ||
| 896 | nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0); | 896 | nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0); |
| 897 | if (nlh == NULL) /* shouldnt really happen ... */ | 897 | if (nlh == NULL) /* shouldn't really happen ... */ |
| 898 | return -EMSGSIZE; | 898 | return -EMSGSIZE; |
| 899 | 899 | ||
| 900 | f = nlmsg_data(nlh); | 900 | f = nlmsg_data(nlh); |
| @@ -954,7 +954,7 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net, | |||
| 954 | u32 *f; | 954 | u32 *f; |
| 955 | 955 | ||
| 956 | nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0); | 956 | nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0); |
| 957 | if (nlh == NULL) /* shouldnt really happen ... */ | 957 | if (nlh == NULL) /* shouldn't really happen ... */ |
| 958 | return -EMSGSIZE; | 958 | return -EMSGSIZE; |
| 959 | 959 | ||
| 960 | f = nlmsg_data(nlh); | 960 | f = nlmsg_data(nlh); |
| @@ -1361,7 +1361,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
| 1361 | if (!xp) | 1361 | if (!xp) |
| 1362 | return err; | 1362 | return err; |
| 1363 | 1363 | ||
| 1364 | /* shouldnt excl be based on nlh flags?? | 1364 | /* shouldn't excl be based on nlh flags?? |
| 1365 | * Aha! this is anti-netlink really i.e more pfkey derived | 1365 | * Aha! this is anti-netlink really i.e more pfkey derived |
| 1366 | * in netlink excl is a flag and you wouldnt need | 1366 | * in netlink excl is a flag and you wouldn't need |
| 1367 | * a type XFRM_MSG_UPDPOLICY - JHS */ | 1367 | * a type XFRM_MSG_UPDPOLICY - JHS */ |
