Diffstat (limited to 'net')
-rw-r--r--  net/802/tr.c                           | 22
-rw-r--r--  net/ax25/af_ax25.c                     | 27
-rw-r--r--  net/ax25/ax25_route.c                  | 12
-rw-r--r--  net/ax25/ax25_uid.c                    | 83
-rw-r--r--  net/decnet/dn_neigh.c                  |  2
-rw-r--r--  net/ipv4/fib_trie.c                    | 14
-rw-r--r--  net/ipv4/icmp.c                        | 12
-rw-r--r--  net/ipv4/inetpeer.c                    | 11
-rw-r--r--  net/ipv4/ipcomp.c                      |  2
-rw-r--r--  net/ipv4/netfilter/ip_queue.c          |  7
-rw-r--r--  net/ipv4/netfilter/ipt_ECN.c           | 17
-rw-r--r--  net/ipv4/netfilter/ipt_TCPMSS.c        |  7
-rw-r--r--  net/ipv4/tcp.c                         |  2
-rw-r--r--  net/ipv4/tcp_ipv4.c                    |  9
-rw-r--r--  net/ipv4/tcp_output.c                  | 43
-rw-r--r--  net/ipv6/ip6_input.c                   |  9
-rw-r--r--  net/ipv6/ipcomp6.c                     |  2
-rw-r--r--  net/ipv6/netfilter/ip6_queue.c         |  7
-rw-r--r--  net/ipv6/raw.c                         |  2
-rw-r--r--  net/ipv6/tcp_ipv6.c                    |  9
-rw-r--r--  net/netrom/af_netrom.c                 | 31
-rw-r--r--  net/rose/af_rose.c                     | 27
-rw-r--r--  net/rose/rose_route.c                  |  6
-rw-r--r--  net/sched/sch_generic.c                |  1
-rw-r--r--  net/sctp/proc.c                        |  1
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c  |  2
26 files changed, 197 insertions, 170 deletions
diff --git a/net/802/tr.c b/net/802/tr.c
index a755e880f4ba..1bb7dc1b85cd 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -251,10 +251,11 @@ void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh,struct net_device *
 	unsigned int hash;
 	struct rif_cache *entry;
 	unsigned char *olddata;
+	unsigned long flags;
 	static const unsigned char mcast_func_addr[]
 		= {0xC0,0x00,0x00,0x04,0x00,0x00};
 
-	spin_lock_bh(&rif_lock);
+	spin_lock_irqsave(&rif_lock, flags);
 
 	/*
 	 * Broadcasts are single route as stated in RFC 1042
@@ -323,7 +324,7 @@ printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",trh->daddr[0],
 	else
 		slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8);
 	olddata = skb->data;
-	spin_unlock_bh(&rif_lock);
+	spin_unlock_irqrestore(&rif_lock, flags);
 
 	skb_pull(skb, slack);
 	memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack);
@@ -337,10 +338,11 @@ printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",trh->daddr[0],
 static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
 {
 	unsigned int hash, rii_p = 0;
+	unsigned long flags;
 	struct rif_cache *entry;
 
 
-	spin_lock_bh(&rif_lock);
+	spin_lock_irqsave(&rif_lock, flags);
 
 	/*
 	 * Firstly see if the entry exists
@@ -378,7 +380,7 @@ printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
 		if(!entry)
 		{
 			printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n");
-			spin_unlock_bh(&rif_lock);
+			spin_unlock_irqrestore(&rif_lock, flags);
 			return;
 		}
 
@@ -420,7 +422,7 @@ printk("updating rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
 		}
 		entry->last_used=jiffies;
 	}
-	spin_unlock_bh(&rif_lock);
+	spin_unlock_irqrestore(&rif_lock, flags);
 }
 
 /*
@@ -430,9 +432,9 @@ printk("updating rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
 static void rif_check_expire(unsigned long dummy)
 {
 	int i;
-	unsigned long next_interval = jiffies + sysctl_tr_rif_timeout/2;
+	unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2;
 
-	spin_lock_bh(&rif_lock);
+	spin_lock_irqsave(&rif_lock, flags);
 
 	for(i =0; i < RIF_TABLE_SIZE; i++) {
 		struct rif_cache *entry, **pentry;
@@ -454,7 +456,7 @@ static void rif_check_expire(unsigned long dummy)
 		}
 	}
 
-	spin_unlock_bh(&rif_lock);
+	spin_unlock_irqrestore(&rif_lock, flags);
 
 	mod_timer(&rif_timer, next_interval);
 
@@ -485,7 +487,7 @@ static struct rif_cache *rif_get_idx(loff_t pos)
 
 static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
 {
-	spin_lock_bh(&rif_lock);
+	spin_lock_irq(&rif_lock);
 
 	return *pos ? rif_get_idx(*pos - 1) : SEQ_START_TOKEN;
 }
@@ -516,7 +518,7 @@ static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 
 static void rif_seq_stop(struct seq_file *seq, void *v)
 {
-	spin_unlock_bh(&rif_lock);
+	spin_unlock_irq(&rif_lock);
 }
 
 static int rif_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 707097deac3d..a5c94f11547c 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -875,12 +875,7 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
 	sk->sk_sndbuf = osk->sk_sndbuf;
 	sk->sk_state = TCP_ESTABLISHED;
 	sk->sk_sleep = osk->sk_sleep;
-
-	if (sock_flag(osk, SOCK_DBG))
-		sock_set_flag(sk, SOCK_DBG);
-
-	if (sock_flag(osk, SOCK_ZAPPED))
-		sock_set_flag(sk, SOCK_ZAPPED);
+	sock_copy_flags(sk, osk);
 
 	oax25 = ax25_sk(osk);
 
@@ -1007,7 +1002,8 @@ static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	struct sock *sk = sock->sk;
 	struct full_sockaddr_ax25 *addr = (struct full_sockaddr_ax25 *)uaddr;
 	ax25_dev *ax25_dev = NULL;
-	ax25_address *call;
+	ax25_uid_assoc *user;
+	ax25_address call;
 	ax25_cb *ax25;
 	int err = 0;
 
@@ -1026,9 +1022,15 @@ static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	if (addr->fsa_ax25.sax25_family != AF_AX25)
 		return -EINVAL;
 
-	call = ax25_findbyuid(current->euid);
-	if (call == NULL && ax25_uid_policy && !capable(CAP_NET_ADMIN)) {
-		return -EACCES;
+	user = ax25_findbyuid(current->euid);
+	if (user) {
+		call = user->call;
+		ax25_uid_put(user);
+	} else {
+		if (ax25_uid_policy && !capable(CAP_NET_ADMIN))
+			return -EACCES;
+
+		call = addr->fsa_ax25.sax25_call;
 	}
 
 	lock_sock(sk);
@@ -1039,10 +1041,7 @@ static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		goto out;
 	}
 
-	if (call == NULL)
-		ax25->source_addr = addr->fsa_ax25.sax25_call;
-	else
-		ax25->source_addr = *call;
+	ax25->source_addr = call;
 
 	/*
 	 * User already set interface with SO_BINDTODEVICE
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index 44b99b1ff9f8..c288526da4ce 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -422,8 +422,8 @@ static inline void ax25_adjust_path(ax25_address *addr, ax25_digi *digipeat)
  */
 int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
 {
+	ax25_uid_assoc *user;
 	ax25_route *ax25_rt;
-	ax25_address *call;
 	int err;
 
 	if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL)
@@ -434,16 +434,18 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
 		goto put;
 	}
 
-	if ((call = ax25_findbyuid(current->euid)) == NULL) {
+	user = ax25_findbyuid(current->euid);
+	if (user) {
+		ax25->source_addr = user->call;
+		ax25_uid_put(user);
+	} else {
 		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
 			err = -EPERM;
 			goto put;
 		}
-		call = (ax25_address *)ax25->ax25_dev->dev->dev_addr;
+		ax25->source_addr = *(ax25_address *)ax25->ax25_dev->dev->dev_addr;
 	}
 
-	ax25->source_addr = *call;
-
 	if (ax25_rt->digipeat != NULL) {
 		if ((ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
 			err = -ENOMEM;
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index cea6b7d19729..a8b3822f3ee4 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -28,6 +28,7 @@
 #include <linux/fcntl.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
+#include <linux/list.h>
 #include <linux/notifier.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -41,38 +42,41 @@
  * Callsign/UID mapper. This is in kernel space for security on multi-amateur machines.
  */
 
-static ax25_uid_assoc *ax25_uid_list;
+HLIST_HEAD(ax25_uid_list);
 static DEFINE_RWLOCK(ax25_uid_lock);
 
 int ax25_uid_policy = 0;
 
-ax25_address *ax25_findbyuid(uid_t uid)
+ax25_uid_assoc *ax25_findbyuid(uid_t uid)
 {
-	ax25_uid_assoc *ax25_uid;
-	ax25_address *res = NULL;
+	ax25_uid_assoc *ax25_uid, *res = NULL;
+	struct hlist_node *node;
 
 	read_lock(&ax25_uid_lock);
-	for (ax25_uid = ax25_uid_list; ax25_uid != NULL; ax25_uid = ax25_uid->next) {
+	ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
 		if (ax25_uid->uid == uid) {
-			res = &ax25_uid->call;
+			ax25_uid_hold(ax25_uid);
+			res = ax25_uid;
 			break;
 		}
 	}
 	read_unlock(&ax25_uid_lock);
 
-	return NULL;
+	return res;
 }
 
 int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
 {
-	ax25_uid_assoc *s, *ax25_uid;
+	ax25_uid_assoc *ax25_uid;
+	struct hlist_node *node;
+	ax25_uid_assoc *user;
 	unsigned long res;
 
 	switch (cmd) {
 	case SIOCAX25GETUID:
 		res = -ENOENT;
 		read_lock(&ax25_uid_lock);
-		for (ax25_uid = ax25_uid_list; ax25_uid != NULL; ax25_uid = ax25_uid->next) {
+		ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
 			if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) {
 				res = ax25_uid->uid;
 				break;
@@ -85,19 +89,22 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
 	case SIOCAX25ADDUID:
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
-		if (ax25_findbyuid(sax->sax25_uid))
+		user = ax25_findbyuid(sax->sax25_uid);
+		if (user) {
+			ax25_uid_put(user);
 			return -EEXIST;
+		}
 		if (sax->sax25_uid == 0)
 			return -EINVAL;
 		if ((ax25_uid = kmalloc(sizeof(*ax25_uid), GFP_KERNEL)) == NULL)
 			return -ENOMEM;
 
+		atomic_set(&ax25_uid->refcount, 1);
 		ax25_uid->uid = sax->sax25_uid;
 		ax25_uid->call = sax->sax25_call;
 
 		write_lock(&ax25_uid_lock);
-		ax25_uid->next = ax25_uid_list;
-		ax25_uid_list = ax25_uid;
+		hlist_add_head(&ax25_uid->uid_node, &ax25_uid_list);
 		write_unlock(&ax25_uid_lock);
 
 		return 0;
@@ -106,34 +113,21 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 
+		ax25_uid = NULL;
 		write_lock(&ax25_uid_lock);
-		for (ax25_uid = ax25_uid_list; ax25_uid != NULL; ax25_uid = ax25_uid->next) {
-			if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) {
+		ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
+			if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0)
 				break;
-			}
 		}
 		if (ax25_uid == NULL) {
 			write_unlock(&ax25_uid_lock);
 			return -ENOENT;
 		}
-		if ((s = ax25_uid_list) == ax25_uid) {
-			ax25_uid_list = s->next;
-			write_unlock(&ax25_uid_lock);
-			kfree(ax25_uid);
-			return 0;
-		}
-		while (s != NULL && s->next != NULL) {
-			if (s->next == ax25_uid) {
-				s->next = ax25_uid->next;
-				write_unlock(&ax25_uid_lock);
-				kfree(ax25_uid);
-				return 0;
-			}
-			s = s->next;
-		}
+		hlist_del_init(&ax25_uid->uid_node);
+		ax25_uid_put(ax25_uid);
 		write_unlock(&ax25_uid_lock);
 
-		return -ENOENT;
+		return 0;
 
 	default:
 		return -EINVAL;
@@ -147,13 +141,11 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
 static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	struct ax25_uid_assoc *pt;
-	int i = 1;
+	struct hlist_node *node;
+	int i = 0;
 
 	read_lock(&ax25_uid_lock);
-	if (*pos == 0)
-		return SEQ_START_TOKEN;
-
-	for (pt = ax25_uid_list; pt != NULL; pt = pt->next) {
+	ax25_uid_for_each(pt, node, &ax25_uid_list) {
 		if (i == *pos)
 			return pt;
 		++i;
@@ -164,8 +156,9 @@ static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
 static void *ax25_uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	++*pos;
-	return (v == SEQ_START_TOKEN) ? ax25_uid_list :
-		((struct ax25_uid_assoc *) v)->next;
+
+	return hlist_entry(((ax25_uid_assoc *)v)->uid_node.next,
+			   ax25_uid_assoc, uid_node);
 }
 
 static void ax25_uid_seq_stop(struct seq_file *seq, void *v)
@@ -179,7 +172,6 @@ static int ax25_uid_seq_show(struct seq_file *seq, void *v)
 		seq_printf(seq, "Policy: %d\n", ax25_uid_policy);
 	else {
 		struct ax25_uid_assoc *pt = v;
-
 
 		seq_printf(seq, "%6d %s\n", pt->uid, ax2asc(&pt->call));
 	}
@@ -213,16 +205,13 @@ struct file_operations ax25_uid_fops = {
  */
 void __exit ax25_uid_free(void)
 {
-	ax25_uid_assoc *s, *ax25_uid;
+	ax25_uid_assoc *ax25_uid;
+	struct hlist_node *node;
 
 	write_lock(&ax25_uid_lock);
-	ax25_uid = ax25_uid_list;
-	while (ax25_uid != NULL) {
-		s = ax25_uid;
-		ax25_uid = ax25_uid->next;
-
-		kfree(s);
+	ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
+		hlist_del_init(&ax25_uid->uid_node);
+		ax25_uid_put(ax25_uid);
 	}
-	ax25_uid_list = NULL;
 	write_unlock(&ax25_uid_lock);
 }
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index f32dba9e26fe..8d0cc3cf3e49 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -148,12 +148,12 @@ static int dn_neigh_construct(struct neighbour *neigh)
 
 	__neigh_parms_put(neigh->parms);
 	neigh->parms = neigh_parms_clone(parms);
-	rcu_read_unlock();
 
 	if (dn_db->use_long)
 		neigh->ops = &dn_long_ops;
 	else
 		neigh->ops = &dn_short_ops;
+	rcu_read_unlock();
 
 	if (dn->flags & DN_NDFLAG_P3)
 		neigh->ops = &dn_phase3_ops;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index a701405fab0b..45efd5f4741b 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1333,9 +1333,9 @@ err:;
 }
 
 static inline int check_leaf(struct trie *t, struct leaf *l, t_key key, int *plen, const struct flowi *flp,
-			     struct fib_result *res, int *err)
+			     struct fib_result *res)
 {
-	int i;
+	int err, i;
 	t_key mask;
 	struct leaf_info *li;
 	struct hlist_head *hhead = &l->list;
@@ -1348,18 +1348,18 @@ static inline int check_leaf(struct trie *t, struct leaf *l, t_key key, int *pl
 		if (l->key != (key & mask))
 			continue;
 
-		if (((*err) = fib_semantic_match(&li->falh, flp, res, l->key, mask, i)) == 0) {
+		if ((err = fib_semantic_match(&li->falh, flp, res, l->key, mask, i)) <= 0) {
 			*plen = i;
 #ifdef CONFIG_IP_FIB_TRIE_STATS
 			t->stats.semantic_match_passed++;
 #endif
-			return 1;
+			return err;
 		}
 #ifdef CONFIG_IP_FIB_TRIE_STATS
 		t->stats.semantic_match_miss++;
 #endif
 	}
-	return 0;
+	return 1;
 }
 
 static int
@@ -1386,7 +1386,7 @@ fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result
 
 	/* Just a leaf? */
 	if (IS_LEAF(n)) {
-		if (check_leaf(t, (struct leaf *)n, key, &plen, flp, res, &ret))
+		if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
 			goto found;
 		goto failed;
 	}
@@ -1508,7 +1508,7 @@ fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result
 			continue;
 		}
 		if (IS_LEAF(n)) {
-			if (check_leaf(t, (struct leaf *)n, key, &plen, flp, res, &ret))
+			if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
 				goto found;
 		}
 backtrace:
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 3d78464f64ea..badfc5849973 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -349,12 +349,12 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
 {
 	struct sk_buff *skb;
 
-	ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
-		       icmp_param->data_len+icmp_param->head_len,
-		       icmp_param->head_len,
-		       ipc, rt, MSG_DONTWAIT);
-
-	if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
+	if (ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
+			   icmp_param->data_len+icmp_param->head_len,
+			   icmp_param->head_len,
+			   ipc, rt, MSG_DONTWAIT) < 0)
+		ip_flush_pending_frames(icmp_socket->sk);
+	else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
 		struct icmphdr *icmph = skb->h.icmph;
 		unsigned int csum = 0;
 		struct sk_buff *skb1;
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 95473953c406..ab18a853d7ce 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -450,10 +450,13 @@ static void peer_check_expire(unsigned long dummy)
 	/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
 	 * interval depending on the total number of entries (more entries,
 	 * less interval). */
-	peer_periodic_timer.expires = jiffies
-		+ inet_peer_gc_maxtime
-		- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
-			peer_total / inet_peer_threshold * HZ;
+	if (peer_total >= inet_peer_threshold)
+		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
+	else
+		peer_periodic_timer.expires = jiffies
+			+ inet_peer_gc_maxtime
+			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
+				peer_total / inet_peer_threshold * HZ;
 	add_timer(&peer_periodic_timer);
 }
 
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 2065944fd9e5..7ded6e60f43a 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -358,7 +358,7 @@ static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name)
 	int cpu;
 
 	/* This can be any valid CPU ID so we don't need locking. */
-	cpu = smp_processor_id();
+	cpu = raw_smp_processor_id();
 
 	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
 		struct crypto_tfm *tfm;
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index eda1fba431a4..c6baa8174389 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -214,6 +214,12 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
 		break;
 
 	case IPQ_COPY_PACKET:
+		if (entry->skb->ip_summed == CHECKSUM_HW &&
+		    (*errp = skb_checksum_help(entry->skb,
+		                               entry->info->outdev == NULL))) {
+			read_unlock_bh(&queue_lock);
+			return NULL;
+		}
 		if (copy_range == 0 || copy_range > entry->skb->len)
 			data_len = entry->skb->len;
 		else
@@ -385,6 +391,7 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
 	if (!skb_ip_make_writable(&e->skb, v->data_len))
 		return -ENOMEM;
 	memcpy(e->skb->data, v->payload, v->data_len);
+	e->skb->ip_summed = CHECKSUM_NONE;
 	e->skb->nfcache |= NFC_ALTERED;
 
 	/*
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index ada9911118e9..94a0ce1c1c9d 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -61,16 +61,20 @@ set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo, int inward)
 	if (!tcph)
 		return 0;
 
-	if (!(einfo->operation & IPT_ECN_OP_SET_ECE
-	      || tcph->ece == einfo->proto.tcp.ece)
-	    && (!(einfo->operation & IPT_ECN_OP_SET_CWR
-	          || tcph->cwr == einfo->proto.tcp.cwr)))
+	if ((!(einfo->operation & IPT_ECN_OP_SET_ECE) ||
+	     tcph->ece == einfo->proto.tcp.ece) &&
+	    ((!(einfo->operation & IPT_ECN_OP_SET_CWR) ||
+	      tcph->cwr == einfo->proto.tcp.cwr)))
 		return 1;
 
 	if (!skb_ip_make_writable(pskb, (*pskb)->nh.iph->ihl*4+sizeof(*tcph)))
 		return 0;
 	tcph = (void *)(*pskb)->nh.iph + (*pskb)->nh.iph->ihl*4;
 
+	if ((*pskb)->ip_summed == CHECKSUM_HW &&
+	    skb_checksum_help(*pskb, inward))
+		return 0;
+
 	diffs[0] = ((u_int16_t *)tcph)[6];
 	if (einfo->operation & IPT_ECN_OP_SET_ECE)
 		tcph->ece = einfo->proto.tcp.ece;
@@ -79,13 +83,10 @@ set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo, int inward)
 	diffs[1] = ((u_int16_t *)tcph)[6];
 	diffs[0] = diffs[0] ^ 0xFFFF;
 
-	if ((*pskb)->ip_summed != CHECKSUM_HW)
+	if ((*pskb)->ip_summed != CHECKSUM_UNNECESSARY)
 		tcph->check = csum_fold(csum_partial((char *)diffs,
 						     sizeof(diffs),
 						     tcph->check^0xFFFF));
-	else
-		if (skb_checksum_help(*pskb, inward))
-			return 0;
 	(*pskb)->nfcache |= NFC_ALTERED;
 	return 1;
 }
diff --git a/net/ipv4/netfilter/ipt_TCPMSS.c b/net/ipv4/netfilter/ipt_TCPMSS.c
index 1049050b2bfb..7b84a254440e 100644
--- a/net/ipv4/netfilter/ipt_TCPMSS.c
+++ b/net/ipv4/netfilter/ipt_TCPMSS.c
@@ -61,6 +61,10 @@ ipt_tcpmss_target(struct sk_buff **pskb,
 	if (!skb_ip_make_writable(pskb, (*pskb)->len))
 		return NF_DROP;
 
+	if ((*pskb)->ip_summed == CHECKSUM_HW &&
+	    skb_checksum_help(*pskb, out == NULL))
+		return NF_DROP;
+
 	iph = (*pskb)->nh.iph;
 	tcplen = (*pskb)->len - iph->ihl*4;
 
@@ -186,9 +190,6 @@ ipt_tcpmss_target(struct sk_buff **pskb,
 		       newmss);
 
  retmodified:
-	/* We never hw checksum SYN packets. */
-	BUG_ON((*pskb)->ip_summed == CHECKSUM_HW);
-
 	(*pskb)->nfcache |= NFC_UNKNOWN | NFC_ALTERED;
 	return IPT_CONTINUE;
 }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ddb6ce4ecff2..69b1fcf70077 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -584,7 +584,7 @@ static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
 	sk_charge_skb(sk, skb);
 	if (!sk->sk_send_head)
 		sk->sk_send_head = skb;
-	else if (tp->nonagle&TCP_NAGLE_PUSH)
+	if (tp->nonagle & TCP_NAGLE_PUSH)
 		tp->nonagle &= ~TCP_NAGLE_PUSH;
 }
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5d91213d34c0..67c670886c1f 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -242,9 +242,14 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 		tcp_port_rover = rover;
 		spin_unlock(&tcp_portalloc_lock);
 
-		/* Exhausted local port range during search? */
+		/* Exhausted local port range during search?  It is not
+		 * possible for us to be holding one of the bind hash
+		 * locks if this test triggers, because if 'remaining'
+		 * drops to zero, we broke out of the do/while loop at
+		 * the top level, not from the 'break;' statement.
+		 */
 		ret = 1;
-		if (remaining <= 0)
+		if (unlikely(remaining <= 0))
 			goto fail;
 
 		/* OK, here is the one we will use.  HEAD is
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3ed6fc15815b..dd30dd137b74 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -861,7 +861,8 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 	u16 flags;
 
 	/* All of a TSO frame must be composed of paged data.  */
-	BUG_ON(skb->len != skb->data_len);
+	if (skb->len != skb->data_len)
+		return tcp_fragment(sk, skb, len, mss_now);
 
 	buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC);
 	if (unlikely(buff == NULL))
@@ -924,10 +925,6 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
 
 	limit = min(send_win, cong_win);
 
-	/* If sk_send_head can be sent fully now, just do it.  */
-	if (skb->len <= limit)
-		return 0;
-
 	if (sysctl_tcp_tso_win_divisor) {
 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
 
@@ -974,6 +971,8 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 
 	sent_pkts = 0;
 	while ((skb = sk->sk_send_head)) {
+		unsigned int limit;
+
 		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
 		BUG_ON(!tso_segs);
 
@@ -994,9 +993,10 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 			break;
 		}
 
+		limit = mss_now;
 		if (tso_segs > 1) {
-			u32 limit = tcp_window_allows(tp, skb,
-						      mss_now, cwnd_quota);
+			limit = tcp_window_allows(tp, skb,
+						  mss_now, cwnd_quota);
 
 			if (skb->len < limit) {
 				unsigned int trim = skb->len % mss_now;
@@ -1004,15 +1004,12 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 				if (trim)
 					limit = skb->len - trim;
 			}
-			if (skb->len > limit) {
-				if (tso_fragment(sk, skb, limit, mss_now))
-					break;
-			}
-		} else if (unlikely(skb->len > mss_now)) {
-			if (unlikely(tcp_fragment(sk, skb, mss_now, mss_now)))
-				break;
 		}
 
+		if (skb->len > limit &&
+		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
+			break;
+
 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
 		if (unlikely(tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC))))
@@ -1064,11 +1061,14 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
 	cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
 
 	if (likely(cwnd_quota)) {
+		unsigned int limit;
+
 		BUG_ON(!tso_segs);
 
+		limit = mss_now;
 		if (tso_segs > 1) {
-			u32 limit = tcp_window_allows(tp, skb,
-						      mss_now, cwnd_quota);
+			limit = tcp_window_allows(tp, skb,
+						  mss_now, cwnd_quota);
 
 			if (skb->len < limit) {
 				unsigned int trim = skb->len % mss_now;
@@ -1076,15 +1076,12 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
 			if (trim)
 				limit = skb->len - trim;
 		}
-		if (skb->len > limit) {
-			if (unlikely(tso_fragment(sk, skb, limit, mss_now)))
-				return;
-		}
-	} else if (unlikely(skb->len > mss_now)) {
-		if (unlikely(tcp_fragment(sk, skb, mss_now, mss_now)))
-			return;
 	}
 
+	if (skb->len > limit &&
+	    unlikely(tso_fragment(sk, skb, limit, mss_now)))
+		return;
+
 	/* Send it out now. */
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 866f10726c58..10fbb50daea4 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -198,12 +198,13 @@ resubmit:
 		if (!raw_sk) {
 			if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
 				IP6_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS);
-				icmpv6_param_prob(skb, ICMPV6_UNK_NEXTHDR, nhoff);
+				icmpv6_send(skb, ICMPV6_PARAMPROB,
+					    ICMPV6_UNK_NEXTHDR, nhoff,
+					    skb->dev);
 			}
-		} else {
+		} else
 			IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
-			kfree_skb(skb);
-		}
+		kfree_skb(skb);
 	}
 	rcu_read_unlock();
 	return 0;
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 423feb46ccc0..135383ef538f 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -354,7 +354,7 @@ static struct crypto_tfm **ipcomp6_alloc_tfms(const char *alg_name)
 	int cpu;
 
 	/* This can be any valid CPU ID so we don't need locking. */
-	cpu = smp_processor_id();
+	cpu = raw_smp_processor_id();
 
 	list_for_each_entry(pos, &ipcomp6_tfms_list, list) {
 		struct crypto_tfm *tfm;
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 5493180f0d44..a16df5b27c84 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -211,6 +211,12 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
 		break;
 
 	case IPQ_COPY_PACKET:
+		if (entry->skb->ip_summed == CHECKSUM_HW &&
+		    (*errp = skb_checksum_help(entry->skb,
+		                               entry->info->outdev == NULL))) {
+			read_unlock_bh(&queue_lock);
+			return NULL;
+		}
 		if (copy_range == 0 || copy_range > entry->skb->len)
 			data_len = entry->skb->len;
 		else
@@ -381,6 +387,7 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
 	if (!skb_ip_make_writable(&e->skb, v->data_len))
 		return -ENOMEM;
 	memcpy(e->skb->data, v->payload, v->data_len);
+	e->skb->ip_summed = CHECKSUM_NONE;
 	e->skb->nfcache |= NFC_ALTERED;
 
 	/*
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index e2b848ec9851..1d4d75b34d32 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -328,6 +328,8 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
 
 	if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
 		if (skb->ip_summed == CHECKSUM_HW) {
+			skb_postpull_rcsum(skb, skb->nh.raw,
+					   skb->h.raw - skb->nh.raw);
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			if (csum_ipv6_magic(&skb->nh.ipv6h->saddr,
 					    &skb->nh.ipv6h->daddr,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f6e288dc116e..ef29cfd936d3 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -158,9 +158,14 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
 		tcp_port_rover = rover;
 		spin_unlock(&tcp_portalloc_lock);
 
-		/* Exhausted local port range during search? */
+		/* Exhausted local port range during search?  It is not
+		 * possible for us to be holding one of the bind hash
+		 * locks if this test triggers, because if 'remaining'
+		 * drops to zero, we broke out of the do/while loop at
+		 * the top level, not from the 'break;' statement.
+		 */
 		ret = 1;
-		if (remaining <= 0)
+		if (unlikely(remaining <= 0))
 			goto fail;
 
 		/* OK, here is the one we will use. */
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 31ed4a9a1d06..162a85fed150 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -459,12 +459,7 @@ static struct sock *nr_make_new(struct sock *osk)
 	sk->sk_sndbuf = osk->sk_sndbuf;
 	sk->sk_state = TCP_ESTABLISHED;
 	sk->sk_sleep = osk->sk_sleep;
-
-	if (sock_flag(osk, SOCK_ZAPPED))
-		sock_set_flag(sk, SOCK_ZAPPED);
-
-	if (sock_flag(osk, SOCK_DBG))
-		sock_set_flag(sk, SOCK_DBG);
+	sock_copy_flags(sk, osk);
 
 	skb_queue_head_init(&nr->ack_queue);
 	skb_queue_head_init(&nr->reseq_queue);
@@ -541,7 +536,8 @@ static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	struct nr_sock *nr = nr_sk(sk);
 	struct full_sockaddr_ax25 *addr = (struct full_sockaddr_ax25 *)uaddr;
 	struct net_device *dev;
-	ax25_address *user, *source;
+	ax25_uid_assoc *user;
+	ax25_address *source;
 
 	lock_sock(sk);
 	if (!sock_flag(sk, SOCK_ZAPPED)) {
@@ -580,16 +576,19 @@ static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	} else {
 		source = &addr->fsa_ax25.sax25_call;
 
-		if ((user = ax25_findbyuid(current->euid)) == NULL) {
+		user = ax25_findbyuid(current->euid);
+		if (user) {
+			nr->user_addr = user->call;
+			ax25_uid_put(user);
+		} else {
 			if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
 				release_sock(sk);
 				dev_put(dev);
 				return -EPERM;
 			}
-			user = source;
+			nr->user_addr = *source;
 		}
 
-		nr->user_addr = *user;
 		nr->source_addr = *source;
 	}
 
@@ -609,7 +608,8 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
 	struct sock *sk = sock->sk;
 	struct nr_sock *nr = nr_sk(sk);
 	struct sockaddr_ax25 *addr = (struct sockaddr_ax25 *)uaddr;
-	ax25_address *user, *source = NULL;
+	ax25_address *source = NULL;
+	ax25_uid_assoc *user;
 	struct net_device *dev;
 
 	lock_sock(sk);
@@ -650,16 +650,19 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
 		}
 		source = (ax25_address *)dev->dev_addr;
 
-		if ((user = ax25_findbyuid(current->euid)) == NULL) {
+		user = ax25_findbyuid(current->euid);
+		if (user) {
+			nr->user_addr = user->call;
+			ax25_uid_put(user);
+		} else {
 			if (ax25_uid_policy && !capable(CAP_NET_ADMIN)) {
 				dev_put(dev);
 				release_sock(sk);
 				return -EPERM;
 			}
-			user = source;
+			nr->user_addr = *source;
 		}
 
-		nr->user_addr = *user;
 		nr->source_addr = *source;
 		nr->device = dev;
 
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 7eb6a5bf93ea..5480caf8ccc2 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -556,12 +556,7 @@ static struct sock *rose_make_new(struct sock *osk)
 	sk->sk_sndbuf = osk->sk_sndbuf;
 	sk->sk_state = TCP_ESTABLISHED;
 	sk->sk_sleep = osk->sk_sleep;
-
-	if (sock_flag(osk, SOCK_ZAPPED))
-		sock_set_flag(sk, SOCK_ZAPPED);
-
-	if (sock_flag(osk, SOCK_DBG))
-		sock_set_flag(sk, SOCK_DBG);
+	sock_copy_flags(sk, osk);
 
 	init_timer(&rose->timer);
 	init_timer(&rose->idletimer);
@@ -631,7 +626,8 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	struct rose_sock *rose = rose_sk(sk);
 	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
 	struct net_device *dev;
-	ax25_address *user, *source;
+	ax25_address *source;
+	ax25_uid_assoc *user;
 	int n;
 
 	if (!sock_flag(sk, SOCK_ZAPPED))
@@ -656,14 +652,17 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 
 	source = &addr->srose_call;
 
-	if ((user = ax25_findbyuid(current->euid)) == NULL) {
+	user = ax25_findbyuid(current->euid);
+	if (user) {
+		rose->source_call = user->call;
+		ax25_uid_put(user);
+	} else {
 		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE))
 			return -EACCES;
-		user = source;
+		rose->source_call = *source;
 	}
 
 	rose->source_addr = addr->srose_addr;
-	rose->source_call = *user;
 	rose->device = dev;
 	rose->source_ndigis = addr->srose_ndigis;
 
@@ -690,8 +689,8 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
 	struct rose_sock *rose = rose_sk(sk);
 	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
 	unsigned char cause, diagnostic;
-	ax25_address *user;
 	struct net_device *dev;
+	ax25_uid_assoc *user;
 	int n;
 
 	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
@@ -741,12 +740,14 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
 		if ((dev = rose_dev_first()) == NULL)
 			return -ENETUNREACH;
 
-		if ((user = ax25_findbyuid(current->euid)) == NULL)
+		user = ax25_findbyuid(current->euid);
+		if (!user)
 			return -EINVAL;
 
 		memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
-		rose->source_call = *user;
+		rose->source_call = user->call;
 		rose->device = dev;
+		ax25_uid_put(user);
 
 		rose_insert_socket(sk);		/* Finish the bind */
 	}
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index ff73ebb912b8..25da6f699fd0 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -994,8 +994,10 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
 	 * 1. The frame isn't for us,
 	 * 2. It isn't "owned" by any existing route.
 	 */
-	if (frametype != ROSE_CALL_REQUEST)	/* XXX */
-		return 0;
+	if (frametype != ROSE_CALL_REQUEST) {	/* XXX */
+		res = 0;
+		goto out;
+	}
 
 	len  = (((skb->data[3] >> 4) & 0x0F) + 1) / 2;
 	len += (((skb->data[3] >> 0) & 0x0F) + 1) / 2;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 8edefd5d095d..0d066c965342 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -438,6 +438,7 @@ struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops)
 	if (!ops->init || ops->init(sch, NULL) == 0)
 		return sch;
 
+	qdisc_destroy(sch);
 errout:
 	return NULL;
 }
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 98d49ec9b74b..b74f7772b576 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -57,6 +57,7 @@ static struct snmp_mib sctp_snmp_list[] = {
 	SNMP_MIB_ITEM("SctpReasmUsrMsgs", SCTP_MIB_REASMUSRMSGS),
 	SNMP_MIB_ITEM("SctpOutSCTPPacks", SCTP_MIB_OUTSCTPPACKS),
 	SNMP_MIB_ITEM("SctpInSCTPPacks", SCTP_MIB_INSCTPPACKS),
+	SNMP_MIB_SENTINEL
 };
 
 /* Return the current value of a particular entry in the mib by adding its
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 24c21f2a33a7..5a7265aeaf83 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -185,9 +185,7 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
 		sg->page = body->pages[i];
 		sg->offset = offset;
 		sg->length = thislen;
-		kmap(sg->page); /* XXX kmap_atomic? */
 		crypto_digest_update(tfm, sg, 1);
-		kunmap(sg->page);
 		len -= thislen;
 		i++;
 		offset = 0;
