| author | Ingo Molnar <mingo@elte.hu> | 2009-01-02 16:41:36 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-01-02 16:41:36 -0500 |
| commit | 923a789b49c7269a0245d5af6afe486188d940df (patch) | |
| tree | c3f168427372e64f7467a794f313416da5086ba0 /net | |
| parent | 103ceffb9501531f6931df6aebc11a05189201f0 (diff) | |
| parent | b840d79631c882786925303c2b0f4fefc31845ed (diff) | |
Merge branch 'linus' into x86/cleanups
Conflicts:
arch/x86/kernel/reboot.c
Diffstat (limited to 'net')
33 files changed, 431 insertions, 169 deletions
diff --git a/net/core/dev.c b/net/core/dev.c index 446424027d24..09c66a449da6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -5066,13 +5066,14 @@ static struct pernet_operations __net_initdata netdev_net_ops = { | |||
| 5066 | 5066 | ||
| 5067 | static void __net_exit default_device_exit(struct net *net) | 5067 | static void __net_exit default_device_exit(struct net *net) |
| 5068 | { | 5068 | { |
| 5069 | struct net_device *dev, *next; | 5069 | struct net_device *dev; |
| 5070 | /* | 5070 | /* |
| 5071 | * Push all migratable of the network devices back to the | 5071 | * Push all migratable of the network devices back to the |
| 5072 | * initial network namespace | 5072 | * initial network namespace |
| 5073 | */ | 5073 | */ |
| 5074 | rtnl_lock(); | 5074 | rtnl_lock(); |
| 5075 | for_each_netdev_safe(net, dev, next) { | 5075 | restart: |
| 5076 | for_each_netdev(net, dev) { | ||
| 5076 | int err; | 5077 | int err; |
| 5077 | char fb_name[IFNAMSIZ]; | 5078 | char fb_name[IFNAMSIZ]; |
| 5078 | 5079 | ||
| @@ -5083,7 +5084,7 @@ static void __net_exit default_device_exit(struct net *net) | |||
| 5083 | /* Delete virtual devices */ | 5084 | /* Delete virtual devices */ |
| 5084 | if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) { | 5085 | if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) { |
| 5085 | dev->rtnl_link_ops->dellink(dev); | 5086 | dev->rtnl_link_ops->dellink(dev); |
| 5086 | continue; | 5087 | goto restart; |
| 5087 | } | 5088 | } |
| 5088 | 5089 | ||
| 5089 | /* Push remaing network devices to init_net */ | 5090 | /* Push remaing network devices to init_net */ |
| @@ -5094,6 +5095,7 @@ static void __net_exit default_device_exit(struct net *net) | |||
| 5094 | __func__, dev->name, err); | 5095 | __func__, dev->name, err); |
| 5095 | BUG(); | 5096 | BUG(); |
| 5096 | } | 5097 | } |
| 5098 | goto restart; | ||
| 5097 | } | 5099 | } |
| 5098 | rtnl_unlock(); | 5100 | rtnl_unlock(); |
| 5099 | } | 5101 | } |
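The net/core/dev.c hunk above drops `for_each_netdev_safe()` in favour of a `goto restart` loop: a virtual device's `->dellink()` (or the move of a device back to `init_net`) can remove or relocate devices other than the current one, so the "next" pointer cached by the `_safe` iterator may already be stale by the time it is used. Below is a minimal sketch of that restart-on-mutation idiom on an ordinary kernel list; the struct and function names are made up for illustration, not taken from the patch.

```c
/*
 * Restart-on-mutation sketch (illustrative names only): when the loop
 * body may delete entries other than the current one, the _safe
 * iterator's saved "next" pointer is not sufficient, so the walk is
 * restarted from the head after every removal.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_item {
	struct list_head node;
	bool doomed;
};

static void demo_purge(struct list_head *head)
{
	struct demo_item *it;

restart:
	list_for_each_entry(it, head, node) {
		if (it->doomed) {
			list_del(&it->node);	/* removal may cascade */
			kfree(it);
			goto restart;
		}
	}
}
```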
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 9c3717a23cf7..f66c58df8953 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
| @@ -2414,7 +2414,7 @@ static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos) | |||
| 2414 | if (*pos == 0) | 2414 | if (*pos == 0) |
| 2415 | return SEQ_START_TOKEN; | 2415 | return SEQ_START_TOKEN; |
| 2416 | 2416 | ||
| 2417 | for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) { | 2417 | for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { |
| 2418 | if (!cpu_possible(cpu)) | 2418 | if (!cpu_possible(cpu)) |
| 2419 | continue; | 2419 | continue; |
| 2420 | *pos = cpu+1; | 2420 | *pos = cpu+1; |
| @@ -2429,7 +2429,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
| 2429 | struct neigh_table *tbl = pde->data; | 2429 | struct neigh_table *tbl = pde->data; |
| 2430 | int cpu; | 2430 | int cpu; |
| 2431 | 2431 | ||
| 2432 | for (cpu = *pos; cpu < NR_CPUS; ++cpu) { | 2432 | for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { |
| 2433 | if (!cpu_possible(cpu)) | 2433 | if (!cpu_possible(cpu)) |
| 2434 | continue; | 2434 | continue; |
| 2435 | *pos = cpu+1; | 2435 | *pos = cpu+1; |
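This `NR_CPUS` → `nr_cpu_ids` change, repeated below for the conntrack and route-cache seq_file iterators, bounds the per-CPU `/proc` walk by the highest possible CPU id on the running system rather than the compile-time maximum, which avoids pointlessly scanning thousands of impossible ids when `NR_CPUS` is configured large. A hedged sketch of the same `->start()` shape for a hypothetical per-CPU statistic:

```c
/* Illustrative seq_file ->start() for a per-CPU counter; "demo_stat"
 * is a made-up symbol, the loop bound mirrors the hunks above. */
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/seq_file.h>

static DEFINE_PER_CPU(unsigned long, demo_stat);

static void *demo_stat_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return &per_cpu(demo_stat, cpu);
	}
	return NULL;
}
```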
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index d5c2bacb713c..1747ccae8e8d 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
| @@ -964,7 +964,6 @@ adjudge_to_death: | |||
| 964 | state = sk->sk_state; | 964 | state = sk->sk_state; |
| 965 | sock_hold(sk); | 965 | sock_hold(sk); |
| 966 | sock_orphan(sk); | 966 | sock_orphan(sk); |
| 967 | percpu_counter_inc(sk->sk_prot->orphan_count); | ||
| 968 | 967 | ||
| 969 | /* | 968 | /* |
| 970 | * It is the last release_sock in its life. It will remove backlog. | 969 | * It is the last release_sock in its life. It will remove backlog. |
| @@ -978,6 +977,8 @@ adjudge_to_death: | |||
| 978 | bh_lock_sock(sk); | 977 | bh_lock_sock(sk); |
| 979 | WARN_ON(sock_owned_by_user(sk)); | 978 | WARN_ON(sock_owned_by_user(sk)); |
| 980 | 979 | ||
| 980 | percpu_counter_inc(sk->sk_prot->orphan_count); | ||
| 981 | |||
| 981 | /* Have we already been destroyed by a softirq or backlog? */ | 982 | /* Have we already been destroyed by a softirq or backlog? */ |
| 982 | if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED) | 983 | if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED) |
| 983 | goto out; | 984 | goto out; |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index c7cda1ca8e65..f26ab38680de 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
| @@ -633,8 +633,6 @@ void inet_csk_listen_stop(struct sock *sk) | |||
| 633 | 633 | ||
| 634 | acc_req = req->dl_next; | 634 | acc_req = req->dl_next; |
| 635 | 635 | ||
| 636 | percpu_counter_inc(sk->sk_prot->orphan_count); | ||
| 637 | |||
| 638 | local_bh_disable(); | 636 | local_bh_disable(); |
| 639 | bh_lock_sock(child); | 637 | bh_lock_sock(child); |
| 640 | WARN_ON(sock_owned_by_user(child)); | 638 | WARN_ON(sock_owned_by_user(child)); |
| @@ -644,6 +642,8 @@ void inet_csk_listen_stop(struct sock *sk) | |||
| 644 | 642 | ||
| 645 | sock_orphan(child); | 643 | sock_orphan(child); |
| 646 | 644 | ||
| 645 | percpu_counter_inc(sk->sk_prot->orphan_count); | ||
| 646 | |||
| 647 | inet_csk_destroy_sock(child); | 647 | inet_csk_destroy_sock(child); |
| 648 | 648 | ||
| 649 | bh_unlock_sock(child); | 649 | bh_unlock_sock(child); |
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c index 313ebf00ee36..6ba5c557690c 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | |||
| @@ -291,7 +291,7 @@ static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos) | |||
| 291 | if (*pos == 0) | 291 | if (*pos == 0) |
| 292 | return SEQ_START_TOKEN; | 292 | return SEQ_START_TOKEN; |
| 293 | 293 | ||
| 294 | for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) { | 294 | for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { |
| 295 | if (!cpu_possible(cpu)) | 295 | if (!cpu_possible(cpu)) |
| 296 | continue; | 296 | continue; |
| 297 | *pos = cpu+1; | 297 | *pos = cpu+1; |
| @@ -306,7 +306,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
| 306 | struct net *net = seq_file_net(seq); | 306 | struct net *net = seq_file_net(seq); |
| 307 | int cpu; | 307 | int cpu; |
| 308 | 308 | ||
| 309 | for (cpu = *pos; cpu < NR_CPUS; ++cpu) { | 309 | for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { |
| 310 | if (!cpu_possible(cpu)) | 310 | if (!cpu_possible(cpu)) |
| 311 | continue; | 311 | continue; |
| 312 | *pos = cpu+1; | 312 | *pos = cpu+1; |
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 614958b7c276..eb62e58bff79 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c | |||
| @@ -38,6 +38,7 @@ | |||
| 38 | #include <net/tcp.h> | 38 | #include <net/tcp.h> |
| 39 | #include <net/udp.h> | 39 | #include <net/udp.h> |
| 40 | #include <net/udplite.h> | 40 | #include <net/udplite.h> |
| 41 | #include <linux/bottom_half.h> | ||
| 41 | #include <linux/inetdevice.h> | 42 | #include <linux/inetdevice.h> |
| 42 | #include <linux/proc_fs.h> | 43 | #include <linux/proc_fs.h> |
| 43 | #include <linux/seq_file.h> | 44 | #include <linux/seq_file.h> |
| @@ -50,13 +51,17 @@ | |||
| 50 | static int sockstat_seq_show(struct seq_file *seq, void *v) | 51 | static int sockstat_seq_show(struct seq_file *seq, void *v) |
| 51 | { | 52 | { |
| 52 | struct net *net = seq->private; | 53 | struct net *net = seq->private; |
| 54 | int orphans, sockets; | ||
| 55 | |||
| 56 | local_bh_disable(); | ||
| 57 | orphans = percpu_counter_sum_positive(&tcp_orphan_count), | ||
| 58 | sockets = percpu_counter_sum_positive(&tcp_sockets_allocated), | ||
| 59 | local_bh_enable(); | ||
| 53 | 60 | ||
| 54 | socket_seq_show(seq); | 61 | socket_seq_show(seq); |
| 55 | seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n", | 62 | seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n", |
| 56 | sock_prot_inuse_get(net, &tcp_prot), | 63 | sock_prot_inuse_get(net, &tcp_prot), orphans, |
| 57 | (int)percpu_counter_sum_positive(&tcp_orphan_count), | 64 | tcp_death_row.tw_count, sockets, |
| 58 | tcp_death_row.tw_count, | ||
| 59 | (int)percpu_counter_sum_positive(&tcp_sockets_allocated), | ||
| 60 | atomic_read(&tcp_memory_allocated)); | 65 | atomic_read(&tcp_memory_allocated)); |
| 61 | seq_printf(seq, "UDP: inuse %d mem %d\n", | 66 | seq_printf(seq, "UDP: inuse %d mem %d\n", |
| 62 | sock_prot_inuse_get(net, &udp_prot), | 67 | sock_prot_inuse_get(net, &udp_prot), |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 77bfba975959..97f71153584f 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
| @@ -429,7 +429,7 @@ static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos) | |||
| 429 | if (*pos == 0) | 429 | if (*pos == 0) |
| 430 | return SEQ_START_TOKEN; | 430 | return SEQ_START_TOKEN; |
| 431 | 431 | ||
| 432 | for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) { | 432 | for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { |
| 433 | if (!cpu_possible(cpu)) | 433 | if (!cpu_possible(cpu)) |
| 434 | continue; | 434 | continue; |
| 435 | *pos = cpu+1; | 435 | *pos = cpu+1; |
| @@ -442,7 +442,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
| 442 | { | 442 | { |
| 443 | int cpu; | 443 | int cpu; |
| 444 | 444 | ||
| 445 | for (cpu = *pos; cpu < NR_CPUS; ++cpu) { | 445 | for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { |
| 446 | if (!cpu_possible(cpu)) | 446 | if (!cpu_possible(cpu)) |
| 447 | continue; | 447 | continue; |
| 448 | *pos = cpu+1; | 448 | *pos = cpu+1; |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 1f3d52946b3b..f28acf11fc67 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
| @@ -1836,7 +1836,6 @@ adjudge_to_death: | |||
| 1836 | state = sk->sk_state; | 1836 | state = sk->sk_state; |
| 1837 | sock_hold(sk); | 1837 | sock_hold(sk); |
| 1838 | sock_orphan(sk); | 1838 | sock_orphan(sk); |
| 1839 | percpu_counter_inc(sk->sk_prot->orphan_count); | ||
| 1840 | 1839 | ||
| 1841 | /* It is the last release_sock in its life. It will remove backlog. */ | 1840 | /* It is the last release_sock in its life. It will remove backlog. */ |
| 1842 | release_sock(sk); | 1841 | release_sock(sk); |
| @@ -1849,6 +1848,8 @@ adjudge_to_death: | |||
| 1849 | bh_lock_sock(sk); | 1848 | bh_lock_sock(sk); |
| 1850 | WARN_ON(sock_owned_by_user(sk)); | 1849 | WARN_ON(sock_owned_by_user(sk)); |
| 1851 | 1850 | ||
| 1851 | percpu_counter_inc(sk->sk_prot->orphan_count); | ||
| 1852 | |||
| 1852 | /* Have we already been destroyed by a softirq or backlog? */ | 1853 | /* Have we already been destroyed by a softirq or backlog? */ |
| 1853 | if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) | 1854 | if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) |
| 1854 | goto out; | 1855 | goto out; |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 10172487921b..9d839fa9331e 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -51,6 +51,7 @@ | |||
| 51 | */ | 51 | */ |
| 52 | 52 | ||
| 53 | 53 | ||
| 54 | #include <linux/bottom_half.h> | ||
| 54 | #include <linux/types.h> | 55 | #include <linux/types.h> |
| 55 | #include <linux/fcntl.h> | 56 | #include <linux/fcntl.h> |
| 56 | #include <linux/module.h> | 57 | #include <linux/module.h> |
| @@ -1797,7 +1798,9 @@ static int tcp_v4_init_sock(struct sock *sk) | |||
| 1797 | sk->sk_sndbuf = sysctl_tcp_wmem[1]; | 1798 | sk->sk_sndbuf = sysctl_tcp_wmem[1]; |
| 1798 | sk->sk_rcvbuf = sysctl_tcp_rmem[1]; | 1799 | sk->sk_rcvbuf = sysctl_tcp_rmem[1]; |
| 1799 | 1800 | ||
| 1801 | local_bh_disable(); | ||
| 1800 | percpu_counter_inc(&tcp_sockets_allocated); | 1802 | percpu_counter_inc(&tcp_sockets_allocated); |
| 1803 | local_bh_enable(); | ||
| 1801 | 1804 | ||
| 1802 | return 0; | 1805 | return 0; |
| 1803 | } | 1806 | } |
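`tcp_sockets_allocated` and `tcp_orphan_count` are percpu_counters that are also updated from softirq context, and percpu_counter's internal batch spinlock is not BH-safe; the hunks in tcp.c, dccp, inet_connection_sock.c and the two `tcp_v*_init_sock()` functions therefore either move the increment to a section that already runs with bottom halves disabled or wrap it in `local_bh_disable()`/`local_bh_enable()` (as does the summation in ipv4/proc.c). A minimal sketch of that pairing, with an illustrative counter name:

```c
/* BH-safe percpu_counter update from process context; "demo_counter"
 * is illustrative, not a real kernel symbol. */
#include <linux/percpu_counter.h>
#include <linux/bottom_half.h>

static struct percpu_counter demo_counter;

static void demo_account_one(void)
{
	local_bh_disable();	/* keep softirq users off the counter's lock */
	percpu_counter_inc(&demo_counter);
	local_bh_enable();
}
```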
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 8702b06cb60a..e8b8337a8310 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | * 2 of the License, or (at your option) any later version. | 23 | * 2 of the License, or (at your option) any later version. |
| 24 | */ | 24 | */ |
| 25 | 25 | ||
| 26 | #include <linux/bottom_half.h> | ||
| 26 | #include <linux/module.h> | 27 | #include <linux/module.h> |
| 27 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
| 28 | #include <linux/types.h> | 29 | #include <linux/types.h> |
| @@ -1830,7 +1831,9 @@ static int tcp_v6_init_sock(struct sock *sk) | |||
| 1830 | sk->sk_sndbuf = sysctl_tcp_wmem[1]; | 1831 | sk->sk_sndbuf = sysctl_tcp_wmem[1]; |
| 1831 | sk->sk_rcvbuf = sysctl_tcp_rmem[1]; | 1832 | sk->sk_rcvbuf = sysctl_tcp_rmem[1]; |
| 1832 | 1833 | ||
| 1834 | local_bh_disable(); | ||
| 1833 | percpu_counter_inc(&tcp_sockets_allocated); | 1835 | percpu_counter_inc(&tcp_sockets_allocated); |
| 1836 | local_bh_enable(); | ||
| 1834 | 1837 | ||
| 1835 | return 0; | 1838 | return 0; |
| 1836 | } | 1839 | } |
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index e4e2caeb9d82..086d5ef098fd 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c | |||
| @@ -371,9 +371,8 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
| 371 | IRDA_DEBUG(2, "%s()\n", __func__ ); | 371 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
| 372 | 372 | ||
| 373 | line = tty->index; | 373 | line = tty->index; |
| 374 | if ((line < 0) || (line >= IRCOMM_TTY_PORTS)) { | 374 | if (line >= IRCOMM_TTY_PORTS) |
| 375 | return -ENODEV; | 375 | return -ENODEV; |
| 376 | } | ||
| 377 | 376 | ||
| 378 | /* Check if instance already exists */ | 377 | /* Check if instance already exists */ |
| 379 | self = hashbin_lock_find(ircomm_tty, line, NULL); | 378 | self = hashbin_lock_find(ircomm_tty, line, NULL); |
| @@ -405,6 +404,8 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
| 405 | * Force TTY into raw mode by default which is usually what | 404 | * Force TTY into raw mode by default which is usually what |
| 406 | * we want for IrCOMM and IrLPT. This way applications will | 405 | * we want for IrCOMM and IrLPT. This way applications will |
| 407 | * not have to twiddle with printcap etc. | 406 | * not have to twiddle with printcap etc. |
| 407 | * | ||
| 408 | * Note this is completely usafe and doesn't work properly | ||
| 408 | */ | 409 | */ |
| 409 | tty->termios->c_iflag = 0; | 410 | tty->termios->c_iflag = 0; |
| 410 | tty->termios->c_oflag = 0; | 411 | tty->termios->c_oflag = 0; |
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c index 9394f539966a..3eb5e2660c49 100644 --- a/net/netfilter/ipvs/ip_vs_lblc.c +++ b/net/netfilter/ipvs/ip_vs_lblc.c | |||
| @@ -507,7 +507,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
| 507 | /* No cache entry or it is invalid, time to schedule */ | 507 | /* No cache entry or it is invalid, time to schedule */ |
| 508 | dest = __ip_vs_lblc_schedule(svc); | 508 | dest = __ip_vs_lblc_schedule(svc); |
| 509 | if (!dest) { | 509 | if (!dest) { |
| 510 | IP_VS_DBG(1, "no destination available\n"); | 510 | IP_VS_ERR_RL("LBLC: no destination available\n"); |
| 511 | return NULL; | 511 | return NULL; |
| 512 | } | 512 | } |
| 513 | 513 | ||
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c index 92dc76a6842c..c04ce56c7f0f 100644 --- a/net/netfilter/ipvs/ip_vs_lblcr.c +++ b/net/netfilter/ipvs/ip_vs_lblcr.c | |||
| @@ -690,7 +690,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
| 690 | /* The cache entry is invalid, time to schedule */ | 690 | /* The cache entry is invalid, time to schedule */ |
| 691 | dest = __ip_vs_lblcr_schedule(svc); | 691 | dest = __ip_vs_lblcr_schedule(svc); |
| 692 | if (!dest) { | 692 | if (!dest) { |
| 693 | IP_VS_DBG(1, "no destination available\n"); | 693 | IP_VS_ERR_RL("LBLCR: no destination available\n"); |
| 694 | read_unlock(&svc->sched_lock); | 694 | read_unlock(&svc->sched_lock); |
| 695 | return NULL; | 695 | return NULL; |
| 696 | } | 696 | } |
diff --git a/net/netfilter/ipvs/ip_vs_lc.c b/net/netfilter/ipvs/ip_vs_lc.c index 51912cab777b..d0dadc8a65fd 100644 --- a/net/netfilter/ipvs/ip_vs_lc.c +++ b/net/netfilter/ipvs/ip_vs_lc.c | |||
| @@ -66,11 +66,15 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
| 66 | } | 66 | } |
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | if (least) | 69 | if (!least) |
| 70 | IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d inactconns %d\n", | 70 | IP_VS_ERR_RL("LC: no destination available\n"); |
| 71 | IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port), | 71 | else |
| 72 | atomic_read(&least->activeconns), | 72 | IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d " |
| 73 | atomic_read(&least->inactconns)); | 73 | "inactconns %d\n", |
| 74 | IP_VS_DBG_ADDR(svc->af, &least->addr), | ||
| 75 | ntohs(least->port), | ||
| 76 | atomic_read(&least->activeconns), | ||
| 77 | atomic_read(&least->inactconns)); | ||
| 74 | 78 | ||
| 75 | return least; | 79 | return least; |
| 76 | } | 80 | } |
diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c index 6758ad2ceaaf..694952db5026 100644 --- a/net/netfilter/ipvs/ip_vs_nq.c +++ b/net/netfilter/ipvs/ip_vs_nq.c | |||
| @@ -95,8 +95,10 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
| 95 | } | 95 | } |
| 96 | } | 96 | } |
| 97 | 97 | ||
| 98 | if (!least) | 98 | if (!least) { |
| 99 | IP_VS_ERR_RL("NQ: no destination available\n"); | ||
| 99 | return NULL; | 100 | return NULL; |
| 101 | } | ||
| 100 | 102 | ||
| 101 | out: | 103 | out: |
| 102 | IP_VS_DBG_BUF(6, "NQ: server %s:%u " | 104 | IP_VS_DBG_BUF(6, "NQ: server %s:%u " |
diff --git a/net/netfilter/ipvs/ip_vs_rr.c b/net/netfilter/ipvs/ip_vs_rr.c index 8fb51c169eb8..2d16ab7f8c1e 100644 --- a/net/netfilter/ipvs/ip_vs_rr.c +++ b/net/netfilter/ipvs/ip_vs_rr.c | |||
| @@ -69,6 +69,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
| 69 | q = q->next; | 69 | q = q->next; |
| 70 | } while (q != p); | 70 | } while (q != p); |
| 71 | write_unlock(&svc->sched_lock); | 71 | write_unlock(&svc->sched_lock); |
| 72 | IP_VS_ERR_RL("RR: no destination available\n"); | ||
| 72 | return NULL; | 73 | return NULL; |
| 73 | 74 | ||
| 74 | out: | 75 | out: |
diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c index 691a6a0086e1..20e4657d2f3b 100644 --- a/net/netfilter/ipvs/ip_vs_sed.c +++ b/net/netfilter/ipvs/ip_vs_sed.c | |||
| @@ -84,6 +84,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
| 84 | goto nextstage; | 84 | goto nextstage; |
| 85 | } | 85 | } |
| 86 | } | 86 | } |
| 87 | IP_VS_ERR_RL("SED: no destination available\n"); | ||
| 87 | return NULL; | 88 | return NULL; |
| 88 | 89 | ||
| 89 | /* | 90 | /* |
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c index 0e53955ef139..75709ebeb630 100644 --- a/net/netfilter/ipvs/ip_vs_sh.c +++ b/net/netfilter/ipvs/ip_vs_sh.c | |||
| @@ -219,6 +219,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
| 219 | || !(dest->flags & IP_VS_DEST_F_AVAILABLE) | 219 | || !(dest->flags & IP_VS_DEST_F_AVAILABLE) |
| 220 | || atomic_read(&dest->weight) <= 0 | 220 | || atomic_read(&dest->weight) <= 0 |
| 221 | || is_overloaded(dest)) { | 221 | || is_overloaded(dest)) { |
| 222 | IP_VS_ERR_RL("SH: no destination available\n"); | ||
| 222 | return NULL; | 223 | return NULL; |
| 223 | } | 224 | } |
| 224 | 225 | ||
diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c index 57b452bbb4ea..8e942565b47d 100644 --- a/net/netfilter/ipvs/ip_vs_wlc.c +++ b/net/netfilter/ipvs/ip_vs_wlc.c | |||
| @@ -72,6 +72,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
| 72 | goto nextstage; | 72 | goto nextstage; |
| 73 | } | 73 | } |
| 74 | } | 74 | } |
| 75 | IP_VS_ERR_RL("WLC: no destination available\n"); | ||
| 75 | return NULL; | 76 | return NULL; |
| 76 | 77 | ||
| 77 | /* | 78 | /* |
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c index 2f618dc29c5b..f7d74ef1ecf9 100644 --- a/net/netfilter/ipvs/ip_vs_wrr.c +++ b/net/netfilter/ipvs/ip_vs_wrr.c | |||
| @@ -155,6 +155,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
| 155 | 155 | ||
| 156 | if (mark->cl == mark->cl->next) { | 156 | if (mark->cl == mark->cl->next) { |
| 157 | /* no dest entry */ | 157 | /* no dest entry */ |
| 158 | IP_VS_ERR_RL("WRR: no destination available: " | ||
| 159 | "no destinations present\n"); | ||
| 158 | dest = NULL; | 160 | dest = NULL; |
| 159 | goto out; | 161 | goto out; |
| 160 | } | 162 | } |
| @@ -168,8 +170,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
| 168 | */ | 170 | */ |
| 169 | if (mark->cw == 0) { | 171 | if (mark->cw == 0) { |
| 170 | mark->cl = &svc->destinations; | 172 | mark->cl = &svc->destinations; |
| 171 | IP_VS_ERR_RL("ip_vs_wrr_schedule(): " | 173 | IP_VS_ERR_RL("WRR: no destination " |
| 172 | "no available servers\n"); | 174 | "available\n"); |
| 173 | dest = NULL; | 175 | dest = NULL; |
| 174 | goto out; | 176 | goto out; |
| 175 | } | 177 | } |
| @@ -191,6 +193,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
| 191 | /* back to the start, and no dest is found. | 193 | /* back to the start, and no dest is found. |
| 192 | It is only possible when all dests are OVERLOADED */ | 194 | It is only possible when all dests are OVERLOADED */ |
| 193 | dest = NULL; | 195 | dest = NULL; |
| 196 | IP_VS_ERR_RL("WRR: no destination available: " | ||
| 197 | "all destinations are overloaded\n"); | ||
| 194 | goto out; | 198 | goto out; |
| 195 | } | 199 | } |
| 196 | } | 200 | } |
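Each IPVS scheduler hunk above adds a rate-limited "no destination available" message at the point where the scheduler gives up and returns NULL, so an operator can see why traffic is being dropped without flooding the log. `IP_VS_ERR_RL()` is IPVS's own rate-limited error macro; a generic sketch of the same idea using `net_ratelimit()`, with an illustrative helper name:

```c
/* Rate-limited error reporting sketch; demo_report_no_dest() is a
 * made-up helper, net_ratelimit() is the stock kernel throttle. */
#include <linux/kernel.h>
#include <linux/net.h>

static void demo_report_no_dest(const char *sched_name)
{
	if (net_ratelimit())
		printk(KERN_ERR "IPVS: %s: no destination available\n",
		       sched_name);
}
```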
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index f37b9b74c6a8..4da54b0b9233 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c | |||
| @@ -200,7 +200,7 @@ static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos) | |||
| 200 | if (*pos == 0) | 200 | if (*pos == 0) |
| 201 | return SEQ_START_TOKEN; | 201 | return SEQ_START_TOKEN; |
| 202 | 202 | ||
| 203 | for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) { | 203 | for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { |
| 204 | if (!cpu_possible(cpu)) | 204 | if (!cpu_possible(cpu)) |
| 205 | continue; | 205 | continue; |
| 206 | *pos = cpu + 1; | 206 | *pos = cpu + 1; |
| @@ -215,7 +215,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
| 215 | struct net *net = seq_file_net(seq); | 215 | struct net *net = seq_file_net(seq); |
| 216 | int cpu; | 216 | int cpu; |
| 217 | 217 | ||
| 218 | for (cpu = *pos; cpu < NR_CPUS; ++cpu) { | 218 | for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { |
| 219 | if (!cpu_possible(cpu)) | 219 | if (!cpu_possible(cpu)) |
| 220 | continue; | 220 | continue; |
| 221 | *pos = cpu + 1; | 221 | *pos = cpu + 1; |
diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 4f7ef0db302b..929218a47620 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig | |||
| @@ -335,9 +335,6 @@ config NET_CLS_CGROUP | |||
| 335 | Say Y here if you want to classify packets based on the control | 335 | Say Y here if you want to classify packets based on the control |
| 336 | cgroup of their process. | 336 | cgroup of their process. |
| 337 | 337 | ||
| 338 | To compile this code as a module, choose M here: the | ||
| 339 | module will be called cls_cgroup. | ||
| 340 | |||
| 341 | config NET_EMATCH | 338 | config NET_EMATCH |
| 342 | bool "Extended Matches" | 339 | bool "Extended Matches" |
| 343 | select NET_CLS | 340 | select NET_CLS |
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index 0d68b1975983..91a3db4a76f8 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c | |||
| @@ -24,10 +24,16 @@ struct cgroup_cls_state | |||
| 24 | u32 classid; | 24 | u32 classid; |
| 25 | }; | 25 | }; |
| 26 | 26 | ||
| 27 | static inline struct cgroup_cls_state *net_cls_state(struct cgroup *cgrp) | 27 | static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp) |
| 28 | { | 28 | { |
| 29 | return (struct cgroup_cls_state *) | 29 | return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id), |
| 30 | cgroup_subsys_state(cgrp, net_cls_subsys_id); | 30 | struct cgroup_cls_state, css); |
| 31 | } | ||
| 32 | |||
| 33 | static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p) | ||
| 34 | { | ||
| 35 | return container_of(task_subsys_state(p, net_cls_subsys_id), | ||
| 36 | struct cgroup_cls_state, css); | ||
| 31 | } | 37 | } |
| 32 | 38 | ||
| 33 | static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, | 39 | static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, |
| @@ -39,19 +45,19 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, | |||
| 39 | return ERR_PTR(-ENOMEM); | 45 | return ERR_PTR(-ENOMEM); |
| 40 | 46 | ||
| 41 | if (cgrp->parent) | 47 | if (cgrp->parent) |
| 42 | cs->classid = net_cls_state(cgrp->parent)->classid; | 48 | cs->classid = cgrp_cls_state(cgrp->parent)->classid; |
| 43 | 49 | ||
| 44 | return &cs->css; | 50 | return &cs->css; |
| 45 | } | 51 | } |
| 46 | 52 | ||
| 47 | static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) | 53 | static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) |
| 48 | { | 54 | { |
| 49 | kfree(ss); | 55 | kfree(cgrp_cls_state(cgrp)); |
| 50 | } | 56 | } |
| 51 | 57 | ||
| 52 | static u64 read_classid(struct cgroup *cgrp, struct cftype *cft) | 58 | static u64 read_classid(struct cgroup *cgrp, struct cftype *cft) |
| 53 | { | 59 | { |
| 54 | return net_cls_state(cgrp)->classid; | 60 | return cgrp_cls_state(cgrp)->classid; |
| 55 | } | 61 | } |
| 56 | 62 | ||
| 57 | static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value) | 63 | static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value) |
| @@ -59,7 +65,7 @@ static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value) | |||
| 59 | if (!cgroup_lock_live_group(cgrp)) | 65 | if (!cgroup_lock_live_group(cgrp)) |
| 60 | return -ENODEV; | 66 | return -ENODEV; |
| 61 | 67 | ||
| 62 | net_cls_state(cgrp)->classid = (u32) value; | 68 | cgrp_cls_state(cgrp)->classid = (u32) value; |
| 63 | 69 | ||
| 64 | cgroup_unlock(); | 70 | cgroup_unlock(); |
| 65 | 71 | ||
| @@ -115,8 +121,7 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp, | |||
| 115 | return -1; | 121 | return -1; |
| 116 | 122 | ||
| 117 | rcu_read_lock(); | 123 | rcu_read_lock(); |
| 118 | cs = (struct cgroup_cls_state *) task_subsys_state(current, | 124 | cs = task_cls_state(current); |
| 119 | net_cls_subsys_id); | ||
| 120 | if (cs->classid && tcf_em_tree_match(skb, &head->ematches, NULL)) { | 125 | if (cs->classid && tcf_em_tree_match(skb, &head->ematches, NULL)) { |
| 121 | res->classid = cs->classid; | 126 | res->classid = cs->classid; |
| 122 | res->class = 0; | 127 | res->class = 0; |
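The cls_cgroup changes swap the raw casts of `cgroup_subsys_state()` for `container_of()`-based accessors (`cgrp_cls_state()`, `task_cls_state()`) and, in `cgrp_destroy()`, free the per-cgroup state that was actually allocated instead of the subsystem pointer that was being passed to `kfree()` by mistake. The accessor idiom, sketched with made-up struct names:

```c
/* container_of() accessor sketch: given a pointer to an embedded
 * member, recover the enclosing object.  Struct names are invented. */
#include <linux/kernel.h>
#include <linux/types.h>

struct demo_base {
	int id;
};

struct demo_state {
	struct demo_base base;	/* embedded, like css in cgroup_cls_state */
	u32 classid;
};

static inline struct demo_state *to_demo_state(struct demo_base *b)
{
	return container_of(b, struct demo_state, base);
}
```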
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 0443f8349458..0c431c277af5 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c | |||
| @@ -234,7 +234,7 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan) | |||
| 234 | list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) { | 234 | list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) { |
| 235 | 235 | ||
| 236 | /* Enforce a 60 second garbage collection moratorium */ | 236 | /* Enforce a 60 second garbage collection moratorium */ |
| 237 | if (time_in_range(cred->cr_expire, expired, jiffies) && | 237 | if (time_in_range_open(cred->cr_expire, expired, jiffies) && |
| 238 | test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) | 238 | test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) |
| 239 | continue; | 239 | continue; |
| 240 | 240 | ||
| @@ -515,7 +515,7 @@ rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, | |||
| 515 | if (cred->cr_ops->crwrap_req) | 515 | if (cred->cr_ops->crwrap_req) |
| 516 | return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); | 516 | return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); |
| 517 | /* By default, we encode the arguments normally. */ | 517 | /* By default, we encode the arguments normally. */ |
| 518 | return rpc_call_xdrproc(encode, rqstp, data, obj); | 518 | return encode(rqstp, data, obj); |
| 519 | } | 519 | } |
| 520 | 520 | ||
| 521 | int | 521 | int |
| @@ -530,7 +530,7 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, | |||
| 530 | return cred->cr_ops->crunwrap_resp(task, decode, rqstp, | 530 | return cred->cr_ops->crunwrap_resp(task, decode, rqstp, |
| 531 | data, obj); | 531 | data, obj); |
| 532 | /* By default, we decode the arguments normally. */ | 532 | /* By default, we decode the arguments normally. */ |
| 533 | return rpc_call_xdrproc(decode, rqstp, data, obj); | 533 | return decode(rqstp, data, obj); |
| 534 | } | 534 | } |
| 535 | 535 | ||
| 536 | int | 536 | int |
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 853a4142cea1..e630b38a6047 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
| @@ -72,11 +72,25 @@ struct gss_auth { | |||
| 72 | struct gss_api_mech *mech; | 72 | struct gss_api_mech *mech; |
| 73 | enum rpc_gss_svc service; | 73 | enum rpc_gss_svc service; |
| 74 | struct rpc_clnt *client; | 74 | struct rpc_clnt *client; |
| 75 | struct dentry *dentry; | 75 | /* |
| 76 | * There are two upcall pipes; dentry[1], named "gssd", is used | ||
| 77 | * for the new text-based upcall; dentry[0] is named after the | ||
| 78 | * mechanism (for example, "krb5") and exists for | ||
| 79 | * backwards-compatibility with older gssd's. | ||
| 80 | */ | ||
| 81 | struct dentry *dentry[2]; | ||
| 76 | }; | 82 | }; |
| 77 | 83 | ||
| 84 | /* pipe_version >= 0 if and only if someone has a pipe open. */ | ||
| 85 | static int pipe_version = -1; | ||
| 86 | static atomic_t pipe_users = ATOMIC_INIT(0); | ||
| 87 | static DEFINE_SPINLOCK(pipe_version_lock); | ||
| 88 | static struct rpc_wait_queue pipe_version_rpc_waitqueue; | ||
| 89 | static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue); | ||
| 90 | |||
| 78 | static void gss_free_ctx(struct gss_cl_ctx *); | 91 | static void gss_free_ctx(struct gss_cl_ctx *); |
| 79 | static struct rpc_pipe_ops gss_upcall_ops; | 92 | static struct rpc_pipe_ops gss_upcall_ops_v0; |
| 93 | static struct rpc_pipe_ops gss_upcall_ops_v1; | ||
| 80 | 94 | ||
| 81 | static inline struct gss_cl_ctx * | 95 | static inline struct gss_cl_ctx * |
| 82 | gss_get_ctx(struct gss_cl_ctx *ctx) | 96 | gss_get_ctx(struct gss_cl_ctx *ctx) |
| @@ -220,6 +234,7 @@ err: | |||
| 220 | return p; | 234 | return p; |
| 221 | } | 235 | } |
| 222 | 236 | ||
| 237 | #define UPCALL_BUF_LEN 128 | ||
| 223 | 238 | ||
| 224 | struct gss_upcall_msg { | 239 | struct gss_upcall_msg { |
| 225 | atomic_t count; | 240 | atomic_t count; |
| @@ -227,16 +242,41 @@ struct gss_upcall_msg { | |||
| 227 | struct rpc_pipe_msg msg; | 242 | struct rpc_pipe_msg msg; |
| 228 | struct list_head list; | 243 | struct list_head list; |
| 229 | struct gss_auth *auth; | 244 | struct gss_auth *auth; |
| 245 | struct rpc_inode *inode; | ||
| 230 | struct rpc_wait_queue rpc_waitqueue; | 246 | struct rpc_wait_queue rpc_waitqueue; |
| 231 | wait_queue_head_t waitqueue; | 247 | wait_queue_head_t waitqueue; |
| 232 | struct gss_cl_ctx *ctx; | 248 | struct gss_cl_ctx *ctx; |
| 249 | char databuf[UPCALL_BUF_LEN]; | ||
| 233 | }; | 250 | }; |
| 234 | 251 | ||
| 252 | static int get_pipe_version(void) | ||
| 253 | { | ||
| 254 | int ret; | ||
| 255 | |||
| 256 | spin_lock(&pipe_version_lock); | ||
| 257 | if (pipe_version >= 0) { | ||
| 258 | atomic_inc(&pipe_users); | ||
| 259 | ret = pipe_version; | ||
| 260 | } else | ||
| 261 | ret = -EAGAIN; | ||
| 262 | spin_unlock(&pipe_version_lock); | ||
| 263 | return ret; | ||
| 264 | } | ||
| 265 | |||
| 266 | static void put_pipe_version(void) | ||
| 267 | { | ||
| 268 | if (atomic_dec_and_lock(&pipe_users, &pipe_version_lock)) { | ||
| 269 | pipe_version = -1; | ||
| 270 | spin_unlock(&pipe_version_lock); | ||
| 271 | } | ||
| 272 | } | ||
| 273 | |||
| 235 | static void | 274 | static void |
| 236 | gss_release_msg(struct gss_upcall_msg *gss_msg) | 275 | gss_release_msg(struct gss_upcall_msg *gss_msg) |
| 237 | { | 276 | { |
| 238 | if (!atomic_dec_and_test(&gss_msg->count)) | 277 | if (!atomic_dec_and_test(&gss_msg->count)) |
| 239 | return; | 278 | return; |
| 279 | put_pipe_version(); | ||
| 240 | BUG_ON(!list_empty(&gss_msg->list)); | 280 | BUG_ON(!list_empty(&gss_msg->list)); |
| 241 | if (gss_msg->ctx != NULL) | 281 | if (gss_msg->ctx != NULL) |
| 242 | gss_put_ctx(gss_msg->ctx); | 282 | gss_put_ctx(gss_msg->ctx); |
| @@ -266,8 +306,8 @@ __gss_find_upcall(struct rpc_inode *rpci, uid_t uid) | |||
| 266 | static inline struct gss_upcall_msg * | 306 | static inline struct gss_upcall_msg * |
| 267 | gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg) | 307 | gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg) |
| 268 | { | 308 | { |
| 269 | struct inode *inode = gss_auth->dentry->d_inode; | 309 | struct rpc_inode *rpci = gss_msg->inode; |
| 270 | struct rpc_inode *rpci = RPC_I(inode); | 310 | struct inode *inode = &rpci->vfs_inode; |
| 271 | struct gss_upcall_msg *old; | 311 | struct gss_upcall_msg *old; |
| 272 | 312 | ||
| 273 | spin_lock(&inode->i_lock); | 313 | spin_lock(&inode->i_lock); |
| @@ -293,8 +333,7 @@ __gss_unhash_msg(struct gss_upcall_msg *gss_msg) | |||
| 293 | static void | 333 | static void |
| 294 | gss_unhash_msg(struct gss_upcall_msg *gss_msg) | 334 | gss_unhash_msg(struct gss_upcall_msg *gss_msg) |
| 295 | { | 335 | { |
| 296 | struct gss_auth *gss_auth = gss_msg->auth; | 336 | struct inode *inode = &gss_msg->inode->vfs_inode; |
| 297 | struct inode *inode = gss_auth->dentry->d_inode; | ||
| 298 | 337 | ||
| 299 | if (list_empty(&gss_msg->list)) | 338 | if (list_empty(&gss_msg->list)) |
| 300 | return; | 339 | return; |
| @@ -310,7 +349,7 @@ gss_upcall_callback(struct rpc_task *task) | |||
| 310 | struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred, | 349 | struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred, |
| 311 | struct gss_cred, gc_base); | 350 | struct gss_cred, gc_base); |
| 312 | struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall; | 351 | struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall; |
| 313 | struct inode *inode = gss_msg->auth->dentry->d_inode; | 352 | struct inode *inode = &gss_msg->inode->vfs_inode; |
| 314 | 353 | ||
| 315 | spin_lock(&inode->i_lock); | 354 | spin_lock(&inode->i_lock); |
| 316 | if (gss_msg->ctx) | 355 | if (gss_msg->ctx) |
| @@ -323,22 +362,75 @@ gss_upcall_callback(struct rpc_task *task) | |||
| 323 | gss_release_msg(gss_msg); | 362 | gss_release_msg(gss_msg); |
| 324 | } | 363 | } |
| 325 | 364 | ||
| 365 | static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg) | ||
| 366 | { | ||
| 367 | gss_msg->msg.data = &gss_msg->uid; | ||
| 368 | gss_msg->msg.len = sizeof(gss_msg->uid); | ||
| 369 | } | ||
| 370 | |||
| 371 | static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg, | ||
| 372 | struct rpc_clnt *clnt, int machine_cred) | ||
| 373 | { | ||
| 374 | char *p = gss_msg->databuf; | ||
| 375 | int len = 0; | ||
| 376 | |||
| 377 | gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ", | ||
| 378 | gss_msg->auth->mech->gm_name, | ||
| 379 | gss_msg->uid); | ||
| 380 | p += gss_msg->msg.len; | ||
| 381 | if (clnt->cl_principal) { | ||
| 382 | len = sprintf(p, "target=%s ", clnt->cl_principal); | ||
| 383 | p += len; | ||
| 384 | gss_msg->msg.len += len; | ||
| 385 | } | ||
| 386 | if (machine_cred) { | ||
| 387 | len = sprintf(p, "service=* "); | ||
| 388 | p += len; | ||
| 389 | gss_msg->msg.len += len; | ||
| 390 | } else if (!strcmp(clnt->cl_program->name, "nfs4_cb")) { | ||
| 391 | len = sprintf(p, "service=nfs "); | ||
| 392 | p += len; | ||
| 393 | gss_msg->msg.len += len; | ||
| 394 | } | ||
| 395 | len = sprintf(p, "\n"); | ||
| 396 | gss_msg->msg.len += len; | ||
| 397 | |||
| 398 | gss_msg->msg.data = gss_msg->databuf; | ||
| 399 | BUG_ON(gss_msg->msg.len > UPCALL_BUF_LEN); | ||
| 400 | } | ||
| 401 | |||
| 402 | static void gss_encode_msg(struct gss_upcall_msg *gss_msg, | ||
| 403 | struct rpc_clnt *clnt, int machine_cred) | ||
| 404 | { | ||
| 405 | if (pipe_version == 0) | ||
| 406 | gss_encode_v0_msg(gss_msg); | ||
| 407 | else /* pipe_version == 1 */ | ||
| 408 | gss_encode_v1_msg(gss_msg, clnt, machine_cred); | ||
| 409 | } | ||
| 410 | |||
| 326 | static inline struct gss_upcall_msg * | 411 | static inline struct gss_upcall_msg * |
| 327 | gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid) | 412 | gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid, struct rpc_clnt *clnt, |
| 413 | int machine_cred) | ||
| 328 | { | 414 | { |
| 329 | struct gss_upcall_msg *gss_msg; | 415 | struct gss_upcall_msg *gss_msg; |
| 416 | int vers; | ||
| 330 | 417 | ||
| 331 | gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS); | 418 | gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS); |
| 332 | if (gss_msg != NULL) { | 419 | if (gss_msg == NULL) |
| 333 | INIT_LIST_HEAD(&gss_msg->list); | 420 | return ERR_PTR(-ENOMEM); |
| 334 | rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq"); | 421 | vers = get_pipe_version(); |
| 335 | init_waitqueue_head(&gss_msg->waitqueue); | 422 | if (vers < 0) { |
| 336 | atomic_set(&gss_msg->count, 1); | 423 | kfree(gss_msg); |
| 337 | gss_msg->msg.data = &gss_msg->uid; | 424 | return ERR_PTR(vers); |
| 338 | gss_msg->msg.len = sizeof(gss_msg->uid); | ||
| 339 | gss_msg->uid = uid; | ||
| 340 | gss_msg->auth = gss_auth; | ||
| 341 | } | 425 | } |
| 426 | gss_msg->inode = RPC_I(gss_auth->dentry[vers]->d_inode); | ||
| 427 | INIT_LIST_HEAD(&gss_msg->list); | ||
| 428 | rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq"); | ||
| 429 | init_waitqueue_head(&gss_msg->waitqueue); | ||
| 430 | atomic_set(&gss_msg->count, 1); | ||
| 431 | gss_msg->uid = uid; | ||
| 432 | gss_msg->auth = gss_auth; | ||
| 433 | gss_encode_msg(gss_msg, clnt, machine_cred); | ||
| 342 | return gss_msg; | 434 | return gss_msg; |
| 343 | } | 435 | } |
| 344 | 436 | ||
| @@ -350,16 +442,13 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr | |||
| 350 | struct gss_upcall_msg *gss_new, *gss_msg; | 442 | struct gss_upcall_msg *gss_new, *gss_msg; |
| 351 | uid_t uid = cred->cr_uid; | 443 | uid_t uid = cred->cr_uid; |
| 352 | 444 | ||
| 353 | /* Special case: rpc.gssd assumes that uid == 0 implies machine creds */ | 445 | gss_new = gss_alloc_msg(gss_auth, uid, clnt, gss_cred->gc_machine_cred); |
| 354 | if (gss_cred->gc_machine_cred != 0) | 446 | if (IS_ERR(gss_new)) |
| 355 | uid = 0; | 447 | return gss_new; |
| 356 | |||
| 357 | gss_new = gss_alloc_msg(gss_auth, uid); | ||
| 358 | if (gss_new == NULL) | ||
| 359 | return ERR_PTR(-ENOMEM); | ||
| 360 | gss_msg = gss_add_msg(gss_auth, gss_new); | 448 | gss_msg = gss_add_msg(gss_auth, gss_new); |
| 361 | if (gss_msg == gss_new) { | 449 | if (gss_msg == gss_new) { |
| 362 | int res = rpc_queue_upcall(gss_auth->dentry->d_inode, &gss_new->msg); | 450 | struct inode *inode = &gss_new->inode->vfs_inode; |
| 451 | int res = rpc_queue_upcall(inode, &gss_new->msg); | ||
| 363 | if (res) { | 452 | if (res) { |
| 364 | gss_unhash_msg(gss_new); | 453 | gss_unhash_msg(gss_new); |
| 365 | gss_msg = ERR_PTR(res); | 454 | gss_msg = ERR_PTR(res); |
| @@ -369,6 +458,18 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr | |||
| 369 | return gss_msg; | 458 | return gss_msg; |
| 370 | } | 459 | } |
| 371 | 460 | ||
| 461 | static void warn_gssd(void) | ||
| 462 | { | ||
| 463 | static unsigned long ratelimit; | ||
| 464 | unsigned long now = jiffies; | ||
| 465 | |||
| 466 | if (time_after(now, ratelimit)) { | ||
| 467 | printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n" | ||
| 468 | "Please check user daemon is running.\n"); | ||
| 469 | ratelimit = now + 15*HZ; | ||
| 470 | } | ||
| 471 | } | ||
| 472 | |||
| 372 | static inline int | 473 | static inline int |
| 373 | gss_refresh_upcall(struct rpc_task *task) | 474 | gss_refresh_upcall(struct rpc_task *task) |
| 374 | { | 475 | { |
| @@ -378,16 +479,25 @@ gss_refresh_upcall(struct rpc_task *task) | |||
| 378 | struct gss_cred *gss_cred = container_of(cred, | 479 | struct gss_cred *gss_cred = container_of(cred, |
| 379 | struct gss_cred, gc_base); | 480 | struct gss_cred, gc_base); |
| 380 | struct gss_upcall_msg *gss_msg; | 481 | struct gss_upcall_msg *gss_msg; |
| 381 | struct inode *inode = gss_auth->dentry->d_inode; | 482 | struct inode *inode; |
| 382 | int err = 0; | 483 | int err = 0; |
| 383 | 484 | ||
| 384 | dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, | 485 | dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, |
| 385 | cred->cr_uid); | 486 | cred->cr_uid); |
| 386 | gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred); | 487 | gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred); |
| 488 | if (IS_ERR(gss_msg) == -EAGAIN) { | ||
| 489 | /* XXX: warning on the first, under the assumption we | ||
| 490 | * shouldn't normally hit this case on a refresh. */ | ||
| 491 | warn_gssd(); | ||
| 492 | task->tk_timeout = 15*HZ; | ||
| 493 | rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL); | ||
| 494 | return 0; | ||
| 495 | } | ||
| 387 | if (IS_ERR(gss_msg)) { | 496 | if (IS_ERR(gss_msg)) { |
| 388 | err = PTR_ERR(gss_msg); | 497 | err = PTR_ERR(gss_msg); |
| 389 | goto out; | 498 | goto out; |
| 390 | } | 499 | } |
| 500 | inode = &gss_msg->inode->vfs_inode; | ||
| 391 | spin_lock(&inode->i_lock); | 501 | spin_lock(&inode->i_lock); |
| 392 | if (gss_cred->gc_upcall != NULL) | 502 | if (gss_cred->gc_upcall != NULL) |
| 393 | rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL); | 503 | rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL); |
| @@ -414,18 +524,29 @@ out: | |||
| 414 | static inline int | 524 | static inline int |
| 415 | gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) | 525 | gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) |
| 416 | { | 526 | { |
| 417 | struct inode *inode = gss_auth->dentry->d_inode; | 527 | struct inode *inode; |
| 418 | struct rpc_cred *cred = &gss_cred->gc_base; | 528 | struct rpc_cred *cred = &gss_cred->gc_base; |
| 419 | struct gss_upcall_msg *gss_msg; | 529 | struct gss_upcall_msg *gss_msg; |
| 420 | DEFINE_WAIT(wait); | 530 | DEFINE_WAIT(wait); |
| 421 | int err = 0; | 531 | int err = 0; |
| 422 | 532 | ||
| 423 | dprintk("RPC: gss_upcall for uid %u\n", cred->cr_uid); | 533 | dprintk("RPC: gss_upcall for uid %u\n", cred->cr_uid); |
| 534 | retry: | ||
| 424 | gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred); | 535 | gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred); |
| 536 | if (PTR_ERR(gss_msg) == -EAGAIN) { | ||
| 537 | err = wait_event_interruptible_timeout(pipe_version_waitqueue, | ||
| 538 | pipe_version >= 0, 15*HZ); | ||
| 539 | if (err) | ||
| 540 | goto out; | ||
| 541 | if (pipe_version < 0) | ||
| 542 | warn_gssd(); | ||
| 543 | goto retry; | ||
| 544 | } | ||
| 425 | if (IS_ERR(gss_msg)) { | 545 | if (IS_ERR(gss_msg)) { |
| 426 | err = PTR_ERR(gss_msg); | 546 | err = PTR_ERR(gss_msg); |
| 427 | goto out; | 547 | goto out; |
| 428 | } | 548 | } |
| 549 | inode = &gss_msg->inode->vfs_inode; | ||
| 429 | for (;;) { | 550 | for (;;) { |
| 430 | prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE); | 551 | prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE); |
| 431 | spin_lock(&inode->i_lock); | 552 | spin_lock(&inode->i_lock); |
| @@ -543,6 +664,38 @@ out: | |||
| 543 | return err; | 664 | return err; |
| 544 | } | 665 | } |
| 545 | 666 | ||
| 667 | static int gss_pipe_open(struct inode *inode, int new_version) | ||
| 668 | { | ||
| 669 | int ret = 0; | ||
| 670 | |||
| 671 | spin_lock(&pipe_version_lock); | ||
| 672 | if (pipe_version < 0) { | ||
| 673 | /* First open of any gss pipe determines the version: */ | ||
| 674 | pipe_version = new_version; | ||
| 675 | rpc_wake_up(&pipe_version_rpc_waitqueue); | ||
| 676 | wake_up(&pipe_version_waitqueue); | ||
| 677 | } else if (pipe_version != new_version) { | ||
| 678 | /* Trying to open a pipe of a different version */ | ||
| 679 | ret = -EBUSY; | ||
| 680 | goto out; | ||
| 681 | } | ||
| 682 | atomic_inc(&pipe_users); | ||
| 683 | out: | ||
| 684 | spin_unlock(&pipe_version_lock); | ||
| 685 | return ret; | ||
| 686 | |||
| 687 | } | ||
| 688 | |||
| 689 | static int gss_pipe_open_v0(struct inode *inode) | ||
| 690 | { | ||
| 691 | return gss_pipe_open(inode, 0); | ||
| 692 | } | ||
| 693 | |||
| 694 | static int gss_pipe_open_v1(struct inode *inode) | ||
| 695 | { | ||
| 696 | return gss_pipe_open(inode, 1); | ||
| 697 | } | ||
| 698 | |||
| 546 | static void | 699 | static void |
| 547 | gss_pipe_release(struct inode *inode) | 700 | gss_pipe_release(struct inode *inode) |
| 548 | { | 701 | { |
| @@ -562,27 +715,22 @@ gss_pipe_release(struct inode *inode) | |||
| 562 | spin_lock(&inode->i_lock); | 715 | spin_lock(&inode->i_lock); |
| 563 | } | 716 | } |
| 564 | spin_unlock(&inode->i_lock); | 717 | spin_unlock(&inode->i_lock); |
| 718 | |||
| 719 | put_pipe_version(); | ||
| 565 | } | 720 | } |
| 566 | 721 | ||
| 567 | static void | 722 | static void |
| 568 | gss_pipe_destroy_msg(struct rpc_pipe_msg *msg) | 723 | gss_pipe_destroy_msg(struct rpc_pipe_msg *msg) |
| 569 | { | 724 | { |
| 570 | struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg); | 725 | struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg); |
| 571 | static unsigned long ratelimit; | ||
| 572 | 726 | ||
| 573 | if (msg->errno < 0) { | 727 | if (msg->errno < 0) { |
| 574 | dprintk("RPC: gss_pipe_destroy_msg releasing msg %p\n", | 728 | dprintk("RPC: gss_pipe_destroy_msg releasing msg %p\n", |
| 575 | gss_msg); | 729 | gss_msg); |
| 576 | atomic_inc(&gss_msg->count); | 730 | atomic_inc(&gss_msg->count); |
| 577 | gss_unhash_msg(gss_msg); | 731 | gss_unhash_msg(gss_msg); |
| 578 | if (msg->errno == -ETIMEDOUT) { | 732 | if (msg->errno == -ETIMEDOUT) |
| 579 | unsigned long now = jiffies; | 733 | warn_gssd(); |
| 580 | if (time_after(now, ratelimit)) { | ||
| 581 | printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n" | ||
| 582 | "Please check user daemon is running!\n"); | ||
| 583 | ratelimit = now + 15*HZ; | ||
| 584 | } | ||
| 585 | } | ||
| 586 | gss_release_msg(gss_msg); | 734 | gss_release_msg(gss_msg); |
| 587 | } | 735 | } |
| 588 | } | 736 | } |
| @@ -623,20 +771,38 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) | |||
| 623 | atomic_set(&auth->au_count, 1); | 771 | atomic_set(&auth->au_count, 1); |
| 624 | kref_init(&gss_auth->kref); | 772 | kref_init(&gss_auth->kref); |
| 625 | 773 | ||
| 626 | gss_auth->dentry = rpc_mkpipe(clnt->cl_dentry, gss_auth->mech->gm_name, | 774 | /* |
| 627 | clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN); | 775 | * Note: if we created the old pipe first, then someone who |
| 628 | if (IS_ERR(gss_auth->dentry)) { | 776 | * examined the directory at the right moment might conclude |
| 629 | err = PTR_ERR(gss_auth->dentry); | 777 | * that we supported only the old pipe. So we instead create |
| 778 | * the new pipe first. | ||
| 779 | */ | ||
| 780 | gss_auth->dentry[1] = rpc_mkpipe(clnt->cl_dentry, | ||
| 781 | "gssd", | ||
| 782 | clnt, &gss_upcall_ops_v1, | ||
| 783 | RPC_PIPE_WAIT_FOR_OPEN); | ||
| 784 | if (IS_ERR(gss_auth->dentry[1])) { | ||
| 785 | err = PTR_ERR(gss_auth->dentry[1]); | ||
| 630 | goto err_put_mech; | 786 | goto err_put_mech; |
| 631 | } | 787 | } |
| 632 | 788 | ||
| 789 | gss_auth->dentry[0] = rpc_mkpipe(clnt->cl_dentry, | ||
| 790 | gss_auth->mech->gm_name, | ||
| 791 | clnt, &gss_upcall_ops_v0, | ||
| 792 | RPC_PIPE_WAIT_FOR_OPEN); | ||
| 793 | if (IS_ERR(gss_auth->dentry[0])) { | ||
| 794 | err = PTR_ERR(gss_auth->dentry[0]); | ||
| 795 | goto err_unlink_pipe_1; | ||
| 796 | } | ||
| 633 | err = rpcauth_init_credcache(auth); | 797 | err = rpcauth_init_credcache(auth); |
| 634 | if (err) | 798 | if (err) |
| 635 | goto err_unlink_pipe; | 799 | goto err_unlink_pipe_0; |
| 636 | 800 | ||
| 637 | return auth; | 801 | return auth; |
| 638 | err_unlink_pipe: | 802 | err_unlink_pipe_0: |
| 639 | rpc_unlink(gss_auth->dentry); | 803 | rpc_unlink(gss_auth->dentry[0]); |
| 804 | err_unlink_pipe_1: | ||
| 805 | rpc_unlink(gss_auth->dentry[1]); | ||
| 640 | err_put_mech: | 806 | err_put_mech: |
| 641 | gss_mech_put(gss_auth->mech); | 807 | gss_mech_put(gss_auth->mech); |
| 642 | err_free: | 808 | err_free: |
| @@ -649,8 +815,8 @@ out_dec: | |||
| 649 | static void | 815 | static void |
| 650 | gss_free(struct gss_auth *gss_auth) | 816 | gss_free(struct gss_auth *gss_auth) |
| 651 | { | 817 | { |
| 652 | rpc_unlink(gss_auth->dentry); | 818 | rpc_unlink(gss_auth->dentry[1]); |
| 653 | gss_auth->dentry = NULL; | 819 | rpc_unlink(gss_auth->dentry[0]); |
| 654 | gss_mech_put(gss_auth->mech); | 820 | gss_mech_put(gss_auth->mech); |
| 655 | 821 | ||
| 656 | kfree(gss_auth); | 822 | kfree(gss_auth); |
| @@ -693,7 +859,7 @@ gss_destroying_context(struct rpc_cred *cred) | |||
| 693 | struct rpc_task *task; | 859 | struct rpc_task *task; |
| 694 | 860 | ||
| 695 | if (gss_cred->gc_ctx == NULL || | 861 | if (gss_cred->gc_ctx == NULL || |
| 696 | test_and_clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0) | 862 | test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0) |
| 697 | return 0; | 863 | return 0; |
| 698 | 864 | ||
| 699 | gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY; | 865 | gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY; |
| @@ -757,14 +923,12 @@ gss_free_cred_callback(struct rcu_head *head) | |||
| 757 | } | 923 | } |
| 758 | 924 | ||
| 759 | static void | 925 | static void |
| 760 | gss_destroy_cred(struct rpc_cred *cred) | 926 | gss_destroy_nullcred(struct rpc_cred *cred) |
| 761 | { | 927 | { |
| 762 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); | 928 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); |
| 763 | struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth); | 929 | struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth); |
| 764 | struct gss_cl_ctx *ctx = gss_cred->gc_ctx; | 930 | struct gss_cl_ctx *ctx = gss_cred->gc_ctx; |
| 765 | 931 | ||
| 766 | if (gss_destroying_context(cred)) | ||
| 767 | return; | ||
| 768 | rcu_assign_pointer(gss_cred->gc_ctx, NULL); | 932 | rcu_assign_pointer(gss_cred->gc_ctx, NULL); |
| 769 | call_rcu(&cred->cr_rcu, gss_free_cred_callback); | 933 | call_rcu(&cred->cr_rcu, gss_free_cred_callback); |
| 770 | if (ctx) | 934 | if (ctx) |
| @@ -772,6 +936,15 @@ gss_destroy_cred(struct rpc_cred *cred) | |||
| 772 | kref_put(&gss_auth->kref, gss_free_callback); | 936 | kref_put(&gss_auth->kref, gss_free_callback); |
| 773 | } | 937 | } |
| 774 | 938 | ||
| 939 | static void | ||
| 940 | gss_destroy_cred(struct rpc_cred *cred) | ||
| 941 | { | ||
| 942 | |||
| 943 | if (gss_destroying_context(cred)) | ||
| 944 | return; | ||
| 945 | gss_destroy_nullcred(cred); | ||
| 946 | } | ||
| 947 | |||
| 775 | /* | 948 | /* |
| 776 | * Lookup RPCSEC_GSS cred for the current process | 949 | * Lookup RPCSEC_GSS cred for the current process |
| 777 | */ | 950 | */ |
| @@ -1017,7 +1190,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
| 1017 | offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; | 1190 | offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; |
| 1018 | *p++ = htonl(rqstp->rq_seqno); | 1191 | *p++ = htonl(rqstp->rq_seqno); |
| 1019 | 1192 | ||
| 1020 | status = rpc_call_xdrproc(encode, rqstp, p, obj); | 1193 | status = encode(rqstp, p, obj); |
| 1021 | if (status) | 1194 | if (status) |
| 1022 | return status; | 1195 | return status; |
| 1023 | 1196 | ||
| @@ -1111,7 +1284,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
| 1111 | offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; | 1284 | offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; |
| 1112 | *p++ = htonl(rqstp->rq_seqno); | 1285 | *p++ = htonl(rqstp->rq_seqno); |
| 1113 | 1286 | ||
| 1114 | status = rpc_call_xdrproc(encode, rqstp, p, obj); | 1287 | status = encode(rqstp, p, obj); |
| 1115 | if (status) | 1288 | if (status) |
| 1116 | return status; | 1289 | return status; |
| 1117 | 1290 | ||
| @@ -1170,12 +1343,12 @@ gss_wrap_req(struct rpc_task *task, | |||
| 1170 | /* The spec seems a little ambiguous here, but I think that not | 1343 | /* The spec seems a little ambiguous here, but I think that not |
| 1171 | * wrapping context destruction requests makes the most sense. | 1344 | * wrapping context destruction requests makes the most sense. |
| 1172 | */ | 1345 | */ |
| 1173 | status = rpc_call_xdrproc(encode, rqstp, p, obj); | 1346 | status = encode(rqstp, p, obj); |
| 1174 | goto out; | 1347 | goto out; |
| 1175 | } | 1348 | } |
| 1176 | switch (gss_cred->gc_service) { | 1349 | switch (gss_cred->gc_service) { |
| 1177 | case RPC_GSS_SVC_NONE: | 1350 | case RPC_GSS_SVC_NONE: |
| 1178 | status = rpc_call_xdrproc(encode, rqstp, p, obj); | 1351 | status = encode(rqstp, p, obj); |
| 1179 | break; | 1352 | break; |
| 1180 | case RPC_GSS_SVC_INTEGRITY: | 1353 | case RPC_GSS_SVC_INTEGRITY: |
| 1181 | status = gss_wrap_req_integ(cred, ctx, encode, | 1354 | status = gss_wrap_req_integ(cred, ctx, encode, |
| @@ -1291,7 +1464,7 @@ gss_unwrap_resp(struct rpc_task *task, | |||
| 1291 | cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp) | 1464 | cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp) |
| 1292 | + (savedlen - head->iov_len); | 1465 | + (savedlen - head->iov_len); |
| 1293 | out_decode: | 1466 | out_decode: |
| 1294 | status = rpc_call_xdrproc(decode, rqstp, p, obj); | 1467 | status = decode(rqstp, p, obj); |
| 1295 | out: | 1468 | out: |
| 1296 | gss_put_ctx(ctx); | 1469 | gss_put_ctx(ctx); |
| 1297 | dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid, | 1470 | dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid, |
| @@ -1324,7 +1497,7 @@ static const struct rpc_credops gss_credops = { | |||
| 1324 | 1497 | ||
| 1325 | static const struct rpc_credops gss_nullops = { | 1498 | static const struct rpc_credops gss_nullops = { |
| 1326 | .cr_name = "AUTH_GSS", | 1499 | .cr_name = "AUTH_GSS", |
| 1327 | .crdestroy = gss_destroy_cred, | 1500 | .crdestroy = gss_destroy_nullcred, |
| 1328 | .crbind = rpcauth_generic_bind_cred, | 1501 | .crbind = rpcauth_generic_bind_cred, |
| 1329 | .crmatch = gss_match, | 1502 | .crmatch = gss_match, |
| 1330 | .crmarshal = gss_marshal, | 1503 | .crmarshal = gss_marshal, |
| @@ -1334,10 +1507,19 @@ static const struct rpc_credops gss_nullops = { | |||
| 1334 | .crunwrap_resp = gss_unwrap_resp, | 1507 | .crunwrap_resp = gss_unwrap_resp, |
| 1335 | }; | 1508 | }; |
| 1336 | 1509 | ||
| 1337 | static struct rpc_pipe_ops gss_upcall_ops = { | 1510 | static struct rpc_pipe_ops gss_upcall_ops_v0 = { |
| 1511 | .upcall = gss_pipe_upcall, | ||
| 1512 | .downcall = gss_pipe_downcall, | ||
| 1513 | .destroy_msg = gss_pipe_destroy_msg, | ||
| 1514 | .open_pipe = gss_pipe_open_v0, | ||
| 1515 | .release_pipe = gss_pipe_release, | ||
| 1516 | }; | ||
| 1517 | |||
| 1518 | static struct rpc_pipe_ops gss_upcall_ops_v1 = { | ||
| 1338 | .upcall = gss_pipe_upcall, | 1519 | .upcall = gss_pipe_upcall, |
| 1339 | .downcall = gss_pipe_downcall, | 1520 | .downcall = gss_pipe_downcall, |
| 1340 | .destroy_msg = gss_pipe_destroy_msg, | 1521 | .destroy_msg = gss_pipe_destroy_msg, |
| 1522 | .open_pipe = gss_pipe_open_v1, | ||
| 1341 | .release_pipe = gss_pipe_release, | 1523 | .release_pipe = gss_pipe_release, |
| 1342 | }; | 1524 | }; |
| 1343 | 1525 | ||
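The single gss_upcall_ops table is split into v0 and v1 variants that differ only in their new open_pipe hook. The bodies of gss_pipe_open_v0/v1 lie outside this hunk; purely as an illustration of what such a hook could do (the names, locking and logic below are assumptions, not the in-tree implementation), version arbitration might look like:

```c
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_version_lock);
static int demo_pipe_version = -1;	/* -1: pipe not opened yet */

/* First opener picks the upcall format; a later opener asking for a
 * different format is refused, so kernel and userspace never disagree. */
static int demo_gss_pipe_open(struct inode *inode, int new_version)
{
	int ret = 0;

	spin_lock(&demo_version_lock);
	if (demo_pipe_version < 0)
		demo_pipe_version = new_version;
	else if (demo_pipe_version != new_version)
		ret = -EBUSY;
	spin_unlock(&demo_version_lock);
	return ret;
}

static int demo_gss_pipe_open_v0(struct inode *inode)
{
	return demo_gss_pipe_open(inode, 0);
}

static int demo_gss_pipe_open_v1(struct inode *inode)
{
	return demo_gss_pipe_open(inode, 1);
}
```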
| @@ -1354,6 +1536,7 @@ static int __init init_rpcsec_gss(void) | |||
| 1354 | err = gss_svc_init(); | 1536 | err = gss_svc_init(); |
| 1355 | if (err) | 1537 | if (err) |
| 1356 | goto out_unregister; | 1538 | goto out_unregister; |
| 1539 | rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version"); | ||
| 1357 | return 0; | 1540 | return 0; |
| 1358 | out_unregister: | 1541 | out_unregister: |
| 1359 | rpcauth_unregister(&authgss_ops); | 1542 | rpcauth_unregister(&authgss_ops); |
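init_rpcsec_gss() now also prepares an rpc_wait_queue named "gss pipe version". How that queue is used lies outside this hunk; a hedged sketch of the usual pattern with the standard SUNRPC primitives (the surrounding demo_* logic is invented for illustration):

```c
#include <linux/init.h>
#include <linux/sunrpc/sched.h>

static struct rpc_wait_queue demo_pipe_version_waitqueue;

/* Park the calling rpc_task until the pipe version has been decided. */
static void demo_wait_for_pipe_version(struct rpc_task *task)
{
	rpc_sleep_on(&demo_pipe_version_waitqueue, task, NULL);
}

/* Once the version is known, let every parked task retry. */
static void demo_pipe_version_decided(void)
{
	rpc_wake_up(&demo_pipe_version_waitqueue);
}

static int __init demo_init(void)
{
	rpc_init_wait_queue(&demo_pipe_version_waitqueue, "gss pipe version");
	return 0;
}
```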
diff --git a/net/sunrpc/auth_gss/gss_generic_token.c b/net/sunrpc/auth_gss/gss_generic_token.c index d83b881685fe..c0ba39c4f5f2 100644 --- a/net/sunrpc/auth_gss/gss_generic_token.c +++ b/net/sunrpc/auth_gss/gss_generic_token.c | |||
| @@ -152,7 +152,7 @@ g_token_size(struct xdr_netobj *mech, unsigned int body_size) | |||
| 152 | return(1 + der_length_size(body_size) + body_size); | 152 | return(1 + der_length_size(body_size) + body_size); |
| 153 | } | 153 | } |
| 154 | 154 | ||
| 155 | EXPORT_SYMBOL(g_token_size); | 155 | EXPORT_SYMBOL_GPL(g_token_size); |
| 156 | 156 | ||
| 157 | /* fills in a buffer with the token header. The buffer is assumed to | 157 | /* fills in a buffer with the token header. The buffer is assumed to |
| 158 | be the right size. buf is advanced past the token header */ | 158 | be the right size. buf is advanced past the token header */ |
| @@ -167,7 +167,7 @@ g_make_token_header(struct xdr_netobj *mech, int body_size, unsigned char **buf) | |||
| 167 | TWRITE_STR(*buf, mech->data, ((int) mech->len)); | 167 | TWRITE_STR(*buf, mech->data, ((int) mech->len)); |
| 168 | } | 168 | } |
| 169 | 169 | ||
| 170 | EXPORT_SYMBOL(g_make_token_header); | 170 | EXPORT_SYMBOL_GPL(g_make_token_header); |
| 171 | 171 | ||
| 172 | /* | 172 | /* |
| 173 | * Given a buffer containing a token, reads and verifies the token, | 173 | * Given a buffer containing a token, reads and verifies the token, |
| @@ -231,5 +231,5 @@ g_verify_token_header(struct xdr_netobj *mech, int *body_size, | |||
| 231 | return(ret); | 231 | return(ret); |
| 232 | } | 232 | } |
| 233 | 233 | ||
| 234 | EXPORT_SYMBOL(g_verify_token_header); | 234 | EXPORT_SYMBOL_GPL(g_verify_token_header); |
| 235 | 235 | ||
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c index bce9d527af08..6efbb0cd3c7c 100644 --- a/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/net/sunrpc/auth_gss/gss_mech_switch.c | |||
| @@ -117,7 +117,7 @@ gss_mech_register(struct gss_api_mech *gm) | |||
| 117 | return 0; | 117 | return 0; |
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | EXPORT_SYMBOL(gss_mech_register); | 120 | EXPORT_SYMBOL_GPL(gss_mech_register); |
| 121 | 121 | ||
| 122 | void | 122 | void |
| 123 | gss_mech_unregister(struct gss_api_mech *gm) | 123 | gss_mech_unregister(struct gss_api_mech *gm) |
| @@ -129,7 +129,7 @@ gss_mech_unregister(struct gss_api_mech *gm) | |||
| 129 | gss_mech_free(gm); | 129 | gss_mech_free(gm); |
| 130 | } | 130 | } |
| 131 | 131 | ||
| 132 | EXPORT_SYMBOL(gss_mech_unregister); | 132 | EXPORT_SYMBOL_GPL(gss_mech_unregister); |
| 133 | 133 | ||
| 134 | struct gss_api_mech * | 134 | struct gss_api_mech * |
| 135 | gss_mech_get(struct gss_api_mech *gm) | 135 | gss_mech_get(struct gss_api_mech *gm) |
| @@ -138,7 +138,7 @@ gss_mech_get(struct gss_api_mech *gm) | |||
| 138 | return gm; | 138 | return gm; |
| 139 | } | 139 | } |
| 140 | 140 | ||
| 141 | EXPORT_SYMBOL(gss_mech_get); | 141 | EXPORT_SYMBOL_GPL(gss_mech_get); |
| 142 | 142 | ||
| 143 | struct gss_api_mech * | 143 | struct gss_api_mech * |
| 144 | gss_mech_get_by_name(const char *name) | 144 | gss_mech_get_by_name(const char *name) |
| @@ -158,7 +158,7 @@ gss_mech_get_by_name(const char *name) | |||
| 158 | 158 | ||
| 159 | } | 159 | } |
| 160 | 160 | ||
| 161 | EXPORT_SYMBOL(gss_mech_get_by_name); | 161 | EXPORT_SYMBOL_GPL(gss_mech_get_by_name); |
| 162 | 162 | ||
| 163 | static inline int | 163 | static inline int |
| 164 | mech_supports_pseudoflavor(struct gss_api_mech *gm, u32 pseudoflavor) | 164 | mech_supports_pseudoflavor(struct gss_api_mech *gm, u32 pseudoflavor) |
| @@ -191,7 +191,7 @@ gss_mech_get_by_pseudoflavor(u32 pseudoflavor) | |||
| 191 | return gm; | 191 | return gm; |
| 192 | } | 192 | } |
| 193 | 193 | ||
| 194 | EXPORT_SYMBOL(gss_mech_get_by_pseudoflavor); | 194 | EXPORT_SYMBOL_GPL(gss_mech_get_by_pseudoflavor); |
| 195 | 195 | ||
| 196 | u32 | 196 | u32 |
| 197 | gss_svc_to_pseudoflavor(struct gss_api_mech *gm, u32 service) | 197 | gss_svc_to_pseudoflavor(struct gss_api_mech *gm, u32 service) |
| @@ -205,7 +205,7 @@ gss_svc_to_pseudoflavor(struct gss_api_mech *gm, u32 service) | |||
| 205 | } | 205 | } |
| 206 | return RPC_AUTH_MAXFLAVOR; /* illegal value */ | 206 | return RPC_AUTH_MAXFLAVOR; /* illegal value */ |
| 207 | } | 207 | } |
| 208 | EXPORT_SYMBOL(gss_svc_to_pseudoflavor); | 208 | EXPORT_SYMBOL_GPL(gss_svc_to_pseudoflavor); |
| 209 | 209 | ||
| 210 | u32 | 210 | u32 |
| 211 | gss_pseudoflavor_to_service(struct gss_api_mech *gm, u32 pseudoflavor) | 211 | gss_pseudoflavor_to_service(struct gss_api_mech *gm, u32 pseudoflavor) |
| @@ -219,7 +219,7 @@ gss_pseudoflavor_to_service(struct gss_api_mech *gm, u32 pseudoflavor) | |||
| 219 | return 0; | 219 | return 0; |
| 220 | } | 220 | } |
| 221 | 221 | ||
| 222 | EXPORT_SYMBOL(gss_pseudoflavor_to_service); | 222 | EXPORT_SYMBOL_GPL(gss_pseudoflavor_to_service); |
| 223 | 223 | ||
| 224 | char * | 224 | char * |
| 225 | gss_service_to_auth_domain_name(struct gss_api_mech *gm, u32 service) | 225 | gss_service_to_auth_domain_name(struct gss_api_mech *gm, u32 service) |
| @@ -233,7 +233,7 @@ gss_service_to_auth_domain_name(struct gss_api_mech *gm, u32 service) | |||
| 233 | return NULL; | 233 | return NULL; |
| 234 | } | 234 | } |
| 235 | 235 | ||
| 236 | EXPORT_SYMBOL(gss_service_to_auth_domain_name); | 236 | EXPORT_SYMBOL_GPL(gss_service_to_auth_domain_name); |
| 237 | 237 | ||
| 238 | void | 238 | void |
| 239 | gss_mech_put(struct gss_api_mech * gm) | 239 | gss_mech_put(struct gss_api_mech * gm) |
| @@ -242,7 +242,7 @@ gss_mech_put(struct gss_api_mech * gm) | |||
| 242 | module_put(gm->gm_owner); | 242 | module_put(gm->gm_owner); |
| 243 | } | 243 | } |
| 244 | 244 | ||
| 245 | EXPORT_SYMBOL(gss_mech_put); | 245 | EXPORT_SYMBOL_GPL(gss_mech_put); |
| 246 | 246 | ||
| 247 | /* The mech could probably be determined from the token instead, but it's just | 247 | /* The mech could probably be determined from the token instead, but it's just |
| 248 | * as easy for now to pass it in. */ | 248 | * as easy for now to pass it in. */ |
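All of the mech-switch entry points move to EXPORT_SYMBOL_GPL, so a GSS mechanism module must carry a GPL-compatible license to register. A minimal, hypothetical mechanism module (field names follow this era's linux/sunrpc/gss_api.h; a real mechanism must also fill in .gm_ops and its pseudoflavor table, and "demo" is not a real mechanism):

```c
#include <linux/module.h>
#include <linux/sunrpc/gss_api.h>

static struct gss_api_mech demo_mech = {
	.gm_name	= "demo",
	.gm_owner	= THIS_MODULE,
	/* .gm_ops and the pseudoflavor descriptors are omitted here */
};

static int __init demo_mech_init(void)
{
	return gss_mech_register(&demo_mech);
}

static void __exit demo_mech_exit(void)
{
	gss_mech_unregister(&demo_mech);
}

MODULE_LICENSE("GPL");	/* required now that the exports are _GPL */
module_init(demo_mech_init);
module_exit(demo_mech_exit);
```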
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 81ae3d62a0cc..2278a50c6444 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c | |||
| @@ -332,6 +332,7 @@ struct rsc { | |||
| 332 | struct svc_cred cred; | 332 | struct svc_cred cred; |
| 333 | struct gss_svc_seq_data seqdata; | 333 | struct gss_svc_seq_data seqdata; |
| 334 | struct gss_ctx *mechctx; | 334 | struct gss_ctx *mechctx; |
| 335 | char *client_name; | ||
| 335 | }; | 336 | }; |
| 336 | 337 | ||
| 337 | static struct cache_head *rsc_table[RSC_HASHMAX]; | 338 | static struct cache_head *rsc_table[RSC_HASHMAX]; |
| @@ -346,6 +347,7 @@ static void rsc_free(struct rsc *rsci) | |||
| 346 | gss_delete_sec_context(&rsci->mechctx); | 347 | gss_delete_sec_context(&rsci->mechctx); |
| 347 | if (rsci->cred.cr_group_info) | 348 | if (rsci->cred.cr_group_info) |
| 348 | put_group_info(rsci->cred.cr_group_info); | 349 | put_group_info(rsci->cred.cr_group_info); |
| 350 | kfree(rsci->client_name); | ||
| 349 | } | 351 | } |
| 350 | 352 | ||
| 351 | static void rsc_put(struct kref *ref) | 353 | static void rsc_put(struct kref *ref) |
| @@ -383,6 +385,7 @@ rsc_init(struct cache_head *cnew, struct cache_head *ctmp) | |||
| 383 | tmp->handle.data = NULL; | 385 | tmp->handle.data = NULL; |
| 384 | new->mechctx = NULL; | 386 | new->mechctx = NULL; |
| 385 | new->cred.cr_group_info = NULL; | 387 | new->cred.cr_group_info = NULL; |
| 388 | new->client_name = NULL; | ||
| 386 | } | 389 | } |
| 387 | 390 | ||
| 388 | static void | 391 | static void |
| @@ -397,6 +400,8 @@ update_rsc(struct cache_head *cnew, struct cache_head *ctmp) | |||
| 397 | spin_lock_init(&new->seqdata.sd_lock); | 400 | spin_lock_init(&new->seqdata.sd_lock); |
| 398 | new->cred = tmp->cred; | 401 | new->cred = tmp->cred; |
| 399 | tmp->cred.cr_group_info = NULL; | 402 | tmp->cred.cr_group_info = NULL; |
| 403 | new->client_name = tmp->client_name; | ||
| 404 | tmp->client_name = NULL; | ||
| 400 | } | 405 | } |
| 401 | 406 | ||
| 402 | static struct cache_head * | 407 | static struct cache_head * |
| @@ -486,6 +491,15 @@ static int rsc_parse(struct cache_detail *cd, | |||
| 486 | status = gss_import_sec_context(buf, len, gm, &rsci.mechctx); | 491 | status = gss_import_sec_context(buf, len, gm, &rsci.mechctx); |
| 487 | if (status) | 492 | if (status) |
| 488 | goto out; | 493 | goto out; |
| 494 | |||
| 495 | /* get client name */ | ||
| 496 | len = qword_get(&mesg, buf, mlen); | ||
| 497 | if (len > 0) { | ||
| 498 | rsci.client_name = kstrdup(buf, GFP_KERNEL); | ||
| 499 | if (!rsci.client_name) | ||
| 500 | goto out; | ||
| 501 | } | ||
| 502 | |||
| 489 | } | 503 | } |
| 490 | rsci.h.expiry_time = expiry; | 504 | rsci.h.expiry_time = expiry; |
| 491 | rscp = rsc_update(&rsci, rscp); | 505 | rscp = rsc_update(&rsci, rscp); |
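rsc_parse() now accepts an optional trailing field, the client's GSS principal, read with qword_get() and duplicated with kstrdup(); rsc_free() above releases it again. The parsing pattern in isolation, as a sketch (the helper name is invented):

```c
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sunrpc/cache.h>

/*
 * Read one optional, space-separated field from a cache downcall line
 * and keep a heap copy of it. 'mesg' walks the downcall buffer and
 * 'buf' is scratch space of size 'mlen'; *namep is left untouched when
 * the field is absent.
 */
static int demo_parse_optional_name(char **mesg, char *buf, int mlen,
				    char **namep)
{
	int len = qword_get(mesg, buf, mlen);

	if (len <= 0)
		return 0;
	*namep = kstrdup(buf, GFP_KERNEL);
	return *namep ? 0 : -ENOMEM;
}
```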
| @@ -746,7 +760,7 @@ u32 svcauth_gss_flavor(struct auth_domain *dom) | |||
| 746 | return gd->pseudoflavor; | 760 | return gd->pseudoflavor; |
| 747 | } | 761 | } |
| 748 | 762 | ||
| 749 | EXPORT_SYMBOL(svcauth_gss_flavor); | 763 | EXPORT_SYMBOL_GPL(svcauth_gss_flavor); |
| 750 | 764 | ||
| 751 | int | 765 | int |
| 752 | svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) | 766 | svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) |
| @@ -780,7 +794,7 @@ out: | |||
| 780 | return stat; | 794 | return stat; |
| 781 | } | 795 | } |
| 782 | 796 | ||
| 783 | EXPORT_SYMBOL(svcauth_gss_register_pseudoflavor); | 797 | EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor); |
| 784 | 798 | ||
| 785 | static inline int | 799 | static inline int |
| 786 | read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj) | 800 | read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj) |
| @@ -913,6 +927,16 @@ struct gss_svc_data { | |||
| 913 | struct rsc *rsci; | 927 | struct rsc *rsci; |
| 914 | }; | 928 | }; |
| 915 | 929 | ||
| 930 | char *svc_gss_principal(struct svc_rqst *rqstp) | ||
| 931 | { | ||
| 932 | struct gss_svc_data *gd = (struct gss_svc_data *)rqstp->rq_auth_data; | ||
| 933 | |||
| 934 | if (gd && gd->rsci) | ||
| 935 | return gd->rsci->client_name; | ||
| 936 | return NULL; | ||
| 937 | } | ||
| 938 | EXPORT_SYMBOL_GPL(svc_gss_principal); | ||
| 939 | |||
| 916 | static int | 940 | static int |
| 917 | svcauth_gss_set_client(struct svc_rqst *rqstp) | 941 | svcauth_gss_set_client(struct svc_rqst *rqstp) |
| 918 | { | 942 | { |
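svc_gss_principal() exposes that stored client name to RPC services. A hypothetical consumer (the only assumption beyond the hunk is that the declaration lives in linux/sunrpc/svcauth_gss.h with the other svcauth_gss exports):

```c
#include <linux/kernel.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcauth_gss.h>

/*
 * NULL means the request was not authenticated with RPCSEC_GSS, or
 * userspace supplied no client name in the context downcall.
 */
static void demo_log_caller(struct svc_rqst *rqstp)
{
	char *principal = svc_gss_principal(rqstp);

	if (principal)
		printk(KERN_DEBUG "RPC request from %s\n", principal);
}
```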
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 3ca518386d15..836f15c0c4a3 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
| @@ -197,6 +197,12 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru | |||
| 197 | 197 | ||
| 198 | clnt->cl_rtt = &clnt->cl_rtt_default; | 198 | clnt->cl_rtt = &clnt->cl_rtt_default; |
| 199 | rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval); | 199 | rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval); |
| 200 | clnt->cl_principal = NULL; | ||
| 201 | if (args->client_name) { | ||
| 202 | clnt->cl_principal = kstrdup(args->client_name, GFP_KERNEL); | ||
| 203 | if (!clnt->cl_principal) | ||
| 204 | goto out_no_principal; | ||
| 205 | } | ||
| 200 | 206 | ||
| 201 | kref_init(&clnt->cl_kref); | 207 | kref_init(&clnt->cl_kref); |
| 202 | 208 | ||
| @@ -226,6 +232,8 @@ out_no_auth: | |||
| 226 | rpc_put_mount(); | 232 | rpc_put_mount(); |
| 227 | } | 233 | } |
| 228 | out_no_path: | 234 | out_no_path: |
| 235 | kfree(clnt->cl_principal); | ||
| 236 | out_no_principal: | ||
| 229 | rpc_free_iostats(clnt->cl_metrics); | 237 | rpc_free_iostats(clnt->cl_metrics); |
| 230 | out_no_stats: | 238 | out_no_stats: |
| 231 | if (clnt->cl_server != clnt->cl_inline_name) | 239 | if (clnt->cl_server != clnt->cl_inline_name) |
| @@ -354,6 +362,11 @@ rpc_clone_client(struct rpc_clnt *clnt) | |||
| 354 | new->cl_metrics = rpc_alloc_iostats(clnt); | 362 | new->cl_metrics = rpc_alloc_iostats(clnt); |
| 355 | if (new->cl_metrics == NULL) | 363 | if (new->cl_metrics == NULL) |
| 356 | goto out_no_stats; | 364 | goto out_no_stats; |
| 365 | if (clnt->cl_principal) { | ||
| 366 | new->cl_principal = kstrdup(clnt->cl_principal, GFP_KERNEL); | ||
| 367 | if (new->cl_principal == NULL) | ||
| 368 | goto out_no_principal; | ||
| 369 | } | ||
| 357 | kref_init(&new->cl_kref); | 370 | kref_init(&new->cl_kref); |
| 358 | err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); | 371 | err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); |
| 359 | if (err != 0) | 372 | if (err != 0) |
| @@ -366,6 +379,8 @@ rpc_clone_client(struct rpc_clnt *clnt) | |||
| 366 | rpciod_up(); | 379 | rpciod_up(); |
| 367 | return new; | 380 | return new; |
| 368 | out_no_path: | 381 | out_no_path: |
| 382 | kfree(new->cl_principal); | ||
| 383 | out_no_principal: | ||
| 369 | rpc_free_iostats(new->cl_metrics); | 384 | rpc_free_iostats(new->cl_metrics); |
| 370 | out_no_stats: | 385 | out_no_stats: |
| 371 | kfree(new); | 386 | kfree(new); |
| @@ -417,6 +432,7 @@ rpc_free_client(struct kref *kref) | |||
| 417 | out_free: | 432 | out_free: |
| 418 | rpc_unregister_client(clnt); | 433 | rpc_unregister_client(clnt); |
| 419 | rpc_free_iostats(clnt->cl_metrics); | 434 | rpc_free_iostats(clnt->cl_metrics); |
| 435 | kfree(clnt->cl_principal); | ||
| 420 | clnt->cl_metrics = NULL; | 436 | clnt->cl_metrics = NULL; |
| 421 | xprt_put(clnt->cl_xprt); | 437 | xprt_put(clnt->cl_xprt); |
| 422 | rpciod_down(); | 438 | rpciod_down(); |
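struct rpc_clnt gains cl_principal, copied from the new client_name member of struct rpc_create_args and released both on clone failure and on final put. A sketch of a caller passing a principal in (field names are assumed from this era's include/linux/sunrpc/clnt.h and xprtsock.h; the server, program and principal values are made up):

```c
#include <linux/socket.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprtsock.h>

static struct rpc_clnt *demo_create_gss_client(struct sockaddr *sap,
					       size_t salen,
					       struct rpc_program *prog)
{
	struct rpc_create_args args = {
		.protocol	= XPRT_TRANSPORT_TCP,
		.address	= sap,
		.addrsize	= salen,
		.servername	= "server.example.org",
		.program	= prog,
		.version	= 1,
		.authflavor	= RPC_AUTH_GSS,
		/* duplicated into clnt->cl_principal by rpc_new_client() */
		.client_name	= "host/client.example.org@EXAMPLE.ORG",
	};

	return rpc_create(&args);
}
```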
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 23a2b8f6dc49..192453248870 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
| @@ -113,7 +113,7 @@ out: | |||
| 113 | wake_up(&rpci->waitq); | 113 | wake_up(&rpci->waitq); |
| 114 | return res; | 114 | return res; |
| 115 | } | 115 | } |
| 116 | EXPORT_SYMBOL(rpc_queue_upcall); | 116 | EXPORT_SYMBOL_GPL(rpc_queue_upcall); |
| 117 | 117 | ||
| 118 | static inline void | 118 | static inline void |
| 119 | rpc_inode_setowner(struct inode *inode, void *private) | 119 | rpc_inode_setowner(struct inode *inode, void *private) |
| @@ -126,13 +126,14 @@ rpc_close_pipes(struct inode *inode) | |||
| 126 | { | 126 | { |
| 127 | struct rpc_inode *rpci = RPC_I(inode); | 127 | struct rpc_inode *rpci = RPC_I(inode); |
| 128 | struct rpc_pipe_ops *ops; | 128 | struct rpc_pipe_ops *ops; |
| 129 | int need_release; | ||
| 129 | 130 | ||
| 130 | mutex_lock(&inode->i_mutex); | 131 | mutex_lock(&inode->i_mutex); |
| 131 | ops = rpci->ops; | 132 | ops = rpci->ops; |
| 132 | if (ops != NULL) { | 133 | if (ops != NULL) { |
| 133 | LIST_HEAD(free_list); | 134 | LIST_HEAD(free_list); |
| 134 | |||
| 135 | spin_lock(&inode->i_lock); | 135 | spin_lock(&inode->i_lock); |
| 136 | need_release = rpci->nreaders != 0 || rpci->nwriters != 0; | ||
| 136 | rpci->nreaders = 0; | 137 | rpci->nreaders = 0; |
| 137 | list_splice_init(&rpci->in_upcall, &free_list); | 138 | list_splice_init(&rpci->in_upcall, &free_list); |
| 138 | list_splice_init(&rpci->pipe, &free_list); | 139 | list_splice_init(&rpci->pipe, &free_list); |
| @@ -141,7 +142,7 @@ rpc_close_pipes(struct inode *inode) | |||
| 141 | spin_unlock(&inode->i_lock); | 142 | spin_unlock(&inode->i_lock); |
| 142 | rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE); | 143 | rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE); |
| 143 | rpci->nwriters = 0; | 144 | rpci->nwriters = 0; |
| 144 | if (ops->release_pipe) | 145 | if (need_release && ops->release_pipe) |
| 145 | ops->release_pipe(inode); | 146 | ops->release_pipe(inode); |
| 146 | cancel_delayed_work_sync(&rpci->queue_timeout); | 147 | cancel_delayed_work_sync(&rpci->queue_timeout); |
| 147 | } | 148 | } |
| @@ -169,16 +170,24 @@ static int | |||
| 169 | rpc_pipe_open(struct inode *inode, struct file *filp) | 170 | rpc_pipe_open(struct inode *inode, struct file *filp) |
| 170 | { | 171 | { |
| 171 | struct rpc_inode *rpci = RPC_I(inode); | 172 | struct rpc_inode *rpci = RPC_I(inode); |
| 173 | int first_open; | ||
| 172 | int res = -ENXIO; | 174 | int res = -ENXIO; |
| 173 | 175 | ||
| 174 | mutex_lock(&inode->i_mutex); | 176 | mutex_lock(&inode->i_mutex); |
| 175 | if (rpci->ops != NULL) { | 177 | if (rpci->ops == NULL) |
| 176 | if (filp->f_mode & FMODE_READ) | 178 | goto out; |
| 177 | rpci->nreaders ++; | 179 | first_open = rpci->nreaders == 0 && rpci->nwriters == 0; |
| 178 | if (filp->f_mode & FMODE_WRITE) | 180 | if (first_open && rpci->ops->open_pipe) { |
| 179 | rpci->nwriters ++; | 181 | res = rpci->ops->open_pipe(inode); |
| 180 | res = 0; | 182 | if (res) |
| 183 | goto out; | ||
| 181 | } | 184 | } |
| 185 | if (filp->f_mode & FMODE_READ) | ||
| 186 | rpci->nreaders++; | ||
| 187 | if (filp->f_mode & FMODE_WRITE) | ||
| 188 | rpci->nwriters++; | ||
| 189 | res = 0; | ||
| 190 | out: | ||
| 182 | mutex_unlock(&inode->i_mutex); | 191 | mutex_unlock(&inode->i_mutex); |
| 183 | return res; | 192 | return res; |
| 184 | } | 193 | } |
| @@ -188,6 +197,7 @@ rpc_pipe_release(struct inode *inode, struct file *filp) | |||
| 188 | { | 197 | { |
| 189 | struct rpc_inode *rpci = RPC_I(inode); | 198 | struct rpc_inode *rpci = RPC_I(inode); |
| 190 | struct rpc_pipe_msg *msg; | 199 | struct rpc_pipe_msg *msg; |
| 200 | int last_close; | ||
| 191 | 201 | ||
| 192 | mutex_lock(&inode->i_mutex); | 202 | mutex_lock(&inode->i_mutex); |
| 193 | if (rpci->ops == NULL) | 203 | if (rpci->ops == NULL) |
| @@ -214,7 +224,8 @@ rpc_pipe_release(struct inode *inode, struct file *filp) | |||
| 214 | rpci->ops->destroy_msg, -EAGAIN); | 224 | rpci->ops->destroy_msg, -EAGAIN); |
| 215 | } | 225 | } |
| 216 | } | 226 | } |
| 217 | if (rpci->ops->release_pipe) | 227 | last_close = rpci->nwriters == 0 && rpci->nreaders == 0; |
| 228 | if (last_close && rpci->ops->release_pipe) | ||
| 218 | rpci->ops->release_pipe(inode); | 229 | rpci->ops->release_pipe(inode); |
| 219 | out: | 230 | out: |
| 220 | mutex_unlock(&inode->i_mutex); | 231 | mutex_unlock(&inode->i_mutex); |
| @@ -396,6 +407,7 @@ enum { | |||
| 396 | RPCAUTH_nfs, | 407 | RPCAUTH_nfs, |
| 397 | RPCAUTH_portmap, | 408 | RPCAUTH_portmap, |
| 398 | RPCAUTH_statd, | 409 | RPCAUTH_statd, |
| 410 | RPCAUTH_nfsd4_cb, | ||
| 399 | RPCAUTH_RootEOF | 411 | RPCAUTH_RootEOF |
| 400 | }; | 412 | }; |
| 401 | 413 | ||
| @@ -429,6 +441,10 @@ static struct rpc_filelist files[] = { | |||
| 429 | .name = "statd", | 441 | .name = "statd", |
| 430 | .mode = S_IFDIR | S_IRUGO | S_IXUGO, | 442 | .mode = S_IFDIR | S_IRUGO | S_IXUGO, |
| 431 | }, | 443 | }, |
| 444 | [RPCAUTH_nfsd4_cb] = { | ||
| 445 | .name = "nfsd4_cb", | ||
| 446 | .mode = S_IFDIR | S_IRUGO | S_IXUGO, | ||
| 447 | }, | ||
| 432 | }; | 448 | }; |
| 433 | 449 | ||
| 434 | enum { | 450 | enum { |
| @@ -748,7 +764,7 @@ rpc_rmdir(struct dentry *dentry) | |||
| 748 | * @name: name of pipe | 764 | * @name: name of pipe |
| 749 | * @private: private data to associate with the pipe, for the caller's use | 765 | * @private: private data to associate with the pipe, for the caller's use |
| 750 | * @ops: operations defining the behavior of the pipe: upcall, downcall, | 766 | * @ops: operations defining the behavior of the pipe: upcall, downcall, |
| 751 | * release_pipe, and destroy_msg. | 767 | * release_pipe, open_pipe, and destroy_msg. |
| 752 | * @flags: rpc_inode flags | 768 | * @flags: rpc_inode flags |
| 753 | * | 769 | * |
| 754 | * Data is made available for userspace to read by calls to | 770 | * Data is made available for userspace to read by calls to |
| @@ -808,7 +824,7 @@ err_dput: | |||
| 808 | -ENOMEM); | 824 | -ENOMEM); |
| 809 | goto out; | 825 | goto out; |
| 810 | } | 826 | } |
| 811 | EXPORT_SYMBOL(rpc_mkpipe); | 827 | EXPORT_SYMBOL_GPL(rpc_mkpipe); |
| 812 | 828 | ||
| 813 | /** | 829 | /** |
| 814 | * rpc_unlink - remove a pipe | 830 | * rpc_unlink - remove a pipe |
| @@ -839,7 +855,7 @@ rpc_unlink(struct dentry *dentry) | |||
| 839 | dput(parent); | 855 | dput(parent); |
| 840 | return error; | 856 | return error; |
| 841 | } | 857 | } |
| 842 | EXPORT_SYMBOL(rpc_unlink); | 858 | EXPORT_SYMBOL_GPL(rpc_unlink); |
| 843 | 859 | ||
| 844 | /* | 860 | /* |
| 845 | * populate the filesystem | 861 | * populate the filesystem |
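Together, the rpc_pipe_open()/rpc_pipe_release() hunks make the hook pair symmetric: open_pipe fires only on the transition from zero openers (and may veto the open), while release_pipe now fires only on the last close, or from rpc_close_pipes() only if someone actually had the pipe open. A sketch of a pipe user wiring the new hook up (the rpc_mkpipe() signature and the RPC_PIPE_WAIT_FOR_OPEN flag are assumed from this era's rpc_pipe_fs.h; a real ops table must also provide .upcall, .downcall and .destroy_msg):

```c
#include <linux/fs.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

/* Runs once, on the first open; a negative return fails that open. */
static int demo_open_pipe(struct inode *inode)
{
	return 0;	/* one-time per-pipe setup would go here */
}

/* Runs once, on the last close (or at teardown while still open). */
static void demo_release_pipe(struct inode *inode)
{
	/* one-time per-pipe teardown, paired with demo_open_pipe() */
}

static struct rpc_pipe_ops demo_pipe_ops = {
	/* .upcall, .downcall and .destroy_msg omitted to keep this short */
	.open_pipe	= demo_open_pipe,
	.release_pipe	= demo_release_pipe,
};

/* Create "<parent>/demo" in rpc_pipefs. */
static struct dentry *demo_make_pipe(struct dentry *parent, void *priv)
{
	return rpc_mkpipe(parent, "demo", priv, &demo_pipe_ops,
			  RPC_PIPE_WAIT_FOR_OPEN);
}
```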
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 79a55d56cc98..406e26de584e 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c | |||
| @@ -28,7 +28,7 @@ xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj) | |||
| 28 | memcpy(p, obj->data, obj->len); | 28 | memcpy(p, obj->data, obj->len); |
| 29 | return p + XDR_QUADLEN(obj->len); | 29 | return p + XDR_QUADLEN(obj->len); |
| 30 | } | 30 | } |
| 31 | EXPORT_SYMBOL(xdr_encode_netobj); | 31 | EXPORT_SYMBOL_GPL(xdr_encode_netobj); |
| 32 | 32 | ||
| 33 | __be32 * | 33 | __be32 * |
| 34 | xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj) | 34 | xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj) |
| @@ -41,7 +41,7 @@ xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj) | |||
| 41 | obj->data = (u8 *) p; | 41 | obj->data = (u8 *) p; |
| 42 | return p + XDR_QUADLEN(len); | 42 | return p + XDR_QUADLEN(len); |
| 43 | } | 43 | } |
| 44 | EXPORT_SYMBOL(xdr_decode_netobj); | 44 | EXPORT_SYMBOL_GPL(xdr_decode_netobj); |
| 45 | 45 | ||
| 46 | /** | 46 | /** |
| 47 | * xdr_encode_opaque_fixed - Encode fixed length opaque data | 47 | * xdr_encode_opaque_fixed - Encode fixed length opaque data |
| @@ -71,7 +71,7 @@ __be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes) | |||
| 71 | } | 71 | } |
| 72 | return p; | 72 | return p; |
| 73 | } | 73 | } |
| 74 | EXPORT_SYMBOL(xdr_encode_opaque_fixed); | 74 | EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed); |
| 75 | 75 | ||
| 76 | /** | 76 | /** |
| 77 | * xdr_encode_opaque - Encode variable length opaque data | 77 | * xdr_encode_opaque - Encode variable length opaque data |
| @@ -86,14 +86,14 @@ __be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes) | |||
| 86 | *p++ = htonl(nbytes); | 86 | *p++ = htonl(nbytes); |
| 87 | return xdr_encode_opaque_fixed(p, ptr, nbytes); | 87 | return xdr_encode_opaque_fixed(p, ptr, nbytes); |
| 88 | } | 88 | } |
| 89 | EXPORT_SYMBOL(xdr_encode_opaque); | 89 | EXPORT_SYMBOL_GPL(xdr_encode_opaque); |
| 90 | 90 | ||
| 91 | __be32 * | 91 | __be32 * |
| 92 | xdr_encode_string(__be32 *p, const char *string) | 92 | xdr_encode_string(__be32 *p, const char *string) |
| 93 | { | 93 | { |
| 94 | return xdr_encode_array(p, string, strlen(string)); | 94 | return xdr_encode_array(p, string, strlen(string)); |
| 95 | } | 95 | } |
| 96 | EXPORT_SYMBOL(xdr_encode_string); | 96 | EXPORT_SYMBOL_GPL(xdr_encode_string); |
| 97 | 97 | ||
| 98 | __be32 * | 98 | __be32 * |
| 99 | xdr_decode_string_inplace(__be32 *p, char **sp, | 99 | xdr_decode_string_inplace(__be32 *p, char **sp, |
| @@ -108,7 +108,7 @@ xdr_decode_string_inplace(__be32 *p, char **sp, | |||
| 108 | *sp = (char *) p; | 108 | *sp = (char *) p; |
| 109 | return p + XDR_QUADLEN(len); | 109 | return p + XDR_QUADLEN(len); |
| 110 | } | 110 | } |
| 111 | EXPORT_SYMBOL(xdr_decode_string_inplace); | 111 | EXPORT_SYMBOL_GPL(xdr_decode_string_inplace); |
| 112 | 112 | ||
| 113 | void | 113 | void |
| 114 | xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base, | 114 | xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base, |
| @@ -136,7 +136,7 @@ xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base, | |||
| 136 | xdr->buflen += len; | 136 | xdr->buflen += len; |
| 137 | xdr->len += len; | 137 | xdr->len += len; |
| 138 | } | 138 | } |
| 139 | EXPORT_SYMBOL(xdr_encode_pages); | 139 | EXPORT_SYMBOL_GPL(xdr_encode_pages); |
| 140 | 140 | ||
| 141 | void | 141 | void |
| 142 | xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset, | 142 | xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset, |
| @@ -158,7 +158,7 @@ xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset, | |||
| 158 | 158 | ||
| 159 | xdr->buflen += len; | 159 | xdr->buflen += len; |
| 160 | } | 160 | } |
| 161 | EXPORT_SYMBOL(xdr_inline_pages); | 161 | EXPORT_SYMBOL_GPL(xdr_inline_pages); |
| 162 | 162 | ||
| 163 | /* | 163 | /* |
| 164 | * Helper routines for doing 'memmove' like operations on a struct xdr_buf | 164 | * Helper routines for doing 'memmove' like operations on a struct xdr_buf |
| @@ -428,7 +428,7 @@ xdr_shift_buf(struct xdr_buf *buf, size_t len) | |||
| 428 | { | 428 | { |
| 429 | xdr_shrink_bufhead(buf, len); | 429 | xdr_shrink_bufhead(buf, len); |
| 430 | } | 430 | } |
| 431 | EXPORT_SYMBOL(xdr_shift_buf); | 431 | EXPORT_SYMBOL_GPL(xdr_shift_buf); |
| 432 | 432 | ||
| 433 | /** | 433 | /** |
| 434 | * xdr_init_encode - Initialize a struct xdr_stream for sending data. | 434 | * xdr_init_encode - Initialize a struct xdr_stream for sending data. |
| @@ -465,7 +465,7 @@ void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p) | |||
| 465 | iov->iov_len += len; | 465 | iov->iov_len += len; |
| 466 | } | 466 | } |
| 467 | } | 467 | } |
| 468 | EXPORT_SYMBOL(xdr_init_encode); | 468 | EXPORT_SYMBOL_GPL(xdr_init_encode); |
| 469 | 469 | ||
| 470 | /** | 470 | /** |
| 471 | * xdr_reserve_space - Reserve buffer space for sending | 471 | * xdr_reserve_space - Reserve buffer space for sending |
| @@ -492,7 +492,7 @@ __be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes) | |||
| 492 | xdr->buf->len += nbytes; | 492 | xdr->buf->len += nbytes; |
| 493 | return p; | 493 | return p; |
| 494 | } | 494 | } |
| 495 | EXPORT_SYMBOL(xdr_reserve_space); | 495 | EXPORT_SYMBOL_GPL(xdr_reserve_space); |
| 496 | 496 | ||
| 497 | /** | 497 | /** |
| 498 | * xdr_write_pages - Insert a list of pages into an XDR buffer for sending | 498 | * xdr_write_pages - Insert a list of pages into an XDR buffer for sending |
| @@ -527,7 +527,7 @@ void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int b | |||
| 527 | buf->buflen += len; | 527 | buf->buflen += len; |
| 528 | buf->len += len; | 528 | buf->len += len; |
| 529 | } | 529 | } |
| 530 | EXPORT_SYMBOL(xdr_write_pages); | 530 | EXPORT_SYMBOL_GPL(xdr_write_pages); |
| 531 | 531 | ||
| 532 | /** | 532 | /** |
| 533 | * xdr_init_decode - Initialize an xdr_stream for decoding data. | 533 | * xdr_init_decode - Initialize an xdr_stream for decoding data. |
| @@ -547,7 +547,7 @@ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p) | |||
| 547 | xdr->p = p; | 547 | xdr->p = p; |
| 548 | xdr->end = (__be32 *)((char *)iov->iov_base + len); | 548 | xdr->end = (__be32 *)((char *)iov->iov_base + len); |
| 549 | } | 549 | } |
| 550 | EXPORT_SYMBOL(xdr_init_decode); | 550 | EXPORT_SYMBOL_GPL(xdr_init_decode); |
| 551 | 551 | ||
| 552 | /** | 552 | /** |
| 553 | * xdr_inline_decode - Retrieve non-page XDR data to decode | 553 | * xdr_inline_decode - Retrieve non-page XDR data to decode |
| @@ -569,7 +569,7 @@ __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes) | |||
| 569 | xdr->p = q; | 569 | xdr->p = q; |
| 570 | return p; | 570 | return p; |
| 571 | } | 571 | } |
| 572 | EXPORT_SYMBOL(xdr_inline_decode); | 572 | EXPORT_SYMBOL_GPL(xdr_inline_decode); |
| 573 | 573 | ||
| 574 | /** | 574 | /** |
| 575 | * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position | 575 | * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position |
| @@ -613,7 +613,7 @@ void xdr_read_pages(struct xdr_stream *xdr, unsigned int len) | |||
| 613 | xdr->p = (__be32 *)((char *)iov->iov_base + padding); | 613 | xdr->p = (__be32 *)((char *)iov->iov_base + padding); |
| 614 | xdr->end = (__be32 *)((char *)iov->iov_base + end); | 614 | xdr->end = (__be32 *)((char *)iov->iov_base + end); |
| 615 | } | 615 | } |
| 616 | EXPORT_SYMBOL(xdr_read_pages); | 616 | EXPORT_SYMBOL_GPL(xdr_read_pages); |
| 617 | 617 | ||
| 618 | /** | 618 | /** |
| 619 | * xdr_enter_page - decode data from the XDR page | 619 | * xdr_enter_page - decode data from the XDR page |
| @@ -638,7 +638,7 @@ void xdr_enter_page(struct xdr_stream *xdr, unsigned int len) | |||
| 638 | xdr->p = (__be32 *)(kaddr + xdr->buf->page_base); | 638 | xdr->p = (__be32 *)(kaddr + xdr->buf->page_base); |
| 639 | xdr->end = (__be32 *)((char *)xdr->p + len); | 639 | xdr->end = (__be32 *)((char *)xdr->p + len); |
| 640 | } | 640 | } |
| 641 | EXPORT_SYMBOL(xdr_enter_page); | 641 | EXPORT_SYMBOL_GPL(xdr_enter_page); |
| 642 | 642 | ||
| 643 | static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0}; | 643 | static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0}; |
| 644 | 644 | ||
| @@ -650,7 +650,7 @@ xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf) | |||
| 650 | buf->page_len = 0; | 650 | buf->page_len = 0; |
| 651 | buf->buflen = buf->len = iov->iov_len; | 651 | buf->buflen = buf->len = iov->iov_len; |
| 652 | } | 652 | } |
| 653 | EXPORT_SYMBOL(xdr_buf_from_iov); | 653 | EXPORT_SYMBOL_GPL(xdr_buf_from_iov); |
| 654 | 654 | ||
| 655 | /* Sets subbuf to the portion of buf of length len beginning base bytes | 655 | /* Sets subbuf to the portion of buf of length len beginning base bytes |
| 656 | * from the start of buf. Returns -1 if base or length are out of bounds. */ | 656 | * from the start of buf. Returns -1 if base or length are out of bounds. */ |
| @@ -699,7 +699,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, | |||
| 699 | return -1; | 699 | return -1; |
| 700 | return 0; | 700 | return 0; |
| 701 | } | 701 | } |
| 702 | EXPORT_SYMBOL(xdr_buf_subsegment); | 702 | EXPORT_SYMBOL_GPL(xdr_buf_subsegment); |
| 703 | 703 | ||
| 704 | static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len) | 704 | static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len) |
| 705 | { | 705 | { |
| @@ -730,7 +730,7 @@ int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, u | |||
| 730 | __read_bytes_from_xdr_buf(&subbuf, obj, len); | 730 | __read_bytes_from_xdr_buf(&subbuf, obj, len); |
| 731 | return 0; | 731 | return 0; |
| 732 | } | 732 | } |
| 733 | EXPORT_SYMBOL(read_bytes_from_xdr_buf); | 733 | EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf); |
| 734 | 734 | ||
| 735 | static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len) | 735 | static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len) |
| 736 | { | 736 | { |
| @@ -774,7 +774,7 @@ xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj) | |||
| 774 | *obj = ntohl(raw); | 774 | *obj = ntohl(raw); |
| 775 | return 0; | 775 | return 0; |
| 776 | } | 776 | } |
| 777 | EXPORT_SYMBOL(xdr_decode_word); | 777 | EXPORT_SYMBOL_GPL(xdr_decode_word); |
| 778 | 778 | ||
| 779 | int | 779 | int |
| 780 | xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj) | 780 | xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj) |
| @@ -783,7 +783,7 @@ xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj) | |||
| 783 | 783 | ||
| 784 | return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj)); | 784 | return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj)); |
| 785 | } | 785 | } |
| 786 | EXPORT_SYMBOL(xdr_encode_word); | 786 | EXPORT_SYMBOL_GPL(xdr_encode_word); |
| 787 | 787 | ||
| 788 | /* If the netobj starting offset bytes from the start of xdr_buf is contained | 788 | /* If the netobj starting offset bytes from the start of xdr_buf is contained |
| 789 | * entirely in the head or the tail, set object to point to it; otherwise | 789 | * entirely in the head or the tail, set object to point to it; otherwise |
| @@ -821,7 +821,7 @@ int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned in | |||
| 821 | __read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len); | 821 | __read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len); |
| 822 | return 0; | 822 | return 0; |
| 823 | } | 823 | } |
| 824 | EXPORT_SYMBOL(xdr_buf_read_netobj); | 824 | EXPORT_SYMBOL_GPL(xdr_buf_read_netobj); |
| 825 | 825 | ||
| 826 | /* Returns 0 on success, or else a negative error code. */ | 826 | /* Returns 0 on success, or else a negative error code. */ |
| 827 | static int | 827 | static int |
| @@ -1027,7 +1027,7 @@ xdr_decode_array2(struct xdr_buf *buf, unsigned int base, | |||
| 1027 | 1027 | ||
| 1028 | return xdr_xcode_array2(buf, base, desc, 0); | 1028 | return xdr_xcode_array2(buf, base, desc, 0); |
| 1029 | } | 1029 | } |
| 1030 | EXPORT_SYMBOL(xdr_decode_array2); | 1030 | EXPORT_SYMBOL_GPL(xdr_decode_array2); |
| 1031 | 1031 | ||
| 1032 | int | 1032 | int |
| 1033 | xdr_encode_array2(struct xdr_buf *buf, unsigned int base, | 1033 | xdr_encode_array2(struct xdr_buf *buf, unsigned int base, |
| @@ -1039,7 +1039,7 @@ xdr_encode_array2(struct xdr_buf *buf, unsigned int base, | |||
| 1039 | 1039 | ||
| 1040 | return xdr_xcode_array2(buf, base, desc, 1); | 1040 | return xdr_xcode_array2(buf, base, desc, 1); |
| 1041 | } | 1041 | } |
| 1042 | EXPORT_SYMBOL(xdr_encode_array2); | 1042 | EXPORT_SYMBOL_GPL(xdr_encode_array2); |
| 1043 | 1043 | ||
| 1044 | int | 1044 | int |
| 1045 | xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, | 1045 | xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, |
| @@ -1106,5 +1106,5 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, | |||
| 1106 | out: | 1106 | out: |
| 1107 | return ret; | 1107 | return ret; |
| 1108 | } | 1108 | } |
| 1109 | EXPORT_SYMBOL(xdr_process_buf); | 1109 | EXPORT_SYMBOL_GPL(xdr_process_buf); |
| 1110 | 1110 | ||
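The xdr.c hunks are a mechanical EXPORT_SYMBOL to EXPORT_SYMBOL_GPL sweep; the helpers themselves are untouched. For context, a small self-contained example of the encode-side API those exports cover (buffer handling and sizes here are arbitrary, not taken from any in-tree caller):

```c
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/sunrpc/xdr.h>

/*
 * Encode one length-prefixed opaque blob into an xdr_buf whose head
 * kvec points at 'scratch' of 'scratch_len' bytes.
 */
static int demo_encode_blob(struct xdr_buf *buf,
			    void *scratch, size_t scratch_len,
			    const void *blob, unsigned int blob_len)
{
	struct xdr_stream xdr;
	__be32 *p;

	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = scratch;
	buf->head[0].iov_len = 0;
	buf->buflen = scratch_len;

	xdr_init_encode(&xdr, buf, scratch);
	/* 4-byte length word plus the blob rounded up to a quad */
	p = xdr_reserve_space(&xdr, 4 + XDR_QUADLEN(blob_len) * 4);
	if (p == NULL)
		return -EMSGSIZE;
	xdr_encode_opaque(p, blob, blob_len);
	return 0;
}
```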
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index c6250d0055d2..d1b89820ab4f 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
| @@ -836,7 +836,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
| 836 | err = mnt_want_write(nd.path.mnt); | 836 | err = mnt_want_write(nd.path.mnt); |
| 837 | if (err) | 837 | if (err) |
| 838 | goto out_mknod_dput; | 838 | goto out_mknod_dput; |
| 839 | err = security_path_mknod(&nd.path, dentry, mode, 0); | ||
| 840 | if (err) | ||
| 841 | goto out_mknod_drop_write; | ||
| 839 | err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0); | 842 | err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0); |
| 843 | out_mknod_drop_write: | ||
| 840 | mnt_drop_write(nd.path.mnt); | 844 | mnt_drop_write(nd.path.mnt); |
| 841 | if (err) | 845 | if (err) |
| 842 | goto out_mknod_dput; | 846 | goto out_mknod_dput; |
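The af_unix change slots an LSM path check between taking write access on the mount and creating the socket node, so a path-based security module can refuse the mknod before anything touches the filesystem. The ordering in isolation, as a sketch (the security_path_mknod() signature is inferred from the call added above: directory path, dentry, mode, device number):

```c
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/security.h>

/* want-write -> LSM path check -> vfs_mknod -> drop-write, as above. */
static int demo_mknod_socket_node(struct path *dir, struct dentry *dentry,
				  int mode)
{
	int err;

	err = mnt_want_write(dir->mnt);
	if (err)
		return err;
	err = security_path_mknod(dir, dentry, mode, 0);
	if (!err)
		err = vfs_mknod(dir->dentry->d_inode, dentry, mode, 0);
	mnt_drop_write(dir->mnt);
	return err;
}
```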
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c index 284eaef1dbf2..a2adb51849a9 100644 --- a/net/xfrm/xfrm_proc.c +++ b/net/xfrm/xfrm_proc.c | |||
| @@ -44,27 +44,14 @@ static struct snmp_mib xfrm_mib_list[] = { | |||
| 44 | SNMP_MIB_SENTINEL | 44 | SNMP_MIB_SENTINEL |
| 45 | }; | 45 | }; |
| 46 | 46 | ||
| 47 | static unsigned long | ||
| 48 | fold_field(void *mib[], int offt) | ||
| 49 | { | ||
| 50 | unsigned long res = 0; | ||
| 51 | int i; | ||
| 52 | |||
| 53 | for_each_possible_cpu(i) { | ||
| 54 | res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt); | ||
| 55 | res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt); | ||
| 56 | } | ||
| 57 | return res; | ||
| 58 | } | ||
| 59 | |||
| 60 | static int xfrm_statistics_seq_show(struct seq_file *seq, void *v) | 47 | static int xfrm_statistics_seq_show(struct seq_file *seq, void *v) |
| 61 | { | 48 | { |
| 62 | struct net *net = seq->private; | 49 | struct net *net = seq->private; |
| 63 | int i; | 50 | int i; |
| 64 | for (i=0; xfrm_mib_list[i].name; i++) | 51 | for (i=0; xfrm_mib_list[i].name; i++) |
| 65 | seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name, | 52 | seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name, |
| 66 | fold_field((void **)net->mib.xfrm_statistics, | 53 | snmp_fold_field((void **)net->mib.xfrm_statistics, |
| 67 | xfrm_mib_list[i].entry)); | 54 | xfrm_mib_list[i].entry)); |
| 68 | return 0; | 55 | return 0; |
| 69 | } | 56 | } |
| 70 | 57 | ||
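xfrm_proc.c drops its private fold_field() in favour of the shared snmp_fold_field() helper, which is believed to perform the same summation over both per-CPU MIB arrays. The idea, restated as a sketch (mirroring the removed code above rather than quoting net/ipv4):

```c
#include <linux/cpumask.h>
#include <linux/percpu.h>

/* Sum the counter at slot 'offt' across both MIB copies and all CPUs. */
static unsigned long demo_fold_field(void *mib[], int offt)
{
	unsigned long res = 0;
	int i;

	for_each_possible_cpu(i) {
		res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
		res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
	}
	return res;
}
```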
