path: root/net
author		Trond Myklebust <Trond.Myklebust@netapp.com>	2011-01-10 14:48:02 -0500
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2011-01-10 14:48:02 -0500
commit		68c404b18f6fba404b2753622d0459c68ee128ae (patch)
tree		c1ec0bb12f19d91071b461cc2831d9d3dd4c74f3 /net
parent		d035c36c58dd9183ad6aa7875dea89893faedb55 (diff)
parent		6650239a4b01077e80d5a4468562756d77afaa59 (diff)
Merge branch 'bugfixes' into nfs-for-2.6.38

Conflicts:
	fs/nfs/nfs2xdr.c
	fs/nfs/nfs3xdr.c
	fs/nfs/nfs4xdr.c
Diffstat (limited to 'net')
-rw-r--r--	net/bluetooth/rfcomm/core.c	|   1
-rw-r--r--	net/bridge/br_multicast.c	|  30
-rw-r--r--	net/bridge/br_stp_bpdu.c	|   2
-rw-r--r--	net/can/bcm.c			|   4
-rw-r--r--	net/ceph/messenger.c		|   8
-rw-r--r--	net/ceph/pagevec.c		|  15
-rw-r--r--	net/core/fib_rules.c		|   3
-rw-r--r--	net/core/sock.c			|  47
-rw-r--r--	net/ipv4/fib_frontend.c		|  10
-rw-r--r--	net/ipv4/route.c		|  15
-rw-r--r--	net/ipv4/tcp_ipv4.c		|   4
-rw-r--r--	net/ipv4/udp.c			|   1
-rw-r--r--	net/ipv4/udplite.c		|   1
-rw-r--r--	net/ipv6/addrconf.c		|   4
-rw-r--r--	net/ipv6/ip6_output.c		|  12
-rw-r--r--	net/ipv6/route.c		|   7
-rw-r--r--	net/ipv6/udp.c			|   1
-rw-r--r--	net/ipv6/udplite.c		|   1
-rw-r--r--	net/ipv6/xfrm6_output.c		|  16
-rw-r--r--	net/irda/af_irda.c		|  18
-rw-r--r--	net/mac80211/ibss.c		|   4
-rw-r--r--	net/mac80211/rx.c		|   5
-rw-r--r--	net/mac80211/work.c		|   5
-rw-r--r--	net/sched/sch_sfq.c		|  20
-rw-r--r--	net/sctp/socket.c		|   2
-rw-r--r--	net/sunrpc/xdr.c		| 155
26 files changed, 278 insertions(+), 113 deletions(-)
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index fa642aa652b..432a9a633e8 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -311,6 +311,7 @@ static void rfcomm_dlc_clear_state(struct rfcomm_dlc *d)
 	d->state      = BT_OPEN;
 	d->flags      = 0;
 	d->mscex      = 0;
+	d->sec_level  = BT_SECURITY_LOW;
 	d->mtu        = RFCOMM_DEFAULT_MTU;
 	d->v24_sig    = RFCOMM_V24_RTC | RFCOMM_V24_RTR | RFCOMM_V24_DV;
 
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index eb5b256ffc8..543b3262d00 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -437,7 +437,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
 	ip6h = ipv6_hdr(skb);
 
 	*(__force __be32 *)ip6h = htonl(0x60000000);
-	ip6h->payload_len = 8 + sizeof(*mldq);
+	ip6h->payload_len = htons(8 + sizeof(*mldq));
 	ip6h->nexthdr = IPPROTO_HOPOPTS;
 	ip6h->hop_limit = 1;
 	ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0);
@@ -1430,7 +1430,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 				 struct net_bridge_port *port,
 				 struct sk_buff *skb)
 {
-	struct sk_buff *skb2 = skb;
+	struct sk_buff *skb2;
 	struct ipv6hdr *ip6h;
 	struct icmp6hdr *icmp6h;
 	u8 nexthdr;
@@ -1469,15 +1469,15 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 	if (!skb2)
 		return -ENOMEM;
 
+	err = -EINVAL;
+	if (!pskb_may_pull(skb2, offset + sizeof(struct icmp6hdr)))
+		goto out;
+
 	len -= offset - skb_network_offset(skb2);
 
 	__skb_pull(skb2, offset);
 	skb_reset_transport_header(skb2);
 
-	err = -EINVAL;
-	if (!pskb_may_pull(skb2, sizeof(*icmp6h)))
-		goto out;
-
 	icmp6h = icmp6_hdr(skb2);
 
 	switch (icmp6h->icmp6_type) {
@@ -1516,7 +1516,12 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 	switch (icmp6h->icmp6_type) {
 	case ICMPV6_MGM_REPORT:
 	    {
-		struct mld_msg *mld = (struct mld_msg *)icmp6h;
+		struct mld_msg *mld;
+		if (!pskb_may_pull(skb2, sizeof(*mld))) {
+			err = -EINVAL;
+			goto out;
+		}
+		mld = (struct mld_msg *)skb_transport_header(skb2);
 		BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
 		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca);
 		break;
@@ -1529,15 +1534,18 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 		break;
 	case ICMPV6_MGM_REDUCTION:
 	    {
-		struct mld_msg *mld = (struct mld_msg *)icmp6h;
+		struct mld_msg *mld;
+		if (!pskb_may_pull(skb2, sizeof(*mld))) {
+			err = -EINVAL;
+			goto out;
+		}
+		mld = (struct mld_msg *)skb_transport_header(skb2);
 		br_ip6_multicast_leave_group(br, port, &mld->mld_mca);
 	    }
 	}
 
 out:
-	__skb_push(skb2, offset);
-	if (skb2 != skb)
-		kfree_skb(skb2);
+	kfree_skb(skb2);
 	return err;
 }
 #endif
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 35cf27087b5..e3d7aefa918 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -50,6 +50,8 @@ static void br_send_bpdu(struct net_bridge_port *p,
 
 	llc_mac_hdr_init(skb, p->dev->dev_addr, p->br->group_addr);
 
+	skb_reset_mac_header(skb);
+
 	NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
 		dev_queue_xmit);
 }
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 6faa8256e10..9d5e8accfab 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -125,7 +125,7 @@ struct bcm_sock {
 	struct list_head tx_ops;
 	unsigned long dropped_usr_msgs;
 	struct proc_dir_entry *bcm_proc_read;
-	char procname [20]; /* pointer printed in ASCII with \0 */
+	char procname [32]; /* inode number in decimal with \0 */
 };
 
 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
@@ -1521,7 +1521,7 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
 
 	if (proc_dir) {
 		/* unique socket address as filename */
-		sprintf(bo->procname, "%p", sock);
+		sprintf(bo->procname, "%lu", sock_i_ino(sk));
 		bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
 						     proc_dir,
 						     &bcm_proc_fops, sk);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 1c7a2ec4f3c..b6ff4a1519a 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -97,11 +97,9 @@ struct workqueue_struct *ceph_msgr_wq;
 int ceph_msgr_init(void)
 {
 	ceph_msgr_wq = create_workqueue("ceph-msgr");
-	if (IS_ERR(ceph_msgr_wq)) {
-		int ret = PTR_ERR(ceph_msgr_wq);
-		pr_err("msgr_init failed to create workqueue: %d\n", ret);
-		ceph_msgr_wq = NULL;
-		return ret;
+	if (!ceph_msgr_wq) {
+		pr_err("msgr_init failed to create workqueue\n");
+		return -ENOMEM;
 	}
 	return 0;
 }
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index ac34feeb2b3..1a040e64c69 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -13,7 +13,7 @@
  * build a vector of user pages
  */
 struct page **ceph_get_direct_page_vector(const char __user *data,
-					  int num_pages)
+					  int num_pages, bool write_page)
 {
 	struct page **pages;
 	int rc;
@@ -24,24 +24,27 @@ struct page **ceph_get_direct_page_vector(const char __user *data,
 
 	down_read(&current->mm->mmap_sem);
 	rc = get_user_pages(current, current->mm, (unsigned long)data,
-			    num_pages, 0, 0, pages, NULL);
+			    num_pages, write_page, 0, pages, NULL);
 	up_read(&current->mm->mmap_sem);
-	if (rc < 0)
+	if (rc < num_pages)
 		goto fail;
 	return pages;
 
 fail:
-	kfree(pages);
+	ceph_put_page_vector(pages, rc > 0 ? rc : 0, false);
 	return ERR_PTR(rc);
 }
 EXPORT_SYMBOL(ceph_get_direct_page_vector);
 
-void ceph_put_page_vector(struct page **pages, int num_pages)
+void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
 {
 	int i;
 
-	for (i = 0; i < num_pages; i++)
+	for (i = 0; i < num_pages; i++) {
+		if (dirty)
+			set_page_dirty_lock(pages[i]);
 		put_page(pages[i]);
+	}
 	kfree(pages);
 }
 EXPORT_SYMBOL(ceph_put_page_vector);
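
The signature changes above alter the pinning contract: callers now say up front whether the kernel will write into the pinned pages, and say at release time whether the pages should be marked dirty. A minimal caller sketch, assuming a path that reads OSD data into a user buffer (the surrounding logic and the calc_pages_for() helper are illustrative assumptions, not part of this patch):

	/* Pin the user buffer; 'true' because the kernel will write into it. */
	int num_pages = calc_pages_for((unsigned long)data, len);	/* assumed libceph helper */
	struct page **pages = ceph_get_direct_page_vector(data, num_pages, true);

	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* ... fill the pinned pages from the reply ... */

	/* Release; 'true' marks the pages dirty since we wrote into them. */
	ceph_put_page_vector(pages, num_pages, true);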
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 82a4369ae15..a20e5d3bbfa 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -181,8 +181,7 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
 {
 	int ret = 0;
 
-	if (rule->iifindex && (rule->iifindex != fl->iif) &&
-	    !(fl->flags & FLOWI_FLAG_MATCH_ANY_IIF))
+	if (rule->iifindex && (rule->iifindex != fl->iif))
 		goto out;
 
 	if (rule->oifindex && (rule->oifindex != fl->oif))
diff --git a/net/core/sock.c b/net/core/sock.c
index fb608011146..e5af8d5d5b5 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1009,6 +1009,36 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
 #endif
 }
 
+/*
+ * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
+ * un-modified. Special care is taken when initializing object to zero.
+ */
+static inline void sk_prot_clear_nulls(struct sock *sk, int size)
+{
+	if (offsetof(struct sock, sk_node.next) != 0)
+		memset(sk, 0, offsetof(struct sock, sk_node.next));
+	memset(&sk->sk_node.pprev, 0,
+	       size - offsetof(struct sock, sk_node.pprev));
+}
+
+void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
+{
+	unsigned long nulls1, nulls2;
+
+	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
+	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
+	if (nulls1 > nulls2)
+		swap(nulls1, nulls2);
+
+	if (nulls1 != 0)
+		memset((char *)sk, 0, nulls1);
+	memset((char *)sk + nulls1 + sizeof(void *), 0,
+	       nulls2 - nulls1 - sizeof(void *));
+	memset((char *)sk + nulls2 + sizeof(void *), 0,
+	       size - nulls2 - sizeof(void *));
+}
+EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
+
 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
 		int family)
 {
@@ -1021,19 +1051,12 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
 		if (!sk)
 			return sk;
 		if (priority & __GFP_ZERO) {
-			/*
-			 * caches using SLAB_DESTROY_BY_RCU should let
-			 * sk_node.next un-modified. Special care is taken
-			 * when initializing object to zero.
-			 */
-			if (offsetof(struct sock, sk_node.next) != 0)
-				memset(sk, 0, offsetof(struct sock, sk_node.next));
-			memset(&sk->sk_node.pprev, 0,
-			       prot->obj_size - offsetof(struct sock,
-							 sk_node.pprev));
+			if (prot->clear_sk)
+				prot->clear_sk(sk, prot->obj_size);
+			else
+				sk_prot_clear_nulls(sk, prot->obj_size);
 		}
-	}
-	else
+	} else
 		sk = kmalloc(prot->obj_size, priority);
 
 	if (sk != NULL) {
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index eb6f69a8f27..c19c1f739fb 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -163,13 +163,19 @@ struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
 				.daddr = addr
 			}
 		},
-		.flags = FLOWI_FLAG_MATCH_ANY_IIF
 	};
 	struct fib_result res = { 0 };
 	struct net_device *dev = NULL;
+	struct fib_table *local_table;
+
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+	res.r = NULL;
+#endif
 
 	rcu_read_lock();
-	if (fib_lookup(net, &fl, &res)) {
+	local_table = fib_get_table(net, RT_TABLE_LOCAL);
+	if (!local_table ||
+	    fib_table_lookup(local_table, &fl, &res, FIB_LOOKUP_NOREF)) {
 		rcu_read_unlock();
 		return NULL;
 	}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 987bf9adb31..93bfd95584f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2585,9 +2585,10 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
 		goto out;
 
 	/* RACE: Check return value of inet_select_addr instead. */
-	if (rcu_dereference(dev_out->ip_ptr) == NULL)
-		goto out;	/* Wrong error code */
-
+	if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
+		err = -ENETUNREACH;
+		goto out;
+	}
 	if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
 	    ipv4_is_lbcast(oldflp->fl4_dst)) {
 		if (!fl.fl4_src)
@@ -2648,8 +2649,12 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
 	}
 
 	if (res.type == RTN_LOCAL) {
-		if (!fl.fl4_src)
-			fl.fl4_src = fl.fl4_dst;
+		if (!fl.fl4_src) {
+			if (res.fi->fib_prefsrc)
+				fl.fl4_src = res.fi->fib_prefsrc;
+			else
+				fl.fl4_src = fl.fl4_dst;
+		}
 		dev_out = net->loopback_dev;
 		fl.oif = dev_out->ifindex;
 		res.fi = NULL;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index e13da6de1fc..d978bb2f748 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2030,7 +2030,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
 get_req:
 			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
 		}
-		sk = sk_next(st->syn_wait_sk);
+		sk = sk_nulls_next(st->syn_wait_sk);
 		st->state = TCP_SEQ_STATE_LISTENING;
 		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 	} else {
@@ -2039,7 +2039,7 @@ get_req:
 		if (reqsk_queue_len(&icsk->icsk_accept_queue))
 			goto start_req;
 		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
-		sk = sk_next(sk);
+		sk = sk_nulls_next(sk);
 	}
 get_sk:
 	sk_nulls_for_each_from(sk, node) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5e0a3a582a5..2d3ded4d078 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1899,6 +1899,7 @@ struct proto udp_prot = {
 	.compat_setsockopt = compat_udp_setsockopt,
 	.compat_getsockopt = compat_udp_getsockopt,
 #endif
+	.clear_sk	   = sk_prot_clear_portaddr_nulls,
 };
 EXPORT_SYMBOL(udp_prot);
 
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index ab76aa928fa..aee9963f7f5 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -57,6 +57,7 @@ struct proto udplite_prot = {
 	.compat_setsockopt = compat_udp_setsockopt,
 	.compat_getsockopt = compat_udp_getsockopt,
 #endif
+	.clear_sk	   = sk_prot_clear_portaddr_nulls,
 };
 EXPORT_SYMBOL(udplite_prot);
 
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 93b7a933a77..848b3559104 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2669,7 +2669,9 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 
 	ASSERT_RTNL();
 
-	rt6_ifdown(net, dev);
+	/* Flush routes if device is being removed or it is not loopback */
+	if (how || !(dev->flags & IFF_LOOPBACK))
+		rt6_ifdown(net, dev);
 	neigh_ifdown(&nd_tbl, dev);
 
 	idev = __in6_dev_get(dev);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 99157b4cd56..94b5bf132b2 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -56,7 +56,7 @@
 #include <net/checksum.h>
 #include <linux/mroute6.h>
 
-static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
+int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
 
 int __ip6_local_out(struct sk_buff *skb)
 {
@@ -145,14 +145,6 @@ static int ip6_finish_output2(struct sk_buff *skb)
 	return -EINVAL;
 }
 
-static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
-{
-	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
-
-	return (np && np->pmtudisc == IPV6_PMTUDISC_PROBE) ?
-	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
-}
-
 static int ip6_finish_output(struct sk_buff *skb)
 {
 	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
@@ -601,7 +593,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 	return offset;
 }
 
-static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
+int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 {
 	struct sk_buff *frag;
 	struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 96455ffb76f..7659d6f16e6 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1565,11 +1565,16 @@ static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr,
 {
 	struct rt6_info *rt, *nrt;
 	int allfrag = 0;
-
+again:
 	rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
 	if (rt == NULL)
 		return;
 
+	if (rt6_check_expired(rt)) {
+		ip6_del_rt(rt);
+		goto again;
+	}
+
 	if (pmtu >= dst_mtu(&rt->dst))
 		goto out;
 
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 91def93bec8..cd6cb7c3e56 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1477,6 +1477,7 @@ struct proto udpv6_prot = {
 	.compat_setsockopt = compat_udpv6_setsockopt,
 	.compat_getsockopt = compat_udpv6_getsockopt,
 #endif
+	.clear_sk	   = sk_prot_clear_portaddr_nulls,
 };
 
 static struct inet_protosw udpv6_protosw = {
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 5f48fadc27f..986c4de5292 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -55,6 +55,7 @@ struct proto udplitev6_prot = {
 	.compat_setsockopt = compat_udpv6_setsockopt,
 	.compat_getsockopt = compat_udpv6_getsockopt,
 #endif
+	.clear_sk	   = sk_prot_clear_portaddr_nulls,
 };
 
 static struct inet_protosw udplite6_protosw = {
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 6434bd5ce08..8e688b3de9a 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -17,6 +17,7 @@
 #include <linux/netfilter_ipv6.h>
 #include <net/dst.h>
 #include <net/ipv6.h>
+#include <net/ip6_route.h>
 #include <net/xfrm.h>
 
 int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
@@ -88,8 +89,21 @@ static int xfrm6_output_finish(struct sk_buff *skb)
 	return xfrm_output(skb);
 }
 
+static int __xfrm6_output(struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+	struct xfrm_state *x = dst->xfrm;
+
+	if ((x && x->props.mode == XFRM_MODE_TUNNEL) &&
+	    ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
+	     dst_allfrag(skb_dst(skb)))) {
+		return ip6_fragment(skb, xfrm6_output_finish);
+	}
+	return xfrm6_output_finish(skb);
+}
+
 int xfrm6_output(struct sk_buff *skb)
 {
 	return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL,
-		       skb_dst(skb)->dev, xfrm6_output_finish);
+		       skb_dst(skb)->dev, __xfrm6_output);
 }
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index a6de3059746..c9890e25cd4 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -2280,6 +2280,16 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
 
 	switch (optname) {
 	case IRLMP_ENUMDEVICES:
+
+		/* Offset to first device entry */
+		offset = sizeof(struct irda_device_list) -
+			sizeof(struct irda_device_info);
+
+		if (len < offset) {
+			err = -EINVAL;
+			goto out;
+		}
+
 		/* Ask lmp for the current discovery log */
 		discoveries = irlmp_get_discoveries(&list.len, self->mask.word,
 						    self->nslots);
@@ -2290,15 +2300,9 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
 		}
 
 		/* Write total list length back to client */
-		if (copy_to_user(optval, &list,
-				 sizeof(struct irda_device_list) -
-				 sizeof(struct irda_device_info)))
+		if (copy_to_user(optval, &list, offset))
 			err = -EFAULT;
 
-		/* Offset to first device entry */
-		offset = sizeof(struct irda_device_list) -
-			sizeof(struct irda_device_info);
-
 		/* Copy the list itself - watch for overflow */
 		if (list.len > 2048) {
 			err = -EINVAL;
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 239c4836a94..077a93dd167 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -780,6 +780,9 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 
 	mutex_lock(&sdata->u.ibss.mtx);
 
+	if (!sdata->u.ibss.ssid_len)
+		goto mgmt_out; /* not ready to merge yet */
+
 	switch (fc & IEEE80211_FCTL_STYPE) {
 	case IEEE80211_STYPE_PROBE_REQ:
 		ieee80211_rx_mgmt_probe_req(sdata, mgmt, skb->len);
@@ -797,6 +800,7 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 		break;
 	}
 
+ mgmt_out:
 	mutex_unlock(&sdata->u.ibss.mtx);
 }
 
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 54fb4a0e76f..b01e467b76c 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1788,9 +1788,11 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
 
 	fwd_skb = skb_copy(skb, GFP_ATOMIC);
 
-	if (!fwd_skb && net_ratelimit())
+	if (!fwd_skb && net_ratelimit()) {
 		printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
 				   sdata->name);
+		goto out;
+	}
 
 	fwd_hdr =  (struct ieee80211_hdr *) fwd_skb->data;
 	memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
@@ -1828,6 +1830,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
 		}
 	}
 
+ out:
 	if (is_multicast_ether_addr(hdr->addr1) ||
 	    sdata->dev->flags & IFF_PROMISC)
 		return RX_CONTINUE;
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index ae344d1ba05..146097cb43a 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -1051,11 +1051,13 @@ void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_work *wk;
+	bool cleanup = false;
 
 	mutex_lock(&local->mtx);
 	list_for_each_entry(wk, &local->work_list, list) {
 		if (wk->sdata != sdata)
 			continue;
+		cleanup = true;
 		wk->type = IEEE80211_WORK_ABORT;
 		wk->started = true;
 		wk->timeout = jiffies;
@@ -1063,7 +1065,8 @@ void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
 	mutex_unlock(&local->mtx);
 
 	/* run cleanups etc. */
-	ieee80211_work_work(&local->work_work);
+	if (cleanup)
+		ieee80211_work_work(&local->work_work);
 
 	mutex_lock(&local->mtx);
 	list_for_each_entry(wk, &local->work_list, list) {
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 3cf478d012d..7150705f1d0 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -270,7 +270,6 @@ static unsigned int sfq_drop(struct Qdisc *sch)
 		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
 		d = q->next[q->tail];
 		q->next[q->tail] = q->next[d];
-		q->allot[q->next[d]] += q->quantum;
 		skb = q->qs[d].prev;
 		len = qdisc_pkt_len(skb);
 		__skb_unlink(skb, &q->qs[d]);
@@ -321,14 +320,13 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	sfq_inc(q, x);
 	if (q->qs[x].qlen == 1) {		/* The flow is new */
 		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
-			q->tail = x;
 			q->next[x] = x;
-			q->allot[x] = q->quantum;
 		} else {
 			q->next[x] = q->next[q->tail];
 			q->next[q->tail] = x;
-			q->tail = x;
 		}
+		q->tail = x;
+		q->allot[x] = q->quantum;
 	}
 	if (++sch->q.qlen <= q->limit) {
 		sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -359,13 +357,13 @@ sfq_dequeue(struct Qdisc *sch)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
-	sfq_index a, old_a;
+	sfq_index a, next_a;
 
 	/* No active slots */
 	if (q->tail == SFQ_DEPTH)
 		return NULL;
 
-	a = old_a = q->next[q->tail];
+	a = q->next[q->tail];
 
 	/* Grab packet */
 	skb = __skb_dequeue(&q->qs[a]);
@@ -376,17 +374,15 @@ sfq_dequeue(struct Qdisc *sch)
 	/* Is the slot empty? */
 	if (q->qs[a].qlen == 0) {
 		q->ht[q->hash[a]] = SFQ_DEPTH;
-		a = q->next[a];
-		if (a == old_a) {
+		next_a = q->next[a];
+		if (a == next_a) {
 			q->tail = SFQ_DEPTH;
 			return skb;
 		}
-		q->next[q->tail] = a;
-		q->allot[a] += q->quantum;
+		q->next[q->tail] = next_a;
 	} else if ((q->allot[a] -= qdisc_pkt_len(skb)) <= 0) {
-		q->tail = a;
-		a = q->next[a];
 		q->allot[a] += q->quantum;
+		q->tail = a;
 	}
 	return skb;
 }
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 0b9ee34ad35..fff0926b111 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5053,7 +5053,7 @@ static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
 	if (copy_to_user(optval, &val, len))
 		return -EFAULT;
 
-	return -ENOTSUPP;
+	return 0;
 }
 
 /*
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index cd9e841e749..679cd674b81 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -552,6 +552,74 @@ void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int b
 }
 EXPORT_SYMBOL_GPL(xdr_write_pages);
 
+static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
+		__be32 *p, unsigned int len)
+{
+	if (len > iov->iov_len)
+		len = iov->iov_len;
+	if (p == NULL)
+		p = (__be32*)iov->iov_base;
+	xdr->p = p;
+	xdr->end = (__be32*)(iov->iov_base + len);
+	xdr->iov = iov;
+	xdr->page_ptr = NULL;
+}
+
+static int xdr_set_page_base(struct xdr_stream *xdr,
+		unsigned int base, unsigned int len)
+{
+	unsigned int pgnr;
+	unsigned int maxlen;
+	unsigned int pgoff;
+	unsigned int pgend;
+	void *kaddr;
+
+	maxlen = xdr->buf->page_len;
+	if (base >= maxlen)
+		return -EINVAL;
+	maxlen -= base;
+	if (len > maxlen)
+		len = maxlen;
+
+	base += xdr->buf->page_base;
+
+	pgnr = base >> PAGE_SHIFT;
+	xdr->page_ptr = &xdr->buf->pages[pgnr];
+	kaddr = page_address(*xdr->page_ptr);
+
+	pgoff = base & ~PAGE_MASK;
+	xdr->p = (__be32*)(kaddr + pgoff);
+
+	pgend = pgoff + len;
+	if (pgend > PAGE_SIZE)
+		pgend = PAGE_SIZE;
+	xdr->end = (__be32*)(kaddr + pgend);
+	xdr->iov = NULL;
+	return 0;
+}
+
+static void xdr_set_next_page(struct xdr_stream *xdr)
+{
+	unsigned int newbase;
+
+	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
+	newbase -= xdr->buf->page_base;
+
+	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
+		xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
+}
+
+static bool xdr_set_next_buffer(struct xdr_stream *xdr)
+{
+	if (xdr->page_ptr != NULL)
+		xdr_set_next_page(xdr);
+	else if (xdr->iov == xdr->buf->head) {
+		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
+			xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
+	}
+	return xdr->p != xdr->end;
+}
+
 /**
  * xdr_init_decode - Initialize an xdr_stream for decoding data.
  * @xdr: pointer to xdr_stream struct
@@ -560,41 +628,67 @@ EXPORT_SYMBOL_GPL(xdr_write_pages);
  */
 void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
 {
-	struct kvec *iov = buf->head;
-	unsigned int len = iov->iov_len;
-
-	if (len > buf->len)
-		len = buf->len;
 	xdr->buf = buf;
-	xdr->iov = iov;
-	xdr->p = p;
-	xdr->end = (__be32 *)((char *)iov->iov_base + len);
+	xdr->scratch.iov_base = NULL;
+	xdr->scratch.iov_len = 0;
+	if (buf->head[0].iov_len != 0)
+		xdr_set_iov(xdr, buf->head, p, buf->len);
+	else if (buf->page_len != 0)
+		xdr_set_page_base(xdr, 0, buf->len);
 }
 EXPORT_SYMBOL_GPL(xdr_init_decode);
 
-/**
- * xdr_inline_peek - Allow read-ahead in the XDR data stream
- * @xdr: pointer to xdr_stream struct
- * @nbytes: number of bytes of data to decode
- *
- * Check if the input buffer is long enough to enable us to decode
- * 'nbytes' more bytes of data starting at the current position.
- * If so return the current pointer without updating the current
- * pointer position.
- */
-__be32 * xdr_inline_peek(struct xdr_stream *xdr, size_t nbytes)
+static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
 {
 	__be32 *p = xdr->p;
 	__be32 *q = p + XDR_QUADLEN(nbytes);
 
 	if (unlikely(q > xdr->end || q < p))
 		return NULL;
+	xdr->p = q;
 	return p;
 }
-EXPORT_SYMBOL_GPL(xdr_inline_peek);
 
 /**
- * xdr_inline_decode - Retrieve non-page XDR data to decode
+ * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
+ * @xdr: pointer to xdr_stream struct
+ * @buf: pointer to an empty buffer
+ * @buflen: size of 'buf'
+ *
+ * The scratch buffer is used when decoding from an array of pages.
+ * If an xdr_inline_decode() call spans across page boundaries, then
+ * we copy the data into the scratch buffer in order to allow linear
+ * access.
+ */
+void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
+{
+	xdr->scratch.iov_base = buf;
+	xdr->scratch.iov_len = buflen;
+}
+EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
+
+static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
+{
+	__be32 *p;
+	void *cpdest = xdr->scratch.iov_base;
+	size_t cplen = (char *)xdr->end - (char *)xdr->p;
+
+	if (nbytes > xdr->scratch.iov_len)
+		return NULL;
+	memcpy(cpdest, xdr->p, cplen);
+	cpdest += cplen;
+	nbytes -= cplen;
+	if (!xdr_set_next_buffer(xdr))
+		return NULL;
+	p = __xdr_inline_decode(xdr, nbytes);
+	if (p == NULL)
+		return NULL;
+	memcpy(cpdest, p, nbytes);
+	return xdr->scratch.iov_base;
+}
+
+/**
+ * xdr_inline_decode - Retrieve XDR data to decode
  * @xdr: pointer to xdr_stream struct
  * @nbytes: number of bytes of data to decode
  *
@@ -605,13 +699,16 @@ EXPORT_SYMBOL_GPL(xdr_inline_peek);
  */
 __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
 {
-	__be32 *p = xdr->p;
-	__be32 *q = p + XDR_QUADLEN(nbytes);
+	__be32 *p;
 
-	if (unlikely(q > xdr->end || q < p))
+	if (nbytes == 0)
+		return xdr->p;
+	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
 		return NULL;
-	xdr->p = q;
-	return p;
+	p = __xdr_inline_decode(xdr, nbytes);
+	if (p != NULL)
+		return p;
+	return xdr_copy_to_scratch(xdr, nbytes);
 }
 EXPORT_SYMBOL_GPL(xdr_inline_decode);
 
@@ -671,16 +768,12 @@ EXPORT_SYMBOL_GPL(xdr_read_pages);
  */
 void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
 {
-	char * kaddr = page_address(xdr->buf->pages[0]);
 	xdr_read_pages(xdr, len);
 	/*
 	 * Position current pointer at beginning of tail, and
 	 * set remaining message length.
 	 */
-	if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
-		len = PAGE_CACHE_SIZE - xdr->buf->page_base;
-	xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
-	xdr->end = (__be32 *)((char *)xdr->p + len);
+	xdr_set_page_base(xdr, 0, len);
 }
 EXPORT_SYMBOL_GPL(xdr_enter_page);
 
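
With the new page-aware decode path, a decoder that may cross a page boundary attaches a scratch buffer once and then calls xdr_inline_decode() as before. A minimal sketch of the intended calling pattern, assuming a caller with a local scratch array (the decode_foo() wrapper and the 64-byte read are illustrative assumptions, not from this patch):

	static int decode_foo(struct xdr_stream *xdr)
	{
		char scratch[64];	/* must cover the largest inline read below */
		__be32 *p;

		xdr_set_scratch_buffer(xdr, scratch, sizeof(scratch));

		p = xdr_inline_decode(xdr, 64);	/* copied into 'scratch' if it straddles a page */
		if (p == NULL)
			return -EIO;		/* ran past the end of the receive buffer */
		/* ... decode the 64 bytes at p ... */
		return 0;
	}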