Diffstat (limited to 'net')
99 files changed, 1412 insertions(+), 1006 deletions(-)
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 798beac7f100..1e308f210928 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -178,27 +178,24 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
178 | break; | 178 | break; |
179 | case 's':{ | 179 | case 's':{ |
180 | char **sptr = va_arg(ap, char **); | 180 | char **sptr = va_arg(ap, char **); |
181 | int16_t len; | 181 | uint16_t len; |
182 | int size; | ||
183 | 182 | ||
184 | errcode = p9pdu_readf(pdu, proto_version, | 183 | errcode = p9pdu_readf(pdu, proto_version, |
185 | "w", &len); | 184 | "w", &len); |
186 | if (errcode) | 185 | if (errcode) |
187 | break; | 186 | break; |
188 | 187 | ||
189 | size = max_t(int16_t, len, 0); | 188 | *sptr = kmalloc(len + 1, GFP_KERNEL); |
190 | |||
191 | *sptr = kmalloc(size + 1, GFP_KERNEL); | ||
192 | if (*sptr == NULL) { | 189 | if (*sptr == NULL) { |
193 | errcode = -EFAULT; | 190 | errcode = -EFAULT; |
194 | break; | 191 | break; |
195 | } | 192 | } |
196 | if (pdu_read(pdu, *sptr, size)) { | 193 | if (pdu_read(pdu, *sptr, len)) { |
197 | errcode = -EFAULT; | 194 | errcode = -EFAULT; |
198 | kfree(*sptr); | 195 | kfree(*sptr); |
199 | *sptr = NULL; | 196 | *sptr = NULL; |
200 | } else | 197 | } else |
201 | (*sptr)[size] = 0; | 198 | (*sptr)[len] = 0; |
202 | } | 199 | } |
203 | break; | 200 | break; |
204 | case 'Q':{ | 201 | case 'Q':{ |
@@ -234,14 +231,14 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
234 | } | 231 | } |
235 | break; | 232 | break; |
236 | case 'D':{ | 233 | case 'D':{ |
237 | int32_t *count = va_arg(ap, int32_t *); | 234 | uint32_t *count = va_arg(ap, uint32_t *); |
238 | void **data = va_arg(ap, void **); | 235 | void **data = va_arg(ap, void **); |
239 | 236 | ||
240 | errcode = | 237 | errcode = |
241 | p9pdu_readf(pdu, proto_version, "d", count); | 238 | p9pdu_readf(pdu, proto_version, "d", count); |
242 | if (!errcode) { | 239 | if (!errcode) { |
243 | *count = | 240 | *count = |
244 | min_t(int32_t, *count, | 241 | min_t(uint32_t, *count, |
245 | pdu->size - pdu->offset); | 242 | pdu->size - pdu->offset); |
246 | *data = &pdu->sdata[pdu->offset]; | 243 | *data = &pdu->sdata[pdu->offset]; |
247 | } | 244 | } |
@@ -404,9 +401,10 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
404 | break; | 401 | break; |
405 | case 's':{ | 402 | case 's':{ |
406 | const char *sptr = va_arg(ap, const char *); | 403 | const char *sptr = va_arg(ap, const char *); |
407 | int16_t len = 0; | 404 | uint16_t len = 0; |
408 | if (sptr) | 405 | if (sptr) |
409 | len = min_t(int16_t, strlen(sptr), USHRT_MAX); | 406 | len = min_t(uint16_t, strlen(sptr), |
407 | USHRT_MAX); | ||
410 | 408 | ||
411 | errcode = p9pdu_writef(pdu, proto_version, | 409 | errcode = p9pdu_writef(pdu, proto_version, |
412 | "w", len); | 410 | "w", len); |
@@ -438,7 +436,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
438 | stbuf->n_gid, stbuf->n_muid); | 436 | stbuf->n_gid, stbuf->n_muid); |
439 | } break; | 437 | } break; |
440 | case 'D':{ | 438 | case 'D':{ |
441 | int32_t count = va_arg(ap, int32_t); | 439 | uint32_t count = va_arg(ap, uint32_t); |
442 | const void *data = va_arg(ap, const void *); | 440 | const void *data = va_arg(ap, const void *); |
443 | 441 | ||
444 | errcode = p9pdu_writef(pdu, proto_version, "d", | 442 | errcode = p9pdu_writef(pdu, proto_version, "d", |
diff --git a/net/Kconfig b/net/Kconfig
index ad0aafe903f8..72840626284b 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -253,7 +253,9 @@ config NET_TCPPROBE
253 | what was just said, you don't need it: say N. | 253 | what was just said, you don't need it: say N. |
254 | 254 | ||
255 | Documentation on how to use TCP connection probing can be found | 255 | Documentation on how to use TCP connection probing can be found |
256 | at http://linux-net.osdl.org/index.php/TcpProbe | 256 | at: |
257 | |||
258 | http://www.linuxfoundation.org/collaborate/workgroups/networking/tcpprobe | ||
257 | 259 | ||
258 | To compile this code as a module, choose M here: the | 260 | To compile this code as a module, choose M here: the |
259 | module will be called tcp_probe. | 261 | module will be called tcp_probe. |
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index bb86d2932394..6da5daeebab7 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1392,7 +1392,7 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
1392 | ax25_cb *ax25; | 1392 | ax25_cb *ax25; |
1393 | int err = 0; | 1393 | int err = 0; |
1394 | 1394 | ||
1395 | memset(fsa, 0, sizeof(fsa)); | 1395 | memset(fsa, 0, sizeof(*fsa)); |
1396 | lock_sock(sk); | 1396 | lock_sock(sk); |
1397 | ax25 = ax25_sk(sk); | 1397 | ax25 = ax25_sk(sk); |
1398 | 1398 | ||
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 1bf0cf503796..8184c031d028 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -740,12 +740,12 @@ static int setsockopt(struct socket *sock,
740 | if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL) | 740 | if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL) |
741 | return -ENOPROTOOPT; | 741 | return -ENOPROTOOPT; |
742 | lock_sock(&(cf_sk->sk)); | 742 | lock_sock(&(cf_sk->sk)); |
743 | cf_sk->conn_req.param.size = ol; | ||
744 | if (ol > sizeof(cf_sk->conn_req.param.data) || | 743 | if (ol > sizeof(cf_sk->conn_req.param.data) || |
745 | copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) { | 744 | copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) { |
746 | release_sock(&cf_sk->sk); | 745 | release_sock(&cf_sk->sk); |
747 | return -EINVAL; | 746 | return -EINVAL; |
748 | } | 747 | } |
748 | cf_sk->conn_req.param.size = ol; | ||
749 | release_sock(&cf_sk->sk); | 749 | release_sock(&cf_sk->sk); |
750 | return 0; | 750 | return 0; |
751 | 751 | ||
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 84a422c98941..fa9dab372b68 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -76,6 +76,8 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
76 | struct chnl_net *priv = container_of(layr, struct chnl_net, chnl); | 76 | struct chnl_net *priv = container_of(layr, struct chnl_net, chnl); |
77 | int pktlen; | 77 | int pktlen; |
78 | int err = 0; | 78 | int err = 0; |
79 | const u8 *ip_version; | ||
80 | u8 buf; | ||
79 | 81 | ||
80 | priv = container_of(layr, struct chnl_net, chnl); | 82 | priv = container_of(layr, struct chnl_net, chnl); |
81 | 83 | ||
@@ -90,7 +92,21 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
90 | * send the packet to the net stack. | 92 | * send the packet to the net stack. |
91 | */ | 93 | */ |
92 | skb->dev = priv->netdev; | 94 | skb->dev = priv->netdev; |
93 | skb->protocol = htons(ETH_P_IP); | 95 | |
96 | /* check the version of IP */ | ||
97 | ip_version = skb_header_pointer(skb, 0, 1, &buf); | ||
98 | if (!ip_version) | ||
99 | return -EINVAL; | ||
100 | switch (*ip_version >> 4) { | ||
101 | case 4: | ||
102 | skb->protocol = htons(ETH_P_IP); | ||
103 | break; | ||
104 | case 6: | ||
105 | skb->protocol = htons(ETH_P_IPV6); | ||
106 | break; | ||
107 | default: | ||
108 | return -EINVAL; | ||
109 | } | ||
94 | 110 | ||
95 | /* If we change the header in loop mode, the checksum is corrupted. */ | 111 | /* If we change the header in loop mode, the checksum is corrupted. */ |
96 | if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP) | 112 | if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP) |
diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c
index 815ef8826796..0a1b53bce76d 100644
--- a/net/ceph/ceph_hash.c
+++ b/net/ceph/ceph_hash.c
@@ -1,5 +1,6 @@
1 | 1 | ||
2 | #include <linux/ceph/types.h> | 2 | #include <linux/ceph/types.h> |
3 | #include <linux/module.h> | ||
3 | 4 | ||
4 | /* | 5 | /* |
5 | * Robert Jenkin's hash function. | 6 | * Robert Jenkin's hash function. |
@@ -104,6 +105,7 @@ unsigned ceph_str_hash(int type, const char *s, unsigned len)
104 | return -1; | 105 | return -1; |
105 | } | 106 | } |
106 | } | 107 | } |
108 | EXPORT_SYMBOL(ceph_str_hash); | ||
107 | 109 | ||
108 | const char *ceph_str_hash_name(int type) | 110 | const char *ceph_str_hash_name(int type) |
109 | { | 111 | { |
@@ -116,3 +118,4 @@ const char *ceph_str_hash_name(int type)
116 | return "unknown"; | 118 | return "unknown"; |
117 | } | 119 | } |
118 | } | 120 | } |
121 | EXPORT_SYMBOL(ceph_str_hash_name); | ||
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index b6ff4a1519ab..dff633d62e5b 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -96,7 +96,7 @@ struct workqueue_struct *ceph_msgr_wq;
96 | 96 | ||
97 | int ceph_msgr_init(void) | 97 | int ceph_msgr_init(void) |
98 | { | 98 | { |
99 | ceph_msgr_wq = create_workqueue("ceph-msgr"); | 99 | ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0); |
100 | if (!ceph_msgr_wq) { | 100 | if (!ceph_msgr_wq) { |
101 | pr_err("msgr_init failed to create workqueue\n"); | 101 | pr_err("msgr_init failed to create workqueue\n"); |
102 | return -ENOMEM; | 102 | return -ENOMEM; |
@@ -1920,20 +1920,6 @@ bad_tag:
1920 | /* | 1920 | /* |
1921 | * Atomically queue work on a connection. Bump @con reference to | 1921 | * Atomically queue work on a connection. Bump @con reference to |
1922 | * avoid races with connection teardown. | 1922 | * avoid races with connection teardown. |
1923 | * | ||
1924 | * There is some trickery going on with QUEUED and BUSY because we | ||
1925 | * only want a _single_ thread operating on each connection at any | ||
1926 | * point in time, but we want to use all available CPUs. | ||
1927 | * | ||
1928 | * The worker thread only proceeds if it can atomically set BUSY. It | ||
1929 | * clears QUEUED and does it's thing. When it thinks it's done, it | ||
1930 | * clears BUSY, then rechecks QUEUED.. if it's set again, it loops | ||
1931 | * (tries again to set BUSY). | ||
1932 | * | ||
1933 | * To queue work, we first set QUEUED, _then_ if BUSY isn't set, we | ||
1934 | * try to queue work. If that fails (work is already queued, or BUSY) | ||
1935 | * we give up (work also already being done or is queued) but leave QUEUED | ||
1936 | * set so that the worker thread will loop if necessary. | ||
1937 | */ | 1923 | */ |
1938 | static void queue_con(struct ceph_connection *con) | 1924 | static void queue_con(struct ceph_connection *con) |
1939 | { | 1925 | { |
@@ -1948,11 +1934,7 @@ static void queue_con(struct ceph_connection *con)
1948 | return; | 1934 | return; |
1949 | } | 1935 | } |
1950 | 1936 | ||
1951 | set_bit(QUEUED, &con->state); | 1937 | if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) { |
1952 | if (test_bit(BUSY, &con->state)) { | ||
1953 | dout("queue_con %p - already BUSY\n", con); | ||
1954 | con->ops->put(con); | ||
1955 | } else if (!queue_work(ceph_msgr_wq, &con->work.work)) { | ||
1956 | dout("queue_con %p - already queued\n", con); | 1938 | dout("queue_con %p - already queued\n", con); |
1957 | con->ops->put(con); | 1939 | con->ops->put(con); |
1958 | } else { | 1940 | } else { |
@@ -1967,15 +1949,6 @@ static void con_work(struct work_struct *work)
1967 | { | 1949 | { |
1968 | struct ceph_connection *con = container_of(work, struct ceph_connection, | 1950 | struct ceph_connection *con = container_of(work, struct ceph_connection, |
1969 | work.work); | 1951 | work.work); |
1970 | int backoff = 0; | ||
1971 | |||
1972 | more: | ||
1973 | if (test_and_set_bit(BUSY, &con->state) != 0) { | ||
1974 | dout("con_work %p BUSY already set\n", con); | ||
1975 | goto out; | ||
1976 | } | ||
1977 | dout("con_work %p start, clearing QUEUED\n", con); | ||
1978 | clear_bit(QUEUED, &con->state); | ||
1979 | 1952 | ||
1980 | mutex_lock(&con->mutex); | 1953 | mutex_lock(&con->mutex); |
1981 | 1954 | ||
@@ -1994,28 +1967,13 @@ more:
1994 | try_read(con) < 0 || | 1967 | try_read(con) < 0 || |
1995 | try_write(con) < 0) { | 1968 | try_write(con) < 0) { |
1996 | mutex_unlock(&con->mutex); | 1969 | mutex_unlock(&con->mutex); |
1997 | backoff = 1; | ||
1998 | ceph_fault(con); /* error/fault path */ | 1970 | ceph_fault(con); /* error/fault path */ |
1999 | goto done_unlocked; | 1971 | goto done_unlocked; |
2000 | } | 1972 | } |
2001 | 1973 | ||
2002 | done: | 1974 | done: |
2003 | mutex_unlock(&con->mutex); | 1975 | mutex_unlock(&con->mutex); |
2004 | |||
2005 | done_unlocked: | 1976 | done_unlocked: |
2006 | clear_bit(BUSY, &con->state); | ||
2007 | dout("con->state=%lu\n", con->state); | ||
2008 | if (test_bit(QUEUED, &con->state)) { | ||
2009 | if (!backoff || test_bit(OPENING, &con->state)) { | ||
2010 | dout("con_work %p QUEUED reset, looping\n", con); | ||
2011 | goto more; | ||
2012 | } | ||
2013 | dout("con_work %p QUEUED reset, but just faulted\n", con); | ||
2014 | clear_bit(QUEUED, &con->state); | ||
2015 | } | ||
2016 | dout("con_work %p done\n", con); | ||
2017 | |||
2018 | out: | ||
2019 | con->ops->put(con); | 1977 | con->ops->put(con); |
2020 | } | 1978 | } |
2021 | 1979 | ||
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index d73f3f6efa36..71603ac3dff5 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -605,8 +605,10 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
605 | goto bad; | 605 | goto bad; |
606 | } | 606 | } |
607 | err = __decode_pool(p, end, pi); | 607 | err = __decode_pool(p, end, pi); |
608 | if (err < 0) | 608 | if (err < 0) { |
609 | kfree(pi); | ||
609 | goto bad; | 610 | goto bad; |
611 | } | ||
610 | __insert_pg_pool(&map->pg_pools, pi); | 612 | __insert_pg_pool(&map->pg_pools, pi); |
611 | } | 613 | } |
612 | 614 | ||
diff --git a/net/core/dev.c b/net/core/dev.c
index 3fe443be4b15..54277df0f735 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2297,7 +2297,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2297 | */ | 2297 | */ |
2298 | if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) | 2298 | if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) |
2299 | skb_dst_force(skb); | 2299 | skb_dst_force(skb); |
2300 | __qdisc_update_bstats(q, skb->len); | 2300 | |
2301 | qdisc_skb_cb(skb)->pkt_len = skb->len; | ||
2302 | qdisc_bstats_update(q, skb); | ||
2303 | |||
2301 | if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { | 2304 | if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { |
2302 | if (unlikely(contended)) { | 2305 | if (unlikely(contended)) { |
2303 | spin_unlock(&q->busylock); | 2306 | spin_unlock(&q->busylock); |
@@ -5520,34 +5523,6 @@ void netdev_run_todo(void)
5520 | } | 5523 | } |
5521 | } | 5524 | } |
5522 | 5525 | ||
5523 | /** | ||
5524 | * dev_txq_stats_fold - fold tx_queues stats | ||
5525 | * @dev: device to get statistics from | ||
5526 | * @stats: struct rtnl_link_stats64 to hold results | ||
5527 | */ | ||
5528 | void dev_txq_stats_fold(const struct net_device *dev, | ||
5529 | struct rtnl_link_stats64 *stats) | ||
5530 | { | ||
5531 | u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0; | ||
5532 | unsigned int i; | ||
5533 | struct netdev_queue *txq; | ||
5534 | |||
5535 | for (i = 0; i < dev->num_tx_queues; i++) { | ||
5536 | txq = netdev_get_tx_queue(dev, i); | ||
5537 | spin_lock_bh(&txq->_xmit_lock); | ||
5538 | tx_bytes += txq->tx_bytes; | ||
5539 | tx_packets += txq->tx_packets; | ||
5540 | tx_dropped += txq->tx_dropped; | ||
5541 | spin_unlock_bh(&txq->_xmit_lock); | ||
5542 | } | ||
5543 | if (tx_bytes || tx_packets || tx_dropped) { | ||
5544 | stats->tx_bytes = tx_bytes; | ||
5545 | stats->tx_packets = tx_packets; | ||
5546 | stats->tx_dropped = tx_dropped; | ||
5547 | } | ||
5548 | } | ||
5549 | EXPORT_SYMBOL(dev_txq_stats_fold); | ||
5550 | |||
5551 | /* Convert net_device_stats to rtnl_link_stats64. They have the same | 5526 | /* Convert net_device_stats to rtnl_link_stats64. They have the same |
5552 | * fields in the same order, with only the type differing. | 5527 | * fields in the same order, with only the type differing. |
5553 | */ | 5528 | */ |
@@ -5591,7 +5566,6 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5591 | netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); | 5566 | netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); |
5592 | } else { | 5567 | } else { |
5593 | netdev_stats_to_stats64(storage, &dev->stats); | 5568 | netdev_stats_to_stats64(storage, &dev->stats); |
5594 | dev_txq_stats_fold(dev, storage); | ||
5595 | } | 5569 | } |
5596 | storage->rx_dropped += atomic_long_read(&dev->rx_dropped); | 5570 | storage->rx_dropped += atomic_long_read(&dev->rx_dropped); |
5597 | return storage; | 5571 | return storage; |
@@ -5617,18 +5591,20 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
5617 | } | 5591 | } |
5618 | 5592 | ||
5619 | /** | 5593 | /** |
5620 | * alloc_netdev_mq - allocate network device | 5594 | * alloc_netdev_mqs - allocate network device |
5621 | * @sizeof_priv: size of private data to allocate space for | 5595 | * @sizeof_priv: size of private data to allocate space for |
5622 | * @name: device name format string | 5596 | * @name: device name format string |
5623 | * @setup: callback to initialize device | 5597 | * @setup: callback to initialize device |
5624 | * @queue_count: the number of subqueues to allocate | 5598 | * @txqs: the number of TX subqueues to allocate |
5599 | * @rxqs: the number of RX subqueues to allocate | ||
5625 | * | 5600 | * |
5626 | * Allocates a struct net_device with private data area for driver use | 5601 | * Allocates a struct net_device with private data area for driver use |
5627 | * and performs basic initialization. Also allocates subquue structs | 5602 | * and performs basic initialization. Also allocates subquue structs |
5628 | * for each queue on the device at the end of the netdevice. | 5603 | * for each queue on the device. |
5629 | */ | 5604 | */ |
5630 | struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | 5605 | struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, |
5631 | void (*setup)(struct net_device *), unsigned int queue_count) | 5606 | void (*setup)(struct net_device *), |
5607 | unsigned int txqs, unsigned int rxqs) | ||
5632 | { | 5608 | { |
5633 | struct net_device *dev; | 5609 | struct net_device *dev; |
5634 | size_t alloc_size; | 5610 | size_t alloc_size; |
@@ -5636,12 +5612,20 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5636 | 5612 | ||
5637 | BUG_ON(strlen(name) >= sizeof(dev->name)); | 5613 | BUG_ON(strlen(name) >= sizeof(dev->name)); |
5638 | 5614 | ||
5639 | if (queue_count < 1) { | 5615 | if (txqs < 1) { |
5640 | pr_err("alloc_netdev: Unable to allocate device " | 5616 | pr_err("alloc_netdev: Unable to allocate device " |
5641 | "with zero queues.\n"); | 5617 | "with zero queues.\n"); |
5642 | return NULL; | 5618 | return NULL; |
5643 | } | 5619 | } |
5644 | 5620 | ||
5621 | #ifdef CONFIG_RPS | ||
5622 | if (rxqs < 1) { | ||
5623 | pr_err("alloc_netdev: Unable to allocate device " | ||
5624 | "with zero RX queues.\n"); | ||
5625 | return NULL; | ||
5626 | } | ||
5627 | #endif | ||
5628 | |||
5645 | alloc_size = sizeof(struct net_device); | 5629 | alloc_size = sizeof(struct net_device); |
5646 | if (sizeof_priv) { | 5630 | if (sizeof_priv) { |
5647 | /* ensure 32-byte alignment of private area */ | 5631 | /* ensure 32-byte alignment of private area */ |
@@ -5672,14 +5656,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5672 | 5656 | ||
5673 | dev_net_set(dev, &init_net); | 5657 | dev_net_set(dev, &init_net); |
5674 | 5658 | ||
5675 | dev->num_tx_queues = queue_count; | 5659 | dev->num_tx_queues = txqs; |
5676 | dev->real_num_tx_queues = queue_count; | 5660 | dev->real_num_tx_queues = txqs; |
5677 | if (netif_alloc_netdev_queues(dev)) | 5661 | if (netif_alloc_netdev_queues(dev)) |
5678 | goto free_pcpu; | 5662 | goto free_pcpu; |
5679 | 5663 | ||
5680 | #ifdef CONFIG_RPS | 5664 | #ifdef CONFIG_RPS |
5681 | dev->num_rx_queues = queue_count; | 5665 | dev->num_rx_queues = rxqs; |
5682 | dev->real_num_rx_queues = queue_count; | 5666 | dev->real_num_rx_queues = rxqs; |
5683 | if (netif_alloc_rx_queues(dev)) | 5667 | if (netif_alloc_rx_queues(dev)) |
5684 | goto free_pcpu; | 5668 | goto free_pcpu; |
5685 | #endif | 5669 | #endif |
@@ -5707,7 +5691,7 @@ free_p:
5707 | kfree(p); | 5691 | kfree(p); |
5708 | return NULL; | 5692 | return NULL; |
5709 | } | 5693 | } |
5710 | EXPORT_SYMBOL(alloc_netdev_mq); | 5694 | EXPORT_SYMBOL(alloc_netdev_mqs); |
5711 | 5695 | ||
5712 | /** | 5696 | /** |
5713 | * free_netdev - free network device | 5697 | * free_netdev - free network device |
@@ -6205,7 +6189,7 @@ static void __net_exit default_device_exit(struct net *net)
6205 | static void __net_exit default_device_exit_batch(struct list_head *net_list) | 6189 | static void __net_exit default_device_exit_batch(struct list_head *net_list) |
6206 | { | 6190 | { |
6207 | /* At exit all network devices most be removed from a network | 6191 | /* At exit all network devices most be removed from a network |
6208 | * namespace. Do this in the reverse order of registeration. | 6192 | * namespace. Do this in the reverse order of registration. |
6209 | * Do this across as many network namespaces as possible to | 6193 | * Do this across as many network namespaces as possible to |
6210 | * improve batching efficiency. | 6194 | * improve batching efficiency. |
6211 | */ | 6195 | */ |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 19d6c21220fd..d31bb36ae0dc 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -380,6 +380,8 @@ static void skb_release_head_state(struct sk_buff *skb)
380 | } | 380 | } |
381 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | 381 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
382 | nf_conntrack_put(skb->nfct); | 382 | nf_conntrack_put(skb->nfct); |
383 | #endif | ||
384 | #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED | ||
383 | nf_conntrack_put_reasm(skb->nfct_reasm); | 385 | nf_conntrack_put_reasm(skb->nfct_reasm); |
384 | #endif | 386 | #endif |
385 | #ifdef CONFIG_BRIDGE_NETFILTER | 387 | #ifdef CONFIG_BRIDGE_NETFILTER |
diff --git a/net/core/sock.c b/net/core/sock.c
index a658aeb6d554..7dfed792434d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -157,7 +157,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
157 | "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" , | 157 | "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" , |
158 | "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , | 158 | "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , |
159 | "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , | 159 | "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , |
160 | "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , | 160 | "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" , |
161 | "sk_lock-AF_MAX" | 161 | "sk_lock-AF_MAX" |
162 | }; | 162 | }; |
163 | static const char *const af_family_slock_key_strings[AF_MAX+1] = { | 163 | static const char *const af_family_slock_key_strings[AF_MAX+1] = { |
@@ -173,7 +173,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
173 | "slock-27" , "slock-28" , "slock-AF_CAN" , | 173 | "slock-27" , "slock-28" , "slock-AF_CAN" , |
174 | "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , | 174 | "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , |
175 | "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , | 175 | "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , |
176 | "slock-AF_IEEE802154", "slock-AF_CAIF" , | 176 | "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" , |
177 | "slock-AF_MAX" | 177 | "slock-AF_MAX" |
178 | }; | 178 | }; |
179 | static const char *const af_family_clock_key_strings[AF_MAX+1] = { | 179 | static const char *const af_family_clock_key_strings[AF_MAX+1] = { |
@@ -189,7 +189,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
189 | "clock-27" , "clock-28" , "clock-AF_CAN" , | 189 | "clock-27" , "clock-28" , "clock-AF_CAN" , |
190 | "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , | 190 | "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , |
191 | "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , | 191 | "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , |
192 | "clock-AF_IEEE802154", "clock-AF_CAIF" , | 192 | "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" , |
193 | "clock-AF_MAX" | 193 | "clock-AF_MAX" |
194 | }; | 194 | }; |
195 | 195 | ||
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
index ad6dffd9070e..b75968a04017 100644
--- a/net/dccp/Kconfig
+++ b/net/dccp/Kconfig
@@ -49,7 +49,9 @@ config NET_DCCPPROBE
49 | what was just said, you don't need it: say N. | 49 | what was just said, you don't need it: say N. |
50 | 50 | ||
51 | Documentation on how to use DCCP connection probing can be found | 51 | Documentation on how to use DCCP connection probing can be found |
52 | at http://linux-net.osdl.org/index.php/DccpProbe | 52 | at: |
53 | |||
54 | http://www.linuxfoundation.org/collaborate/workgroups/networking/dccpprobe | ||
53 | 55 | ||
54 | To compile this code as a module, choose M here: the | 56 | To compile this code as a module, choose M here: the |
55 | module will be called dccp_probe. | 57 | module will be called dccp_probe. |
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 0ba15633c418..0dcaa903e00e 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -1130,7 +1130,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
1130 | /* | 1130 | /* |
1131 | * This processes a device up event. We only start up | 1131 | * This processes a device up event. We only start up |
1132 | * the loopback device & ethernet devices with correct | 1132 | * the loopback device & ethernet devices with correct |
1133 | * MAC addreses automatically. Others must be started | 1133 | * MAC addresses automatically. Others must be started |
1134 | * specifically. | 1134 | * specifically. |
1135 | * | 1135 | * |
1136 | * FIXME: How should we configure the loopback address ? If we could dispense | 1136 | * FIXME: How should we configure the loopback address ? If we could dispense |
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index f00ef2f1d814..44d2b42fda56 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -347,10 +347,11 @@ void ether_setup(struct net_device *dev)
347 | EXPORT_SYMBOL(ether_setup); | 347 | EXPORT_SYMBOL(ether_setup); |
348 | 348 | ||
349 | /** | 349 | /** |
350 | * alloc_etherdev_mq - Allocates and sets up an Ethernet device | 350 | * alloc_etherdev_mqs - Allocates and sets up an Ethernet device |
351 | * @sizeof_priv: Size of additional driver-private structure to be allocated | 351 | * @sizeof_priv: Size of additional driver-private structure to be allocated |
352 | * for this Ethernet device | 352 | * for this Ethernet device |
353 | * @queue_count: The number of queues this device has. | 353 | * @txqs: The number of TX queues this device has. |
354 | * @rxqs: The number of RX queues this device has. | ||
354 | * | 355 | * |
355 | * Fill in the fields of the device structure with Ethernet-generic | 356 | * Fill in the fields of the device structure with Ethernet-generic |
356 | * values. Basically does everything except registering the device. | 357 | * values. Basically does everything except registering the device. |
@@ -360,11 +361,12 @@ EXPORT_SYMBOL(ether_setup);
360 | * this private data area. | 361 | * this private data area. |
361 | */ | 362 | */ |
362 | 363 | ||
363 | struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count) | 364 | struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, |
365 | unsigned int rxqs) | ||
364 | { | 366 | { |
365 | return alloc_netdev_mq(sizeof_priv, "eth%d", ether_setup, queue_count); | 367 | return alloc_netdev_mqs(sizeof_priv, "eth%d", ether_setup, txqs, rxqs); |
366 | } | 368 | } |
367 | EXPORT_SYMBOL(alloc_etherdev_mq); | 369 | EXPORT_SYMBOL(alloc_etherdev_mqs); |
368 | 370 | ||
369 | static size_t _format_mac_addr(char *buf, int buflen, | 371 | static size_t _format_mac_addr(char *buf, int buflen, |
370 | const unsigned char *addr, int len) | 372 | const unsigned char *addr, int len) |
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 9e95d7fb6d5a..a5a1050595d1 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -432,7 +432,9 @@ config INET_DIAG
432 | ---help--- | 432 | ---help--- |
433 | Support for INET (TCP, DCCP, etc) socket monitoring interface used by | 433 | Support for INET (TCP, DCCP, etc) socket monitoring interface used by |
434 | native Linux tools such as ss. ss is included in iproute2, currently | 434 | native Linux tools such as ss. ss is included in iproute2, currently |
435 | downloadable at <http://linux-net.osdl.org/index.php/Iproute2>. | 435 | downloadable at: |
436 | |||
437 | http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2 | ||
436 | 438 | ||
437 | If unsure, say Y. | 439 | If unsure, say Y. |
438 | 440 | ||
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 880a5ec6dce0..86961bec70ab 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -314,14 +314,15 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
314 | 314 | ||
315 | skb->ip_summed = CHECKSUM_NONE; | 315 | skb->ip_summed = CHECKSUM_NONE; |
316 | 316 | ||
317 | ah = (struct ip_auth_hdr *)skb->data; | ||
318 | iph = ip_hdr(skb); | ||
319 | ihl = ip_hdrlen(skb); | ||
320 | 317 | ||
321 | if ((err = skb_cow_data(skb, 0, &trailer)) < 0) | 318 | if ((err = skb_cow_data(skb, 0, &trailer)) < 0) |
322 | goto out; | 319 | goto out; |
323 | nfrags = err; | 320 | nfrags = err; |
324 | 321 | ||
322 | ah = (struct ip_auth_hdr *)skb->data; | ||
323 | iph = ip_hdr(skb); | ||
324 | ihl = ip_hdrlen(skb); | ||
325 | |||
325 | work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len); | 326 | work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len); |
326 | if (!work_iph) | 327 | if (!work_iph) |
327 | goto out; | 328 | goto out; |
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index a2fc7b961dbc..04c8b69fd426 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1143,6 +1143,23 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
1143 | return err; | 1143 | return err; |
1144 | } | 1144 | } |
1145 | 1145 | ||
1146 | int arp_invalidate(struct net_device *dev, __be32 ip) | ||
1147 | { | ||
1148 | struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev); | ||
1149 | int err = -ENXIO; | ||
1150 | |||
1151 | if (neigh) { | ||
1152 | if (neigh->nud_state & ~NUD_NOARP) | ||
1153 | err = neigh_update(neigh, NULL, NUD_FAILED, | ||
1154 | NEIGH_UPDATE_F_OVERRIDE| | ||
1155 | NEIGH_UPDATE_F_ADMIN); | ||
1156 | neigh_release(neigh); | ||
1157 | } | ||
1158 | |||
1159 | return err; | ||
1160 | } | ||
1161 | EXPORT_SYMBOL(arp_invalidate); | ||
1162 | |||
1146 | static int arp_req_delete_public(struct net *net, struct arpreq *r, | 1163 | static int arp_req_delete_public(struct net *net, struct arpreq *r, |
1147 | struct net_device *dev) | 1164 | struct net_device *dev) |
1148 | { | 1165 | { |
@@ -1163,7 +1180,6 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
1163 | { | 1180 | { |
1164 | int err; | 1181 | int err; |
1165 | __be32 ip; | 1182 | __be32 ip; |
1166 | struct neighbour *neigh; | ||
1167 | 1183 | ||
1168 | if (r->arp_flags & ATF_PUBL) | 1184 | if (r->arp_flags & ATF_PUBL) |
1169 | return arp_req_delete_public(net, r, dev); | 1185 | return arp_req_delete_public(net, r, dev); |
@@ -1181,16 +1197,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
1181 | if (!dev) | 1197 | if (!dev) |
1182 | return -EINVAL; | 1198 | return -EINVAL; |
1183 | } | 1199 | } |
1184 | err = -ENXIO; | 1200 | return arp_invalidate(dev, ip); |
1185 | neigh = neigh_lookup(&arp_tbl, &ip, dev); | ||
1186 | if (neigh) { | ||
1187 | if (neigh->nud_state & ~NUD_NOARP) | ||
1188 | err = neigh_update(neigh, NULL, NUD_FAILED, | ||
1189 | NEIGH_UPDATE_F_OVERRIDE| | ||
1190 | NEIGH_UPDATE_F_ADMIN); | ||
1191 | neigh_release(neigh); | ||
1192 | } | ||
1193 | return err; | ||
1194 | } | 1201 | } |
1195 | 1202 | ||
1196 | /* | 1203 | /* |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 25e318153f14..97e5fb765265 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
73 | !sk2->sk_bound_dev_if || | 73 | !sk2->sk_bound_dev_if || |
74 | sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { | 74 | sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { |
75 | if (!reuse || !sk2->sk_reuse || | 75 | if (!reuse || !sk2->sk_reuse || |
76 | sk2->sk_state == TCP_LISTEN) { | 76 | ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) { |
77 | const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); | 77 | const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); |
78 | if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || | 78 | if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || |
79 | sk2_rcv_saddr == sk_rcv_saddr(sk)) | 79 | sk2_rcv_saddr == sk_rcv_saddr(sk)) |
@@ -122,7 +122,8 @@ again:
122 | (tb->num_owners < smallest_size || smallest_size == -1)) { | 122 | (tb->num_owners < smallest_size || smallest_size == -1)) { |
123 | smallest_size = tb->num_owners; | 123 | smallest_size = tb->num_owners; |
124 | smallest_rover = rover; | 124 | smallest_rover = rover; |
125 | if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) { | 125 | if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 && |
126 | !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { | ||
126 | spin_unlock(&head->lock); | 127 | spin_unlock(&head->lock); |
127 | snum = smallest_rover; | 128 | snum = smallest_rover; |
128 | goto have_snum; | 129 | goto have_snum; |
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 3fac340a28d5..e855fffaed95 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -710,42 +710,25 @@ static void get_counters(const struct xt_table_info *t,
710 | struct arpt_entry *iter; | 710 | struct arpt_entry *iter; |
711 | unsigned int cpu; | 711 | unsigned int cpu; |
712 | unsigned int i; | 712 | unsigned int i; |
713 | unsigned int curcpu = get_cpu(); | ||
714 | |||
715 | /* Instead of clearing (by a previous call to memset()) | ||
716 | * the counters and using adds, we set the counters | ||
717 | * with data used by 'current' CPU | ||
718 | * | ||
719 | * Bottom half has to be disabled to prevent deadlock | ||
720 | * if new softirq were to run and call ipt_do_table | ||
721 | */ | ||
722 | local_bh_disable(); | ||
723 | i = 0; | ||
724 | xt_entry_foreach(iter, t->entries[curcpu], t->size) { | ||
725 | SET_COUNTER(counters[i], iter->counters.bcnt, | ||
726 | iter->counters.pcnt); | ||
727 | ++i; | ||
728 | } | ||
729 | local_bh_enable(); | ||
730 | /* Processing counters from other cpus, we can let bottom half enabled, | ||
731 | * (preemption is disabled) | ||
732 | */ | ||
733 | 713 | ||
734 | for_each_possible_cpu(cpu) { | 714 | for_each_possible_cpu(cpu) { |
735 | if (cpu == curcpu) | 715 | seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; |
736 | continue; | 716 | |
737 | i = 0; | 717 | i = 0; |
738 | local_bh_disable(); | ||
739 | xt_info_wrlock(cpu); | ||
740 | xt_entry_foreach(iter, t->entries[cpu], t->size) { | 718 | xt_entry_foreach(iter, t->entries[cpu], t->size) { |
741 | ADD_COUNTER(counters[i], iter->counters.bcnt, | 719 | u64 bcnt, pcnt; |
742 | iter->counters.pcnt); | 720 | unsigned int start; |
721 | |||
722 | do { | ||
723 | start = read_seqbegin(lock); | ||
724 | bcnt = iter->counters.bcnt; | ||
725 | pcnt = iter->counters.pcnt; | ||
726 | } while (read_seqretry(lock, start)); | ||
727 | |||
728 | ADD_COUNTER(counters[i], bcnt, pcnt); | ||
743 | ++i; | 729 | ++i; |
744 | } | 730 | } |
745 | xt_info_wrunlock(cpu); | ||
746 | local_bh_enable(); | ||
747 | } | 731 | } |
748 | put_cpu(); | ||
749 | } | 732 | } |
750 | 733 | ||
751 | static struct xt_counters *alloc_counters(const struct xt_table *table) | 734 | static struct xt_counters *alloc_counters(const struct xt_table *table) |
@@ -759,7 +742,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
759 | * about). | 742 | * about). |
760 | */ | 743 | */ |
761 | countersize = sizeof(struct xt_counters) * private->number; | 744 | countersize = sizeof(struct xt_counters) * private->number; |
762 | counters = vmalloc(countersize); | 745 | counters = vzalloc(countersize); |
763 | 746 | ||
764 | if (counters == NULL) | 747 | if (counters == NULL) |
765 | return ERR_PTR(-ENOMEM); | 748 | return ERR_PTR(-ENOMEM); |
@@ -1007,7 +990,7 @@ static int __do_replace(struct net *net, const char *name,
1007 | struct arpt_entry *iter; | 990 | struct arpt_entry *iter; |
1008 | 991 | ||
1009 | ret = 0; | 992 | ret = 0; |
1010 | counters = vmalloc(num_counters * sizeof(struct xt_counters)); | 993 | counters = vzalloc(num_counters * sizeof(struct xt_counters)); |
1011 | if (!counters) { | 994 | if (!counters) { |
1012 | ret = -ENOMEM; | 995 | ret = -ENOMEM; |
1013 | goto out; | 996 | goto out; |
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index a846d633b3b6..652efea013dc 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -884,42 +884,25 @@ get_counters(const struct xt_table_info *t,
884 | struct ipt_entry *iter; | 884 | struct ipt_entry *iter; |
885 | unsigned int cpu; | 885 | unsigned int cpu; |
886 | unsigned int i; | 886 | unsigned int i; |
887 | unsigned int curcpu = get_cpu(); | ||
888 | |||
889 | /* Instead of clearing (by a previous call to memset()) | ||
890 | * the counters and using adds, we set the counters | ||
891 | * with data used by 'current' CPU. | ||
892 | * | ||
893 | * Bottom half has to be disabled to prevent deadlock | ||
894 | * if new softirq were to run and call ipt_do_table | ||
895 | */ | ||
896 | local_bh_disable(); | ||
897 | i = 0; | ||
898 | xt_entry_foreach(iter, t->entries[curcpu], t->size) { | ||
899 | SET_COUNTER(counters[i], iter->counters.bcnt, | ||
900 | iter->counters.pcnt); | ||
901 | ++i; | ||
902 | } | ||
903 | local_bh_enable(); | ||
904 | /* Processing counters from other cpus, we can let bottom half enabled, | ||
905 | * (preemption is disabled) | ||
906 | */ | ||
907 | 887 | ||
908 | for_each_possible_cpu(cpu) { | 888 | for_each_possible_cpu(cpu) { |
909 | if (cpu == curcpu) | 889 | seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; |
910 | continue; | 890 | |
911 | i = 0; | 891 | i = 0; |
912 | local_bh_disable(); | ||
913 | xt_info_wrlock(cpu); | ||
914 | xt_entry_foreach(iter, t->entries[cpu], t->size) { | 892 | xt_entry_foreach(iter, t->entries[cpu], t->size) { |
915 | ADD_COUNTER(counters[i], iter->counters.bcnt, | 893 | u64 bcnt, pcnt; |
916 | iter->counters.pcnt); | 894 | unsigned int start; |
895 | |||
896 | do { | ||
897 | start = read_seqbegin(lock); | ||
898 | bcnt = iter->counters.bcnt; | ||
899 | pcnt = iter->counters.pcnt; | ||
900 | } while (read_seqretry(lock, start)); | ||
901 | |||
902 | ADD_COUNTER(counters[i], bcnt, pcnt); | ||
917 | ++i; /* macro does multi eval of i */ | 903 | ++i; /* macro does multi eval of i */ |
918 | } | 904 | } |
919 | xt_info_wrunlock(cpu); | ||
920 | local_bh_enable(); | ||
921 | } | 905 | } |
922 | put_cpu(); | ||
923 | } | 906 | } |
924 | 907 | ||
925 | static struct xt_counters *alloc_counters(const struct xt_table *table) | 908 | static struct xt_counters *alloc_counters(const struct xt_table *table) |
@@ -932,7 +915,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
932 | (other than comefrom, which userspace doesn't care | 915 | (other than comefrom, which userspace doesn't care |
933 | about). */ | 916 | about). */ |
934 | countersize = sizeof(struct xt_counters) * private->number; | 917 | countersize = sizeof(struct xt_counters) * private->number; |
935 | counters = vmalloc(countersize); | 918 | counters = vzalloc(countersize); |
936 | 919 | ||
937 | if (counters == NULL) | 920 | if (counters == NULL) |
938 | return ERR_PTR(-ENOMEM); | 921 | return ERR_PTR(-ENOMEM); |
@@ -1203,7 +1186,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1203 | struct ipt_entry *iter; | 1186 | struct ipt_entry *iter; |
1204 | 1187 | ||
1205 | ret = 0; | 1188 | ret = 0; |
1206 | counters = vmalloc(num_counters * sizeof(struct xt_counters)); | 1189 | counters = vzalloc(num_counters * sizeof(struct xt_counters)); |
1207 | if (!counters) { | 1190 | if (!counters) { |
1208 | ret = -ENOMEM; | 1191 | ret = -ENOMEM; |
1209 | goto out; | 1192 | goto out; |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dc7c096ddfef..406f320336e6 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1350,7 +1350,7 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
1350 | return 0; | 1350 | return 0; |
1351 | } | 1351 | } |
1352 | 1352 | ||
1353 | /* Intialize TSO state of a skb. | 1353 | /* Initialize TSO state of a skb. |
1354 | * This must be invoked the first time we consider transmitting | 1354 | * This must be invoked the first time we consider transmitting |
1355 | * SKB onto the wire. | 1355 | * SKB onto the wire. |
1356 | */ | 1356 | */ |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 059a3de647db..978e80e2c4a8 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -300,7 +300,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
300 | goto out; | 300 | goto out; |
301 | } | 301 | } |
302 | 302 | ||
303 | /* Reproduce AF_INET checks to make the bindings consitant */ | 303 | /* Reproduce AF_INET checks to make the bindings consistent */ |
304 | v4addr = addr->sin6_addr.s6_addr32[3]; | 304 | v4addr = addr->sin6_addr.s6_addr32[3]; |
305 | chk_addr_ret = inet_addr_type(net, v4addr); | 305 | chk_addr_ret = inet_addr_type(net, v4addr); |
306 | if (!sysctl_ip_nonlocal_bind && | 306 | if (!sysctl_ip_nonlocal_bind && |
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index ee82d4ef26ce..1aba54ae53c4 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -538,14 +538,16 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
538 | if (!pskb_may_pull(skb, ah_hlen)) | 538 | if (!pskb_may_pull(skb, ah_hlen)) |
539 | goto out; | 539 | goto out; |
540 | 540 | ||
541 | ip6h = ipv6_hdr(skb); | ||
542 | |||
543 | skb_push(skb, hdr_len); | ||
544 | 541 | ||
545 | if ((err = skb_cow_data(skb, 0, &trailer)) < 0) | 542 | if ((err = skb_cow_data(skb, 0, &trailer)) < 0) |
546 | goto out; | 543 | goto out; |
547 | nfrags = err; | 544 | nfrags = err; |
548 | 545 | ||
546 | ah = (struct ip_auth_hdr *)skb->data; | ||
547 | ip6h = ipv6_hdr(skb); | ||
548 | |||
549 | skb_push(skb, hdr_len); | ||
550 | |||
549 | work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len); | 551 | work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len); |
550 | if (!work_iph) | 552 | if (!work_iph) |
551 | goto out; | 553 | goto out; |
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index e46305d1815a..d144e629d2b4 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -44,7 +44,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
44 | !sk2->sk_bound_dev_if || | 44 | !sk2->sk_bound_dev_if || |
45 | sk->sk_bound_dev_if == sk2->sk_bound_dev_if) && | 45 | sk->sk_bound_dev_if == sk2->sk_bound_dev_if) && |
46 | (!sk->sk_reuse || !sk2->sk_reuse || | 46 | (!sk->sk_reuse || !sk2->sk_reuse || |
47 | sk2->sk_state == TCP_LISTEN) && | 47 | ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) && |
48 | ipv6_rcv_saddr_equal(sk, sk2)) | 48 | ipv6_rcv_saddr_equal(sk, sk2)) |
49 | break; | 49 | break; |
50 | } | 50 | } |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 94b5bf132b2e..5f8d242be3f3 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -401,6 +401,9 @@ int ip6_forward(struct sk_buff *skb)
401 | goto drop; | 401 | goto drop; |
402 | } | 402 | } |
403 | 403 | ||
404 | if (skb->pkt_type != PACKET_HOST) | ||
405 | goto drop; | ||
406 | |||
404 | skb_forward_csum(skb); | 407 | skb_forward_csum(skb); |
405 | 408 | ||
406 | /* | 409 | /* |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 455582384ece..7d227c644f72 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -897,42 +897,25 @@ get_counters(const struct xt_table_info *t,
897 | struct ip6t_entry *iter; | 897 | struct ip6t_entry *iter; |
898 | unsigned int cpu; | 898 | unsigned int cpu; |
899 | unsigned int i; | 899 | unsigned int i; |
900 | unsigned int curcpu = get_cpu(); | ||
901 | |||
902 | /* Instead of clearing (by a previous call to memset()) | ||
903 | * the counters and using adds, we set the counters | ||
904 | * with data used by 'current' CPU | ||
905 | * | ||
906 | * Bottom half has to be disabled to prevent deadlock | ||
907 | * if new softirq were to run and call ipt_do_table | ||
908 | */ | ||
909 | local_bh_disable(); | ||
910 | i = 0; | ||
911 | xt_entry_foreach(iter, t->entries[curcpu], t->size) { | ||
912 | SET_COUNTER(counters[i], iter->counters.bcnt, | ||
913 | iter->counters.pcnt); | ||
914 | ++i; | ||
915 | } | ||
916 | local_bh_enable(); | ||
917 | /* Processing counters from other cpus, we can let bottom half enabled, | ||
918 | * (preemption is disabled) | ||
919 | */ | ||
920 | 900 | ||
921 | for_each_possible_cpu(cpu) { | 901 | for_each_possible_cpu(cpu) { |
922 | if (cpu == curcpu) | 902 | seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; |
923 | continue; | 903 | |
924 | i = 0; | 904 | i = 0; |
925 | local_bh_disable(); | ||
926 | xt_info_wrlock(cpu); | ||
927 | xt_entry_foreach(iter, t->entries[cpu], t->size) { | 905 | xt_entry_foreach(iter, t->entries[cpu], t->size) { |
928 | ADD_COUNTER(counters[i], iter->counters.bcnt, | 906 | u64 bcnt, pcnt; |
929 | iter->counters.pcnt); | 907 | unsigned int start; |
908 | |||
909 | do { | ||
910 | start = read_seqbegin(lock); | ||
911 | bcnt = iter->counters.bcnt; | ||
912 | pcnt = iter->counters.pcnt; | ||
913 | } while (read_seqretry(lock, start)); | ||
914 | |||
915 | ADD_COUNTER(counters[i], bcnt, pcnt); | ||
930 | ++i; | 916 | ++i; |
931 | } | 917 | } |
932 | xt_info_wrunlock(cpu); | ||
933 | local_bh_enable(); | ||
934 | } | 918 | } |
935 | put_cpu(); | ||
936 | } | 919 | } |
937 | 920 | ||
938 | static struct xt_counters *alloc_counters(const struct xt_table *table) | 921 | static struct xt_counters *alloc_counters(const struct xt_table *table) |
@@ -945,7 +928,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
945 | (other than comefrom, which userspace doesn't care | 928 | (other than comefrom, which userspace doesn't care |
946 | about). */ | 929 | about). */ |
947 | countersize = sizeof(struct xt_counters) * private->number; | 930 | countersize = sizeof(struct xt_counters) * private->number; |
948 | counters = vmalloc(countersize); | 931 | counters = vzalloc(countersize); |
949 | 932 | ||
950 | if (counters == NULL) | 933 | if (counters == NULL) |
951 | return ERR_PTR(-ENOMEM); | 934 | return ERR_PTR(-ENOMEM); |
@@ -1216,7 +1199,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1216 | struct ip6t_entry *iter; | 1199 | struct ip6t_entry *iter; |
1217 | 1200 | ||
1218 | ret = 0; | 1201 | ret = 0; |
1219 | counters = vmalloc(num_counters * sizeof(struct xt_counters)); | 1202 | counters = vzalloc(num_counters * sizeof(struct xt_counters)); |
1220 | if (!counters) { | 1203 | if (!counters) { |
1221 | ret = -ENOMEM; | 1204 | ret = -ENOMEM; |
1222 | goto out; | 1205 | goto out; |
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index 99abfb53bab9..97c5b21b9674 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -19,13 +19,15 @@
19 | 19 | ||
20 | #include <linux/netfilter_ipv6.h> | 20 | #include <linux/netfilter_ipv6.h> |
21 | #include <linux/netfilter_bridge.h> | 21 | #include <linux/netfilter_bridge.h> |
22 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | ||
22 | #include <net/netfilter/nf_conntrack.h> | 23 | #include <net/netfilter/nf_conntrack.h> |
23 | #include <net/netfilter/nf_conntrack_helper.h> | 24 | #include <net/netfilter/nf_conntrack_helper.h> |
24 | #include <net/netfilter/nf_conntrack_l4proto.h> | 25 | #include <net/netfilter/nf_conntrack_l4proto.h> |
25 | #include <net/netfilter/nf_conntrack_l3proto.h> | 26 | #include <net/netfilter/nf_conntrack_l3proto.h> |
26 | #include <net/netfilter/nf_conntrack_core.h> | 27 | #include <net/netfilter/nf_conntrack_core.h> |
27 | #include <net/netfilter/nf_conntrack_zones.h> | ||
28 | #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> | 28 | #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> |
29 | #endif | ||
30 | #include <net/netfilter/nf_conntrack_zones.h> | ||
29 | #include <net/netfilter/ipv6/nf_defrag_ipv6.h> | 31 | #include <net/netfilter/ipv6/nf_defrag_ipv6.h> |
30 | 32 | ||
31 | static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, | 33 | static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, |
@@ -33,8 +35,10 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
33 | { | 35 | { |
34 | u16 zone = NF_CT_DEFAULT_ZONE; | 36 | u16 zone = NF_CT_DEFAULT_ZONE; |
35 | 37 | ||
38 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | ||
36 | if (skb->nfct) | 39 | if (skb->nfct) |
37 | zone = nf_ct_zone((struct nf_conn *)skb->nfct); | 40 | zone = nf_ct_zone((struct nf_conn *)skb->nfct); |
41 | #endif | ||
38 | 42 | ||
39 | #ifdef CONFIG_BRIDGE_NETFILTER | 43 | #ifdef CONFIG_BRIDGE_NETFILTER |
40 | if (skb->nf_bridge && | 44 | if (skb->nf_bridge && |
@@ -56,9 +60,11 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
56 | { | 60 | { |
57 | struct sk_buff *reasm; | 61 | struct sk_buff *reasm; |
58 | 62 | ||
63 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | ||
59 | /* Previously seen (loopback)? */ | 64 | /* Previously seen (loopback)? */ |
60 | if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct)) | 65 | if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct)) |
61 | return NF_ACCEPT; | 66 | return NF_ACCEPT; |
67 | #endif | ||
62 | 68 | ||
63 | reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb)); | 69 | reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb)); |
64 | /* queued */ | 70 | /* queued */ |
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 227ca82eef72..0c9d0c07eae6 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -76,7 +76,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
76 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 76 | #endif /* CONFIG_MAC80211_HT_DEBUG */ |
77 | 77 | ||
78 | if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP, | 78 | if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP, |
79 | &sta->sta, tid, NULL)) | 79 | &sta->sta, tid, NULL, 0)) |
80 | printk(KERN_DEBUG "HW problem - can not stop rx " | 80 | printk(KERN_DEBUG "HW problem - can not stop rx " |
81 | "aggregation for tid %d\n", tid); | 81 | "aggregation for tid %d\n", tid); |
82 | 82 | ||
@@ -232,6 +232,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
232 | if (buf_size == 0) | 232 | if (buf_size == 0) |
233 | buf_size = IEEE80211_MAX_AMPDU_BUF; | 233 | buf_size = IEEE80211_MAX_AMPDU_BUF; |
234 | 234 | ||
235 | /* make sure the size doesn't exceed the maximum supported by the hw */ | ||
236 | if (buf_size > local->hw.max_rx_aggregation_subframes) | ||
237 | buf_size = local->hw.max_rx_aggregation_subframes; | ||
235 | 238 | ||
236 | /* examine state machine */ | 239 | /* examine state machine */ |
237 | mutex_lock(&sta->ampdu_mlme.mtx); | 240 | mutex_lock(&sta->ampdu_mlme.mtx); |
@@ -287,7 +290,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
287 | } | 290 | } |
288 | 291 | ||
289 | ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START, | 292 | ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START, |
290 | &sta->sta, tid, &start_seq_num); | 293 | &sta->sta, tid, &start_seq_num, 0); |
291 | #ifdef CONFIG_MAC80211_HT_DEBUG | 294 | #ifdef CONFIG_MAC80211_HT_DEBUG |
292 | printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret); | 295 | printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret); |
293 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 296 | #endif /* CONFIG_MAC80211_HT_DEBUG */ |
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 9cc472c6a6a5..63d852cb4ca2 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -190,7 +190,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
190 | 190 | ||
191 | ret = drv_ampdu_action(local, sta->sdata, | 191 | ret = drv_ampdu_action(local, sta->sdata, |
192 | IEEE80211_AMPDU_TX_STOP, | 192 | IEEE80211_AMPDU_TX_STOP, |
193 | &sta->sta, tid, NULL); | 193 | &sta->sta, tid, NULL, 0); |
194 | 194 | ||
195 | /* HW shall not deny going back to legacy */ | 195 | /* HW shall not deny going back to legacy */ |
196 | if (WARN_ON(ret)) { | 196 | if (WARN_ON(ret)) { |
@@ -311,7 +311,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
311 | start_seq_num = sta->tid_seq[tid] >> 4; | 311 | start_seq_num = sta->tid_seq[tid] >> 4; |
312 | 312 | ||
313 | ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START, | 313 | ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START, |
314 | &sta->sta, tid, &start_seq_num); | 314 | &sta->sta, tid, &start_seq_num, 0); |
315 | if (ret) { | 315 | if (ret) { |
316 | #ifdef CONFIG_MAC80211_HT_DEBUG | 316 | #ifdef CONFIG_MAC80211_HT_DEBUG |
317 | printk(KERN_DEBUG "BA request denied - HW unavailable for" | 317 | printk(KERN_DEBUG "BA request denied - HW unavailable for" |
@@ -342,7 +342,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
342 | /* send AddBA request */ | 342 | /* send AddBA request */ |
343 | ieee80211_send_addba_request(sdata, sta->sta.addr, tid, | 343 | ieee80211_send_addba_request(sdata, sta->sta.addr, tid, |
344 | tid_tx->dialog_token, start_seq_num, | 344 | tid_tx->dialog_token, start_seq_num, |
345 | 0x40, tid_tx->timeout); | 345 | local->hw.max_tx_aggregation_subframes, |
346 | tid_tx->timeout); | ||
346 | } | 347 | } |
347 | 348 | ||
348 | int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, | 349 | int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, |
@@ -487,7 +488,8 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
487 | 488 | ||
488 | drv_ampdu_action(local, sta->sdata, | 489 | drv_ampdu_action(local, sta->sdata, |
489 | IEEE80211_AMPDU_TX_OPERATIONAL, | 490 | IEEE80211_AMPDU_TX_OPERATIONAL, |
490 | &sta->sta, tid, NULL); | 491 | &sta->sta, tid, NULL, |
492 | sta->ampdu_mlme.tid_tx[tid]->buf_size); | ||
491 | 493 | ||
492 | /* | 494 | /* |
493 | * synchronize with TX path, while splicing the TX path | 495 | * synchronize with TX path, while splicing the TX path |
@@ -742,9 +744,11 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
742 | { | 744 | { |
743 | struct tid_ampdu_tx *tid_tx; | 745 | struct tid_ampdu_tx *tid_tx; |
744 | u16 capab, tid; | 746 | u16 capab, tid; |
747 | u8 buf_size; | ||
745 | 748 | ||
746 | capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); | 749 | capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); |
747 | tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; | 750 | tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; |
751 | buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; | ||
748 | 752 | ||
749 | mutex_lock(&sta->ampdu_mlme.mtx); | 753 | mutex_lock(&sta->ampdu_mlme.mtx); |
750 | 754 | ||
@@ -767,12 +771,23 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
767 | 771 | ||
768 | if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) | 772 | if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) |
769 | == WLAN_STATUS_SUCCESS) { | 773 | == WLAN_STATUS_SUCCESS) { |
774 | /* | ||
775 | * IEEE 802.11-2007 7.3.1.14: | ||
776 | * In an ADDBA Response frame, when the Status Code field | ||
777 | * is set to 0, the Buffer Size subfield is set to a value | ||
778 | * of at least 1. | ||
779 | */ | ||
780 | if (!buf_size) | ||
781 | goto out; | ||
782 | |||
770 | if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED, | 783 | if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED, |
771 | &tid_tx->state)) { | 784 | &tid_tx->state)) { |
772 | /* ignore duplicate response */ | 785 | /* ignore duplicate response */ |
773 | goto out; | 786 | goto out; |
774 | } | 787 | } |
775 | 788 | ||
789 | tid_tx->buf_size = buf_size; | ||
790 | |||
776 | if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)) | 791 | if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)) |
777 | ieee80211_agg_tx_operational(local, sta, tid); | 792 | ieee80211_agg_tx_operational(local, sta, tid); |
778 | 793 | ||
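The agg-tx.c hunks above pull the Buffer Size subfield out of the ADDBA Response capability word, reject a "successful" response that advertises a zero-sized reorder buffer, and hand the negotiated size on to the driver. A minimal stand-alone sketch of that bit layout, assuming the usual Block Ack Parameter Set masks (TID in bits 2-5, Buffer Size in bits 6-15, matching IEEE80211_ADDBA_PARAM_TID_MASK 0x003C and IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0):

#include <stdint.h>
#include <stdio.h>

/* Parse the ADDBA Response capability field: bits 2-5 = TID,
 * bits 6-15 = Buffer Size. */
static int parse_addba_resp(uint16_t capab, uint16_t status)
{
	unsigned tid = (capab & 0x003C) >> 2;
	unsigned buf_size = (capab & 0xFFC0) >> 6;

	/* IEEE 802.11-2007 7.3.1.14: with Status Code 0 the Buffer Size
	 * subfield must be at least 1, so a zero is rejected. */
	if (status == 0 && buf_size == 0)
		return -1;

	printf("tid=%u buf_size=%u\n", tid, buf_size);
	return 0;
}

int main(void)
{
	parse_addba_resp((5 << 2) | (64 << 6), 0);  /* TID 5, 64 subframes  */
	parse_addba_resp(5 << 2, 0);                /* invalid: buf_size 0  */
	return 0;
}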
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 4bc8a9250cfd..845c76d58d25 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -1215,6 +1215,9 @@ static int ieee80211_set_channel(struct wiphy *wiphy, | |||
1215 | { | 1215 | { |
1216 | struct ieee80211_local *local = wiphy_priv(wiphy); | 1216 | struct ieee80211_local *local = wiphy_priv(wiphy); |
1217 | struct ieee80211_sub_if_data *sdata = NULL; | 1217 | struct ieee80211_sub_if_data *sdata = NULL; |
1218 | struct ieee80211_channel *old_oper; | ||
1219 | enum nl80211_channel_type old_oper_type; | ||
1220 | enum nl80211_channel_type old_vif_oper_type = NL80211_CHAN_NO_HT; | ||

1218 | 1221 | ||
1219 | if (netdev) | 1222 | if (netdev) |
1220 | sdata = IEEE80211_DEV_TO_SUB_IF(netdev); | 1223 | sdata = IEEE80211_DEV_TO_SUB_IF(netdev); |
@@ -1232,13 +1235,23 @@ static int ieee80211_set_channel(struct wiphy *wiphy, | |||
1232 | break; | 1235 | break; |
1233 | } | 1236 | } |
1234 | 1237 | ||
1235 | local->oper_channel = chan; | 1238 | if (sdata) |
1239 | old_vif_oper_type = sdata->vif.bss_conf.channel_type; | ||
1240 | old_oper_type = local->_oper_channel_type; | ||
1236 | 1241 | ||
1237 | if (!ieee80211_set_channel_type(local, sdata, channel_type)) | 1242 | if (!ieee80211_set_channel_type(local, sdata, channel_type)) |
1238 | return -EBUSY; | 1243 | return -EBUSY; |
1239 | 1244 | ||
1240 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); | 1245 | old_oper = local->oper_channel; |
1241 | if (sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR) | 1246 | local->oper_channel = chan; |
1247 | |||
1248 | /* Update driver if changes were actually made. */ | ||
1249 | if ((old_oper != local->oper_channel) || | ||
1250 | (old_oper_type != local->_oper_channel_type)) | ||
1251 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); | ||
1252 | |||
1253 | if ((sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR) && | ||
1254 | old_vif_oper_type != sdata->vif.bss_conf.channel_type) | ||
1242 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT); | 1255 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT); |
1243 | 1256 | ||
1244 | return 0; | 1257 | return 0; |
@@ -1274,8 +1287,11 @@ static int ieee80211_scan(struct wiphy *wiphy, | |||
1274 | case NL80211_IFTYPE_P2P_GO: | 1287 | case NL80211_IFTYPE_P2P_GO: |
1275 | if (sdata->local->ops->hw_scan) | 1288 | if (sdata->local->ops->hw_scan) |
1276 | break; | 1289 | break; |
1277 | /* FIXME: implement NoA while scanning in software */ | 1290 | /* |
1278 | return -EOPNOTSUPP; | 1291 | * FIXME: implement NoA while scanning in software, |
1292 | * for now fall through to allow scanning only when | ||
1293 | * beaconing hasn't been configured yet | ||
1294 | */ | ||
1279 | case NL80211_IFTYPE_AP: | 1295 | case NL80211_IFTYPE_AP: |
1280 | if (sdata->u.ap.beacon) | 1296 | if (sdata->u.ap.beacon) |
1281 | return -EOPNOTSUPP; | 1297 | return -EOPNOTSUPP; |
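The ieee80211_set_channel() rework above records the previous operating channel and channel types, applies the request, and only reconfigures the hardware when the operating channel or channel type really changed, raising BSS_CHANGED_HT only when the vif's channel type moved. A compact sketch of that "reconfigure only on real change" pattern, using made-up types rather than the mac80211 structures:

#include <stdbool.h>
#include <stdio.h>

struct chan_cfg { int channel; int ht_mode; };

/* Remember the old configuration, apply the new one, and touch the
 * hardware only when something actually changed. */
static bool apply_channel(struct chan_cfg *cur, struct chan_cfg req)
{
	struct chan_cfg old = *cur;

	*cur = req;
	if (old.channel != cur->channel || old.ht_mode != cur->ht_mode) {
		printf("hw reconfig: ch %d ht %d\n", cur->channel, cur->ht_mode);
		return true;
	}
	return false;	/* request was a no-op */
}

int main(void)
{
	struct chan_cfg cur = { 6, 0 };

	apply_channel(&cur, (struct chan_cfg){ 6, 0 });   /* skipped */
	apply_channel(&cur, (struct chan_cfg){ 36, 1 });  /* applied */
	return 0;
}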
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 2dabdf7680d0..dacace6b1393 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c | |||
@@ -36,7 +36,7 @@ static ssize_t ieee80211_if_read( | |||
36 | ret = (*format)(sdata, buf, sizeof(buf)); | 36 | ret = (*format)(sdata, buf, sizeof(buf)); |
37 | read_unlock(&dev_base_lock); | 37 | read_unlock(&dev_base_lock); |
38 | 38 | ||
39 | if (ret != -EINVAL) | 39 | if (ret >= 0) |
40 | ret = simple_read_from_buffer(userbuf, count, ppos, buf, ret); | 40 | ret = simple_read_from_buffer(userbuf, count, ppos, buf, ret); |
41 | 41 | ||
42 | return ret; | 42 | return ret; |
@@ -81,6 +81,8 @@ static ssize_t ieee80211_if_fmt_##name( \ | |||
81 | IEEE80211_IF_FMT(name, field, "%d\n") | 81 | IEEE80211_IF_FMT(name, field, "%d\n") |
82 | #define IEEE80211_IF_FMT_HEX(name, field) \ | 82 | #define IEEE80211_IF_FMT_HEX(name, field) \ |
83 | IEEE80211_IF_FMT(name, field, "%#x\n") | 83 | IEEE80211_IF_FMT(name, field, "%#x\n") |
84 | #define IEEE80211_IF_FMT_LHEX(name, field) \ | ||
85 | IEEE80211_IF_FMT(name, field, "%#lx\n") | ||
84 | #define IEEE80211_IF_FMT_SIZE(name, field) \ | 86 | #define IEEE80211_IF_FMT_SIZE(name, field) \ |
85 | IEEE80211_IF_FMT(name, field, "%zd\n") | 87 | IEEE80211_IF_FMT(name, field, "%zd\n") |
86 | 88 | ||
@@ -145,6 +147,9 @@ IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ], | |||
145 | HEX); | 147 | HEX); |
146 | IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ], | 148 | IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ], |
147 | HEX); | 149 | HEX); |
150 | IEEE80211_IF_FILE(flags, flags, HEX); | ||
151 | IEEE80211_IF_FILE(state, state, LHEX); | ||
152 | IEEE80211_IF_FILE(channel_type, vif.bss_conf.channel_type, DEC); | ||
148 | 153 | ||
149 | /* STA attributes */ | 154 | /* STA attributes */ |
150 | IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC); | 155 | IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC); |
@@ -216,6 +221,104 @@ static ssize_t ieee80211_if_parse_smps(struct ieee80211_sub_if_data *sdata, | |||
216 | 221 | ||
217 | __IEEE80211_IF_FILE_W(smps); | 222 | __IEEE80211_IF_FILE_W(smps); |
218 | 223 | ||
224 | static ssize_t ieee80211_if_fmt_tkip_mic_test( | ||
225 | const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) | ||
226 | { | ||
227 | return -EOPNOTSUPP; | ||
228 | } | ||
229 | |||
230 | static int hwaddr_aton(const char *txt, u8 *addr) | ||
231 | { | ||
232 | int i; | ||
233 | |||
234 | for (i = 0; i < ETH_ALEN; i++) { | ||
235 | int a, b; | ||
236 | |||
237 | a = hex_to_bin(*txt++); | ||
238 | if (a < 0) | ||
239 | return -1; | ||
240 | b = hex_to_bin(*txt++); | ||
241 | if (b < 0) | ||
242 | return -1; | ||
243 | *addr++ = (a << 4) | b; | ||
244 | if (i < 5 && *txt++ != ':') | ||
245 | return -1; | ||
246 | } | ||
247 | |||
248 | return 0; | ||
249 | } | ||
250 | |||
251 | static ssize_t ieee80211_if_parse_tkip_mic_test( | ||
252 | struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) | ||
253 | { | ||
254 | struct ieee80211_local *local = sdata->local; | ||
255 | u8 addr[ETH_ALEN]; | ||
256 | struct sk_buff *skb; | ||
257 | struct ieee80211_hdr *hdr; | ||
258 | __le16 fc; | ||
259 | |||
260 | /* | ||
261 | * Assume colon-delimited MAC address with possible white space | ||
262 | * following. | ||
263 | */ | ||
264 | if (buflen < 3 * ETH_ALEN - 1) | ||
265 | return -EINVAL; | ||
266 | if (hwaddr_aton(buf, addr) < 0) | ||
267 | return -EINVAL; | ||
268 | |||
269 | if (!ieee80211_sdata_running(sdata)) | ||
270 | return -ENOTCONN; | ||
271 | |||
272 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 100); | ||
273 | if (!skb) | ||
274 | return -ENOMEM; | ||
275 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
276 | |||
277 | hdr = (struct ieee80211_hdr *) skb_put(skb, 24); | ||
278 | memset(hdr, 0, 24); | ||
279 | fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); | ||
280 | |||
281 | switch (sdata->vif.type) { | ||
282 | case NL80211_IFTYPE_AP: | ||
283 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); | ||
284 | /* DA BSSID SA */ | ||
285 | memcpy(hdr->addr1, addr, ETH_ALEN); | ||
286 | memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); | ||
287 | memcpy(hdr->addr3, sdata->vif.addr, ETH_ALEN); | ||
288 | break; | ||
289 | case NL80211_IFTYPE_STATION: | ||
290 | fc |= cpu_to_le16(IEEE80211_FCTL_TODS); | ||
291 | /* BSSID SA DA */ | ||
292 | if (sdata->vif.bss_conf.bssid == NULL) { | ||
293 | dev_kfree_skb(skb); | ||
294 | return -ENOTCONN; | ||
295 | } | ||
296 | memcpy(hdr->addr1, sdata->vif.bss_conf.bssid, ETH_ALEN); | ||
297 | memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); | ||
298 | memcpy(hdr->addr3, addr, ETH_ALEN); | ||
299 | break; | ||
300 | default: | ||
301 | dev_kfree_skb(skb); | ||
302 | return -EOPNOTSUPP; | ||
303 | } | ||
304 | hdr->frame_control = fc; | ||
305 | |||
306 | /* | ||
307 | * Add some length to the test frame to make it look a bit more valid. | ||
308 | * The exact contents do not matter since the recipient is required | ||
309 | * to drop this because of the Michael MIC failure. | ||
310 | */ | ||
311 | memset(skb_put(skb, 50), 0, 50); | ||
312 | |||
313 | IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_TKIP_MIC_FAILURE; | ||
314 | |||
315 | ieee80211_tx_skb(sdata, skb); | ||
316 | |||
317 | return buflen; | ||
318 | } | ||
319 | |||
320 | __IEEE80211_IF_FILE_W(tkip_mic_test); | ||
321 | |||
219 | /* AP attributes */ | 322 | /* AP attributes */ |
220 | IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); | 323 | IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); |
221 | IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC); | 324 | IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC); |
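The tkip_mic_test writer added above first parses a colon-delimited MAC address with a small hwaddr_aton() helper built on the kernel's hex_to_bin(). A stand-alone user-space equivalent of that parsing loop, for illustration only:

#include <stdio.h>

#define ETH_ALEN 6

/* Minimal replacement for the kernel's hex_to_bin(). */
static int hex_to_bin_ch(char ch)
{
	if (ch >= '0' && ch <= '9') return ch - '0';
	if (ch >= 'a' && ch <= 'f') return ch - 'a' + 10;
	if (ch >= 'A' && ch <= 'F') return ch - 'A' + 10;
	return -1;
}

/* Parse "aa:bb:cc:dd:ee:ff" into six bytes; -1 on malformed input. */
static int hwaddr_aton(const char *txt, unsigned char *addr)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		int a = hex_to_bin_ch(*txt++);
		int b;

		if (a < 0)
			return -1;
		b = hex_to_bin_ch(*txt++);
		if (b < 0)
			return -1;
		*addr++ = (a << 4) | b;
		if (i < ETH_ALEN - 1 && *txt++ != ':')
			return -1;
	}
	return 0;
}

int main(void)
{
	unsigned char mac[ETH_ALEN];

	if (!hwaddr_aton("02:00:00:aa:bb:cc", mac))
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}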
@@ -283,6 +386,9 @@ IEEE80211_IF_FILE(dot11MeshHWMPRootMode, | |||
283 | static void add_sta_files(struct ieee80211_sub_if_data *sdata) | 386 | static void add_sta_files(struct ieee80211_sub_if_data *sdata) |
284 | { | 387 | { |
285 | DEBUGFS_ADD(drop_unencrypted); | 388 | DEBUGFS_ADD(drop_unencrypted); |
389 | DEBUGFS_ADD(flags); | ||
390 | DEBUGFS_ADD(state); | ||
391 | DEBUGFS_ADD(channel_type); | ||
286 | DEBUGFS_ADD(rc_rateidx_mask_2ghz); | 392 | DEBUGFS_ADD(rc_rateidx_mask_2ghz); |
287 | DEBUGFS_ADD(rc_rateidx_mask_5ghz); | 393 | DEBUGFS_ADD(rc_rateidx_mask_5ghz); |
288 | 394 | ||
@@ -291,22 +397,30 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata) | |||
291 | DEBUGFS_ADD(last_beacon); | 397 | DEBUGFS_ADD(last_beacon); |
292 | DEBUGFS_ADD(ave_beacon); | 398 | DEBUGFS_ADD(ave_beacon); |
293 | DEBUGFS_ADD_MODE(smps, 0600); | 399 | DEBUGFS_ADD_MODE(smps, 0600); |
400 | DEBUGFS_ADD_MODE(tkip_mic_test, 0200); | ||
294 | } | 401 | } |
295 | 402 | ||
296 | static void add_ap_files(struct ieee80211_sub_if_data *sdata) | 403 | static void add_ap_files(struct ieee80211_sub_if_data *sdata) |
297 | { | 404 | { |
298 | DEBUGFS_ADD(drop_unencrypted); | 405 | DEBUGFS_ADD(drop_unencrypted); |
406 | DEBUGFS_ADD(flags); | ||
407 | DEBUGFS_ADD(state); | ||
408 | DEBUGFS_ADD(channel_type); | ||
299 | DEBUGFS_ADD(rc_rateidx_mask_2ghz); | 409 | DEBUGFS_ADD(rc_rateidx_mask_2ghz); |
300 | DEBUGFS_ADD(rc_rateidx_mask_5ghz); | 410 | DEBUGFS_ADD(rc_rateidx_mask_5ghz); |
301 | 411 | ||
302 | DEBUGFS_ADD(num_sta_ps); | 412 | DEBUGFS_ADD(num_sta_ps); |
303 | DEBUGFS_ADD(dtim_count); | 413 | DEBUGFS_ADD(dtim_count); |
304 | DEBUGFS_ADD(num_buffered_multicast); | 414 | DEBUGFS_ADD(num_buffered_multicast); |
415 | DEBUGFS_ADD_MODE(tkip_mic_test, 0200); | ||
305 | } | 416 | } |
306 | 417 | ||
307 | static void add_wds_files(struct ieee80211_sub_if_data *sdata) | 418 | static void add_wds_files(struct ieee80211_sub_if_data *sdata) |
308 | { | 419 | { |
309 | DEBUGFS_ADD(drop_unencrypted); | 420 | DEBUGFS_ADD(drop_unencrypted); |
421 | DEBUGFS_ADD(flags); | ||
422 | DEBUGFS_ADD(state); | ||
423 | DEBUGFS_ADD(channel_type); | ||
310 | DEBUGFS_ADD(rc_rateidx_mask_2ghz); | 424 | DEBUGFS_ADD(rc_rateidx_mask_2ghz); |
311 | DEBUGFS_ADD(rc_rateidx_mask_5ghz); | 425 | DEBUGFS_ADD(rc_rateidx_mask_5ghz); |
312 | 426 | ||
@@ -316,12 +430,18 @@ static void add_wds_files(struct ieee80211_sub_if_data *sdata) | |||
316 | static void add_vlan_files(struct ieee80211_sub_if_data *sdata) | 430 | static void add_vlan_files(struct ieee80211_sub_if_data *sdata) |
317 | { | 431 | { |
318 | DEBUGFS_ADD(drop_unencrypted); | 432 | DEBUGFS_ADD(drop_unencrypted); |
433 | DEBUGFS_ADD(flags); | ||
434 | DEBUGFS_ADD(state); | ||
435 | DEBUGFS_ADD(channel_type); | ||
319 | DEBUGFS_ADD(rc_rateidx_mask_2ghz); | 436 | DEBUGFS_ADD(rc_rateidx_mask_2ghz); |
320 | DEBUGFS_ADD(rc_rateidx_mask_5ghz); | 437 | DEBUGFS_ADD(rc_rateidx_mask_5ghz); |
321 | } | 438 | } |
322 | 439 | ||
323 | static void add_monitor_files(struct ieee80211_sub_if_data *sdata) | 440 | static void add_monitor_files(struct ieee80211_sub_if_data *sdata) |
324 | { | 441 | { |
442 | DEBUGFS_ADD(flags); | ||
443 | DEBUGFS_ADD(state); | ||
444 | DEBUGFS_ADD(channel_type); | ||
325 | } | 445 | } |
326 | 446 | ||
327 | #ifdef CONFIG_MAC80211_MESH | 447 | #ifdef CONFIG_MAC80211_MESH |
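For reference, the addr1/addr2/addr3 ordering the test-frame builder relies on follows from the ToDS/FromDS bits of an 802.11 data frame: the AP case uses From-DS (DA, BSSID, SA) and the station case uses To-DS (BSSID, SA, DA). A small plain-C illustration of just that mapping, not a kernel API:

#include <string.h>

struct wifi_hdr { unsigned char addr1[6], addr2[6], addr3[6]; };

/* From-DS (AP -> STA):  addr1 = DA,    addr2 = BSSID, addr3 = SA
 * To-DS   (STA -> AP):  addr1 = BSSID, addr2 = SA,    addr3 = DA  */
static void fill_addrs(struct wifi_hdr *hdr, int to_ds,
		       const unsigned char *da, const unsigned char *sa,
		       const unsigned char *bssid)
{
	if (to_ds) {
		memcpy(hdr->addr1, bssid, 6);
		memcpy(hdr->addr2, sa, 6);
		memcpy(hdr->addr3, da, 6);
	} else {
		memcpy(hdr->addr1, da, 6);
		memcpy(hdr->addr2, bssid, 6);
		memcpy(hdr->addr3, sa, 6);
	}
}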
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 98d589960a49..78af32d4bc58 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h | |||
@@ -382,17 +382,17 @@ static inline int drv_ampdu_action(struct ieee80211_local *local, | |||
382 | struct ieee80211_sub_if_data *sdata, | 382 | struct ieee80211_sub_if_data *sdata, |
383 | enum ieee80211_ampdu_mlme_action action, | 383 | enum ieee80211_ampdu_mlme_action action, |
384 | struct ieee80211_sta *sta, u16 tid, | 384 | struct ieee80211_sta *sta, u16 tid, |
385 | u16 *ssn) | 385 | u16 *ssn, u8 buf_size) |
386 | { | 386 | { |
387 | int ret = -EOPNOTSUPP; | 387 | int ret = -EOPNOTSUPP; |
388 | 388 | ||
389 | might_sleep(); | 389 | might_sleep(); |
390 | 390 | ||
391 | trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn); | 391 | trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, buf_size); |
392 | 392 | ||
393 | if (local->ops->ampdu_action) | 393 | if (local->ops->ampdu_action) |
394 | ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action, | 394 | ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action, |
395 | sta, tid, ssn); | 395 | sta, tid, ssn, buf_size); |
396 | 396 | ||
397 | trace_drv_return_int(local, ret); | 397 | trace_drv_return_int(local, ret); |
398 | 398 | ||
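drv_ampdu_action() now carries a buf_size argument through to the driver callback and the tracepoint. A hypothetical driver might use it to clamp its per-TID A-MPDU length to what the peer advertised; everything below (struct, field and function names) is invented for illustration:

#include <stdint.h>
#include <stdio.h>

struct fake_hw {
	uint8_t max_ampdu_subframes;   /* hardware limit           */
	uint8_t tid_limit[16];         /* negotiated limit per TID */
};

/* Called for the TX-operational case: honour the smaller of our own
 * limit and the peer's advertised reorder-buffer size. */
static void ampdu_tx_operational(struct fake_hw *hw, unsigned tid,
				 uint8_t buf_size)
{
	uint8_t limit = buf_size ? buf_size : 64;  /* be defensive about 0 */

	hw->tid_limit[tid] = limit < hw->max_ampdu_subframes ?
				limit : hw->max_ampdu_subframes;
}

int main(void)
{
	struct fake_hw hw = { .max_ampdu_subframes = 32 };

	ampdu_tx_operational(&hw, 5, 64);
	printf("tid 5 limited to %u subframes\n", hw.tid_limit[5]);
	return 0;
}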
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h index 49c84218b2f4..e5cce19a7d65 100644 --- a/net/mac80211/driver-trace.h +++ b/net/mac80211/driver-trace.h | |||
@@ -9,6 +9,11 @@ | |||
9 | #undef TRACE_EVENT | 9 | #undef TRACE_EVENT |
10 | #define TRACE_EVENT(name, proto, ...) \ | 10 | #define TRACE_EVENT(name, proto, ...) \ |
11 | static inline void trace_ ## name(proto) {} | 11 | static inline void trace_ ## name(proto) {} |
12 | #undef DECLARE_EVENT_CLASS | ||
13 | #define DECLARE_EVENT_CLASS(...) | ||
14 | #undef DEFINE_EVENT | ||
15 | #define DEFINE_EVENT(evt_class, name, proto, ...) \ | ||
16 | static inline void trace_ ## name(proto) {} | ||
12 | #endif | 17 | #endif |
13 | 18 | ||
14 | #undef TRACE_SYSTEM | 19 | #undef TRACE_SYSTEM |
@@ -38,7 +43,7 @@ static inline void trace_ ## name(proto) {} | |||
38 | * Tracing for driver callbacks. | 43 | * Tracing for driver callbacks. |
39 | */ | 44 | */ |
40 | 45 | ||
41 | TRACE_EVENT(drv_return_void, | 46 | DECLARE_EVENT_CLASS(local_only_evt, |
42 | TP_PROTO(struct ieee80211_local *local), | 47 | TP_PROTO(struct ieee80211_local *local), |
43 | TP_ARGS(local), | 48 | TP_ARGS(local), |
44 | TP_STRUCT__entry( | 49 | TP_STRUCT__entry( |
@@ -50,6 +55,11 @@ TRACE_EVENT(drv_return_void, | |||
50 | TP_printk(LOCAL_PR_FMT, LOCAL_PR_ARG) | 55 | TP_printk(LOCAL_PR_FMT, LOCAL_PR_ARG) |
51 | ); | 56 | ); |
52 | 57 | ||
58 | DEFINE_EVENT(local_only_evt, drv_return_void, | ||
59 | TP_PROTO(struct ieee80211_local *local), | ||
60 | TP_ARGS(local) | ||
61 | ); | ||
62 | |||
53 | TRACE_EVENT(drv_return_int, | 63 | TRACE_EVENT(drv_return_int, |
54 | TP_PROTO(struct ieee80211_local *local, int ret), | 64 | TP_PROTO(struct ieee80211_local *local, int ret), |
55 | TP_ARGS(local, ret), | 65 | TP_ARGS(local, ret), |
@@ -78,40 +88,14 @@ TRACE_EVENT(drv_return_u64, | |||
78 | TP_printk(LOCAL_PR_FMT " - %llu", LOCAL_PR_ARG, __entry->ret) | 88 | TP_printk(LOCAL_PR_FMT " - %llu", LOCAL_PR_ARG, __entry->ret) |
79 | ); | 89 | ); |
80 | 90 | ||
81 | TRACE_EVENT(drv_start, | 91 | DEFINE_EVENT(local_only_evt, drv_start, |
82 | TP_PROTO(struct ieee80211_local *local), | 92 | TP_PROTO(struct ieee80211_local *local), |
83 | 93 | TP_ARGS(local) | |
84 | TP_ARGS(local), | ||
85 | |||
86 | TP_STRUCT__entry( | ||
87 | LOCAL_ENTRY | ||
88 | ), | ||
89 | |||
90 | TP_fast_assign( | ||
91 | LOCAL_ASSIGN; | ||
92 | ), | ||
93 | |||
94 | TP_printk( | ||
95 | LOCAL_PR_FMT, LOCAL_PR_ARG | ||
96 | ) | ||
97 | ); | 94 | ); |
98 | 95 | ||
99 | TRACE_EVENT(drv_stop, | 96 | DEFINE_EVENT(local_only_evt, drv_stop, |
100 | TP_PROTO(struct ieee80211_local *local), | 97 | TP_PROTO(struct ieee80211_local *local), |
101 | 98 | TP_ARGS(local) | |
102 | TP_ARGS(local), | ||
103 | |||
104 | TP_STRUCT__entry( | ||
105 | LOCAL_ENTRY | ||
106 | ), | ||
107 | |||
108 | TP_fast_assign( | ||
109 | LOCAL_ASSIGN; | ||
110 | ), | ||
111 | |||
112 | TP_printk( | ||
113 | LOCAL_PR_FMT, LOCAL_PR_ARG | ||
114 | ) | ||
115 | ); | 99 | ); |
116 | 100 | ||
117 | TRACE_EVENT(drv_add_interface, | 101 | TRACE_EVENT(drv_add_interface, |
@@ -439,40 +423,14 @@ TRACE_EVENT(drv_hw_scan, | |||
439 | ) | 423 | ) |
440 | ); | 424 | ); |
441 | 425 | ||
442 | TRACE_EVENT(drv_sw_scan_start, | 426 | DEFINE_EVENT(local_only_evt, drv_sw_scan_start, |
443 | TP_PROTO(struct ieee80211_local *local), | 427 | TP_PROTO(struct ieee80211_local *local), |
444 | 428 | TP_ARGS(local) | |
445 | TP_ARGS(local), | ||
446 | |||
447 | TP_STRUCT__entry( | ||
448 | LOCAL_ENTRY | ||
449 | ), | ||
450 | |||
451 | TP_fast_assign( | ||
452 | LOCAL_ASSIGN; | ||
453 | ), | ||
454 | |||
455 | TP_printk( | ||
456 | LOCAL_PR_FMT, LOCAL_PR_ARG | ||
457 | ) | ||
458 | ); | 429 | ); |
459 | 430 | ||
460 | TRACE_EVENT(drv_sw_scan_complete, | 431 | DEFINE_EVENT(local_only_evt, drv_sw_scan_complete, |
461 | TP_PROTO(struct ieee80211_local *local), | 432 | TP_PROTO(struct ieee80211_local *local), |
462 | 433 | TP_ARGS(local) | |
463 | TP_ARGS(local), | ||
464 | |||
465 | TP_STRUCT__entry( | ||
466 | LOCAL_ENTRY | ||
467 | ), | ||
468 | |||
469 | TP_fast_assign( | ||
470 | LOCAL_ASSIGN; | ||
471 | ), | ||
472 | |||
473 | TP_printk( | ||
474 | LOCAL_PR_FMT, LOCAL_PR_ARG | ||
475 | ) | ||
476 | ); | 434 | ); |
477 | 435 | ||
478 | TRACE_EVENT(drv_get_stats, | 436 | TRACE_EVENT(drv_get_stats, |
@@ -702,23 +660,9 @@ TRACE_EVENT(drv_conf_tx, | |||
702 | ) | 660 | ) |
703 | ); | 661 | ); |
704 | 662 | ||
705 | TRACE_EVENT(drv_get_tsf, | 663 | DEFINE_EVENT(local_only_evt, drv_get_tsf, |
706 | TP_PROTO(struct ieee80211_local *local), | 664 | TP_PROTO(struct ieee80211_local *local), |
707 | 665 | TP_ARGS(local) | |
708 | TP_ARGS(local), | ||
709 | |||
710 | TP_STRUCT__entry( | ||
711 | LOCAL_ENTRY | ||
712 | ), | ||
713 | |||
714 | TP_fast_assign( | ||
715 | LOCAL_ASSIGN; | ||
716 | ), | ||
717 | |||
718 | TP_printk( | ||
719 | LOCAL_PR_FMT, | ||
720 | LOCAL_PR_ARG | ||
721 | ) | ||
722 | ); | 666 | ); |
723 | 667 | ||
724 | TRACE_EVENT(drv_set_tsf, | 668 | TRACE_EVENT(drv_set_tsf, |
@@ -742,41 +686,14 @@ TRACE_EVENT(drv_set_tsf, | |||
742 | ) | 686 | ) |
743 | ); | 687 | ); |
744 | 688 | ||
745 | TRACE_EVENT(drv_reset_tsf, | 689 | DEFINE_EVENT(local_only_evt, drv_reset_tsf, |
746 | TP_PROTO(struct ieee80211_local *local), | 690 | TP_PROTO(struct ieee80211_local *local), |
747 | 691 | TP_ARGS(local) | |
748 | TP_ARGS(local), | ||
749 | |||
750 | TP_STRUCT__entry( | ||
751 | LOCAL_ENTRY | ||
752 | ), | ||
753 | |||
754 | TP_fast_assign( | ||
755 | LOCAL_ASSIGN; | ||
756 | ), | ||
757 | |||
758 | TP_printk( | ||
759 | LOCAL_PR_FMT, LOCAL_PR_ARG | ||
760 | ) | ||
761 | ); | 692 | ); |
762 | 693 | ||
763 | TRACE_EVENT(drv_tx_last_beacon, | 694 | DEFINE_EVENT(local_only_evt, drv_tx_last_beacon, |
764 | TP_PROTO(struct ieee80211_local *local), | 695 | TP_PROTO(struct ieee80211_local *local), |
765 | 696 | TP_ARGS(local) | |
766 | TP_ARGS(local), | ||
767 | |||
768 | TP_STRUCT__entry( | ||
769 | LOCAL_ENTRY | ||
770 | ), | ||
771 | |||
772 | TP_fast_assign( | ||
773 | LOCAL_ASSIGN; | ||
774 | ), | ||
775 | |||
776 | TP_printk( | ||
777 | LOCAL_PR_FMT, | ||
778 | LOCAL_PR_ARG | ||
779 | ) | ||
780 | ); | 697 | ); |
781 | 698 | ||
782 | TRACE_EVENT(drv_ampdu_action, | 699 | TRACE_EVENT(drv_ampdu_action, |
@@ -784,9 +701,9 @@ TRACE_EVENT(drv_ampdu_action, | |||
784 | struct ieee80211_sub_if_data *sdata, | 701 | struct ieee80211_sub_if_data *sdata, |
785 | enum ieee80211_ampdu_mlme_action action, | 702 | enum ieee80211_ampdu_mlme_action action, |
786 | struct ieee80211_sta *sta, u16 tid, | 703 | struct ieee80211_sta *sta, u16 tid, |
787 | u16 *ssn), | 704 | u16 *ssn, u8 buf_size), |
788 | 705 | ||
789 | TP_ARGS(local, sdata, action, sta, tid, ssn), | 706 | TP_ARGS(local, sdata, action, sta, tid, ssn, buf_size), |
790 | 707 | ||
791 | TP_STRUCT__entry( | 708 | TP_STRUCT__entry( |
792 | LOCAL_ENTRY | 709 | LOCAL_ENTRY |
@@ -794,6 +711,7 @@ TRACE_EVENT(drv_ampdu_action, | |||
794 | __field(u32, action) | 711 | __field(u32, action) |
795 | __field(u16, tid) | 712 | __field(u16, tid) |
796 | __field(u16, ssn) | 713 | __field(u16, ssn) |
714 | __field(u8, buf_size) | ||
797 | VIF_ENTRY | 715 | VIF_ENTRY |
798 | ), | 716 | ), |
799 | 717 | ||
@@ -804,11 +722,13 @@ TRACE_EVENT(drv_ampdu_action, | |||
804 | __entry->action = action; | 722 | __entry->action = action; |
805 | __entry->tid = tid; | 723 | __entry->tid = tid; |
806 | __entry->ssn = ssn ? *ssn : 0; | 724 | __entry->ssn = ssn ? *ssn : 0; |
725 | __entry->buf_size = buf_size; | ||
807 | ), | 726 | ), |
808 | 727 | ||
809 | TP_printk( | 728 | TP_printk( |
810 | LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d", | 729 | LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d buf:%d", |
811 | LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid | 730 | LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, |
731 | __entry->tid, __entry->buf_size | ||
812 | ) | 732 | ) |
813 | ); | 733 | ); |
814 | 734 | ||
@@ -959,22 +879,9 @@ TRACE_EVENT(drv_remain_on_channel, | |||
959 | ) | 879 | ) |
960 | ); | 880 | ); |
961 | 881 | ||
962 | TRACE_EVENT(drv_cancel_remain_on_channel, | 882 | DEFINE_EVENT(local_only_evt, drv_cancel_remain_on_channel, |
963 | TP_PROTO(struct ieee80211_local *local), | 883 | TP_PROTO(struct ieee80211_local *local), |
964 | 884 | TP_ARGS(local) | |
965 | TP_ARGS(local), | ||
966 | |||
967 | TP_STRUCT__entry( | ||
968 | LOCAL_ENTRY | ||
969 | ), | ||
970 | |||
971 | TP_fast_assign( | ||
972 | LOCAL_ASSIGN; | ||
973 | ), | ||
974 | |||
975 | TP_printk( | ||
976 | LOCAL_PR_FMT, LOCAL_PR_ARG | ||
977 | ) | ||
978 | ); | 885 | ); |
979 | 886 | ||
980 | /* | 887 | /* |
@@ -1069,23 +976,9 @@ TRACE_EVENT(api_stop_tx_ba_cb, | |||
1069 | ) | 976 | ) |
1070 | ); | 977 | ); |
1071 | 978 | ||
1072 | TRACE_EVENT(api_restart_hw, | 979 | DEFINE_EVENT(local_only_evt, api_restart_hw, |
1073 | TP_PROTO(struct ieee80211_local *local), | 980 | TP_PROTO(struct ieee80211_local *local), |
1074 | 981 | TP_ARGS(local) | |
1075 | TP_ARGS(local), | ||
1076 | |||
1077 | TP_STRUCT__entry( | ||
1078 | LOCAL_ENTRY | ||
1079 | ), | ||
1080 | |||
1081 | TP_fast_assign( | ||
1082 | LOCAL_ASSIGN; | ||
1083 | ), | ||
1084 | |||
1085 | TP_printk( | ||
1086 | LOCAL_PR_FMT, | ||
1087 | LOCAL_PR_ARG | ||
1088 | ) | ||
1089 | ); | 982 | ); |
1090 | 983 | ||
1091 | TRACE_EVENT(api_beacon_loss, | 984 | TRACE_EVENT(api_beacon_loss, |
@@ -1214,40 +1107,14 @@ TRACE_EVENT(api_chswitch_done, | |||
1214 | ) | 1107 | ) |
1215 | ); | 1108 | ); |
1216 | 1109 | ||
1217 | TRACE_EVENT(api_ready_on_channel, | 1110 | DEFINE_EVENT(local_only_evt, api_ready_on_channel, |
1218 | TP_PROTO(struct ieee80211_local *local), | 1111 | TP_PROTO(struct ieee80211_local *local), |
1219 | 1112 | TP_ARGS(local) | |
1220 | TP_ARGS(local), | ||
1221 | |||
1222 | TP_STRUCT__entry( | ||
1223 | LOCAL_ENTRY | ||
1224 | ), | ||
1225 | |||
1226 | TP_fast_assign( | ||
1227 | LOCAL_ASSIGN; | ||
1228 | ), | ||
1229 | |||
1230 | TP_printk( | ||
1231 | LOCAL_PR_FMT, LOCAL_PR_ARG | ||
1232 | ) | ||
1233 | ); | 1113 | ); |
1234 | 1114 | ||
1235 | TRACE_EVENT(api_remain_on_channel_expired, | 1115 | DEFINE_EVENT(local_only_evt, api_remain_on_channel_expired, |
1236 | TP_PROTO(struct ieee80211_local *local), | 1116 | TP_PROTO(struct ieee80211_local *local), |
1237 | 1117 | TP_ARGS(local) | |
1238 | TP_ARGS(local), | ||
1239 | |||
1240 | TP_STRUCT__entry( | ||
1241 | LOCAL_ENTRY | ||
1242 | ), | ||
1243 | |||
1244 | TP_fast_assign( | ||
1245 | LOCAL_ASSIGN; | ||
1246 | ), | ||
1247 | |||
1248 | TP_printk( | ||
1249 | LOCAL_PR_FMT, LOCAL_PR_ARG | ||
1250 | ) | ||
1251 | ); | 1118 | ); |
1252 | 1119 | ||
1253 | /* | 1120 | /* |
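Most of the driver-trace.h churn above replaces near-identical TRACE_EVENT() blocks that only log the ieee80211_local pointer with one DECLARE_EVENT_CLASS(local_only_evt) plus short per-event DEFINE_EVENT() lines, and teaches the compiled-out stub section about the new macros. The stub side boils down to the pattern sketched here; the macro names mimic the kernel's, but this is a stand-alone demo, not the tracing infrastructure:

#include <stdio.h>

/* When tracing is compiled out, every DEFINE_EVENT collapses into an
 * empty inline function, so call sites need no #ifdef. */
#define DECLARE_EVENT_CLASS(...)
#define DEFINE_EVENT(evt_class, name, proto, ...) \
	static inline void trace_##name(proto) {}

DECLARE_EVENT_CLASS(local_only_evt, int hw_id, hw_id)
DEFINE_EVENT(local_only_evt, drv_start, int hw_id, hw_id)
DEFINE_EVENT(local_only_evt, drv_stop, int hw_id, hw_id)

int main(void)
{
	trace_drv_start(0);	/* expands to a no-op call */
	trace_drv_stop(0);
	puts("trace stubs compiled away");
	return 0;
}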
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 53c7077ffd4f..775fb63471c4 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
@@ -270,7 +270,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, | |||
270 | enum ieee80211_band band = rx_status->band; | 270 | enum ieee80211_band band = rx_status->band; |
271 | 271 | ||
272 | if (elems->ds_params && elems->ds_params_len == 1) | 272 | if (elems->ds_params && elems->ds_params_len == 1) |
273 | freq = ieee80211_channel_to_frequency(elems->ds_params[0]); | 273 | freq = ieee80211_channel_to_frequency(elems->ds_params[0], |
274 | band); | ||
274 | else | 275 | else |
275 | freq = rx_status->freq; | 276 | freq = rx_status->freq; |
276 | 277 | ||
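ieee80211_channel_to_frequency() now takes the band explicitly because a bare channel number is ambiguous: the same number can map to different centre frequencies in different bands, so guessing the band from the number alone is unreliable. A stand-alone sketch of the common 2.4/5 GHz mapping, illustrative rather than the kernel helper itself:

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

static int channel_to_frequency(int chan, enum band band)
{
	if (band == BAND_2GHZ) {
		if (chan == 14)
			return 2484;           /* Japan-only special case */
		return 2407 + chan * 5;
	}
	return 5000 + chan * 5;                    /* 5 GHz */
}

int main(void)
{
	printf("2.4 GHz ch 6  -> %d MHz\n", channel_to_frequency(6, BAND_2GHZ));
	printf("5 GHz   ch 36 -> %d MHz\n", channel_to_frequency(36, BAND_5GHZ));
	return 0;
}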
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index c47d7c0e48a4..44eea1af1553 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -225,6 +225,7 @@ struct ieee80211_if_ap { | |||
225 | struct sk_buff_head ps_bc_buf; | 225 | struct sk_buff_head ps_bc_buf; |
226 | atomic_t num_sta_ps; /* number of stations in PS mode */ | 226 | atomic_t num_sta_ps; /* number of stations in PS mode */ |
227 | int dtim_count; | 227 | int dtim_count; |
228 | bool dtim_bc_mc; | ||
228 | }; | 229 | }; |
229 | 230 | ||
230 | struct ieee80211_if_wds { | 231 | struct ieee80211_if_wds { |
@@ -654,8 +655,6 @@ struct tpt_led_trigger { | |||
654 | * well be on the operating channel | 655 | * well be on the operating channel |
655 | * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to | 656 | * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to |
656 | * determine if we are on the operating channel or not | 657 | * determine if we are on the operating channel or not |
657 | * @SCAN_OFF_CHANNEL: We're off our operating channel for scanning, | ||
658 | * gets only set in conjunction with SCAN_SW_SCANNING | ||
659 | * @SCAN_COMPLETED: Set for our scan work function when the driver reported | 658 | * @SCAN_COMPLETED: Set for our scan work function when the driver reported |
660 | * that the scan completed. | 659 | * that the scan completed. |
661 | * @SCAN_ABORTED: Set for our scan work function when the driver reported | 660 | * @SCAN_ABORTED: Set for our scan work function when the driver reported |
@@ -664,7 +663,6 @@ struct tpt_led_trigger { | |||
664 | enum { | 663 | enum { |
665 | SCAN_SW_SCANNING, | 664 | SCAN_SW_SCANNING, |
666 | SCAN_HW_SCANNING, | 665 | SCAN_HW_SCANNING, |
667 | SCAN_OFF_CHANNEL, | ||
668 | SCAN_COMPLETED, | 666 | SCAN_COMPLETED, |
669 | SCAN_ABORTED, | 667 | SCAN_ABORTED, |
670 | }; | 668 | }; |
@@ -1147,10 +1145,14 @@ void ieee80211_rx_bss_put(struct ieee80211_local *local, | |||
1147 | struct ieee80211_bss *bss); | 1145 | struct ieee80211_bss *bss); |
1148 | 1146 | ||
1149 | /* off-channel helpers */ | 1147 | /* off-channel helpers */ |
1150 | void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local); | 1148 | bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local); |
1151 | void ieee80211_offchannel_stop_station(struct ieee80211_local *local); | 1149 | void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local, |
1150 | bool tell_ap); | ||
1151 | void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local, | ||
1152 | bool offchannel_ps_enable); | ||
1152 | void ieee80211_offchannel_return(struct ieee80211_local *local, | 1153 | void ieee80211_offchannel_return(struct ieee80211_local *local, |
1153 | bool enable_beaconing); | 1154 | bool enable_beaconing, |
1155 | bool offchannel_ps_disable); | ||
1154 | void ieee80211_hw_roc_setup(struct ieee80211_local *local); | 1156 | void ieee80211_hw_roc_setup(struct ieee80211_local *local); |
1155 | 1157 | ||
1156 | /* interface handling */ | 1158 | /* interface handling */ |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 8acba456744e..5a4e19b88032 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -382,6 +382,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
382 | struct sk_buff *skb, *tmp; | 382 | struct sk_buff *skb, *tmp; |
383 | u32 hw_reconf_flags = 0; | 383 | u32 hw_reconf_flags = 0; |
384 | int i; | 384 | int i; |
385 | enum nl80211_channel_type orig_ct; | ||
385 | 386 | ||
386 | if (local->scan_sdata == sdata) | 387 | if (local->scan_sdata == sdata) |
387 | ieee80211_scan_cancel(local); | 388 | ieee80211_scan_cancel(local); |
@@ -542,8 +543,14 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
542 | hw_reconf_flags = 0; | 543 | hw_reconf_flags = 0; |
543 | } | 544 | } |
544 | 545 | ||
546 | /* Re-calculate channel-type, in case there are multiple vifs | ||
547 | * on different channel types. | ||
548 | */ | ||
549 | orig_ct = local->_oper_channel_type; | ||
550 | ieee80211_set_channel_type(local, NULL, NL80211_CHAN_NO_HT); | ||
551 | |||
545 | /* do after stop to avoid reconfiguring when we stop anyway */ | 552 | /* do after stop to avoid reconfiguring when we stop anyway */ |
546 | if (hw_reconf_flags) | 553 | if (hw_reconf_flags || (orig_ct != local->_oper_channel_type)) |
547 | ieee80211_hw_config(local, hw_reconf_flags); | 554 | ieee80211_hw_config(local, hw_reconf_flags); |
548 | 555 | ||
549 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); | 556 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index a46ff06d7cb8..c155c0b69426 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -98,6 +98,41 @@ static void ieee80211_reconfig_filter(struct work_struct *work) | |||
98 | ieee80211_configure_filter(local); | 98 | ieee80211_configure_filter(local); |
99 | } | 99 | } |
100 | 100 | ||
101 | /* | ||
102 | * Returns true if we are logically configured to be on | ||
103 | * the operating channel AND the hardware-conf is currently | ||
104 | * configured on the operating channel. Compares channel-type | ||
105 | * as well. | ||
106 | */ | ||
107 | bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local) | ||
108 | { | ||
109 | struct ieee80211_channel *chan, *scan_chan; | ||
110 | enum nl80211_channel_type channel_type; | ||
111 | |||
112 | /* This logic needs to match logic in ieee80211_hw_config */ | ||
113 | if (local->scan_channel) { | ||
114 | chan = local->scan_channel; | ||
115 | channel_type = NL80211_CHAN_NO_HT; | ||
116 | } else if (local->tmp_channel) { | ||
117 | chan = scan_chan = local->tmp_channel; | ||
118 | channel_type = local->tmp_channel_type; | ||
119 | } else { | ||
120 | chan = local->oper_channel; | ||
121 | channel_type = local->_oper_channel_type; | ||
122 | } | ||
123 | |||
124 | if (chan != local->oper_channel || | ||
125 | channel_type != local->_oper_channel_type) | ||
126 | return false; | ||
127 | |||
128 | /* Check current hardware-config against oper_channel. */ | ||
129 | if ((local->oper_channel != local->hw.conf.channel) || | ||
130 | (local->_oper_channel_type != local->hw.conf.channel_type)) | ||
131 | return false; | ||
132 | |||
133 | return true; | ||
134 | } | ||
135 | |||
101 | int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) | 136 | int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) |
102 | { | 137 | { |
103 | struct ieee80211_channel *chan, *scan_chan; | 138 | struct ieee80211_channel *chan, *scan_chan; |
@@ -110,21 +145,27 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) | |||
110 | 145 | ||
111 | scan_chan = local->scan_channel; | 146 | scan_chan = local->scan_channel; |
112 | 147 | ||
148 | /* If this off-channel logic ever changes, ieee80211_cfg_on_oper_channel | ||
149 | * may need to change as well. | ||
150 | */ | ||
113 | offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; | 151 | offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; |
114 | if (scan_chan) { | 152 | if (scan_chan) { |
115 | chan = scan_chan; | 153 | chan = scan_chan; |
116 | channel_type = NL80211_CHAN_NO_HT; | 154 | channel_type = NL80211_CHAN_NO_HT; |
117 | local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL; | 155 | } else if (local->tmp_channel) { |
118 | } else if (local->tmp_channel && | ||
119 | local->oper_channel != local->tmp_channel) { | ||
120 | chan = scan_chan = local->tmp_channel; | 156 | chan = scan_chan = local->tmp_channel; |
121 | channel_type = local->tmp_channel_type; | 157 | channel_type = local->tmp_channel_type; |
122 | local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL; | ||
123 | } else { | 158 | } else { |
124 | chan = local->oper_channel; | 159 | chan = local->oper_channel; |
125 | channel_type = local->_oper_channel_type; | 160 | channel_type = local->_oper_channel_type; |
126 | local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL; | ||
127 | } | 161 | } |
162 | |||
163 | if (chan != local->oper_channel || | ||
164 | channel_type != local->_oper_channel_type) | ||
165 | local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL; | ||
166 | else | ||
167 | local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL; | ||
168 | |||
128 | offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; | 169 | offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; |
129 | 170 | ||
130 | if (offchannel_flag || chan != local->hw.conf.channel || | 171 | if (offchannel_flag || chan != local->hw.conf.channel || |
@@ -231,7 +272,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, | |||
231 | 272 | ||
232 | if (changed & BSS_CHANGED_BEACON_ENABLED) { | 273 | if (changed & BSS_CHANGED_BEACON_ENABLED) { |
233 | if (local->quiescing || !ieee80211_sdata_running(sdata) || | 274 | if (local->quiescing || !ieee80211_sdata_running(sdata) || |
234 | test_bit(SCAN_SW_SCANNING, &local->scanning)) { | 275 | test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) { |
235 | sdata->vif.bss_conf.enable_beacon = false; | 276 | sdata->vif.bss_conf.enable_beacon = false; |
236 | } else { | 277 | } else { |
237 | /* | 278 | /* |
@@ -554,6 +595,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
554 | local->hw.queues = 1; | 595 | local->hw.queues = 1; |
555 | local->hw.max_rates = 1; | 596 | local->hw.max_rates = 1; |
556 | local->hw.max_report_rates = 0; | 597 | local->hw.max_report_rates = 0; |
598 | local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; | ||
557 | local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; | 599 | local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; |
558 | local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; | 600 | local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; |
559 | local->user_power_level = -1; | 601 | local->user_power_level = -1; |
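ieee80211_cfg_on_oper_channel() above decides whether mac80211 is both logically and physically on its operating channel, using the same scan-channel > temporary-channel > operating-channel precedence as ieee80211_hw_config(). A reduced model of that decision, with plain ints standing in for the channel pointers and HT channel types:

#include <stdbool.h>

struct chan_state {
	int scan_chan, tmp_chan, oper_chan;   /* 0 = not set */
	int oper_type, tmp_type, hw_chan, hw_type;
};

static bool cfg_on_oper_channel(const struct chan_state *s)
{
	int chan, type;

	/* Same precedence as the hw-config path: scan > tmp > oper. */
	if (s->scan_chan) {
		chan = s->scan_chan;
		type = 0;              /* scans run without HT */
	} else if (s->tmp_chan) {
		chan = s->tmp_chan;
		type = s->tmp_type;
	} else {
		chan = s->oper_chan;
		type = s->oper_type;
	}

	if (chan != s->oper_chan || type != s->oper_type)
		return false;

	/* Logical config matches; the hardware must agree as well. */
	return s->hw_chan == s->oper_chan && s->hw_type == s->oper_type;
}

int main(void)
{
	struct chan_state s = { .oper_chan = 36, .oper_type = 1,
				.hw_chan = 36, .hw_type = 1 };

	return cfg_on_oper_channel(&s) ? 0 : 1;   /* on channel -> exit 0 */
}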
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index ca3af4685b0a..2a57cc02c618 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -574,7 +574,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, | |||
574 | &elems); | 574 | &elems); |
575 | 575 | ||
576 | if (elems.ds_params && elems.ds_params_len == 1) | 576 | if (elems.ds_params && elems.ds_params_len == 1) |
577 | freq = ieee80211_channel_to_frequency(elems.ds_params[0]); | 577 | freq = ieee80211_channel_to_frequency(elems.ds_params[0], band); |
578 | else | 578 | else |
579 | freq = rx_status->freq; | 579 | freq = rx_status->freq; |
580 | 580 | ||
@@ -645,7 +645,7 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata) | |||
645 | if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags)) | 645 | if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags)) |
646 | mesh_mpath_table_grow(); | 646 | mesh_mpath_table_grow(); |
647 | 647 | ||
648 | if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags)) | 648 | if (test_and_clear_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags)) |
649 | mesh_mpp_table_grow(); | 649 | mesh_mpp_table_grow(); |
650 | 650 | ||
651 | if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags)) | 651 | if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags)) |
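The one-line mesh.c fix above corrects a copy-and-paste slip: the work loop tested MESH_WORK_GROW_MPATH_TABLE twice, so a pending MPP-table grow request was never consumed. The per-bit test-and-clear dispatch it uses looks like this in a user-space analogue (non-atomic, just to show the shape):

#include <stdio.h>

#define WORK_GROW_MPATH  (1u << 0)
#define WORK_GROW_MPP    (1u << 1)

static int test_and_clear(unsigned *flags, unsigned bit)
{
	int was_set = (*flags & bit) != 0;

	*flags &= ~bit;
	return was_set;
}

int main(void)
{
	unsigned wrkq_flags = WORK_GROW_MPATH | WORK_GROW_MPP;

	if (test_and_clear(&wrkq_flags, WORK_GROW_MPATH))
		puts("grow mpath table");
	if (test_and_clear(&wrkq_flags, WORK_GROW_MPP))  /* previously tested the wrong bit */
		puts("grow mpp table");
	return 0;
}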
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 45fbb9e33746..f77adf1a520e 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -28,8 +28,15 @@ | |||
28 | #include "rate.h" | 28 | #include "rate.h" |
29 | #include "led.h" | 29 | #include "led.h" |
30 | 30 | ||
31 | #define IEEE80211_MAX_NULLFUNC_TRIES 2 | 31 | static int max_nullfunc_tries = 2; |
32 | #define IEEE80211_MAX_PROBE_TRIES 5 | 32 | module_param(max_nullfunc_tries, int, 0644); |
33 | MODULE_PARM_DESC(max_nullfunc_tries, | ||
34 | "Maximum nullfunc tx tries before disconnecting (reason 4)."); | ||
35 | |||
36 | static int max_probe_tries = 5; | ||
37 | module_param(max_probe_tries, int, 0644); | ||
38 | MODULE_PARM_DESC(max_probe_tries, | ||
39 | "Maximum probe tries before disconnecting (reason 4)."); | ||
33 | 40 | ||
34 | /* | 41 | /* |
35 | * Beacon loss timeout is calculated as N frames times the | 42 | * Beacon loss timeout is calculated as N frames times the |
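Turning the retry limits and probe timeout into 0644 module parameters makes them tunable at load time and, via /sys/module/mac80211/parameters/, at run time. A fragment-only sketch of the pattern (it assumes a normal kernel module build environment and is not a complete module), showing how a millisecond parameter is converted to jiffies at the point of use so later changes take effect on the next timer arm:

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>

static int probe_wait_ms = 500;
module_param(probe_wait_ms, int, 0644);
MODULE_PARM_DESC(probe_wait_ms,
		 "Maximum time (ms) to wait for probe response"
		 " before disconnecting (reason 4).");

/* Compute the deadline fresh each time the probe timer is armed. */
static unsigned long probe_deadline(void)
{
	return jiffies + msecs_to_jiffies(probe_wait_ms);
}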
@@ -51,7 +58,11 @@ | |||
51 | * a probe request because of beacon loss or for | 58 | * a probe request because of beacon loss or for |
52 | * checking the connection still works. | 59 | * checking the connection still works. |
53 | */ | 60 | */ |
54 | #define IEEE80211_PROBE_WAIT (HZ / 2) | 61 | static int probe_wait_ms = 500; |
62 | module_param(probe_wait_ms, int, 0644); | ||
63 | MODULE_PARM_DESC(probe_wait_ms, | ||
64 | "Maximum time(ms) to wait for probe response" | ||
65 | " before disconnecting (reason 4)."); | ||
55 | 66 | ||
56 | /* | 67 | /* |
57 | * Weight given to the latest Beacon frame when calculating average signal | 68 | * Weight given to the latest Beacon frame when calculating average signal |
@@ -161,6 +172,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, | |||
161 | struct ieee80211_supported_band *sband; | 172 | struct ieee80211_supported_band *sband; |
162 | struct sta_info *sta; | 173 | struct sta_info *sta; |
163 | u32 changed = 0; | 174 | u32 changed = 0; |
175 | int hti_cfreq; | ||
164 | u16 ht_opmode; | 176 | u16 ht_opmode; |
165 | bool enable_ht = true; | 177 | bool enable_ht = true; |
166 | enum nl80211_channel_type prev_chantype; | 178 | enum nl80211_channel_type prev_chantype; |
@@ -174,10 +186,27 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, | |||
174 | if (!sband->ht_cap.ht_supported) | 186 | if (!sband->ht_cap.ht_supported) |
175 | enable_ht = false; | 187 | enable_ht = false; |
176 | 188 | ||
177 | /* check that channel matches the right operating channel */ | 189 | if (enable_ht) { |
178 | if (local->hw.conf.channel->center_freq != | 190 | hti_cfreq = ieee80211_channel_to_frequency(hti->control_chan, |
179 | ieee80211_channel_to_frequency(hti->control_chan)) | 191 | sband->band); |
180 | enable_ht = false; | 192 | /* check that channel matches the right operating channel */ |
193 | if (local->hw.conf.channel->center_freq != hti_cfreq) { | ||
194 | /* Some APs mess this up, evidently. | ||
195 | * Netgear WNDR3700 sometimes reports 4 higher than | ||
196 | * the actual channel, for instance. | ||
197 | */ | ||
198 | printk(KERN_DEBUG | ||
199 | "%s: Wrong control channel in association" | ||
200 | " response: configured center-freq: %d" | ||
201 | " hti-cfreq: %d hti->control_chan: %d" | ||
202 | " band: %d. Disabling HT.\n", | ||
203 | sdata->name, | ||
204 | local->hw.conf.channel->center_freq, | ||
205 | hti_cfreq, hti->control_chan, | ||
206 | sband->band); | ||
207 | enable_ht = false; | ||
208 | } | ||
209 | } | ||
181 | 210 | ||
182 | if (enable_ht) { | 211 | if (enable_ht) { |
183 | channel_type = NL80211_CHAN_HT20; | 212 | channel_type = NL80211_CHAN_HT20; |
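The new check in ieee80211_enable_ht() guards against APs whose HT information element advertises a bogus primary channel: the advertised control channel is converted to a frequency for the band the frame arrived on (the same conversion sketched for ibss.c above) and compared with the frequency we are actually tuned to, and HT is disabled on a mismatch rather than configuring the wrong channel. A toy model of just that comparison:

#include <stdbool.h>
#include <stdio.h>

static bool ht_allowed(int tuned_freq, int hti_cfreq)
{
	if (tuned_freq != hti_cfreq) {
		printf("AP reports control channel at %d MHz, we are on %d MHz; disabling HT\n",
		       hti_cfreq, tuned_freq);
		return false;
	}
	return true;
}

int main(void)
{
	ht_allowed(5180, 5180);   /* consistent IE */
	ht_allowed(5180, 5200);   /* e.g. an AP reporting four channels high */
	return 0;
}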
@@ -429,7 +458,8 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
429 | container_of((void *)bss, struct cfg80211_bss, priv); | 458 | container_of((void *)bss, struct cfg80211_bss, priv); |
430 | struct ieee80211_channel *new_ch; | 459 | struct ieee80211_channel *new_ch; |
431 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 460 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
432 | int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num); | 461 | int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num, |
462 | cbss->channel->band); | ||
433 | 463 | ||
434 | ASSERT_MGD_MTX(ifmgd); | 464 | ASSERT_MGD_MTX(ifmgd); |
435 | 465 | ||
@@ -600,6 +630,14 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency) | |||
600 | list_for_each_entry(sdata, &local->interfaces, list) { | 630 | list_for_each_entry(sdata, &local->interfaces, list) { |
601 | if (!ieee80211_sdata_running(sdata)) | 631 | if (!ieee80211_sdata_running(sdata)) |
602 | continue; | 632 | continue; |
633 | if (sdata->vif.type == NL80211_IFTYPE_AP) { | ||
634 | /* If an AP vif is found, then disable PS | ||
635 | * by setting the count to zero thereby setting | ||
636 | * ps_sdata to NULL. | ||
637 | */ | ||
638 | count = 0; | ||
639 | break; | ||
640 | } | ||
603 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | 641 | if (sdata->vif.type != NL80211_IFTYPE_STATION) |
604 | continue; | 642 | continue; |
605 | found = sdata; | 643 | found = sdata; |
@@ -1089,7 +1127,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) | |||
1089 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 1127 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
1090 | const u8 *ssid; | 1128 | const u8 *ssid; |
1091 | u8 *dst = ifmgd->associated->bssid; | 1129 | u8 *dst = ifmgd->associated->bssid; |
1092 | u8 unicast_limit = max(1, IEEE80211_MAX_PROBE_TRIES - 3); | 1130 | u8 unicast_limit = max(1, max_probe_tries - 3); |
1093 | 1131 | ||
1094 | /* | 1132 | /* |
1095 | * Try sending broadcast probe requests for the last three | 1133 | * Try sending broadcast probe requests for the last three |
@@ -1115,7 +1153,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) | |||
1115 | } | 1153 | } |
1116 | 1154 | ||
1117 | ifmgd->probe_send_count++; | 1155 | ifmgd->probe_send_count++; |
1118 | ifmgd->probe_timeout = jiffies + IEEE80211_PROBE_WAIT; | 1156 | ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); |
1119 | run_again(ifmgd, ifmgd->probe_timeout); | 1157 | run_again(ifmgd, ifmgd->probe_timeout); |
1120 | } | 1158 | } |
1121 | 1159 | ||
@@ -1216,7 +1254,8 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata) | |||
1216 | 1254 | ||
1217 | memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN); | 1255 | memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN); |
1218 | 1256 | ||
1219 | printk(KERN_DEBUG "Connection to AP %pM lost.\n", bssid); | 1257 | printk(KERN_DEBUG "%s: Connection to AP %pM lost.\n", |
1258 | sdata->name, bssid); | ||
1220 | 1259 | ||
1221 | ieee80211_set_disassoc(sdata, true, true); | 1260 | ieee80211_set_disassoc(sdata, true, true); |
1222 | mutex_unlock(&ifmgd->mtx); | 1261 | mutex_unlock(&ifmgd->mtx); |
@@ -1519,7 +1558,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, | |||
1519 | } | 1558 | } |
1520 | 1559 | ||
1521 | if (elems->ds_params && elems->ds_params_len == 1) | 1560 | if (elems->ds_params && elems->ds_params_len == 1) |
1522 | freq = ieee80211_channel_to_frequency(elems->ds_params[0]); | 1561 | freq = ieee80211_channel_to_frequency(elems->ds_params[0], |
1562 | rx_status->band); | ||
1523 | else | 1563 | else |
1524 | freq = rx_status->freq; | 1564 | freq = rx_status->freq; |
1525 | 1565 | ||
@@ -1960,9 +2000,9 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) | |||
1960 | memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN); | 2000 | memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN); |
1961 | 2001 | ||
1962 | if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) | 2002 | if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) |
1963 | max_tries = IEEE80211_MAX_NULLFUNC_TRIES; | 2003 | max_tries = max_nullfunc_tries; |
1964 | else | 2004 | else |
1965 | max_tries = IEEE80211_MAX_PROBE_TRIES; | 2005 | max_tries = max_probe_tries; |
1966 | 2006 | ||
1967 | /* ACK received for nullfunc probing frame */ | 2007 | /* ACK received for nullfunc probing frame */ |
1968 | if (!ifmgd->probe_send_count) | 2008 | if (!ifmgd->probe_send_count) |
@@ -1972,9 +2012,9 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) | |||
1972 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 2012 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
1973 | wiphy_debug(local->hw.wiphy, | 2013 | wiphy_debug(local->hw.wiphy, |
1974 | "%s: No ack for nullfunc frame to" | 2014 | "%s: No ack for nullfunc frame to" |
1975 | " AP %pM, try %d\n", | 2015 | " AP %pM, try %d/%i\n", |
1976 | sdata->name, bssid, | 2016 | sdata->name, bssid, |
1977 | ifmgd->probe_send_count); | 2017 | ifmgd->probe_send_count, max_tries); |
1978 | #endif | 2018 | #endif |
1979 | ieee80211_mgd_probe_ap_send(sdata); | 2019 | ieee80211_mgd_probe_ap_send(sdata); |
1980 | } else { | 2020 | } else { |
@@ -1994,17 +2034,17 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) | |||
1994 | "%s: Failed to send nullfunc to AP %pM" | 2034 | "%s: Failed to send nullfunc to AP %pM" |
1995 | " after %dms, disconnecting.\n", | 2035 | " after %dms, disconnecting.\n", |
1996 | sdata->name, | 2036 | sdata->name, |
1997 | bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ); | 2037 | bssid, probe_wait_ms); |
1998 | #endif | 2038 | #endif |
1999 | ieee80211_sta_connection_lost(sdata, bssid); | 2039 | ieee80211_sta_connection_lost(sdata, bssid); |
2000 | } else if (ifmgd->probe_send_count < max_tries) { | 2040 | } else if (ifmgd->probe_send_count < max_tries) { |
2001 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 2041 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
2002 | wiphy_debug(local->hw.wiphy, | 2042 | wiphy_debug(local->hw.wiphy, |
2003 | "%s: No probe response from AP %pM" | 2043 | "%s: No probe response from AP %pM" |
2004 | " after %dms, try %d\n", | 2044 | " after %dms, try %d/%i\n", |
2005 | sdata->name, | 2045 | sdata->name, |
2006 | bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ, | 2046 | bssid, probe_wait_ms, |
2007 | ifmgd->probe_send_count); | 2047 | ifmgd->probe_send_count, max_tries); |
2008 | #endif | 2048 | #endif |
2009 | ieee80211_mgd_probe_ap_send(sdata); | 2049 | ieee80211_mgd_probe_ap_send(sdata); |
2010 | } else { | 2050 | } else { |
@@ -2016,7 +2056,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) | |||
2016 | "%s: No probe response from AP %pM" | 2056 | "%s: No probe response from AP %pM" |
2017 | " after %dms, disconnecting.\n", | 2057 | " after %dms, disconnecting.\n", |
2018 | sdata->name, | 2058 | sdata->name, |
2019 | bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ); | 2059 | bssid, probe_wait_ms); |
2020 | 2060 | ||
2021 | ieee80211_sta_connection_lost(sdata, bssid); | 2061 | ieee80211_sta_connection_lost(sdata, bssid); |
2022 | } | 2062 | } |
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index b4e52676f3fb..13427b194ced 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c | |||
@@ -17,10 +17,14 @@ | |||
17 | #include "driver-trace.h" | 17 | #include "driver-trace.h" |
18 | 18 | ||
19 | /* | 19 | /* |
20 | * inform AP that we will go to sleep so that it will buffer the frames | 20 | * Tell our hardware to disable PS. |
21 | * while we scan | 21 | * Optionally inform AP that we will go to sleep so that it will buffer |
22 | * the frames while we are doing off-channel work. This is optional | ||
23 | * because we *may* be doing work on the operating channel, and want our | ||
24 | * hardware unconditionally awake, but still let the AP send us normal frames. | ||
22 | */ | 25 | */ |
23 | static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata) | 26 | static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata, |
27 | bool tell_ap) | ||
24 | { | 28 | { |
25 | struct ieee80211_local *local = sdata->local; | 29 | struct ieee80211_local *local = sdata->local; |
26 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 30 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
@@ -41,8 +45,8 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata) | |||
41 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); | 45 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); |
42 | } | 46 | } |
43 | 47 | ||
44 | if (!(local->offchannel_ps_enabled) || | 48 | if (tell_ap && (!local->offchannel_ps_enabled || |
45 | !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) | 49 | !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))) |
46 | /* | 50 | /* |
47 | * If power save was enabled, no need to send a nullfunc | 51 | * If power save was enabled, no need to send a nullfunc |
48 | * frame because AP knows that we are sleeping. But if the | 52 | * frame because AP knows that we are sleeping. But if the |
@@ -77,6 +81,9 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata) | |||
77 | * we are sleeping, let's just enable power save mode in | 81 | * we are sleeping, let's just enable power save mode in |
78 | * hardware. | 82 | * hardware. |
79 | */ | 83 | */ |
84 | /* TODO: Only set hardware if CONF_PS changed? | ||
85 | * TODO: Should we set offchannel_ps_enabled to false? | ||
86 | */ | ||
80 | local->hw.conf.flags |= IEEE80211_CONF_PS; | 87 | local->hw.conf.flags |= IEEE80211_CONF_PS; |
81 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); | 88 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); |
82 | } else if (local->hw.conf.dynamic_ps_timeout > 0) { | 89 | } else if (local->hw.conf.dynamic_ps_timeout > 0) { |
@@ -95,63 +102,61 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata) | |||
95 | ieee80211_sta_reset_conn_monitor(sdata); | 102 | ieee80211_sta_reset_conn_monitor(sdata); |
96 | } | 103 | } |
97 | 104 | ||
98 | void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local) | 105 | void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local, |
106 | bool offchannel_ps_enable) | ||
99 | { | 107 | { |
100 | struct ieee80211_sub_if_data *sdata; | 108 | struct ieee80211_sub_if_data *sdata; |
101 | 109 | ||
110 | /* | ||
111 | * notify the AP about us leaving the channel and stop all | ||
112 | * STA interfaces. | ||
113 | */ | ||
102 | mutex_lock(&local->iflist_mtx); | 114 | mutex_lock(&local->iflist_mtx); |
103 | list_for_each_entry(sdata, &local->interfaces, list) { | 115 | list_for_each_entry(sdata, &local->interfaces, list) { |
104 | if (!ieee80211_sdata_running(sdata)) | 116 | if (!ieee80211_sdata_running(sdata)) |
105 | continue; | 117 | continue; |
106 | 118 | ||
107 | /* disable beaconing */ | 119 | if (sdata->vif.type != NL80211_IFTYPE_MONITOR) |
120 | set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state); | ||
121 | |||
122 | /* Check to see if we should disable beaconing. */ | ||
108 | if (sdata->vif.type == NL80211_IFTYPE_AP || | 123 | if (sdata->vif.type == NL80211_IFTYPE_AP || |
109 | sdata->vif.type == NL80211_IFTYPE_ADHOC || | 124 | sdata->vif.type == NL80211_IFTYPE_ADHOC || |
110 | sdata->vif.type == NL80211_IFTYPE_MESH_POINT) | 125 | sdata->vif.type == NL80211_IFTYPE_MESH_POINT) |
111 | ieee80211_bss_info_change_notify( | 126 | ieee80211_bss_info_change_notify( |
112 | sdata, BSS_CHANGED_BEACON_ENABLED); | 127 | sdata, BSS_CHANGED_BEACON_ENABLED); |
113 | 128 | ||
114 | /* | 129 | if (sdata->vif.type != NL80211_IFTYPE_MONITOR) { |
115 | * only handle non-STA interfaces here, STA interfaces | ||
116 | * are handled in ieee80211_offchannel_stop_station(), | ||
117 | * e.g., from the background scan state machine. | ||
118 | * | ||
119 | * In addition, do not stop monitor interface to allow it to be | ||
120 | * used from user space controlled off-channel operations. | ||
121 | */ | ||
122 | if (sdata->vif.type != NL80211_IFTYPE_STATION && | ||
123 | sdata->vif.type != NL80211_IFTYPE_MONITOR) { | ||
124 | set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state); | ||
125 | netif_tx_stop_all_queues(sdata->dev); | 130 | netif_tx_stop_all_queues(sdata->dev); |
131 | if (offchannel_ps_enable && | ||
132 | (sdata->vif.type == NL80211_IFTYPE_STATION) && | ||
133 | sdata->u.mgd.associated) | ||
134 | ieee80211_offchannel_ps_enable(sdata, true); | ||
126 | } | 135 | } |
127 | } | 136 | } |
128 | mutex_unlock(&local->iflist_mtx); | 137 | mutex_unlock(&local->iflist_mtx); |
129 | } | 138 | } |
130 | 139 | ||
131 | void ieee80211_offchannel_stop_station(struct ieee80211_local *local) | 140 | void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local, |
141 | bool tell_ap) | ||
132 | { | 142 | { |
133 | struct ieee80211_sub_if_data *sdata; | 143 | struct ieee80211_sub_if_data *sdata; |
134 | 144 | ||
135 | /* | ||
136 | * notify the AP about us leaving the channel and stop all STA interfaces | ||
137 | */ | ||
138 | mutex_lock(&local->iflist_mtx); | 145 | mutex_lock(&local->iflist_mtx); |
139 | list_for_each_entry(sdata, &local->interfaces, list) { | 146 | list_for_each_entry(sdata, &local->interfaces, list) { |
140 | if (!ieee80211_sdata_running(sdata)) | 147 | if (!ieee80211_sdata_running(sdata)) |
141 | continue; | 148 | continue; |
142 | 149 | ||
143 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | 150 | if (sdata->vif.type == NL80211_IFTYPE_STATION && |
144 | set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state); | 151 | sdata->u.mgd.associated) |
145 | netif_tx_stop_all_queues(sdata->dev); | 152 | ieee80211_offchannel_ps_enable(sdata, tell_ap); |
146 | if (sdata->u.mgd.associated) | ||
147 | ieee80211_offchannel_ps_enable(sdata); | ||
148 | } | ||
149 | } | 153 | } |
150 | mutex_unlock(&local->iflist_mtx); | 154 | mutex_unlock(&local->iflist_mtx); |
151 | } | 155 | } |
152 | 156 | ||
153 | void ieee80211_offchannel_return(struct ieee80211_local *local, | 157 | void ieee80211_offchannel_return(struct ieee80211_local *local, |
154 | bool enable_beaconing) | 158 | bool enable_beaconing, |
159 | bool offchannel_ps_disable) | ||
155 | { | 160 | { |
156 | struct ieee80211_sub_if_data *sdata; | 161 | struct ieee80211_sub_if_data *sdata; |
157 | 162 | ||
@@ -161,7 +166,8 @@ void ieee80211_offchannel_return(struct ieee80211_local *local, | |||
161 | continue; | 166 | continue; |
162 | 167 | ||
163 | /* Tell AP we're back */ | 168 | /* Tell AP we're back */ |
164 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | 169 | if (offchannel_ps_disable && |
170 | sdata->vif.type == NL80211_IFTYPE_STATION) { | ||
165 | if (sdata->u.mgd.associated) | 171 | if (sdata->u.mgd.associated) |
166 | ieee80211_offchannel_ps_disable(sdata); | 172 | ieee80211_offchannel_ps_disable(sdata); |
167 | } | 173 | } |
@@ -181,7 +187,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local, | |||
181 | netif_tx_wake_all_queues(sdata->dev); | 187 | netif_tx_wake_all_queues(sdata->dev); |
182 | } | 188 | } |
183 | 189 | ||
184 | /* re-enable beaconing */ | 190 | /* Check to see if we should re-enable beaconing */ |
185 | if (enable_beaconing && | 191 | if (enable_beaconing && |
186 | (sdata->vif.type == NL80211_IFTYPE_AP || | 192 | (sdata->vif.type == NL80211_IFTYPE_AP || |
187 | sdata->vif.type == NL80211_IFTYPE_ADHOC || | 193 | sdata->vif.type == NL80211_IFTYPE_ADHOC || |
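The offchannel.c refactoring above separates "stop the vifs for off-channel work" from "tell the AP we are asleep", because the latter relies on power-save signalling: a station announces sleep by sending a nullfunc data frame with the Power Management bit set so the AP buffers its traffic, and clears it on return. A tiny stand-alone sketch of that frame-control encoding, with constants following the usual 802.11 bit layout (cf. the kernel's IEEE80211_FCTL_PM); this is illustration, not mac80211 code:

#include <stdint.h>
#include <stdio.h>

#define FTYPE_DATA      0x0008
#define STYPE_NULLFUNC  0x0040
#define FCTL_TODS       0x0100
#define FCTL_PM         0x1000

static uint16_t nullfunc_frame_control(int powersave)
{
	uint16_t fc = FTYPE_DATA | STYPE_NULLFUNC | FCTL_TODS;

	if (powersave)
		fc |= FCTL_PM;	/* "I am going to sleep, please buffer" */
	return fc;
}

int main(void)
{
	printf("sleep fc=0x%04x, awake fc=0x%04x\n",
	       nullfunc_frame_control(1), nullfunc_frame_control(0));
	return 0;
}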
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index a6701ed87f0d..045b2fe4a414 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -85,6 +85,9 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local, | |||
85 | if (len & 1) /* padding for RX_FLAGS if necessary */ | 85 | if (len & 1) /* padding for RX_FLAGS if necessary */ |
86 | len++; | 86 | len++; |
87 | 87 | ||
88 | if (status->flag & RX_FLAG_HT) /* HT info */ | ||
89 | len += 3; | ||
90 | |||
88 | return len; | 91 | return len; |
89 | } | 92 | } |
90 | 93 | ||
@@ -139,11 +142,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | |||
139 | /* IEEE80211_RADIOTAP_RATE */ | 142 | /* IEEE80211_RADIOTAP_RATE */ |
140 | if (status->flag & RX_FLAG_HT) { | 143 | if (status->flag & RX_FLAG_HT) { |
141 | /* | 144 | /* |
142 | * TODO: add following information into radiotap header once | 145 | * MCS information is a separate field in radiotap, |
143 | * suitable fields are defined for it: | 146 | * added below. |
144 | * - MCS index (status->rate_idx) | ||
145 | * - HT40 (status->flag & RX_FLAG_40MHZ) | ||
146 | * - short-GI (status->flag & RX_FLAG_SHORT_GI) | ||
147 | */ | 147 | */ |
148 | *pos = 0; | 148 | *pos = 0; |
149 | } else { | 149 | } else { |
@@ -193,6 +193,20 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | |||
193 | rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP; | 193 | rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP; |
194 | put_unaligned_le16(rx_flags, pos); | 194 | put_unaligned_le16(rx_flags, pos); |
195 | pos += 2; | 195 | pos += 2; |
196 | |||
197 | if (status->flag & RX_FLAG_HT) { | ||
198 | rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS); | ||
199 | *pos++ = IEEE80211_RADIOTAP_MCS_HAVE_MCS | | ||
200 | IEEE80211_RADIOTAP_MCS_HAVE_GI | | ||
201 | IEEE80211_RADIOTAP_MCS_HAVE_BW; | ||
202 | *pos = 0; | ||
203 | if (status->flag & RX_FLAG_SHORT_GI) | ||
204 | *pos |= IEEE80211_RADIOTAP_MCS_SGI; | ||
205 | if (status->flag & RX_FLAG_40MHZ) | ||
206 | *pos |= IEEE80211_RADIOTAP_MCS_BW_40; | ||
207 | pos++; | ||
208 | *pos++ = status->rate_idx; | ||
209 | } | ||
196 | } | 210 | } |
197 | 211 | ||
198 | /* | 212 | /* |
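The radiotap MCS field written above is three bytes: a "known" byte, a flags byte, and the MCS index. A small decoding sketch using the same IEEE80211_RADIOTAP_MCS_* masks, assuming mcs_field points at those three bytes in a captured radiotap header:

#include <linux/kernel.h>
#include <net/ieee80211_radiotap.h>

static void example_parse_radiotap_mcs(const u8 *mcs_field)
{
	u8 known = mcs_field[0];
	u8 flags = mcs_field[1];
	u8 index = mcs_field[2];

	if ((known & IEEE80211_RADIOTAP_MCS_HAVE_GI) &&
	    (flags & IEEE80211_RADIOTAP_MCS_SGI))
		pr_debug("short guard interval\n");
	if ((known & IEEE80211_RADIOTAP_MCS_HAVE_BW) &&
	    (flags & IEEE80211_RADIOTAP_MCS_BW_40))
		pr_debug("40 MHz\n");
	if (known & IEEE80211_RADIOTAP_MCS_HAVE_MCS)
		pr_debug("MCS index %u\n", index);
}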
@@ -392,16 +406,10 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx) | |||
392 | if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN))) | 406 | if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN))) |
393 | return RX_CONTINUE; | 407 | return RX_CONTINUE; |
394 | 408 | ||
395 | if (test_bit(SCAN_HW_SCANNING, &local->scanning)) | 409 | if (test_bit(SCAN_HW_SCANNING, &local->scanning) || |
410 | test_bit(SCAN_SW_SCANNING, &local->scanning)) | ||
396 | return ieee80211_scan_rx(rx->sdata, skb); | 411 | return ieee80211_scan_rx(rx->sdata, skb); |
397 | 412 | ||
398 | if (test_bit(SCAN_SW_SCANNING, &local->scanning)) { | ||
399 | /* drop all the other packets during a software scan anyway */ | ||
400 | if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED) | ||
401 | dev_kfree_skb(skb); | ||
402 | return RX_QUEUED; | ||
403 | } | ||
404 | |||
405 | /* scanning finished during invoking of handlers */ | 413 | /* scanning finished during invoking of handlers */ |
406 | I802_DEBUG_INC(local->rx_handlers_drop_passive_scan); | 414 | I802_DEBUG_INC(local->rx_handlers_drop_passive_scan); |
407 | return RX_DROP_UNUSABLE; | 415 | return RX_DROP_UNUSABLE; |
@@ -798,7 +806,7 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx) | |||
798 | rx->local->dot11FrameDuplicateCount++; | 806 | rx->local->dot11FrameDuplicateCount++; |
799 | rx->sta->num_duplicates++; | 807 | rx->sta->num_duplicates++; |
800 | } | 808 | } |
801 | return RX_DROP_MONITOR; | 809 | return RX_DROP_UNUSABLE; |
802 | } else | 810 | } else |
803 | rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl; | 811 | rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl; |
804 | } | 812 | } |
@@ -1088,7 +1096,8 @@ static void ap_sta_ps_start(struct sta_info *sta) | |||
1088 | 1096 | ||
1089 | atomic_inc(&sdata->bss->num_sta_ps); | 1097 | atomic_inc(&sdata->bss->num_sta_ps); |
1090 | set_sta_flags(sta, WLAN_STA_PS_STA); | 1098 | set_sta_flags(sta, WLAN_STA_PS_STA); |
1091 | drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta); | 1099 | if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS)) |
1100 | drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta); | ||
1092 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 1101 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
1093 | printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n", | 1102 | printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n", |
1094 | sdata->name, sta->sta.addr, sta->sta.aid); | 1103 | sdata->name, sta->sta.addr, sta->sta.aid); |
@@ -1117,6 +1126,27 @@ static void ap_sta_ps_end(struct sta_info *sta) | |||
1117 | ieee80211_sta_ps_deliver_wakeup(sta); | 1126 | ieee80211_sta_ps_deliver_wakeup(sta); |
1118 | } | 1127 | } |
1119 | 1128 | ||
1129 | int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start) | ||
1130 | { | ||
1131 | struct sta_info *sta_inf = container_of(sta, struct sta_info, sta); | ||
1132 | bool in_ps; | ||
1133 | |||
1134 | WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS)); | ||
1135 | |||
1136 | /* Don't let the same PS state be set twice */ | ||
1137 | in_ps = test_sta_flags(sta_inf, WLAN_STA_PS_STA); | ||
1138 | if ((start && in_ps) || (!start && !in_ps)) | ||
1139 | return -EINVAL; | ||
1140 | |||
1141 | if (start) | ||
1142 | ap_sta_ps_start(sta_inf); | ||
1143 | else | ||
1144 | ap_sta_ps_end(sta_inf); | ||
1145 | |||
1146 | return 0; | ||
1147 | } | ||
1148 | EXPORT_SYMBOL(ieee80211_sta_ps_transition); | ||
1149 | |||
1120 | static ieee80211_rx_result debug_noinline | 1150 | static ieee80211_rx_result debug_noinline |
1121 | ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) | 1151 | ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) |
1122 | { | 1152 | { |
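The newly exported ieee80211_sta_ps_transition() lets a driver that sets IEEE80211_HW_AP_LINK_PS report peer power-save changes itself instead of having mac80211 infer them. A sketch of a driver-side caller; detecting the PM bit in the driver's own RX path is an assumption about driver internals.

#include <net/mac80211.h>

static void example_report_peer_ps(struct ieee80211_sta *sta, bool entering_ps)
{
	int ret = ieee80211_sta_ps_transition(sta, entering_ps);

	/* -EINVAL only means the station was already in that PS state */
	if (ret && ret != -EINVAL)
		pr_debug("unexpected PS transition result %d\n", ret);
}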
@@ -1161,7 +1191,8 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) | |||
1161 | * Change STA power saving mode only at the end of a frame | 1191 | * Change STA power saving mode only at the end of a frame |
1162 | * exchange sequence. | 1192 | * exchange sequence. |
1163 | */ | 1193 | */ |
1164 | if (!ieee80211_has_morefrags(hdr->frame_control) && | 1194 | if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) && |
1195 | !ieee80211_has_morefrags(hdr->frame_control) && | ||
1165 | !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && | 1196 | !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && |
1166 | (rx->sdata->vif.type == NL80211_IFTYPE_AP || | 1197 | (rx->sdata->vif.type == NL80211_IFTYPE_AP || |
1167 | rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) { | 1198 | rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) { |
@@ -1556,17 +1587,36 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx) | |||
1556 | { | 1587 | { |
1557 | struct ieee80211_sub_if_data *sdata = rx->sdata; | 1588 | struct ieee80211_sub_if_data *sdata = rx->sdata; |
1558 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; | 1589 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; |
1590 | bool check_port_control = false; | ||
1591 | struct ethhdr *ehdr; | ||
1592 | int ret; | ||
1559 | 1593 | ||
1560 | if (ieee80211_has_a4(hdr->frame_control) && | 1594 | if (ieee80211_has_a4(hdr->frame_control) && |
1561 | sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) | 1595 | sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) |
1562 | return -1; | 1596 | return -1; |
1563 | 1597 | ||
1598 | if (sdata->vif.type == NL80211_IFTYPE_STATION && | ||
1599 | !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) { | ||
1600 | |||
1601 | if (!sdata->u.mgd.use_4addr) | ||
1602 | return -1; | ||
1603 | else | ||
1604 | check_port_control = true; | ||
1605 | } | ||
1606 | |||
1564 | if (is_multicast_ether_addr(hdr->addr1) && | 1607 | if (is_multicast_ether_addr(hdr->addr1) && |
1565 | ((sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) || | 1608 | sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) |
1566 | (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr))) | 1609 | return -1; |
1610 | |||
1611 | ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); | ||
1612 | if (ret < 0 || !check_port_control) | ||
1613 | return ret; | ||
1614 | |||
1615 | ehdr = (struct ethhdr *) rx->skb->data; | ||
1616 | if (ehdr->h_proto != rx->sdata->control_port_protocol) | ||
1567 | return -1; | 1617 | return -1; |
1568 | 1618 | ||
1569 | return ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); | 1619 | return 0; |
1570 | } | 1620 | } |
1571 | 1621 | ||
1572 | /* | 1622 | /* |
@@ -1893,7 +1943,10 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx) | |||
1893 | dev->stats.rx_bytes += rx->skb->len; | 1943 | dev->stats.rx_bytes += rx->skb->len; |
1894 | 1944 | ||
1895 | if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && | 1945 | if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && |
1896 | !is_multicast_ether_addr(((struct ethhdr *)rx->skb->data)->h_dest)) { | 1946 | !is_multicast_ether_addr( |
1947 | ((struct ethhdr *)rx->skb->data)->h_dest) && | ||
1948 | (!local->scanning && | ||
1949 | !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) { | ||
1897 | mod_timer(&local->dynamic_ps_timer, jiffies + | 1950 | mod_timer(&local->dynamic_ps_timer, jiffies + |
1898 | msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); | 1951 | msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); |
1899 | } | 1952 | } |
@@ -2590,7 +2643,8 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx, | |||
2590 | return 0; | 2643 | return 0; |
2591 | if (!multicast && | 2644 | if (!multicast && |
2592 | compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) { | 2645 | compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) { |
2593 | if (!(sdata->dev->flags & IFF_PROMISC)) | 2646 | if (!(sdata->dev->flags & IFF_PROMISC) || |
2647 | sdata->u.mgd.use_4addr) | ||
2594 | return 0; | 2648 | return 0; |
2595 | status->rx_flags &= ~IEEE80211_RX_RA_MATCH; | 2649 | status->rx_flags &= ~IEEE80211_RX_RA_MATCH; |
2596 | } | 2650 | } |
@@ -2639,7 +2693,8 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx, | |||
2639 | return 0; | 2693 | return 0; |
2640 | } else if (!ieee80211_bssid_match(bssid, | 2694 | } else if (!ieee80211_bssid_match(bssid, |
2641 | sdata->vif.addr)) { | 2695 | sdata->vif.addr)) { |
2642 | if (!(status->rx_flags & IEEE80211_RX_IN_SCAN)) | 2696 | if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) && |
2697 | !ieee80211_is_beacon(hdr->frame_control)) | ||
2643 | return 0; | 2698 | return 0; |
2644 | status->rx_flags &= ~IEEE80211_RX_RA_MATCH; | 2699 | status->rx_flags &= ~IEEE80211_RX_RA_MATCH; |
2645 | } | 2700 | } |
@@ -2692,7 +2747,7 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, | |||
2692 | if (!skb) { | 2747 | if (!skb) { |
2693 | if (net_ratelimit()) | 2748 | if (net_ratelimit()) |
2694 | wiphy_debug(local->hw.wiphy, | 2749 | wiphy_debug(local->hw.wiphy, |
2695 | "failed to copy multicast frame for %s\n", | 2750 | "failed to copy skb for %s\n", |
2696 | sdata->name); | 2751 | sdata->name); |
2697 | return true; | 2752 | return true; |
2698 | } | 2753 | } |
@@ -2730,7 +2785,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | |||
2730 | local->dot11ReceivedFragmentCount++; | 2785 | local->dot11ReceivedFragmentCount++; |
2731 | 2786 | ||
2732 | if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) || | 2787 | if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) || |
2733 | test_bit(SCAN_OFF_CHANNEL, &local->scanning))) | 2788 | test_bit(SCAN_SW_SCANNING, &local->scanning))) |
2734 | status->rx_flags |= IEEE80211_RX_IN_SCAN; | 2789 | status->rx_flags |= IEEE80211_RX_IN_SCAN; |
2735 | 2790 | ||
2736 | if (ieee80211_is_mgmt(fc)) | 2791 | if (ieee80211_is_mgmt(fc)) |
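The reworked __ieee80211_data_to_8023() above converts the frame first and only then checks the ethertype when a 4-address STA frame must be restricted to the control port. A standalone sketch of the same idea using the exported cfg80211 helper; hard-coding ETH_P_PAE here merely stands in for sdata->control_port_protocol.

#include <net/cfg80211.h>
#include <linux/if_ether.h>

static int example_check_port_control(struct sk_buff *skb, const u8 *vif_addr)
{
	struct ethhdr *ehdr;
	int ret;

	ret = ieee80211_data_to_8023(skb, vif_addr, NL80211_IFTYPE_STATION);
	if (ret < 0)
		return ret;

	ehdr = (struct ethhdr *)skb->data;
	if (ehdr->h_proto != cpu_to_be16(ETH_P_PAE))
		return -1;	/* not the control port protocol */

	return 0;
}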
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index fb274db77e3c..0ea6adae3e06 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c | |||
@@ -196,7 +196,8 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) | |||
196 | ieee802_11_parse_elems(elements, skb->len - baselen, &elems); | 196 | ieee802_11_parse_elems(elements, skb->len - baselen, &elems); |
197 | 197 | ||
198 | if (elems.ds_params && elems.ds_params_len == 1) | 198 | if (elems.ds_params && elems.ds_params_len == 1) |
199 | freq = ieee80211_channel_to_frequency(elems.ds_params[0]); | 199 | freq = ieee80211_channel_to_frequency(elems.ds_params[0], |
200 | rx_status->band); | ||
200 | else | 201 | else |
201 | freq = rx_status->freq; | 202 | freq = rx_status->freq; |
202 | 203 | ||
@@ -211,6 +212,14 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) | |||
211 | if (bss) | 212 | if (bss) |
212 | ieee80211_rx_bss_put(sdata->local, bss); | 213 | ieee80211_rx_bss_put(sdata->local, bss); |
213 | 214 | ||
215 | /* If we are on-operating-channel, and this packet is for the | ||
216 | * current channel, pass the pkt on up the stack so that | ||
217 | * the rest of the stack can make use of it. | ||
218 | */ | ||
219 | if (ieee80211_cfg_on_oper_channel(sdata->local) | ||
220 | && (channel == sdata->local->oper_channel)) | ||
221 | return RX_CONTINUE; | ||
222 | |||
214 | dev_kfree_skb(skb); | 223 | dev_kfree_skb(skb); |
215 | return RX_QUEUED; | 224 | return RX_QUEUED; |
216 | } | 225 | } |
@@ -292,15 +301,31 @@ static void __ieee80211_scan_completed_finish(struct ieee80211_hw *hw, | |||
292 | bool was_hw_scan) | 301 | bool was_hw_scan) |
293 | { | 302 | { |
294 | struct ieee80211_local *local = hw_to_local(hw); | 303 | struct ieee80211_local *local = hw_to_local(hw); |
304 | bool on_oper_chan; | ||
305 | bool enable_beacons = false; | ||
306 | |||
307 | mutex_lock(&local->mtx); | ||
308 | on_oper_chan = ieee80211_cfg_on_oper_channel(local); | ||
309 | |||
310 | if (was_hw_scan || !on_oper_chan) { | ||
311 | if (WARN_ON(local->scan_channel)) | ||
312 | local->scan_channel = NULL; | ||
313 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); | ||
314 | } | ||
295 | 315 | ||
296 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); | ||
297 | if (!was_hw_scan) { | 316 | if (!was_hw_scan) { |
317 | bool on_oper_chan2; | ||
298 | ieee80211_configure_filter(local); | 318 | ieee80211_configure_filter(local); |
299 | drv_sw_scan_complete(local); | 319 | drv_sw_scan_complete(local); |
300 | ieee80211_offchannel_return(local, true); | 320 | on_oper_chan2 = ieee80211_cfg_on_oper_channel(local); |
321 | /* We should always be on-channel at this point. */ | ||
322 | WARN_ON(!on_oper_chan2); | ||
323 | if (on_oper_chan2 && (on_oper_chan != on_oper_chan2)) | ||
324 | enable_beacons = true; | ||
325 | |||
326 | ieee80211_offchannel_return(local, enable_beacons, true); | ||
301 | } | 327 | } |
302 | 328 | ||
303 | mutex_lock(&local->mtx); | ||
304 | ieee80211_recalc_idle(local); | 329 | ieee80211_recalc_idle(local); |
305 | mutex_unlock(&local->mtx); | 330 | mutex_unlock(&local->mtx); |
306 | 331 | ||
@@ -340,13 +365,15 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local) | |||
340 | */ | 365 | */ |
341 | drv_sw_scan_start(local); | 366 | drv_sw_scan_start(local); |
342 | 367 | ||
343 | ieee80211_offchannel_stop_beaconing(local); | ||
344 | |||
345 | local->leave_oper_channel_time = 0; | 368 | local->leave_oper_channel_time = 0; |
346 | local->next_scan_state = SCAN_DECISION; | 369 | local->next_scan_state = SCAN_DECISION; |
347 | local->scan_channel_idx = 0; | 370 | local->scan_channel_idx = 0; |
348 | 371 | ||
349 | drv_flush(local, false); | 372 | /* We always want to use off-channel PS, even if we |
373 | * are not really leaving oper-channel. Don't | ||
374 | * tell the AP though, as long as we are on-channel. | ||
375 | */ | ||
376 | ieee80211_offchannel_enable_all_ps(local, false); | ||
350 | 377 | ||
351 | ieee80211_configure_filter(local); | 378 | ieee80211_configure_filter(local); |
352 | 379 | ||
@@ -486,7 +513,21 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local, | |||
486 | } | 513 | } |
487 | mutex_unlock(&local->iflist_mtx); | 514 | mutex_unlock(&local->iflist_mtx); |
488 | 515 | ||
489 | if (local->scan_channel) { | 516 | next_chan = local->scan_req->channels[local->scan_channel_idx]; |
517 | |||
518 | if (ieee80211_cfg_on_oper_channel(local)) { | ||
519 | /* We're currently on operating channel. */ | ||
520 | if ((next_chan == local->oper_channel) && | ||
521 | (local->_oper_channel_type == NL80211_CHAN_NO_HT)) | ||
522 | /* We don't need to move off of operating channel. */ | ||
523 | local->next_scan_state = SCAN_SET_CHANNEL; | ||
524 | else | ||
525 | /* | ||
526 | * We do need to leave operating channel, as next | ||
527 | * scan is somewhere else. | ||
528 | */ | ||
529 | local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL; | ||
530 | } else { | ||
490 | /* | 531 | /* |
491 | * we're currently scanning a different channel, let's | 532 | * we're currently scanning a different channel, let's |
492 | * see if we can scan another channel without interfering | 533 | * see if we can scan another channel without interfering |
@@ -502,7 +543,6 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local, | |||
502 | * | 543 | * |
503 | * Otherwise switch back to the operating channel. | 544 | * Otherwise switch back to the operating channel. |
504 | */ | 545 | */ |
505 | next_chan = local->scan_req->channels[local->scan_channel_idx]; | ||
506 | 546 | ||
507 | bad_latency = time_after(jiffies + | 547 | bad_latency = time_after(jiffies + |
508 | ieee80211_scan_get_channel_time(next_chan), | 548 | ieee80211_scan_get_channel_time(next_chan), |
@@ -520,12 +560,6 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local, | |||
520 | local->next_scan_state = SCAN_ENTER_OPER_CHANNEL; | 560 | local->next_scan_state = SCAN_ENTER_OPER_CHANNEL; |
521 | else | 561 | else |
522 | local->next_scan_state = SCAN_SET_CHANNEL; | 562 | local->next_scan_state = SCAN_SET_CHANNEL; |
523 | } else { | ||
524 | /* | ||
525 | * we're on the operating channel currently, let's | ||
526 | * leave that channel now to scan another one | ||
527 | */ | ||
528 | local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL; | ||
529 | } | 563 | } |
530 | 564 | ||
531 | *next_delay = 0; | 565 | *next_delay = 0; |
@@ -534,9 +568,10 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local, | |||
534 | static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local, | 568 | static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local, |
535 | unsigned long *next_delay) | 569 | unsigned long *next_delay) |
536 | { | 570 | { |
537 | ieee80211_offchannel_stop_station(local); | 571 | /* PS will already be in off-channel mode, |
538 | 572 | * we do that once at the beginning of scanning. | |
539 | __set_bit(SCAN_OFF_CHANNEL, &local->scanning); | 573 | */ |
574 | ieee80211_offchannel_stop_vifs(local, false); | ||
540 | 575 | ||
541 | /* | 576 | /* |
542 | * What if the nullfunc frames didn't arrive? | 577 | * What if the nullfunc frames didn't arrive? |
@@ -559,15 +594,15 @@ static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *loca | |||
559 | { | 594 | { |
560 | /* switch back to the operating channel */ | 595 | /* switch back to the operating channel */ |
561 | local->scan_channel = NULL; | 596 | local->scan_channel = NULL; |
562 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); | 597 | if (!ieee80211_cfg_on_oper_channel(local)) |
598 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); | ||
563 | 599 | ||
564 | /* | 600 | /* |
565 | * Only re-enable station mode interface now; beaconing will be | 601 | * Re-enable vifs and beaconing. Leave PS |
566 | * re-enabled once the full scan has been completed. | 602 | * in off-channel state... will put that back
566 | * re-enabled once the full scan has been completed. | 602 | * in off-channel state... will put that back
603 | * on-channel at the end of scanning. | ||
567 | */ | 604 | */ |
568 | ieee80211_offchannel_return(local, false); | 605 | ieee80211_offchannel_return(local, true, false); |
569 | |||
570 | __clear_bit(SCAN_OFF_CHANNEL, &local->scanning); | ||
571 | 606 | ||
572 | *next_delay = HZ / 5; | 607 | *next_delay = HZ / 5; |
573 | local->next_scan_state = SCAN_DECISION; | 608 | local->next_scan_state = SCAN_DECISION; |
@@ -583,8 +618,12 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, | |||
583 | chan = local->scan_req->channels[local->scan_channel_idx]; | 618 | chan = local->scan_req->channels[local->scan_channel_idx]; |
584 | 619 | ||
585 | local->scan_channel = chan; | 620 | local->scan_channel = chan; |
586 | if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL)) | 621 | |
587 | skip = 1; | 622 | /* Only call hw-config if we really need to change channels. */ |
623 | if ((chan != local->hw.conf.channel) || | ||
624 | (local->hw.conf.channel_type != NL80211_CHAN_NO_HT)) | ||
625 | if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL)) | ||
626 | skip = 1; | ||
588 | 627 | ||
589 | /* advance state machine to next channel/band */ | 628 | /* advance state machine to next channel/band */ |
590 | local->scan_channel_idx++; | 629 | local->scan_channel_idx++; |
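ieee80211_cfg_on_oper_channel() is only called, never defined, in the hunks shown here (its body lives elsewhere in this patch set). A guess at the check it performs, inferred from how the scan state machine above uses it; oper_channel and _oper_channel_type are existing ieee80211_local fields.

#include "ieee80211_i.h"	/* mac80211-internal struct ieee80211_local */

static bool example_cfg_on_oper_channel(struct ieee80211_local *local)
{
	return local->hw.conf.channel == local->oper_channel &&
	       local->hw.conf.channel_type == local->_oper_channel_type;
}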
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index c426504ed1cf..5a11078827ab 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -899,7 +899,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) | |||
899 | struct ieee80211_local *local = sdata->local; | 899 | struct ieee80211_local *local = sdata->local; |
900 | int sent, buffered; | 900 | int sent, buffered; |
901 | 901 | ||
902 | drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta); | 902 | if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS)) |
903 | drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta); | ||
903 | 904 | ||
904 | if (!skb_queue_empty(&sta->ps_tx_buf)) | 905 | if (!skb_queue_empty(&sta->ps_tx_buf)) |
905 | sta_info_clear_tim_bit(sta); | 906 | sta_info_clear_tim_bit(sta); |
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index bbdd2a86a94b..ca0b69060ef7 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -82,6 +82,7 @@ enum ieee80211_sta_info_flags { | |||
82 | * @state: session state (see above) | 82 | * @state: session state (see above) |
83 | * @stop_initiator: initiator of a session stop | 83 | * @stop_initiator: initiator of a session stop |
84 | * @tx_stop: TX DelBA frame when stopping | 84 | * @tx_stop: TX DelBA frame when stopping |
85 | * @buf_size: reorder buffer size at receiver | ||
85 | * | 86 | * |
86 | * This structure's lifetime is managed by RCU, assignments to | 87 | * This structure's lifetime is managed by RCU, assignments to |
87 | * the array holding it must hold the aggregation mutex. | 88 | * the array holding it must hold the aggregation mutex. |
@@ -101,6 +102,7 @@ struct tid_ampdu_tx { | |||
101 | u8 dialog_token; | 102 | u8 dialog_token; |
102 | u8 stop_initiator; | 103 | u8 stop_initiator; |
103 | bool tx_stop; | 104 | bool tx_stop; |
105 | u8 buf_size; | ||
104 | }; | 106 | }; |
105 | 107 | ||
106 | /** | 108 | /** |
diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 38a797217a91..ffb0de9bc2fa 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c | |||
@@ -98,6 +98,10 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, | |||
98 | * (b) always process RX events before TX status events if ordering | 98 | * (b) always process RX events before TX status events if ordering |
99 | * can be unknown, for example with different interrupt status | 99 | * can be unknown, for example with different interrupt status |
100 | * bits. | 100 | * bits. |
101 | * (c) if PS mode transitions are manual (i.e. the flag | ||
102 | * %IEEE80211_HW_AP_LINK_PS is set), always process PS state | ||
103 | * changes before calling TX status events if ordering can be | ||
104 | * unknown. | ||
101 | */ | 105 | */ |
102 | if (test_sta_flags(sta, WLAN_STA_PS_STA) && | 106 | if (test_sta_flags(sta, WLAN_STA_PS_STA) && |
103 | skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) { | 107 | skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) { |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 5950e3abead9..38e593939727 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -257,7 +257,8 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) | |||
257 | if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) | 257 | if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) |
258 | return TX_CONTINUE; | 258 | return TX_CONTINUE; |
259 | 259 | ||
260 | if (unlikely(test_bit(SCAN_OFF_CHANNEL, &tx->local->scanning)) && | 260 | if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) && |
261 | test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) && | ||
261 | !ieee80211_is_probe_req(hdr->frame_control) && | 262 | !ieee80211_is_probe_req(hdr->frame_control) && |
262 | !ieee80211_is_nullfunc(hdr->frame_control)) | 263 | !ieee80211_is_nullfunc(hdr->frame_control)) |
263 | /* | 264 | /* |
@@ -1394,7 +1395,8 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) | |||
1394 | /* handlers after fragment must be aware of tx info fragmentation! */ | 1395 | /* handlers after fragment must be aware of tx info fragmentation! */ |
1395 | CALL_TXH(ieee80211_tx_h_stats); | 1396 | CALL_TXH(ieee80211_tx_h_stats); |
1396 | CALL_TXH(ieee80211_tx_h_encrypt); | 1397 | CALL_TXH(ieee80211_tx_h_encrypt); |
1397 | CALL_TXH(ieee80211_tx_h_calculate_duration); | 1398 | if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)) |
1399 | CALL_TXH(ieee80211_tx_h_calculate_duration); | ||
1398 | #undef CALL_TXH | 1400 | #undef CALL_TXH |
1399 | 1401 | ||
1400 | txh_done: | 1402 | txh_done: |
@@ -1750,7 +1752,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1750 | __le16 fc; | 1752 | __le16 fc; |
1751 | struct ieee80211_hdr hdr; | 1753 | struct ieee80211_hdr hdr; |
1752 | struct ieee80211s_hdr mesh_hdr __maybe_unused; | 1754 | struct ieee80211s_hdr mesh_hdr __maybe_unused; |
1753 | struct mesh_path *mppath = NULL; | 1755 | struct mesh_path __maybe_unused *mppath = NULL; |
1754 | const u8 *encaps_data; | 1756 | const u8 *encaps_data; |
1755 | int encaps_len, skip_header_bytes; | 1757 | int encaps_len, skip_header_bytes; |
1756 | int nh_pos, h_pos; | 1758 | int nh_pos, h_pos; |
@@ -1815,19 +1817,19 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1815 | mppath = mpp_path_lookup(skb->data, sdata); | 1817 | mppath = mpp_path_lookup(skb->data, sdata); |
1816 | 1818 | ||
1817 | /* | 1819 | /* |
1818 | * Do not use address extension, if it is a packet from | 1820 | * Use address extension if it is a packet from |
1819 | * the same interface and the destination is not being | 1821 | * another interface or if we know the destination |
1820 | * proxied by any other mest point. | 1822 | * is being proxied by a portal (i.e. portal address |
1823 | * differs from proxied address) | ||
1821 | */ | 1824 | */ |
1822 | if (compare_ether_addr(sdata->vif.addr, | 1825 | if (compare_ether_addr(sdata->vif.addr, |
1823 | skb->data + ETH_ALEN) == 0 && | 1826 | skb->data + ETH_ALEN) == 0 && |
1824 | (!mppath || !compare_ether_addr(mppath->mpp, skb->data))) { | 1827 | !(mppath && compare_ether_addr(mppath->mpp, skb->data))) { |
1825 | hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, | 1828 | hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, |
1826 | skb->data, skb->data + ETH_ALEN); | 1829 | skb->data, skb->data + ETH_ALEN); |
1827 | meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, | 1830 | meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, |
1828 | sdata, NULL, NULL); | 1831 | sdata, NULL, NULL); |
1829 | } else { | 1832 | } else { |
1830 | /* packet from other interface */ | ||
1831 | int is_mesh_mcast = 1; | 1833 | int is_mesh_mcast = 1; |
1832 | const u8 *mesh_da; | 1834 | const u8 *mesh_da; |
1833 | 1835 | ||
@@ -2178,6 +2180,8 @@ static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss, | |||
2178 | if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf)) | 2180 | if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf)) |
2179 | aid0 = 1; | 2181 | aid0 = 1; |
2180 | 2182 | ||
2183 | bss->dtim_bc_mc = aid0 == 1; | ||
2184 | |||
2181 | if (have_bits) { | 2185 | if (have_bits) { |
2182 | /* Find largest even number N1 so that bits numbered 1 through | 2186 | /* Find largest even number N1 so that bits numbered 1 through |
2183 | * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits | 2187 | * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits |
@@ -2230,6 +2234,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, | |||
2230 | 2234 | ||
2231 | sdata = vif_to_sdata(vif); | 2235 | sdata = vif_to_sdata(vif); |
2232 | 2236 | ||
2237 | if (!ieee80211_sdata_running(sdata)) | ||
2238 | goto out; | ||
2239 | |||
2233 | if (tim_offset) | 2240 | if (tim_offset) |
2234 | *tim_offset = 0; | 2241 | *tim_offset = 0; |
2235 | if (tim_length) | 2242 | if (tim_length) |
@@ -2238,7 +2245,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, | |||
2238 | if (sdata->vif.type == NL80211_IFTYPE_AP) { | 2245 | if (sdata->vif.type == NL80211_IFTYPE_AP) { |
2239 | ap = &sdata->u.ap; | 2246 | ap = &sdata->u.ap; |
2240 | beacon = rcu_dereference(ap->beacon); | 2247 | beacon = rcu_dereference(ap->beacon); |
2241 | if (ap && beacon) { | 2248 | if (beacon) { |
2242 | /* | 2249 | /* |
2243 | * headroom, head length, | 2250 | * headroom, head length, |
2244 | * tail length and maximum TIM length | 2251 | * tail length and maximum TIM length |
@@ -2299,6 +2306,11 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, | |||
2299 | struct ieee80211_mgmt *mgmt; | 2306 | struct ieee80211_mgmt *mgmt; |
2300 | u8 *pos; | 2307 | u8 *pos; |
2301 | 2308 | ||
2309 | #ifdef CONFIG_MAC80211_MESH | ||
2310 | if (!sdata->u.mesh.mesh_id_len) | ||
2311 | goto out; | ||
2312 | #endif | ||
2313 | |||
2302 | /* headroom, head length, tail length and maximum TIM length */ | 2314 | /* headroom, head length, tail length and maximum TIM length */ |
2303 | skb = dev_alloc_skb(local->tx_headroom + 400 + | 2315 | skb = dev_alloc_skb(local->tx_headroom + 400 + |
2304 | sdata->u.mesh.vendor_ie_len); | 2316 | sdata->u.mesh.vendor_ie_len); |
@@ -2540,7 +2552,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, | |||
2540 | if (sdata->vif.type != NL80211_IFTYPE_AP || !beacon || !beacon->head) | 2552 | if (sdata->vif.type != NL80211_IFTYPE_AP || !beacon || !beacon->head) |
2541 | goto out; | 2553 | goto out; |
2542 | 2554 | ||
2543 | if (bss->dtim_count != 0) | 2555 | if (bss->dtim_count != 0 || !bss->dtim_bc_mc) |
2544 | goto out; /* send buffered bc/mc only after DTIM beacon */ | 2556 | goto out; /* send buffered bc/mc only after DTIM beacon */ |
2545 | 2557 | ||
2546 | while (1) { | 2558 | while (1) { |
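The new dtim_bc_mc flag remembers whether the last TIM actually advertised buffered broadcast/multicast traffic, so ieee80211_get_buffered_bc() only releases frames the DTIM announced. A driver-side sketch of that pairing; the xmit callback stands in for the driver's own transmit path.

#include <net/mac80211.h>

static void example_beacon_dtim(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				void (*xmit)(struct sk_buff *skb))
{
	struct sk_buff *skb;

	skb = ieee80211_beacon_get_tim(hw, vif, NULL, NULL);
	if (!skb)
		return;
	xmit(skb);

	/* after a DTIM beacon, drain the bc/mc frames it advertised */
	while ((skb = ieee80211_get_buffered_bc(hw, vif)) != NULL)
		xmit(skb);
}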
diff --git a/net/mac80211/work.c b/net/mac80211/work.c index 36305e0d06ef..6bf787a5b38a 100644 --- a/net/mac80211/work.c +++ b/net/mac80211/work.c | |||
@@ -924,18 +924,44 @@ static void ieee80211_work_work(struct work_struct *work) | |||
924 | } | 924 | } |
925 | 925 | ||
926 | if (!started && !local->tmp_channel) { | 926 | if (!started && !local->tmp_channel) { |
927 | /* | 927 | bool on_oper_chan; |
928 | * TODO: could optimize this by leaving the | 928 | bool tmp_chan_changed = false; |
929 | * station vifs in awake mode if they | 929 | bool on_oper_chan2; |
930 | * happen to be on the same channel as | 930 | on_oper_chan = ieee80211_cfg_on_oper_channel(local); |
931 | * the requested channel | 931 | if (local->tmp_channel) |
932 | */ | 932 | if ((local->tmp_channel != wk->chan) || |
933 | ieee80211_offchannel_stop_beaconing(local); | 933 | (local->tmp_channel_type != wk->chan_type)) |
934 | ieee80211_offchannel_stop_station(local); | 934 | tmp_chan_changed = true; |
935 | 935 | ||
936 | local->tmp_channel = wk->chan; | 936 | local->tmp_channel = wk->chan; |
937 | local->tmp_channel_type = wk->chan_type; | 937 | local->tmp_channel_type = wk->chan_type; |
938 | ieee80211_hw_config(local, 0); | 938 | /* |
939 | * Leave the station vifs in awake mode if they | ||
940 | * happen to be on the same channel as | ||
941 | * the requested channel. | ||
942 | */ | ||
943 | on_oper_chan2 = ieee80211_cfg_on_oper_channel(local); | ||
944 | if (on_oper_chan != on_oper_chan2) { | ||
945 | if (on_oper_chan2) { | ||
946 | /* going off oper channel, PS too */ | ||
947 | ieee80211_offchannel_stop_vifs(local, | ||
948 | true); | ||
949 | ieee80211_hw_config(local, 0); | ||
950 | } else { | ||
951 | /* going on channel, but leave PS | ||
952 | * off-channel. */ | ||
953 | ieee80211_hw_config(local, 0); | ||
954 | ieee80211_offchannel_return(local, | ||
955 | true, | ||
956 | false); | ||
957 | } | ||
958 | } else if (tmp_chan_changed) | ||
959 | /* Still off-channel, but on some other | ||
960 | * channel, so update hardware. | ||
961 | * PS should already be off-channel. | ||
962 | */ | ||
963 | ieee80211_hw_config(local, 0); | ||
964 | |||
939 | started = true; | 965 | started = true; |
940 | wk->timeout = jiffies; | 966 | wk->timeout = jiffies; |
941 | } | 967 | } |
@@ -1011,9 +1037,27 @@ static void ieee80211_work_work(struct work_struct *work) | |||
1011 | } | 1037 | } |
1012 | 1038 | ||
1013 | if (!remain_off_channel && local->tmp_channel) { | 1039 | if (!remain_off_channel && local->tmp_channel) { |
1040 | bool on_oper_chan = ieee80211_cfg_on_oper_channel(local); | ||
1014 | local->tmp_channel = NULL; | 1041 | local->tmp_channel = NULL; |
1015 | ieee80211_hw_config(local, 0); | 1042 | /* If tmp_channel wasn't operating channel, then |
1016 | ieee80211_offchannel_return(local, true); | 1043 | * we need to go back on-channel. |
1044 | * NOTE: If we can ever be here while scanning, | ||
1045 | * or if the hw_config() channel config logic changes, | ||
1046 | * then we may need to do a more thorough check to see if | ||
1047 | * we still need to do a hardware config. Currently, | ||
1048 | * we cannot be here while scanning, however. | ||
1049 | */ | ||
1050 | if (ieee80211_cfg_on_oper_channel(local) && !on_oper_chan) | ||
1051 | ieee80211_hw_config(local, 0); | ||
1052 | |||
1053 | /* At the least, we need to disable offchannel_ps, | ||
1054 | * so just go ahead and run the entire offchannel | ||
1055 | * return logic here. We *could* skip enabling | ||
1056 | * beaconing if we were already on-oper-channel | ||
1057 | * as a future optimization. | ||
1058 | */ | ||
1059 | ieee80211_offchannel_return(local, true, true); | ||
1060 | |||
1017 | /* give connection some time to breathe */ | 1061 | /* give connection some time to breathe */ |
1018 | run_again(local, jiffies + HZ/2); | 1062 | run_again(local, jiffies + HZ/2); |
1019 | } | 1063 | } |
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index bee230d8fd11..f1765de2f4bf 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c | |||
@@ -26,13 +26,12 @@ | |||
26 | ieee80211_tx_result | 26 | ieee80211_tx_result |
27 | ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) | 27 | ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) |
28 | { | 28 | { |
29 | u8 *data, *key, *mic, key_offset; | 29 | u8 *data, *key, *mic; |
30 | size_t data_len; | 30 | size_t data_len; |
31 | unsigned int hdrlen; | 31 | unsigned int hdrlen; |
32 | struct ieee80211_hdr *hdr; | 32 | struct ieee80211_hdr *hdr; |
33 | struct sk_buff *skb = tx->skb; | 33 | struct sk_buff *skb = tx->skb; |
34 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 34 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
35 | int authenticator; | ||
36 | int tail; | 35 | int tail; |
37 | 36 | ||
38 | hdr = (struct ieee80211_hdr *)skb->data; | 37 | hdr = (struct ieee80211_hdr *)skb->data; |
@@ -47,6 +46,11 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) | |||
47 | data = skb->data + hdrlen; | 46 | data = skb->data + hdrlen; |
48 | data_len = skb->len - hdrlen; | 47 | data_len = skb->len - hdrlen; |
49 | 48 | ||
49 | if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE)) { | ||
50 | /* Need to use software crypto for the test */ | ||
51 | info->control.hw_key = NULL; | ||
52 | } | ||
53 | |||
50 | if (info->control.hw_key && | 54 | if (info->control.hw_key && |
51 | !(tx->flags & IEEE80211_TX_FRAGMENTED) && | 55 | !(tx->flags & IEEE80211_TX_FRAGMENTED) && |
52 | !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) { | 56 | !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) { |
@@ -62,17 +66,11 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) | |||
62 | skb_headroom(skb) < TKIP_IV_LEN)) | 66 | skb_headroom(skb) < TKIP_IV_LEN)) |
63 | return TX_DROP; | 67 | return TX_DROP; |
64 | 68 | ||
65 | #if 0 | 69 | key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]; |
66 | authenticator = fc & IEEE80211_FCTL_FROMDS; /* FIX */ | ||
67 | #else | ||
68 | authenticator = 1; | ||
69 | #endif | ||
70 | key_offset = authenticator ? | ||
71 | NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY : | ||
72 | NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY; | ||
73 | key = &tx->key->conf.key[key_offset]; | ||
74 | mic = skb_put(skb, MICHAEL_MIC_LEN); | 70 | mic = skb_put(skb, MICHAEL_MIC_LEN); |
75 | michael_mic(key, hdr, data, data_len, mic); | 71 | michael_mic(key, hdr, data, data_len, mic); |
72 | if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE)) | ||
73 | mic[0]++; | ||
76 | 74 | ||
77 | return TX_CONTINUE; | 75 | return TX_CONTINUE; |
78 | } | 76 | } |
@@ -81,14 +79,13 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) | |||
81 | ieee80211_rx_result | 79 | ieee80211_rx_result |
82 | ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) | 80 | ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) |
83 | { | 81 | { |
84 | u8 *data, *key = NULL, key_offset; | 82 | u8 *data, *key = NULL; |
85 | size_t data_len; | 83 | size_t data_len; |
86 | unsigned int hdrlen; | 84 | unsigned int hdrlen; |
87 | u8 mic[MICHAEL_MIC_LEN]; | 85 | u8 mic[MICHAEL_MIC_LEN]; |
88 | struct sk_buff *skb = rx->skb; | 86 | struct sk_buff *skb = rx->skb; |
89 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | 87 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); |
90 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 88 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
91 | int authenticator = 1, wpa_test = 0; | ||
92 | 89 | ||
93 | /* No way to verify the MIC if the hardware stripped it */ | 90 | /* No way to verify the MIC if the hardware stripped it */ |
94 | if (status->flag & RX_FLAG_MMIC_STRIPPED) | 91 | if (status->flag & RX_FLAG_MMIC_STRIPPED) |
@@ -106,17 +103,9 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) | |||
106 | data = skb->data + hdrlen; | 103 | data = skb->data + hdrlen; |
107 | data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; | 104 | data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; |
108 | 105 | ||
109 | #if 0 | 106 | key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; |
110 | authenticator = fc & IEEE80211_FCTL_TODS; /* FIX */ | ||
111 | #else | ||
112 | authenticator = 1; | ||
113 | #endif | ||
114 | key_offset = authenticator ? | ||
115 | NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY : | ||
116 | NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY; | ||
117 | key = &rx->key->conf.key[key_offset]; | ||
118 | michael_mic(key, hdr, data, data_len, mic); | 107 | michael_mic(key, hdr, data, data_len, mic); |
119 | if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0 || wpa_test) { | 108 | if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0) { |
120 | if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) | 109 | if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) |
121 | return RX_DROP_UNUSABLE; | 110 | return RX_DROP_UNUSABLE; |
122 | 111 | ||
@@ -208,7 +197,7 @@ ieee80211_rx_result | |||
208 | ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) | 197 | ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) |
209 | { | 198 | { |
210 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; | 199 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; |
211 | int hdrlen, res, hwaccel = 0, wpa_test = 0; | 200 | int hdrlen, res, hwaccel = 0; |
212 | struct ieee80211_key *key = rx->key; | 201 | struct ieee80211_key *key = rx->key; |
213 | struct sk_buff *skb = rx->skb; | 202 | struct sk_buff *skb = rx->skb; |
214 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | 203 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); |
@@ -235,7 +224,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) | |||
235 | hdr->addr1, hwaccel, rx->queue, | 224 | hdr->addr1, hwaccel, rx->queue, |
236 | &rx->tkip_iv32, | 225 | &rx->tkip_iv32, |
237 | &rx->tkip_iv16); | 226 | &rx->tkip_iv16); |
238 | if (res != TKIP_DECRYPT_OK || wpa_test) | 227 | if (res != TKIP_DECRYPT_OK) |
239 | return RX_DROP_UNUSABLE; | 228 | return RX_DROP_UNUSABLE; |
240 | 229 | ||
241 | /* Trim ICV */ | 230 | /* Trim ICV */ |
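The fixed key offsets used above follow the nl80211 TKIP layout: bytes 0..15 are the temporal key, 16..23 the TX Michael MIC key and 24..31 the RX Michael MIC key. A trivial accessor sketch; conf->key is the key material mac80211 hands to drivers.

#include <net/mac80211.h>
#include <linux/nl80211.h>

static const u8 *example_tkip_tx_mic_key(const struct ieee80211_key_conf *conf)
{
	return &conf->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY];
}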
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 746140264b2d..2b7eef37875c 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -645,25 +645,23 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) | |||
645 | struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); | 645 | struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); |
646 | u_int8_t l3proto = nfmsg->nfgen_family; | 646 | u_int8_t l3proto = nfmsg->nfgen_family; |
647 | 647 | ||
648 | rcu_read_lock(); | 648 | spin_lock_bh(&nf_conntrack_lock); |
649 | last = (struct nf_conn *)cb->args[1]; | 649 | last = (struct nf_conn *)cb->args[1]; |
650 | for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) { | 650 | for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) { |
651 | restart: | 651 | restart: |
652 | hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]], | 652 | hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]], |
653 | hnnode) { | 653 | hnnode) { |
654 | if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) | 654 | if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) |
655 | continue; | 655 | continue; |
656 | ct = nf_ct_tuplehash_to_ctrack(h); | 656 | ct = nf_ct_tuplehash_to_ctrack(h); |
657 | if (!atomic_inc_not_zero(&ct->ct_general.use)) | ||
658 | continue; | ||
659 | /* Dump entries of a given L3 protocol number. | 657 | /* Dump entries of a given L3 protocol number. |
660 | * If it is not specified, ie. l3proto == 0, | 658 | * If it is not specified, ie. l3proto == 0, |
661 | * then dump everything. */ | 659 | * then dump everything. */ |
662 | if (l3proto && nf_ct_l3num(ct) != l3proto) | 660 | if (l3proto && nf_ct_l3num(ct) != l3proto) |
663 | goto releasect; | 661 | continue; |
664 | if (cb->args[1]) { | 662 | if (cb->args[1]) { |
665 | if (ct != last) | 663 | if (ct != last) |
666 | goto releasect; | 664 | continue; |
667 | cb->args[1] = 0; | 665 | cb->args[1] = 0; |
668 | } | 666 | } |
669 | if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, | 667 | if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, |
@@ -681,8 +679,6 @@ restart: | |||
681 | if (acct) | 679 | if (acct) |
682 | memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX])); | 680 | memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX])); |
683 | } | 681 | } |
684 | releasect: | ||
685 | nf_ct_put(ct); | ||
686 | } | 682 | } |
687 | if (cb->args[1]) { | 683 | if (cb->args[1]) { |
688 | cb->args[1] = 0; | 684 | cb->args[1] = 0; |
@@ -690,7 +686,7 @@ releasect: | |||
690 | } | 686 | } |
691 | } | 687 | } |
692 | out: | 688 | out: |
693 | rcu_read_unlock(); | 689 | spin_unlock_bh(&nf_conntrack_lock); |
694 | if (last) | 690 | if (last) |
695 | nf_ct_put(last); | 691 | nf_ct_put(last); |
696 | 692 | ||
@@ -976,7 +972,8 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
976 | free: | 972 | free: |
977 | kfree_skb(skb2); | 973 | kfree_skb(skb2); |
978 | out: | 974 | out: |
979 | return err; | 975 | /* this avoids a loop in nfnetlink. */ |
976 | return err == -EAGAIN ? -ENOBUFS : err; | ||
980 | } | 977 | } |
981 | 978 | ||
982 | #ifdef CONFIG_NF_NAT_NEEDED | 979 | #ifdef CONFIG_NF_NAT_NEEDED |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 80463507420e..c94237631077 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -1325,7 +1325,8 @@ static int __init xt_init(void) | |||
1325 | 1325 | ||
1326 | for_each_possible_cpu(i) { | 1326 | for_each_possible_cpu(i) { |
1327 | struct xt_info_lock *lock = &per_cpu(xt_info_locks, i); | 1327 | struct xt_info_lock *lock = &per_cpu(xt_info_locks, i); |
1328 | spin_lock_init(&lock->lock); | 1328 | |
1329 | seqlock_init(&lock->lock); | ||
1329 | lock->readers = 0; | 1330 | lock->readers = 0; |
1330 | } | 1331 | } |
1331 | 1332 | ||
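The per-cpu xt_info_lock above becomes a seqlock_t, initialized with seqlock_init(). A generic reader sketch of the standard seqlock retry loop (not the actual x_tables read path), shown only to illustrate the API the conversion relies on:

#include <linux/seqlock.h>
#include <linux/types.h>

static u64 example_seqlock_read(seqlock_t *lock, const u64 *value)
{
	unsigned int seq;
	u64 v;

	do {
		seq = read_seqbegin(lock);
		v = *value;
	} while (read_seqretry(lock, seq));

	return v;
}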
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c index fd95beb72f5d..1072b2c19d31 100644 --- a/net/phonet/af_phonet.c +++ b/net/phonet/af_phonet.c | |||
@@ -37,7 +37,7 @@ | |||
37 | /* Transport protocol registration */ | 37 | /* Transport protocol registration */ |
38 | static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly; | 38 | static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly; |
39 | 39 | ||
40 | static struct phonet_protocol *phonet_proto_get(int protocol) | 40 | static struct phonet_protocol *phonet_proto_get(unsigned int protocol) |
41 | { | 41 | { |
42 | struct phonet_protocol *pp; | 42 | struct phonet_protocol *pp; |
43 | 43 | ||
@@ -458,7 +458,7 @@ static struct packet_type phonet_packet_type __read_mostly = { | |||
458 | 458 | ||
459 | static DEFINE_MUTEX(proto_tab_lock); | 459 | static DEFINE_MUTEX(proto_tab_lock); |
460 | 460 | ||
461 | int __init_or_module phonet_proto_register(int protocol, | 461 | int __init_or_module phonet_proto_register(unsigned int protocol, |
462 | struct phonet_protocol *pp) | 462 | struct phonet_protocol *pp) |
463 | { | 463 | { |
464 | int err = 0; | 464 | int err = 0; |
@@ -481,7 +481,7 @@ int __init_or_module phonet_proto_register(int protocol, | |||
481 | } | 481 | } |
482 | EXPORT_SYMBOL(phonet_proto_register); | 482 | EXPORT_SYMBOL(phonet_proto_register); |
483 | 483 | ||
484 | void phonet_proto_unregister(int protocol, struct phonet_protocol *pp) | 484 | void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp) |
485 | { | 485 | { |
486 | mutex_lock(&proto_tab_lock); | 486 | mutex_lock(&proto_tab_lock); |
487 | BUG_ON(proto_tab[protocol] != pp); | 487 | BUG_ON(proto_tab[protocol] != pp); |
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 0b9bb2085ce4..74c064c0dfdd 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c | |||
@@ -808,7 +808,7 @@ static int __init af_rxrpc_init(void) | |||
808 | goto error_call_jar; | 808 | goto error_call_jar; |
809 | } | 809 | } |
810 | 810 | ||
811 | rxrpc_workqueue = create_workqueue("krxrpcd"); | 811 | rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1); |
812 | if (!rxrpc_workqueue) { | 812 | if (!rxrpc_workqueue) { |
813 | printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n"); | 813 | printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n"); |
814 | goto error_work_queue; | 814 | goto error_work_queue; |
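alloc_workqueue(name, flags, max_active): the krxrpcd call above keeps default flags and passes max_active of 1, which limits in-flight work items (per CPU for a bound workqueue). A minimal allocate/teardown sketch of the same API; the queue name and init/exit wrappers are assumptions.

#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *example_wq;

static int example_init(void)
{
	example_wq = alloc_workqueue("example_wq", 0, 1);
	return example_wq ? 0 : -ENOMEM;
}

static void example_exit(void)
{
	destroy_workqueue(example_wq);
}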
diff --git a/net/sched/Kconfig b/net/sched/Kconfig index a36270a994d7..f04d4a484d53 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig | |||
@@ -24,7 +24,7 @@ menuconfig NET_SCHED | |||
24 | To administer these schedulers, you'll need the user-level utilities | 24 | To administer these schedulers, you'll need the user-level utilities |
25 | from the package iproute2+tc at <ftp://ftp.tux.org/pub/net/ip-routing/>. | 25 | from the package iproute2+tc at <ftp://ftp.tux.org/pub/net/ip-routing/>. |
26 | That package also contains some documentation; for more, check out | 26 | That package also contains some documentation; for more, check out |
27 | <http://linux-net.osdl.org/index.php/Iproute2>. | 27 | <http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2>. |
28 | 28 | ||
29 | This Quality of Service (QoS) support will enable you to use | 29 | This Quality of Service (QoS) support will enable you to use |
30 | Differentiated Services (diffserv) and Resource Reservation Protocol | 30 | Differentiated Services (diffserv) and Resource Reservation Protocol |
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 67dc7ce9b63a..83ddfc07e45d 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c | |||
@@ -508,8 +508,7 @@ static int tcf_csum(struct sk_buff *skb, | |||
508 | 508 | ||
509 | spin_lock(&p->tcf_lock); | 509 | spin_lock(&p->tcf_lock); |
510 | p->tcf_tm.lastuse = jiffies; | 510 | p->tcf_tm.lastuse = jiffies; |
511 | p->tcf_bstats.bytes += qdisc_pkt_len(skb); | 511 | bstats_update(&p->tcf_bstats, skb); |
512 | p->tcf_bstats.packets++; | ||
513 | action = p->tcf_action; | 512 | action = p->tcf_action; |
514 | update_flags = p->update_flags; | 513 | update_flags = p->update_flags; |
515 | spin_unlock(&p->tcf_lock); | 514 | spin_unlock(&p->tcf_lock); |
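The open-coded "bytes += qdisc_pkt_len(skb); packets++" pairs in the actions and qdiscs below are folded into bstats_update() and qdisc_bstats_update(). A sketch of what such a helper presumably amounts to; the GSO segment accounting is inferred from the htb hunk further down rather than taken from a definition shown here.

#include <linux/skbuff.h>
#include <net/sch_generic.h>

static inline void example_bstats_update(struct gnet_stats_basic_packed *b,
					 struct sk_buff *skb)
{
	b->bytes += qdisc_pkt_len(skb);
	b->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}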
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 8daef9632255..c2a7c20e81c1 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c | |||
@@ -209,8 +209,7 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a, | |||
209 | spin_lock(&ipt->tcf_lock); | 209 | spin_lock(&ipt->tcf_lock); |
210 | 210 | ||
211 | ipt->tcf_tm.lastuse = jiffies; | 211 | ipt->tcf_tm.lastuse = jiffies; |
212 | ipt->tcf_bstats.bytes += qdisc_pkt_len(skb); | 212 | bstats_update(&ipt->tcf_bstats, skb); |
213 | ipt->tcf_bstats.packets++; | ||
214 | 213 | ||
215 | /* yes, we have to worry about both in and out dev | 214 | /* yes, we have to worry about both in and out dev |
216 | worry later - danger - this API seems to have changed | 215 | worry later - danger - this API seems to have changed |
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 0c311be92827..d765067e99db 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c | |||
@@ -165,8 +165,7 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a, | |||
165 | 165 | ||
166 | spin_lock(&m->tcf_lock); | 166 | spin_lock(&m->tcf_lock); |
167 | m->tcf_tm.lastuse = jiffies; | 167 | m->tcf_tm.lastuse = jiffies; |
168 | m->tcf_bstats.bytes += qdisc_pkt_len(skb); | 168 | bstats_update(&m->tcf_bstats, skb); |
169 | m->tcf_bstats.packets++; | ||
170 | 169 | ||
171 | dev = m->tcfm_dev; | 170 | dev = m->tcfm_dev; |
172 | if (!dev) { | 171 | if (!dev) { |
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 186eb837e600..178a4bd7b7cb 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c | |||
@@ -125,8 +125,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a, | |||
125 | egress = p->flags & TCA_NAT_FLAG_EGRESS; | 125 | egress = p->flags & TCA_NAT_FLAG_EGRESS; |
126 | action = p->tcf_action; | 126 | action = p->tcf_action; |
127 | 127 | ||
128 | p->tcf_bstats.bytes += qdisc_pkt_len(skb); | 128 | bstats_update(&p->tcf_bstats, skb); |
129 | p->tcf_bstats.packets++; | ||
130 | 129 | ||
131 | spin_unlock(&p->tcf_lock); | 130 | spin_unlock(&p->tcf_lock); |
132 | 131 | ||
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index a0593c9640db..445bef716f77 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c | |||
@@ -187,8 +187,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, | |||
187 | bad: | 187 | bad: |
188 | p->tcf_qstats.overlimits++; | 188 | p->tcf_qstats.overlimits++; |
189 | done: | 189 | done: |
190 | p->tcf_bstats.bytes += qdisc_pkt_len(skb); | 190 | bstats_update(&p->tcf_bstats, skb); |
191 | p->tcf_bstats.packets++; | ||
192 | spin_unlock(&p->tcf_lock); | 191 | spin_unlock(&p->tcf_lock); |
193 | return p->tcf_action; | 192 | return p->tcf_action; |
194 | } | 193 | } |
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 7ebf7439b478..e2f08b1e2e58 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
@@ -298,8 +298,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a, | |||
298 | 298 | ||
299 | spin_lock(&police->tcf_lock); | 299 | spin_lock(&police->tcf_lock); |
300 | 300 | ||
301 | police->tcf_bstats.bytes += qdisc_pkt_len(skb); | 301 | bstats_update(&police->tcf_bstats, skb); |
302 | police->tcf_bstats.packets++; | ||
303 | 302 | ||
304 | if (police->tcfp_ewma_rate && | 303 | if (police->tcfp_ewma_rate && |
305 | police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { | 304 | police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { |
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 97e84f3ee775..7287cff7af3e 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c | |||
@@ -42,8 +42,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result | |||
42 | 42 | ||
43 | spin_lock(&d->tcf_lock); | 43 | spin_lock(&d->tcf_lock); |
44 | d->tcf_tm.lastuse = jiffies; | 44 | d->tcf_tm.lastuse = jiffies; |
45 | d->tcf_bstats.bytes += qdisc_pkt_len(skb); | 45 | bstats_update(&d->tcf_bstats, skb); |
46 | d->tcf_bstats.packets++; | ||
47 | 46 | ||
48 | /* print policy string followed by _ then packet count | 47 | /* print policy string followed by _ then packet count |
49 | * Example if this was the 3rd packet and the string was "hello" | 48 | * Example if this was the 3rd packet and the string was "hello" |
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 66cbf4eb8855..836f5fee9e58 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c | |||
@@ -46,8 +46,7 @@ static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a, | |||
46 | 46 | ||
47 | spin_lock(&d->tcf_lock); | 47 | spin_lock(&d->tcf_lock); |
48 | d->tcf_tm.lastuse = jiffies; | 48 | d->tcf_tm.lastuse = jiffies; |
49 | d->tcf_bstats.bytes += qdisc_pkt_len(skb); | 49 | bstats_update(&d->tcf_bstats, skb); |
50 | d->tcf_bstats.packets++; | ||
51 | 50 | ||
52 | if (d->flags & SKBEDIT_F_PRIORITY) | 51 | if (d->flags & SKBEDIT_F_PRIORITY) |
53 | skb->priority = d->priority; | 52 | skb->priority = d->priority; |
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 282540778aa8..943d733409d0 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c | |||
@@ -422,10 +422,8 @@ drop: __maybe_unused | |||
422 | } | 422 | } |
423 | return ret; | 423 | return ret; |
424 | } | 424 | } |
425 | sch->bstats.bytes += qdisc_pkt_len(skb); | 425 | qdisc_bstats_update(sch, skb); |
426 | sch->bstats.packets++; | 426 | bstats_update(&flow->bstats, skb); |
427 | flow->bstats.bytes += qdisc_pkt_len(skb); | ||
428 | flow->bstats.packets++; | ||
429 | /* | 427 | /* |
430 | * Okay, this may seem weird. We pretend we've dropped the packet if | 428 | * Okay, this may seem weird. We pretend we've dropped the packet if |
431 | * it goes via ATM. The reason for this is that the outer qdisc | 429 | * it goes via ATM. The reason for this is that the outer qdisc |
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index eb7631590865..c80d1c210c5d 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
@@ -390,8 +390,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
390 | ret = qdisc_enqueue(skb, cl->q); | 390 | ret = qdisc_enqueue(skb, cl->q); |
391 | if (ret == NET_XMIT_SUCCESS) { | 391 | if (ret == NET_XMIT_SUCCESS) { |
392 | sch->q.qlen++; | 392 | sch->q.qlen++; |
393 | sch->bstats.packets++; | 393 | qdisc_bstats_update(sch, skb); |
394 | sch->bstats.bytes += qdisc_pkt_len(skb); | ||
395 | cbq_mark_toplevel(q, cl); | 394 | cbq_mark_toplevel(q, cl); |
396 | if (!cl->next_alive) | 395 | if (!cl->next_alive) |
397 | cbq_activate_class(cl); | 396 | cbq_activate_class(cl); |
@@ -650,8 +649,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child) | |||
650 | ret = qdisc_enqueue(skb, cl->q); | 649 | ret = qdisc_enqueue(skb, cl->q); |
651 | if (ret == NET_XMIT_SUCCESS) { | 650 | if (ret == NET_XMIT_SUCCESS) { |
652 | sch->q.qlen++; | 651 | sch->q.qlen++; |
653 | sch->bstats.packets++; | 652 | qdisc_bstats_update(sch, skb); |
654 | sch->bstats.bytes += qdisc_pkt_len(skb); | ||
655 | if (!cl->next_alive) | 653 | if (!cl->next_alive) |
656 | cbq_activate_class(cl); | 654 | cbq_activate_class(cl); |
657 | return 0; | 655 | return 0; |
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index aa8b5313f8cf..de55e642eafc 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c | |||
@@ -351,7 +351,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
351 | { | 351 | { |
352 | struct drr_sched *q = qdisc_priv(sch); | 352 | struct drr_sched *q = qdisc_priv(sch); |
353 | struct drr_class *cl; | 353 | struct drr_class *cl; |
354 | unsigned int len; | ||
355 | int err; | 354 | int err; |
356 | 355 | ||
357 | cl = drr_classify(skb, sch, &err); | 356 | cl = drr_classify(skb, sch, &err); |
@@ -362,7 +361,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
362 | return err; | 361 | return err; |
363 | } | 362 | } |
364 | 363 | ||
365 | len = qdisc_pkt_len(skb); | ||
366 | err = qdisc_enqueue(skb, cl->qdisc); | 364 | err = qdisc_enqueue(skb, cl->qdisc); |
367 | if (unlikely(err != NET_XMIT_SUCCESS)) { | 365 | if (unlikely(err != NET_XMIT_SUCCESS)) { |
368 | if (net_xmit_drop_count(err)) { | 366 | if (net_xmit_drop_count(err)) { |
@@ -377,10 +375,8 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
377 | cl->deficit = cl->quantum; | 375 | cl->deficit = cl->quantum; |
378 | } | 376 | } |
379 | 377 | ||
380 | cl->bstats.packets++; | 378 | bstats_update(&cl->bstats, skb); |
381 | cl->bstats.bytes += len; | 379 | qdisc_bstats_update(sch, skb); |
382 | sch->bstats.packets++; | ||
383 | sch->bstats.bytes += len; | ||
384 | 380 | ||
385 | sch->q.qlen++; | 381 | sch->q.qlen++; |
386 | return err; | 382 | return err; |
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 1d295d62bb5c..60f4bdd4408e 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c | |||
@@ -260,8 +260,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
260 | return err; | 260 | return err; |
261 | } | 261 | } |
262 | 262 | ||
263 | sch->bstats.bytes += qdisc_pkt_len(skb); | 263 | qdisc_bstats_update(sch, skb); |
264 | sch->bstats.packets++; | ||
265 | sch->q.qlen++; | 264 | sch->q.qlen++; |
266 | 265 | ||
267 | return NET_XMIT_SUCCESS; | 266 | return NET_XMIT_SUCCESS; |
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 069c62b7bb36..2e45791d4f6c 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
@@ -1599,10 +1599,8 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
1599 | if (cl->qdisc->q.qlen == 1) | 1599 | if (cl->qdisc->q.qlen == 1) |
1600 | set_active(cl, qdisc_pkt_len(skb)); | 1600 | set_active(cl, qdisc_pkt_len(skb)); |
1601 | 1601 | ||
1602 | cl->bstats.packets++; | 1602 | bstats_update(&cl->bstats, skb); |
1603 | cl->bstats.bytes += qdisc_pkt_len(skb); | 1603 | qdisc_bstats_update(sch, skb); |
1604 | sch->bstats.packets++; | ||
1605 | sch->bstats.bytes += qdisc_pkt_len(skb); | ||
1606 | sch->q.qlen++; | 1604 | sch->q.qlen++; |
1607 | 1605 | ||
1608 | return NET_XMIT_SUCCESS; | 1606 | return NET_XMIT_SUCCESS; |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 01b519d6c52d..984c1b0c6836 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -569,15 +569,12 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
569 | } | 569 | } |
570 | return ret; | 570 | return ret; |
571 | } else { | 571 | } else { |
572 | cl->bstats.packets += | 572 | bstats_update(&cl->bstats, skb); |
573 | skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1; | ||
574 | cl->bstats.bytes += qdisc_pkt_len(skb); | ||
575 | htb_activate(q, cl); | 573 | htb_activate(q, cl); |
576 | } | 574 | } |
577 | 575 | ||
578 | sch->q.qlen++; | 576 | sch->q.qlen++; |
579 | sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1; | 577 | qdisc_bstats_update(sch, skb); |
580 | sch->bstats.bytes += qdisc_pkt_len(skb); | ||
581 | return NET_XMIT_SUCCESS; | 578 | return NET_XMIT_SUCCESS; |
582 | } | 579 | } |
583 | 580 | ||
@@ -648,12 +645,10 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, | |||
648 | htb_add_to_wait_tree(q, cl, diff); | 645 | htb_add_to_wait_tree(q, cl, diff); |
649 | } | 646 | } |
650 | 647 | ||
651 | /* update byte stats except for leaves which are already updated */ | 648 | /* update basic stats except for leaves which are already updated */ |
652 | if (cl->level) { | 649 | if (cl->level) |
653 | cl->bstats.bytes += bytes; | 650 | bstats_update(&cl->bstats, skb); |
654 | cl->bstats.packets += skb_is_gso(skb)? | 651 | |
655 | skb_shinfo(skb)->gso_segs:1; | ||
656 | } | ||
657 | cl = cl->parent; | 652 | cl = cl->parent; |
658 | } | 653 | } |
659 | } | 654 | } |
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index f10e34a68445..bce1665239b8 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c | |||
@@ -63,8 +63,7 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
63 | 63 | ||
64 | result = tc_classify(skb, p->filter_list, &res); | 64 | result = tc_classify(skb, p->filter_list, &res); |
65 | 65 | ||
66 | sch->bstats.packets++; | 66 | qdisc_bstats_update(sch, skb); |
67 | sch->bstats.bytes += qdisc_pkt_len(skb); | ||
68 | switch (result) { | 67 | switch (result) { |
69 | case TC_ACT_SHOT: | 68 | case TC_ACT_SHOT: |
70 | result = TC_ACT_SHOT; | 69 | result = TC_ACT_SHOT; |
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 32690deab5d0..21f13da24763 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c | |||
@@ -83,8 +83,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
83 | 83 | ||
84 | ret = qdisc_enqueue(skb, qdisc); | 84 | ret = qdisc_enqueue(skb, qdisc); |
85 | if (ret == NET_XMIT_SUCCESS) { | 85 | if (ret == NET_XMIT_SUCCESS) { |
86 | sch->bstats.bytes += qdisc_pkt_len(skb); | 86 | qdisc_bstats_update(sch, skb); |
87 | sch->bstats.packets++; | ||
88 | sch->q.qlen++; | 87 | sch->q.qlen++; |
89 | return NET_XMIT_SUCCESS; | 88 | return NET_XMIT_SUCCESS; |
90 | } | 89 | } |
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index e5593c083a78..1c4bce863479 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
@@ -240,8 +240,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
240 | 240 | ||
241 | if (likely(ret == NET_XMIT_SUCCESS)) { | 241 | if (likely(ret == NET_XMIT_SUCCESS)) { |
242 | sch->q.qlen++; | 242 | sch->q.qlen++; |
243 | sch->bstats.bytes += qdisc_pkt_len(skb); | 243 | qdisc_bstats_update(sch, skb); |
244 | sch->bstats.packets++; | ||
245 | } else if (net_xmit_drop_count(ret)) { | 244 | } else if (net_xmit_drop_count(ret)) { |
246 | sch->qstats.drops++; | 245 | sch->qstats.drops++; |
247 | } | 246 | } |
@@ -477,8 +476,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) | |||
477 | __skb_queue_after(list, skb, nskb); | 476 | __skb_queue_after(list, skb, nskb); |
478 | 477 | ||
479 | sch->qstats.backlog += qdisc_pkt_len(nskb); | 478 | sch->qstats.backlog += qdisc_pkt_len(nskb); |
480 | sch->bstats.bytes += qdisc_pkt_len(nskb); | 479 | qdisc_bstats_update(sch, nskb); |
481 | sch->bstats.packets++; | ||
482 | 480 | ||
483 | return NET_XMIT_SUCCESS; | 481 | return NET_XMIT_SUCCESS; |
484 | } | 482 | } |
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index b1c95bce33ce..966158d49dd1 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
@@ -84,8 +84,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
84 | 84 | ||
85 | ret = qdisc_enqueue(skb, qdisc); | 85 | ret = qdisc_enqueue(skb, qdisc); |
86 | if (ret == NET_XMIT_SUCCESS) { | 86 | if (ret == NET_XMIT_SUCCESS) { |
87 | sch->bstats.bytes += qdisc_pkt_len(skb); | 87 | qdisc_bstats_update(sch, skb); |
88 | sch->bstats.packets++; | ||
89 | sch->q.qlen++; | 88 | sch->q.qlen++; |
90 | return NET_XMIT_SUCCESS; | 89 | return NET_XMIT_SUCCESS; |
91 | } | 90 | } |
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index a67ba3c5a0cc..a6009c5a2c97 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c | |||
@@ -94,8 +94,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
94 | 94 | ||
95 | ret = qdisc_enqueue(skb, child); | 95 | ret = qdisc_enqueue(skb, child); |
96 | if (likely(ret == NET_XMIT_SUCCESS)) { | 96 | if (likely(ret == NET_XMIT_SUCCESS)) { |
97 | sch->bstats.bytes += qdisc_pkt_len(skb); | 97 | qdisc_bstats_update(sch, skb); |
98 | sch->bstats.packets++; | ||
99 | sch->q.qlen++; | 98 | sch->q.qlen++; |
100 | } else if (net_xmit_drop_count(ret)) { | 99 | } else if (net_xmit_drop_count(ret)) { |
101 | q->stats.pdrop++; | 100 | q->stats.pdrop++; |
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index d54ac94066c2..239ec53a634d 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c | |||
@@ -403,8 +403,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
403 | slot->allot = q->scaled_quantum; | 403 | slot->allot = q->scaled_quantum; |
404 | } | 404 | } |
405 | if (++sch->q.qlen <= q->limit) { | 405 | if (++sch->q.qlen <= q->limit) { |
406 | sch->bstats.bytes += qdisc_pkt_len(skb); | 406 | qdisc_bstats_update(sch, skb); |
407 | sch->bstats.packets++; | ||
408 | return NET_XMIT_SUCCESS; | 407 | return NET_XMIT_SUCCESS; |
409 | } | 408 | } |
410 | 409 | ||
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 641a30d64635..77565e721811 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
@@ -134,8 +134,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
134 | } | 134 | } |
135 | 135 | ||
136 | sch->q.qlen++; | 136 | sch->q.qlen++; |
137 | sch->bstats.bytes += qdisc_pkt_len(skb); | 137 | qdisc_bstats_update(sch, skb); |
138 | sch->bstats.packets++; | ||
139 | return NET_XMIT_SUCCESS; | 138 | return NET_XMIT_SUCCESS; |
140 | } | 139 | } |
141 | 140 | ||
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 106479a7c94a..84ce48eadff4 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c | |||
@@ -59,6 +59,10 @@ struct teql_master | |||
59 | struct net_device *dev; | 59 | struct net_device *dev; |
60 | struct Qdisc *slaves; | 60 | struct Qdisc *slaves; |
61 | struct list_head master_list; | 61 | struct list_head master_list; |
62 | unsigned long tx_bytes; | ||
63 | unsigned long tx_packets; | ||
64 | unsigned long tx_errors; | ||
65 | unsigned long tx_dropped; | ||
62 | }; | 66 | }; |
63 | 67 | ||
64 | struct teql_sched_data | 68 | struct teql_sched_data |
@@ -83,8 +87,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
83 | 87 | ||
84 | if (q->q.qlen < dev->tx_queue_len) { | 88 | if (q->q.qlen < dev->tx_queue_len) { |
85 | __skb_queue_tail(&q->q, skb); | 89 | __skb_queue_tail(&q->q, skb); |
86 | sch->bstats.bytes += qdisc_pkt_len(skb); | 90 | qdisc_bstats_update(sch, skb); |
87 | sch->bstats.packets++; | ||
88 | return NET_XMIT_SUCCESS; | 91 | return NET_XMIT_SUCCESS; |
89 | } | 92 | } |
90 | 93 | ||
@@ -275,7 +278,6 @@ static inline int teql_resolve(struct sk_buff *skb, | |||
275 | static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) | 278 | static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) |
276 | { | 279 | { |
277 | struct teql_master *master = netdev_priv(dev); | 280 | struct teql_master *master = netdev_priv(dev); |
278 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); | ||
279 | struct Qdisc *start, *q; | 281 | struct Qdisc *start, *q; |
280 | int busy; | 282 | int busy; |
281 | int nores; | 283 | int nores; |
@@ -315,8 +317,8 @@ restart: | |||
315 | __netif_tx_unlock(slave_txq); | 317 | __netif_tx_unlock(slave_txq); |
316 | master->slaves = NEXT_SLAVE(q); | 318 | master->slaves = NEXT_SLAVE(q); |
317 | netif_wake_queue(dev); | 319 | netif_wake_queue(dev); |
318 | txq->tx_packets++; | 320 | master->tx_packets++; |
319 | txq->tx_bytes += length; | 321 | master->tx_bytes += length; |
320 | return NETDEV_TX_OK; | 322 | return NETDEV_TX_OK; |
321 | } | 323 | } |
322 | __netif_tx_unlock(slave_txq); | 324 | __netif_tx_unlock(slave_txq); |
@@ -343,10 +345,10 @@ restart: | |||
343 | netif_stop_queue(dev); | 345 | netif_stop_queue(dev); |
344 | return NETDEV_TX_BUSY; | 346 | return NETDEV_TX_BUSY; |
345 | } | 347 | } |
346 | dev->stats.tx_errors++; | 348 | master->tx_errors++; |
347 | 349 | ||
348 | drop: | 350 | drop: |
349 | txq->tx_dropped++; | 351 | master->tx_dropped++; |
350 | dev_kfree_skb(skb); | 352 | dev_kfree_skb(skb); |
351 | return NETDEV_TX_OK; | 353 | return NETDEV_TX_OK; |
352 | } | 354 | } |
@@ -399,6 +401,18 @@ static int teql_master_close(struct net_device *dev) | |||
399 | return 0; | 401 | return 0; |
400 | } | 402 | } |
401 | 403 | ||
404 | static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev, | ||
405 | struct rtnl_link_stats64 *stats) | ||
406 | { | ||
407 | struct teql_master *m = netdev_priv(dev); | ||
408 | |||
409 | stats->tx_packets = m->tx_packets; | ||
410 | stats->tx_bytes = m->tx_bytes; | ||
411 | stats->tx_errors = m->tx_errors; | ||
412 | stats->tx_dropped = m->tx_dropped; | ||
413 | return stats; | ||
414 | } | ||
415 | |||
402 | static int teql_master_mtu(struct net_device *dev, int new_mtu) | 416 | static int teql_master_mtu(struct net_device *dev, int new_mtu) |
403 | { | 417 | { |
404 | struct teql_master *m = netdev_priv(dev); | 418 | struct teql_master *m = netdev_priv(dev); |
@@ -423,6 +437,7 @@ static const struct net_device_ops teql_netdev_ops = { | |||
423 | .ndo_open = teql_master_open, | 437 | .ndo_open = teql_master_open, |
424 | .ndo_stop = teql_master_close, | 438 | .ndo_stop = teql_master_close, |
425 | .ndo_start_xmit = teql_master_xmit, | 439 | .ndo_start_xmit = teql_master_xmit, |
440 | .ndo_get_stats64 = teql_master_stats64, | ||
426 | .ndo_change_mtu = teql_master_mtu, | 441 | .ndo_change_mtu = teql_master_mtu, |
427 | }; | 442 | }; |
428 | 443 | ||
diff --git a/net/socket.c b/net/socket.c index ccc576a6a508..ac2219f90d5d 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -306,20 +306,6 @@ static const struct super_operations sockfs_ops = { | |||
306 | .statfs = simple_statfs, | 306 | .statfs = simple_statfs, |
307 | }; | 307 | }; |
308 | 308 | ||
309 | static struct dentry *sockfs_mount(struct file_system_type *fs_type, | ||
310 | int flags, const char *dev_name, void *data) | ||
311 | { | ||
312 | return mount_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC); | ||
313 | } | ||
314 | |||
315 | static struct vfsmount *sock_mnt __read_mostly; | ||
316 | |||
317 | static struct file_system_type sock_fs_type = { | ||
318 | .name = "sockfs", | ||
319 | .mount = sockfs_mount, | ||
320 | .kill_sb = kill_anon_super, | ||
321 | }; | ||
322 | |||
323 | /* | 309 | /* |
324 | * sockfs_dname() is called from d_path(). | 310 | * sockfs_dname() is called from d_path(). |
325 | */ | 311 | */ |
@@ -333,6 +319,21 @@ static const struct dentry_operations sockfs_dentry_operations = { | |||
333 | .d_dname = sockfs_dname, | 319 | .d_dname = sockfs_dname, |
334 | }; | 320 | }; |
335 | 321 | ||
322 | static struct dentry *sockfs_mount(struct file_system_type *fs_type, | ||
323 | int flags, const char *dev_name, void *data) | ||
324 | { | ||
325 | return mount_pseudo(fs_type, "socket:", &sockfs_ops, | ||
326 | &sockfs_dentry_operations, SOCKFS_MAGIC); | ||
327 | } | ||
328 | |||
329 | static struct vfsmount *sock_mnt __read_mostly; | ||
330 | |||
331 | static struct file_system_type sock_fs_type = { | ||
332 | .name = "sockfs", | ||
333 | .mount = sockfs_mount, | ||
334 | .kill_sb = kill_anon_super, | ||
335 | }; | ||
336 | |||
336 | /* | 337 | /* |
337 | * Obtains the first available file descriptor and sets it up for use. | 338 | * Obtains the first available file descriptor and sets it up for use. |
338 | * | 339 | * |
@@ -368,7 +369,6 @@ static int sock_alloc_file(struct socket *sock, struct file **f, int flags) | |||
368 | } | 369 | } |
369 | path.mnt = mntget(sock_mnt); | 370 | path.mnt = mntget(sock_mnt); |
370 | 371 | ||
371 | d_set_d_op(path.dentry, &sockfs_dentry_operations); | ||
372 | d_instantiate(path.dentry, SOCK_INODE(sock)); | 372 | d_instantiate(path.dentry, SOCK_INODE(sock)); |
373 | SOCK_INODE(sock)->i_fop = &socket_file_ops; | 373 | SOCK_INODE(sock)->i_fop = &socket_file_ops; |
374 | 374 | ||
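The reordering in socket.c goes with a VFS-side change in this series: mount_pseudo() now takes a dentry_operations argument, so sockfs can install sockfs_dentry_operations once at mount time and drop the per-dentry d_set_d_op() call. The assumed updated prototype (see fs/libfs.c):

	struct dentry *mount_pseudo(struct file_system_type *fs_type, char *name,
				    const struct super_operations *ops,
				    const struct dentry_operations *dops,
				    unsigned long magic);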
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index afe67849269f..67e31276682a 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c | |||
@@ -563,8 +563,17 @@ rpcauth_checkverf(struct rpc_task *task, __be32 *p) | |||
563 | return cred->cr_ops->crvalidate(task, p); | 563 | return cred->cr_ops->crvalidate(task, p); |
564 | } | 564 | } |
565 | 565 | ||
566 | static void rpcauth_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp, | ||
567 | __be32 *data, void *obj) | ||
568 | { | ||
569 | struct xdr_stream xdr; | ||
570 | |||
571 | xdr_init_encode(&xdr, &rqstp->rq_snd_buf, data); | ||
572 | encode(rqstp, &xdr, obj); | ||
573 | } | ||
574 | |||
566 | int | 575 | int |
567 | rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, | 576 | rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, |
568 | __be32 *data, void *obj) | 577 | __be32 *data, void *obj) |
569 | { | 578 | { |
570 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; | 579 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; |
@@ -574,11 +583,22 @@ rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, | |||
574 | if (cred->cr_ops->crwrap_req) | 583 | if (cred->cr_ops->crwrap_req) |
575 | return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); | 584 | return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); |
576 | /* By default, we encode the arguments normally. */ | 585 | /* By default, we encode the arguments normally. */ |
577 | return encode(rqstp, data, obj); | 586 | rpcauth_wrap_req_encode(encode, rqstp, data, obj); |
587 | return 0; | ||
588 | } | ||
589 | |||
590 | static int | ||
591 | rpcauth_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp, | ||
592 | __be32 *data, void *obj) | ||
593 | { | ||
594 | struct xdr_stream xdr; | ||
595 | |||
596 | xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, data); | ||
597 | return decode(rqstp, &xdr, obj); | ||
578 | } | 598 | } |
579 | 599 | ||
580 | int | 600 | int |
581 | rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, | 601 | rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, |
582 | __be32 *data, void *obj) | 602 | __be32 *data, void *obj) |
583 | { | 603 | { |
584 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; | 604 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; |
@@ -589,7 +609,7 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, | |||
589 | return cred->cr_ops->crunwrap_resp(task, decode, rqstp, | 609 | return cred->cr_ops->crunwrap_resp(task, decode, rqstp, |
590 | data, obj); | 610 | data, obj); |
591 | /* By default, we decode the arguments normally. */ | 611 | /* By default, we decode the arguments normally. */ |
592 | return decode(rqstp, data, obj); | 612 | return rpcauth_unwrap_req_decode(decode, rqstp, data, obj); |
593 | } | 613 | } |
594 | 614 | ||
595 | int | 615 | int |
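The auth.c conversion (and the auth_gss.c one that follows) relies on splitting the old kxdrproc_t into distinct encode and decode function types that operate on an xdr_stream. A sketch of the typedefs this series assumes, as declared in include/linux/sunrpc/xdr.h:

	typedef void	(*kxdreproc_t)(void *rqstp, struct xdr_stream *xdr,
				       void *obj);
	typedef int	(*kxdrdproc_t)(void *rqstp, struct xdr_stream *xdr,
				       void *obj);

Encoders become void because the send buffer is pre-sized from p_arglen, so reserving stream space is not expected to fail; decoders still return an error for short or malformed replies.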
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 3835ce35e224..45dbf1521b9a 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -1231,9 +1231,19 @@ out_bad: | |||
1231 | return NULL; | 1231 | return NULL; |
1232 | } | 1232 | } |
1233 | 1233 | ||
1234 | static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp, | ||
1235 | __be32 *p, void *obj) | ||
1236 | { | ||
1237 | struct xdr_stream xdr; | ||
1238 | |||
1239 | xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p); | ||
1240 | encode(rqstp, &xdr, obj); | ||
1241 | } | ||
1242 | |||
1234 | static inline int | 1243 | static inline int |
1235 | gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | 1244 | gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, |
1236 | kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj) | 1245 | kxdreproc_t encode, struct rpc_rqst *rqstp, |
1246 | __be32 *p, void *obj) | ||
1237 | { | 1247 | { |
1238 | struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; | 1248 | struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; |
1239 | struct xdr_buf integ_buf; | 1249 | struct xdr_buf integ_buf; |
@@ -1249,9 +1259,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
1249 | offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; | 1259 | offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; |
1250 | *p++ = htonl(rqstp->rq_seqno); | 1260 | *p++ = htonl(rqstp->rq_seqno); |
1251 | 1261 | ||
1252 | status = encode(rqstp, p, obj); | 1262 | gss_wrap_req_encode(encode, rqstp, p, obj); |
1253 | if (status) | ||
1254 | return status; | ||
1255 | 1263 | ||
1256 | if (xdr_buf_subsegment(snd_buf, &integ_buf, | 1264 | if (xdr_buf_subsegment(snd_buf, &integ_buf, |
1257 | offset, snd_buf->len - offset)) | 1265 | offset, snd_buf->len - offset)) |
@@ -1325,7 +1333,8 @@ out: | |||
1325 | 1333 | ||
1326 | static inline int | 1334 | static inline int |
1327 | gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | 1335 | gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, |
1328 | kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj) | 1336 | kxdreproc_t encode, struct rpc_rqst *rqstp, |
1337 | __be32 *p, void *obj) | ||
1329 | { | 1338 | { |
1330 | struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; | 1339 | struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; |
1331 | u32 offset; | 1340 | u32 offset; |
@@ -1342,9 +1351,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
1342 | offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; | 1351 | offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; |
1343 | *p++ = htonl(rqstp->rq_seqno); | 1352 | *p++ = htonl(rqstp->rq_seqno); |
1344 | 1353 | ||
1345 | status = encode(rqstp, p, obj); | 1354 | gss_wrap_req_encode(encode, rqstp, p, obj); |
1346 | if (status) | ||
1347 | return status; | ||
1348 | 1355 | ||
1349 | status = alloc_enc_pages(rqstp); | 1356 | status = alloc_enc_pages(rqstp); |
1350 | if (status) | 1357 | if (status) |
@@ -1394,7 +1401,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
1394 | 1401 | ||
1395 | static int | 1402 | static int |
1396 | gss_wrap_req(struct rpc_task *task, | 1403 | gss_wrap_req(struct rpc_task *task, |
1397 | kxdrproc_t encode, void *rqstp, __be32 *p, void *obj) | 1404 | kxdreproc_t encode, void *rqstp, __be32 *p, void *obj) |
1398 | { | 1405 | { |
1399 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; | 1406 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; |
1400 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, | 1407 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, |
@@ -1407,12 +1414,14 @@ gss_wrap_req(struct rpc_task *task, | |||
1407 | /* The spec seems a little ambiguous here, but I think that not | 1414 | /* The spec seems a little ambiguous here, but I think that not |
1408 | * wrapping context destruction requests makes the most sense. | 1415 | * wrapping context destruction requests makes the most sense. |
1409 | */ | 1416 | */ |
1410 | status = encode(rqstp, p, obj); | 1417 | gss_wrap_req_encode(encode, rqstp, p, obj); |
1418 | status = 0; | ||
1411 | goto out; | 1419 | goto out; |
1412 | } | 1420 | } |
1413 | switch (gss_cred->gc_service) { | 1421 | switch (gss_cred->gc_service) { |
1414 | case RPC_GSS_SVC_NONE: | 1422 | case RPC_GSS_SVC_NONE: |
1415 | status = encode(rqstp, p, obj); | 1423 | gss_wrap_req_encode(encode, rqstp, p, obj); |
1424 | status = 0; | ||
1416 | break; | 1425 | break; |
1417 | case RPC_GSS_SVC_INTEGRITY: | 1426 | case RPC_GSS_SVC_INTEGRITY: |
1418 | status = gss_wrap_req_integ(cred, ctx, encode, | 1427 | status = gss_wrap_req_integ(cred, ctx, encode, |
@@ -1494,10 +1503,19 @@ gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
1494 | return 0; | 1503 | return 0; |
1495 | } | 1504 | } |
1496 | 1505 | ||
1506 | static int | ||
1507 | gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp, | ||
1508 | __be32 *p, void *obj) | ||
1509 | { | ||
1510 | struct xdr_stream xdr; | ||
1511 | |||
1512 | xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); | ||
1513 | return decode(rqstp, &xdr, obj); | ||
1514 | } | ||
1497 | 1515 | ||
1498 | static int | 1516 | static int |
1499 | gss_unwrap_resp(struct rpc_task *task, | 1517 | gss_unwrap_resp(struct rpc_task *task, |
1500 | kxdrproc_t decode, void *rqstp, __be32 *p, void *obj) | 1518 | kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj) |
1501 | { | 1519 | { |
1502 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; | 1520 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; |
1503 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, | 1521 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, |
@@ -1528,7 +1546,7 @@ gss_unwrap_resp(struct rpc_task *task, | |||
1528 | cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp) | 1546 | cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp) |
1529 | + (savedlen - head->iov_len); | 1547 | + (savedlen - head->iov_len); |
1530 | out_decode: | 1548 | out_decode: |
1531 | status = decode(rqstp, p, obj); | 1549 | status = gss_unwrap_req_decode(decode, rqstp, p, obj); |
1532 | out: | 1550 | out: |
1533 | gss_put_ctx(ctx); | 1551 | gss_put_ctx(ctx); |
1534 | dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid, | 1552 | dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid, |
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index 75ee993ea057..9576f35ab701 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c | |||
@@ -137,7 +137,7 @@ arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4]) | |||
137 | ms_usage = 13; | 137 | ms_usage = 13; |
138 | break; | 138 | break; |
139 | default: | 139 | default: |
140 | return EINVAL;; | 140 | return -EINVAL; |
141 | } | 141 | } |
142 | salt[0] = (ms_usage >> 0) & 0xff; | 142 | salt[0] = (ms_usage >> 0) & 0xff; |
143 | salt[1] = (ms_usage >> 8) & 0xff; | 143 | salt[1] = (ms_usage >> 8) & 0xff; |
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index dec2a6fc7c12..bcdae78fdfc6 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c | |||
@@ -67,7 +67,6 @@ static int netobj_equal(struct xdr_netobj *a, struct xdr_netobj *b) | |||
67 | 67 | ||
68 | #define RSI_HASHBITS 6 | 68 | #define RSI_HASHBITS 6 |
69 | #define RSI_HASHMAX (1<<RSI_HASHBITS) | 69 | #define RSI_HASHMAX (1<<RSI_HASHBITS) |
70 | #define RSI_HASHMASK (RSI_HASHMAX-1) | ||
71 | 70 | ||
72 | struct rsi { | 71 | struct rsi { |
73 | struct cache_head h; | 72 | struct cache_head h; |
@@ -319,7 +318,6 @@ static struct rsi *rsi_update(struct rsi *new, struct rsi *old) | |||
319 | 318 | ||
320 | #define RSC_HASHBITS 10 | 319 | #define RSC_HASHBITS 10 |
321 | #define RSC_HASHMAX (1<<RSC_HASHBITS) | 320 | #define RSC_HASHMAX (1<<RSC_HASHBITS) |
322 | #define RSC_HASHMASK (RSC_HASHMAX-1) | ||
323 | 321 | ||
324 | #define GSS_SEQ_WIN 128 | 322 | #define GSS_SEQ_WIN 128 |
325 | 323 | ||
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c index 7dcfe0cc3500..1dd1a6890007 100644 --- a/net/sunrpc/bc_svc.c +++ b/net/sunrpc/bc_svc.c | |||
@@ -59,8 +59,8 @@ int bc_send(struct rpc_rqst *req) | |||
59 | ret = task->tk_status; | 59 | ret = task->tk_status; |
60 | rpc_put_task(task); | 60 | rpc_put_task(task); |
61 | } | 61 | } |
62 | return ret; | ||
63 | dprintk("RPC: bc_send ret= %d\n", ret); | 62 | dprintk("RPC: bc_send ret= %d\n", ret); |
63 | return ret; | ||
64 | } | 64 | } |
65 | 65 | ||
66 | #endif /* CONFIG_NFS_V4_1 */ | 66 | #endif /* CONFIG_NFS_V4_1 */ |
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index e433e7580e27..72ad836e4fe0 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -37,7 +37,7 @@ | |||
37 | 37 | ||
38 | #define RPCDBG_FACILITY RPCDBG_CACHE | 38 | #define RPCDBG_FACILITY RPCDBG_CACHE |
39 | 39 | ||
40 | static void cache_defer_req(struct cache_req *req, struct cache_head *item); | 40 | static bool cache_defer_req(struct cache_req *req, struct cache_head *item); |
41 | static void cache_revisit_request(struct cache_head *item); | 41 | static void cache_revisit_request(struct cache_head *item); |
42 | 42 | ||
43 | static void cache_init(struct cache_head *h) | 43 | static void cache_init(struct cache_head *h) |
@@ -128,6 +128,7 @@ static void cache_fresh_locked(struct cache_head *head, time_t expiry) | |||
128 | { | 128 | { |
129 | head->expiry_time = expiry; | 129 | head->expiry_time = expiry; |
130 | head->last_refresh = seconds_since_boot(); | 130 | head->last_refresh = seconds_since_boot(); |
131 | smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */ | ||
131 | set_bit(CACHE_VALID, &head->flags); | 132 | set_bit(CACHE_VALID, &head->flags); |
132 | } | 133 | } |
133 | 134 | ||
@@ -208,11 +209,36 @@ static inline int cache_is_valid(struct cache_detail *detail, struct cache_head | |||
208 | /* entry is valid */ | 209 | /* entry is valid */ |
209 | if (test_bit(CACHE_NEGATIVE, &h->flags)) | 210 | if (test_bit(CACHE_NEGATIVE, &h->flags)) |
210 | return -ENOENT; | 211 | return -ENOENT; |
211 | else | 212 | else { |
213 | /* | ||
214 | * In combination with write barrier in | ||
215 | * sunrpc_cache_update, ensures that anyone | ||
216 | * using the cache entry after this sees the | ||
217 | * updated contents: | ||
218 | */ | ||
219 | smp_rmb(); | ||
212 | return 0; | 220 | return 0; |
221 | } | ||
213 | } | 222 | } |
214 | } | 223 | } |
215 | 224 | ||
225 | static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h) | ||
226 | { | ||
227 | int rv; | ||
228 | |||
229 | write_lock(&detail->hash_lock); | ||
230 | rv = cache_is_valid(detail, h); | ||
231 | if (rv != -EAGAIN) { | ||
232 | write_unlock(&detail->hash_lock); | ||
233 | return rv; | ||
234 | } | ||
235 | set_bit(CACHE_NEGATIVE, &h->flags); | ||
236 | cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY); | ||
237 | write_unlock(&detail->hash_lock); | ||
238 | cache_fresh_unlocked(h, detail); | ||
239 | return -ENOENT; | ||
240 | } | ||
241 | |||
216 | /* | 242 | /* |
217 | * This is the generic cache management routine for all | 243 | * This is the generic cache management routine for all |
218 | * the authentication caches. | 244 | * the authentication caches. |
@@ -251,14 +277,8 @@ int cache_check(struct cache_detail *detail, | |||
251 | case -EINVAL: | 277 | case -EINVAL: |
252 | clear_bit(CACHE_PENDING, &h->flags); | 278 | clear_bit(CACHE_PENDING, &h->flags); |
253 | cache_revisit_request(h); | 279 | cache_revisit_request(h); |
254 | if (rv == -EAGAIN) { | 280 | rv = try_to_negate_entry(detail, h); |
255 | set_bit(CACHE_NEGATIVE, &h->flags); | ||
256 | cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY); | ||
257 | cache_fresh_unlocked(h, detail); | ||
258 | rv = -ENOENT; | ||
259 | } | ||
260 | break; | 281 | break; |
261 | |||
262 | case -EAGAIN: | 282 | case -EAGAIN: |
263 | clear_bit(CACHE_PENDING, &h->flags); | 283 | clear_bit(CACHE_PENDING, &h->flags); |
264 | cache_revisit_request(h); | 284 | cache_revisit_request(h); |
@@ -268,9 +288,11 @@ int cache_check(struct cache_detail *detail, | |||
268 | } | 288 | } |
269 | 289 | ||
270 | if (rv == -EAGAIN) { | 290 | if (rv == -EAGAIN) { |
271 | cache_defer_req(rqstp, h); | 291 | if (!cache_defer_req(rqstp, h)) { |
272 | if (!test_bit(CACHE_PENDING, &h->flags)) { | 292 | /* |
273 | /* Request is not deferred */ | 293 | * Request was not deferred; handle it as best |
294 | * we can ourselves: | ||
295 | */ | ||
274 | rv = cache_is_valid(detail, h); | 296 | rv = cache_is_valid(detail, h); |
275 | if (rv == -EAGAIN) | 297 | if (rv == -EAGAIN) |
276 | rv = -ETIMEDOUT; | 298 | rv = -ETIMEDOUT; |
@@ -618,18 +640,19 @@ static void cache_limit_defers(void) | |||
618 | discard->revisit(discard, 1); | 640 | discard->revisit(discard, 1); |
619 | } | 641 | } |
620 | 642 | ||
621 | static void cache_defer_req(struct cache_req *req, struct cache_head *item) | 643 | /* Return true if and only if a deferred request is queued. */ |
644 | static bool cache_defer_req(struct cache_req *req, struct cache_head *item) | ||
622 | { | 645 | { |
623 | struct cache_deferred_req *dreq; | 646 | struct cache_deferred_req *dreq; |
624 | 647 | ||
625 | if (req->thread_wait) { | 648 | if (req->thread_wait) { |
626 | cache_wait_req(req, item); | 649 | cache_wait_req(req, item); |
627 | if (!test_bit(CACHE_PENDING, &item->flags)) | 650 | if (!test_bit(CACHE_PENDING, &item->flags)) |
628 | return; | 651 | return false; |
629 | } | 652 | } |
630 | dreq = req->defer(req); | 653 | dreq = req->defer(req); |
631 | if (dreq == NULL) | 654 | if (dreq == NULL) |
632 | return; | 655 | return false; |
633 | setup_deferral(dreq, item, 1); | 656 | setup_deferral(dreq, item, 1); |
634 | if (!test_bit(CACHE_PENDING, &item->flags)) | 657 | if (!test_bit(CACHE_PENDING, &item->flags)) |
635 | /* Bit could have been cleared before we managed to | 658 | /* Bit could have been cleared before we managed to |
@@ -638,6 +661,7 @@ static void cache_defer_req(struct cache_req *req, struct cache_head *item) | |||
638 | cache_revisit_request(item); | 661 | cache_revisit_request(item); |
639 | 662 | ||
640 | cache_limit_defers(); | 663 | cache_limit_defers(); |
664 | return true; | ||
641 | } | 665 | } |
642 | 666 | ||
643 | static void cache_revisit_request(struct cache_head *item) | 667 | static void cache_revisit_request(struct cache_head *item) |
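The smp_wmb()/smp_rmb() pair added to cache_fresh_locked() and cache_is_valid() is the usual publish/consume pattern: the writer must make the entry's contents globally visible before setting CACHE_VALID, and the reader must order its flag test before touching those contents. A generic illustration of the pattern (names are illustrative, not sunrpc code):

	struct published_item {
		unsigned long	flags;		/* bit 0 == valid */
		int		payload;
	};

	static void publish(struct published_item *item, int value)
	{
		item->payload = value;
		smp_wmb();	/* payload visible before the flag */
		set_bit(0, &item->flags);
	}

	static int consume(struct published_item *item, int *value)
	{
		if (!test_bit(0, &item->flags))
			return -EAGAIN;
		smp_rmb();	/* flag read ordered before payload read */
		*value = item->payload;
		return 0;
	}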
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 92ce94f5146b..57d344cf2256 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -1095,7 +1095,7 @@ static void | |||
1095 | rpc_xdr_encode(struct rpc_task *task) | 1095 | rpc_xdr_encode(struct rpc_task *task) |
1096 | { | 1096 | { |
1097 | struct rpc_rqst *req = task->tk_rqstp; | 1097 | struct rpc_rqst *req = task->tk_rqstp; |
1098 | kxdrproc_t encode; | 1098 | kxdreproc_t encode; |
1099 | __be32 *p; | 1099 | __be32 *p; |
1100 | 1100 | ||
1101 | dprint_status(task); | 1101 | dprint_status(task); |
@@ -1535,7 +1535,7 @@ call_decode(struct rpc_task *task) | |||
1535 | { | 1535 | { |
1536 | struct rpc_clnt *clnt = task->tk_client; | 1536 | struct rpc_clnt *clnt = task->tk_client; |
1537 | struct rpc_rqst *req = task->tk_rqstp; | 1537 | struct rpc_rqst *req = task->tk_rqstp; |
1538 | kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode; | 1538 | kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode; |
1539 | __be32 *p; | 1539 | __be32 *p; |
1540 | 1540 | ||
1541 | dprintk("RPC: %5u call_decode (status %d)\n", | 1541 | dprintk("RPC: %5u call_decode (status %d)\n", |
@@ -1776,12 +1776,11 @@ out_overflow: | |||
1776 | goto out_garbage; | 1776 | goto out_garbage; |
1777 | } | 1777 | } |
1778 | 1778 | ||
1779 | static int rpcproc_encode_null(void *rqstp, __be32 *data, void *obj) | 1779 | static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj) |
1780 | { | 1780 | { |
1781 | return 0; | ||
1782 | } | 1781 | } |
1783 | 1782 | ||
1784 | static int rpcproc_decode_null(void *rqstp, __be32 *data, void *obj) | 1783 | static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj) |
1785 | { | 1784 | { |
1786 | return 0; | 1785 | return 0; |
1787 | } | 1786 | } |
@@ -1830,23 +1829,15 @@ static void rpc_show_task(const struct rpc_clnt *clnt, | |||
1830 | const struct rpc_task *task) | 1829 | const struct rpc_task *task) |
1831 | { | 1830 | { |
1832 | const char *rpc_waitq = "none"; | 1831 | const char *rpc_waitq = "none"; |
1833 | char *p, action[KSYM_SYMBOL_LEN]; | ||
1834 | 1832 | ||
1835 | if (RPC_IS_QUEUED(task)) | 1833 | if (RPC_IS_QUEUED(task)) |
1836 | rpc_waitq = rpc_qname(task->tk_waitqueue); | 1834 | rpc_waitq = rpc_qname(task->tk_waitqueue); |
1837 | 1835 | ||
1838 | /* map tk_action pointer to a function name; then trim off | 1836 | printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n", |
1839 | * the "+0x0 [sunrpc]" */ | ||
1840 | sprint_symbol(action, (unsigned long)task->tk_action); | ||
1841 | p = strchr(action, '+'); | ||
1842 | if (p) | ||
1843 | *p = '\0'; | ||
1844 | |||
1845 | printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%s q:%s\n", | ||
1846 | task->tk_pid, task->tk_flags, task->tk_status, | 1837 | task->tk_pid, task->tk_flags, task->tk_status, |
1847 | clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops, | 1838 | clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops, |
1848 | clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task), | 1839 | clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task), |
1849 | action, rpc_waitq); | 1840 | task->tk_action, rpc_waitq); |
1850 | } | 1841 | } |
1851 | 1842 | ||
1852 | void rpc_show_tasks(void) | 1843 | void rpc_show_tasks(void) |
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 09f01f41e55a..72bc53683965 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -474,7 +474,7 @@ static int __rpc_create_common(struct inode *dir, struct dentry *dentry, | |||
474 | { | 474 | { |
475 | struct inode *inode; | 475 | struct inode *inode; |
476 | 476 | ||
477 | BUG_ON(!d_unhashed(dentry)); | 477 | d_drop(dentry); |
478 | inode = rpc_get_inode(dir->i_sb, mode); | 478 | inode = rpc_get_inode(dir->i_sb, mode); |
479 | if (!inode) | 479 | if (!inode) |
480 | goto out_err; | 480 | goto out_err; |
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index fa6d7ca2c851..c652e4cc9fe9 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c | |||
@@ -57,10 +57,6 @@ enum { | |||
57 | RPCBPROC_GETSTAT, | 57 | RPCBPROC_GETSTAT, |
58 | }; | 58 | }; |
59 | 59 | ||
60 | #define RPCB_HIGHPROC_2 RPCBPROC_CALLIT | ||
61 | #define RPCB_HIGHPROC_3 RPCBPROC_TADDR2UADDR | ||
62 | #define RPCB_HIGHPROC_4 RPCBPROC_GETSTAT | ||
63 | |||
64 | /* | 60 | /* |
65 | * r_owner | 61 | * r_owner |
66 | * | 62 | * |
@@ -693,46 +689,37 @@ static void rpcb_getport_done(struct rpc_task *child, void *data) | |||
693 | * XDR functions for rpcbind | 689 | * XDR functions for rpcbind |
694 | */ | 690 | */ |
695 | 691 | ||
696 | static int rpcb_enc_mapping(struct rpc_rqst *req, __be32 *p, | 692 | static void rpcb_enc_mapping(struct rpc_rqst *req, struct xdr_stream *xdr, |
697 | const struct rpcbind_args *rpcb) | 693 | const struct rpcbind_args *rpcb) |
698 | { | 694 | { |
699 | struct rpc_task *task = req->rq_task; | 695 | struct rpc_task *task = req->rq_task; |
700 | struct xdr_stream xdr; | 696 | __be32 *p; |
701 | 697 | ||
702 | dprintk("RPC: %5u encoding PMAP_%s call (%u, %u, %d, %u)\n", | 698 | dprintk("RPC: %5u encoding PMAP_%s call (%u, %u, %d, %u)\n", |
703 | task->tk_pid, task->tk_msg.rpc_proc->p_name, | 699 | task->tk_pid, task->tk_msg.rpc_proc->p_name, |
704 | rpcb->r_prog, rpcb->r_vers, rpcb->r_prot, rpcb->r_port); | 700 | rpcb->r_prog, rpcb->r_vers, rpcb->r_prot, rpcb->r_port); |
705 | 701 | ||
706 | xdr_init_encode(&xdr, &req->rq_snd_buf, p); | 702 | p = xdr_reserve_space(xdr, RPCB_mappingargs_sz << 2); |
707 | 703 | *p++ = cpu_to_be32(rpcb->r_prog); | |
708 | p = xdr_reserve_space(&xdr, sizeof(__be32) * RPCB_mappingargs_sz); | 704 | *p++ = cpu_to_be32(rpcb->r_vers); |
709 | if (unlikely(p == NULL)) | 705 | *p++ = cpu_to_be32(rpcb->r_prot); |
710 | return -EIO; | 706 | *p = cpu_to_be32(rpcb->r_port); |
711 | |||
712 | *p++ = htonl(rpcb->r_prog); | ||
713 | *p++ = htonl(rpcb->r_vers); | ||
714 | *p++ = htonl(rpcb->r_prot); | ||
715 | *p = htonl(rpcb->r_port); | ||
716 | |||
717 | return 0; | ||
718 | } | 707 | } |
719 | 708 | ||
720 | static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p, | 709 | static int rpcb_dec_getport(struct rpc_rqst *req, struct xdr_stream *xdr, |
721 | struct rpcbind_args *rpcb) | 710 | struct rpcbind_args *rpcb) |
722 | { | 711 | { |
723 | struct rpc_task *task = req->rq_task; | 712 | struct rpc_task *task = req->rq_task; |
724 | struct xdr_stream xdr; | ||
725 | unsigned long port; | 713 | unsigned long port; |
726 | 714 | __be32 *p; | |
727 | xdr_init_decode(&xdr, &req->rq_rcv_buf, p); | ||
728 | 715 | ||
729 | rpcb->r_port = 0; | 716 | rpcb->r_port = 0; |
730 | 717 | ||
731 | p = xdr_inline_decode(&xdr, sizeof(__be32)); | 718 | p = xdr_inline_decode(xdr, 4); |
732 | if (unlikely(p == NULL)) | 719 | if (unlikely(p == NULL)) |
733 | return -EIO; | 720 | return -EIO; |
734 | 721 | ||
735 | port = ntohl(*p); | 722 | port = be32_to_cpup(p); |
736 | dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid, | 723 | dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid, |
737 | task->tk_msg.rpc_proc->p_name, port); | 724 | task->tk_msg.rpc_proc->p_name, port); |
738 | if (unlikely(port > USHRT_MAX)) | 725 | if (unlikely(port > USHRT_MAX)) |
@@ -742,20 +729,18 @@ static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p, | |||
742 | return 0; | 729 | return 0; |
743 | } | 730 | } |
744 | 731 | ||
745 | static int rpcb_dec_set(struct rpc_rqst *req, __be32 *p, | 732 | static int rpcb_dec_set(struct rpc_rqst *req, struct xdr_stream *xdr, |
746 | unsigned int *boolp) | 733 | unsigned int *boolp) |
747 | { | 734 | { |
748 | struct rpc_task *task = req->rq_task; | 735 | struct rpc_task *task = req->rq_task; |
749 | struct xdr_stream xdr; | 736 | __be32 *p; |
750 | |||
751 | xdr_init_decode(&xdr, &req->rq_rcv_buf, p); | ||
752 | 737 | ||
753 | p = xdr_inline_decode(&xdr, sizeof(__be32)); | 738 | p = xdr_inline_decode(xdr, 4); |
754 | if (unlikely(p == NULL)) | 739 | if (unlikely(p == NULL)) |
755 | return -EIO; | 740 | return -EIO; |
756 | 741 | ||
757 | *boolp = 0; | 742 | *boolp = 0; |
758 | if (*p) | 743 | if (*p != xdr_zero) |
759 | *boolp = 1; | 744 | *boolp = 1; |
760 | 745 | ||
761 | dprintk("RPC: %5u RPCB_%s call %s\n", | 746 | dprintk("RPC: %5u RPCB_%s call %s\n", |
@@ -764,73 +749,53 @@ static int rpcb_dec_set(struct rpc_rqst *req, __be32 *p, | |||
764 | return 0; | 749 | return 0; |
765 | } | 750 | } |
766 | 751 | ||
767 | static int encode_rpcb_string(struct xdr_stream *xdr, const char *string, | 752 | static void encode_rpcb_string(struct xdr_stream *xdr, const char *string, |
768 | const u32 maxstrlen) | 753 | const u32 maxstrlen) |
769 | { | 754 | { |
770 | u32 len; | ||
771 | __be32 *p; | 755 | __be32 *p; |
756 | u32 len; | ||
772 | 757 | ||
773 | if (unlikely(string == NULL)) | ||
774 | return -EIO; | ||
775 | len = strlen(string); | 758 | len = strlen(string); |
776 | if (unlikely(len > maxstrlen)) | 759 | BUG_ON(len > maxstrlen); |
777 | return -EIO; | 760 | p = xdr_reserve_space(xdr, 4 + len); |
778 | |||
779 | p = xdr_reserve_space(xdr, sizeof(__be32) + len); | ||
780 | if (unlikely(p == NULL)) | ||
781 | return -EIO; | ||
782 | xdr_encode_opaque(p, string, len); | 761 | xdr_encode_opaque(p, string, len); |
783 | |||
784 | return 0; | ||
785 | } | 762 | } |
786 | 763 | ||
787 | static int rpcb_enc_getaddr(struct rpc_rqst *req, __be32 *p, | 764 | static void rpcb_enc_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr, |
788 | const struct rpcbind_args *rpcb) | 765 | const struct rpcbind_args *rpcb) |
789 | { | 766 | { |
790 | struct rpc_task *task = req->rq_task; | 767 | struct rpc_task *task = req->rq_task; |
791 | struct xdr_stream xdr; | 768 | __be32 *p; |
792 | 769 | ||
793 | dprintk("RPC: %5u encoding RPCB_%s call (%u, %u, '%s', '%s')\n", | 770 | dprintk("RPC: %5u encoding RPCB_%s call (%u, %u, '%s', '%s')\n", |
794 | task->tk_pid, task->tk_msg.rpc_proc->p_name, | 771 | task->tk_pid, task->tk_msg.rpc_proc->p_name, |
795 | rpcb->r_prog, rpcb->r_vers, | 772 | rpcb->r_prog, rpcb->r_vers, |
796 | rpcb->r_netid, rpcb->r_addr); | 773 | rpcb->r_netid, rpcb->r_addr); |
797 | 774 | ||
798 | xdr_init_encode(&xdr, &req->rq_snd_buf, p); | 775 | p = xdr_reserve_space(xdr, (RPCB_program_sz + RPCB_version_sz) << 2); |
799 | 776 | *p++ = cpu_to_be32(rpcb->r_prog); | |
800 | p = xdr_reserve_space(&xdr, | 777 | *p = cpu_to_be32(rpcb->r_vers); |
801 | sizeof(__be32) * (RPCB_program_sz + RPCB_version_sz)); | ||
802 | if (unlikely(p == NULL)) | ||
803 | return -EIO; | ||
804 | *p++ = htonl(rpcb->r_prog); | ||
805 | *p = htonl(rpcb->r_vers); | ||
806 | |||
807 | if (encode_rpcb_string(&xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN)) | ||
808 | return -EIO; | ||
809 | if (encode_rpcb_string(&xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN)) | ||
810 | return -EIO; | ||
811 | if (encode_rpcb_string(&xdr, rpcb->r_owner, RPCB_MAXOWNERLEN)) | ||
812 | return -EIO; | ||
813 | 778 | ||
814 | return 0; | 779 | encode_rpcb_string(xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN); |
780 | encode_rpcb_string(xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN); | ||
781 | encode_rpcb_string(xdr, rpcb->r_owner, RPCB_MAXOWNERLEN); | ||
815 | } | 782 | } |
816 | 783 | ||
817 | static int rpcb_dec_getaddr(struct rpc_rqst *req, __be32 *p, | 784 | static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr, |
818 | struct rpcbind_args *rpcb) | 785 | struct rpcbind_args *rpcb) |
819 | { | 786 | { |
820 | struct sockaddr_storage address; | 787 | struct sockaddr_storage address; |
821 | struct sockaddr *sap = (struct sockaddr *)&address; | 788 | struct sockaddr *sap = (struct sockaddr *)&address; |
822 | struct rpc_task *task = req->rq_task; | 789 | struct rpc_task *task = req->rq_task; |
823 | struct xdr_stream xdr; | 790 | __be32 *p; |
824 | u32 len; | 791 | u32 len; |
825 | 792 | ||
826 | rpcb->r_port = 0; | 793 | rpcb->r_port = 0; |
827 | 794 | ||
828 | xdr_init_decode(&xdr, &req->rq_rcv_buf, p); | 795 | p = xdr_inline_decode(xdr, 4); |
829 | |||
830 | p = xdr_inline_decode(&xdr, sizeof(__be32)); | ||
831 | if (unlikely(p == NULL)) | 796 | if (unlikely(p == NULL)) |
832 | goto out_fail; | 797 | goto out_fail; |
833 | len = ntohl(*p); | 798 | len = be32_to_cpup(p); |
834 | 799 | ||
835 | /* | 800 | /* |
836 | * If the returned universal address is a null string, | 801 | * If the returned universal address is a null string, |
@@ -845,7 +810,7 @@ static int rpcb_dec_getaddr(struct rpc_rqst *req, __be32 *p, | |||
845 | if (unlikely(len > RPCBIND_MAXUADDRLEN)) | 810 | if (unlikely(len > RPCBIND_MAXUADDRLEN)) |
846 | goto out_fail; | 811 | goto out_fail; |
847 | 812 | ||
848 | p = xdr_inline_decode(&xdr, len); | 813 | p = xdr_inline_decode(xdr, len); |
849 | if (unlikely(p == NULL)) | 814 | if (unlikely(p == NULL)) |
850 | goto out_fail; | 815 | goto out_fail; |
851 | dprintk("RPC: %5u RPCB_%s reply: %s\n", task->tk_pid, | 816 | dprintk("RPC: %5u RPCB_%s reply: %s\n", task->tk_pid, |
@@ -871,8 +836,8 @@ out_fail: | |||
871 | static struct rpc_procinfo rpcb_procedures2[] = { | 836 | static struct rpc_procinfo rpcb_procedures2[] = { |
872 | [RPCBPROC_SET] = { | 837 | [RPCBPROC_SET] = { |
873 | .p_proc = RPCBPROC_SET, | 838 | .p_proc = RPCBPROC_SET, |
874 | .p_encode = (kxdrproc_t)rpcb_enc_mapping, | 839 | .p_encode = (kxdreproc_t)rpcb_enc_mapping, |
875 | .p_decode = (kxdrproc_t)rpcb_dec_set, | 840 | .p_decode = (kxdrdproc_t)rpcb_dec_set, |
876 | .p_arglen = RPCB_mappingargs_sz, | 841 | .p_arglen = RPCB_mappingargs_sz, |
877 | .p_replen = RPCB_setres_sz, | 842 | .p_replen = RPCB_setres_sz, |
878 | .p_statidx = RPCBPROC_SET, | 843 | .p_statidx = RPCBPROC_SET, |
@@ -881,8 +846,8 @@ static struct rpc_procinfo rpcb_procedures2[] = { | |||
881 | }, | 846 | }, |
882 | [RPCBPROC_UNSET] = { | 847 | [RPCBPROC_UNSET] = { |
883 | .p_proc = RPCBPROC_UNSET, | 848 | .p_proc = RPCBPROC_UNSET, |
884 | .p_encode = (kxdrproc_t)rpcb_enc_mapping, | 849 | .p_encode = (kxdreproc_t)rpcb_enc_mapping, |
885 | .p_decode = (kxdrproc_t)rpcb_dec_set, | 850 | .p_decode = (kxdrdproc_t)rpcb_dec_set, |
886 | .p_arglen = RPCB_mappingargs_sz, | 851 | .p_arglen = RPCB_mappingargs_sz, |
887 | .p_replen = RPCB_setres_sz, | 852 | .p_replen = RPCB_setres_sz, |
888 | .p_statidx = RPCBPROC_UNSET, | 853 | .p_statidx = RPCBPROC_UNSET, |
@@ -891,8 +856,8 @@ static struct rpc_procinfo rpcb_procedures2[] = { | |||
891 | }, | 856 | }, |
892 | [RPCBPROC_GETPORT] = { | 857 | [RPCBPROC_GETPORT] = { |
893 | .p_proc = RPCBPROC_GETPORT, | 858 | .p_proc = RPCBPROC_GETPORT, |
894 | .p_encode = (kxdrproc_t)rpcb_enc_mapping, | 859 | .p_encode = (kxdreproc_t)rpcb_enc_mapping, |
895 | .p_decode = (kxdrproc_t)rpcb_dec_getport, | 860 | .p_decode = (kxdrdproc_t)rpcb_dec_getport, |
896 | .p_arglen = RPCB_mappingargs_sz, | 861 | .p_arglen = RPCB_mappingargs_sz, |
897 | .p_replen = RPCB_getportres_sz, | 862 | .p_replen = RPCB_getportres_sz, |
898 | .p_statidx = RPCBPROC_GETPORT, | 863 | .p_statidx = RPCBPROC_GETPORT, |
@@ -904,8 +869,8 @@ static struct rpc_procinfo rpcb_procedures2[] = { | |||
904 | static struct rpc_procinfo rpcb_procedures3[] = { | 869 | static struct rpc_procinfo rpcb_procedures3[] = { |
905 | [RPCBPROC_SET] = { | 870 | [RPCBPROC_SET] = { |
906 | .p_proc = RPCBPROC_SET, | 871 | .p_proc = RPCBPROC_SET, |
907 | .p_encode = (kxdrproc_t)rpcb_enc_getaddr, | 872 | .p_encode = (kxdreproc_t)rpcb_enc_getaddr, |
908 | .p_decode = (kxdrproc_t)rpcb_dec_set, | 873 | .p_decode = (kxdrdproc_t)rpcb_dec_set, |
909 | .p_arglen = RPCB_getaddrargs_sz, | 874 | .p_arglen = RPCB_getaddrargs_sz, |
910 | .p_replen = RPCB_setres_sz, | 875 | .p_replen = RPCB_setres_sz, |
911 | .p_statidx = RPCBPROC_SET, | 876 | .p_statidx = RPCBPROC_SET, |
@@ -914,8 +879,8 @@ static struct rpc_procinfo rpcb_procedures3[] = { | |||
914 | }, | 879 | }, |
915 | [RPCBPROC_UNSET] = { | 880 | [RPCBPROC_UNSET] = { |
916 | .p_proc = RPCBPROC_UNSET, | 881 | .p_proc = RPCBPROC_UNSET, |
917 | .p_encode = (kxdrproc_t)rpcb_enc_getaddr, | 882 | .p_encode = (kxdreproc_t)rpcb_enc_getaddr, |
918 | .p_decode = (kxdrproc_t)rpcb_dec_set, | 883 | .p_decode = (kxdrdproc_t)rpcb_dec_set, |
919 | .p_arglen = RPCB_getaddrargs_sz, | 884 | .p_arglen = RPCB_getaddrargs_sz, |
920 | .p_replen = RPCB_setres_sz, | 885 | .p_replen = RPCB_setres_sz, |
921 | .p_statidx = RPCBPROC_UNSET, | 886 | .p_statidx = RPCBPROC_UNSET, |
@@ -924,8 +889,8 @@ static struct rpc_procinfo rpcb_procedures3[] = { | |||
924 | }, | 889 | }, |
925 | [RPCBPROC_GETADDR] = { | 890 | [RPCBPROC_GETADDR] = { |
926 | .p_proc = RPCBPROC_GETADDR, | 891 | .p_proc = RPCBPROC_GETADDR, |
927 | .p_encode = (kxdrproc_t)rpcb_enc_getaddr, | 892 | .p_encode = (kxdreproc_t)rpcb_enc_getaddr, |
928 | .p_decode = (kxdrproc_t)rpcb_dec_getaddr, | 893 | .p_decode = (kxdrdproc_t)rpcb_dec_getaddr, |
929 | .p_arglen = RPCB_getaddrargs_sz, | 894 | .p_arglen = RPCB_getaddrargs_sz, |
930 | .p_replen = RPCB_getaddrres_sz, | 895 | .p_replen = RPCB_getaddrres_sz, |
931 | .p_statidx = RPCBPROC_GETADDR, | 896 | .p_statidx = RPCBPROC_GETADDR, |
@@ -937,8 +902,8 @@ static struct rpc_procinfo rpcb_procedures3[] = { | |||
937 | static struct rpc_procinfo rpcb_procedures4[] = { | 902 | static struct rpc_procinfo rpcb_procedures4[] = { |
938 | [RPCBPROC_SET] = { | 903 | [RPCBPROC_SET] = { |
939 | .p_proc = RPCBPROC_SET, | 904 | .p_proc = RPCBPROC_SET, |
940 | .p_encode = (kxdrproc_t)rpcb_enc_getaddr, | 905 | .p_encode = (kxdreproc_t)rpcb_enc_getaddr, |
941 | .p_decode = (kxdrproc_t)rpcb_dec_set, | 906 | .p_decode = (kxdrdproc_t)rpcb_dec_set, |
942 | .p_arglen = RPCB_getaddrargs_sz, | 907 | .p_arglen = RPCB_getaddrargs_sz, |
943 | .p_replen = RPCB_setres_sz, | 908 | .p_replen = RPCB_setres_sz, |
944 | .p_statidx = RPCBPROC_SET, | 909 | .p_statidx = RPCBPROC_SET, |
@@ -947,8 +912,8 @@ static struct rpc_procinfo rpcb_procedures4[] = { | |||
947 | }, | 912 | }, |
948 | [RPCBPROC_UNSET] = { | 913 | [RPCBPROC_UNSET] = { |
949 | .p_proc = RPCBPROC_UNSET, | 914 | .p_proc = RPCBPROC_UNSET, |
950 | .p_encode = (kxdrproc_t)rpcb_enc_getaddr, | 915 | .p_encode = (kxdreproc_t)rpcb_enc_getaddr, |
951 | .p_decode = (kxdrproc_t)rpcb_dec_set, | 916 | .p_decode = (kxdrdproc_t)rpcb_dec_set, |
952 | .p_arglen = RPCB_getaddrargs_sz, | 917 | .p_arglen = RPCB_getaddrargs_sz, |
953 | .p_replen = RPCB_setres_sz, | 918 | .p_replen = RPCB_setres_sz, |
954 | .p_statidx = RPCBPROC_UNSET, | 919 | .p_statidx = RPCBPROC_UNSET, |
@@ -957,8 +922,8 @@ static struct rpc_procinfo rpcb_procedures4[] = { | |||
957 | }, | 922 | }, |
958 | [RPCBPROC_GETADDR] = { | 923 | [RPCBPROC_GETADDR] = { |
959 | .p_proc = RPCBPROC_GETADDR, | 924 | .p_proc = RPCBPROC_GETADDR, |
960 | .p_encode = (kxdrproc_t)rpcb_enc_getaddr, | 925 | .p_encode = (kxdreproc_t)rpcb_enc_getaddr, |
961 | .p_decode = (kxdrproc_t)rpcb_dec_getaddr, | 926 | .p_decode = (kxdrdproc_t)rpcb_dec_getaddr, |
962 | .p_arglen = RPCB_getaddrargs_sz, | 927 | .p_arglen = RPCB_getaddrargs_sz, |
963 | .p_replen = RPCB_getaddrres_sz, | 928 | .p_replen = RPCB_getaddrres_sz, |
964 | .p_statidx = RPCBPROC_GETADDR, | 929 | .p_statidx = RPCBPROC_GETADDR, |
@@ -993,19 +958,19 @@ static struct rpcb_info rpcb_next_version6[] = { | |||
993 | 958 | ||
994 | static struct rpc_version rpcb_version2 = { | 959 | static struct rpc_version rpcb_version2 = { |
995 | .number = RPCBVERS_2, | 960 | .number = RPCBVERS_2, |
996 | .nrprocs = RPCB_HIGHPROC_2, | 961 | .nrprocs = ARRAY_SIZE(rpcb_procedures2), |
997 | .procs = rpcb_procedures2 | 962 | .procs = rpcb_procedures2 |
998 | }; | 963 | }; |
999 | 964 | ||
1000 | static struct rpc_version rpcb_version3 = { | 965 | static struct rpc_version rpcb_version3 = { |
1001 | .number = RPCBVERS_3, | 966 | .number = RPCBVERS_3, |
1002 | .nrprocs = RPCB_HIGHPROC_3, | 967 | .nrprocs = ARRAY_SIZE(rpcb_procedures3), |
1003 | .procs = rpcb_procedures3 | 968 | .procs = rpcb_procedures3 |
1004 | }; | 969 | }; |
1005 | 970 | ||
1006 | static struct rpc_version rpcb_version4 = { | 971 | static struct rpc_version rpcb_version4 = { |
1007 | .number = RPCBVERS_4, | 972 | .number = RPCBVERS_4, |
1008 | .nrprocs = RPCB_HIGHPROC_4, | 973 | .nrprocs = ARRAY_SIZE(rpcb_procedures4), |
1009 | .procs = rpcb_procedures4 | 974 | .procs = rpcb_procedures4 |
1010 | }; | 975 | }; |
1011 | 976 | ||
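The rpcbind rewrite above leans on four xdr_stream helpers; their assumed prototypes (include/linux/sunrpc/xdr.h) are:

	void	xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf,
				__be32 *p);
	void	xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf,
				__be32 *p);
	__be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
	__be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);

The stream is now initialized once by the generic client code (rpc_xdr_encode() and the rpcauth wrappers above), which is why the per-procedure xdr_init_encode()/xdr_init_decode() calls disappear here.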
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 6359c42c4941..08e05a8ce025 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
@@ -488,10 +488,6 @@ svc_destroy(struct svc_serv *serv) | |||
488 | if (svc_serv_is_pooled(serv)) | 488 | if (svc_serv_is_pooled(serv)) |
489 | svc_pool_map_put(); | 489 | svc_pool_map_put(); |
490 | 490 | ||
491 | #if defined(CONFIG_NFS_V4_1) | ||
492 | svc_sock_destroy(serv->bc_xprt); | ||
493 | #endif /* CONFIG_NFS_V4_1 */ | ||
494 | |||
495 | svc_unregister(serv); | 491 | svc_unregister(serv); |
496 | kfree(serv->sv_pools); | 492 | kfree(serv->sv_pools); |
497 | kfree(serv); | 493 | kfree(serv); |
@@ -1005,6 +1001,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) | |||
1005 | rqstp->rq_splice_ok = 1; | 1001 | rqstp->rq_splice_ok = 1; |
1006 | /* Will be turned off only when NFSv4 Sessions are used */ | 1002 | /* Will be turned off only when NFSv4 Sessions are used */ |
1007 | rqstp->rq_usedeferral = 1; | 1003 | rqstp->rq_usedeferral = 1; |
1004 | rqstp->rq_dropme = false; | ||
1008 | 1005 | ||
1009 | /* Setup reply header */ | 1006 | /* Setup reply header */ |
1010 | rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp); | 1007 | rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp); |
@@ -1106,7 +1103,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) | |||
1106 | *statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); | 1103 | *statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); |
1107 | 1104 | ||
1108 | /* Encode reply */ | 1105 | /* Encode reply */ |
1109 | if (*statp == rpc_drop_reply) { | 1106 | if (rqstp->rq_dropme) { |
1110 | if (procp->pc_release) | 1107 | if (procp->pc_release) |
1111 | procp->pc_release(rqstp, NULL, rqstp->rq_resp); | 1108 | procp->pc_release(rqstp, NULL, rqstp->rq_resp); |
1112 | goto dropit; | 1109 | goto dropit; |
@@ -1147,7 +1144,6 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) | |||
1147 | dropit: | 1144 | dropit: |
1148 | svc_authorise(rqstp); /* doesn't hurt to call this twice */ | 1145 | svc_authorise(rqstp); /* doesn't hurt to call this twice */ |
1149 | dprintk("svc: svc_process dropit\n"); | 1146 | dprintk("svc: svc_process dropit\n"); |
1150 | svc_drop(rqstp); | ||
1151 | return 0; | 1147 | return 0; |
1152 | 1148 | ||
1153 | err_short_len: | 1149 | err_short_len: |
@@ -1218,7 +1214,6 @@ svc_process(struct svc_rqst *rqstp) | |||
1218 | struct kvec *resv = &rqstp->rq_res.head[0]; | 1214 | struct kvec *resv = &rqstp->rq_res.head[0]; |
1219 | struct svc_serv *serv = rqstp->rq_server; | 1215 | struct svc_serv *serv = rqstp->rq_server; |
1220 | u32 dir; | 1216 | u32 dir; |
1221 | int error; | ||
1222 | 1217 | ||
1223 | /* | 1218 | /* |
1224 | * Setup response xdr_buf. | 1219 | * Setup response xdr_buf. |
@@ -1246,11 +1241,13 @@ svc_process(struct svc_rqst *rqstp) | |||
1246 | return 0; | 1241 | return 0; |
1247 | } | 1242 | } |
1248 | 1243 | ||
1249 | error = svc_process_common(rqstp, argv, resv); | 1244 | /* Returns 1 for send, 0 for drop */ |
1250 | if (error <= 0) | 1245 | if (svc_process_common(rqstp, argv, resv)) |
1251 | return error; | 1246 | return svc_send(rqstp); |
1252 | 1247 | else { | |
1253 | return svc_send(rqstp); | 1248 | svc_drop(rqstp); |
1249 | return 0; | ||
1250 | } | ||
1254 | } | 1251 | } |
1255 | 1252 | ||
1256 | #if defined(CONFIG_NFS_V4_1) | 1253 | #if defined(CONFIG_NFS_V4_1) |
@@ -1264,10 +1261,9 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req, | |||
1264 | { | 1261 | { |
1265 | struct kvec *argv = &rqstp->rq_arg.head[0]; | 1262 | struct kvec *argv = &rqstp->rq_arg.head[0]; |
1266 | struct kvec *resv = &rqstp->rq_res.head[0]; | 1263 | struct kvec *resv = &rqstp->rq_res.head[0]; |
1267 | int error; | ||
1268 | 1264 | ||
1269 | /* Build the svc_rqst used by the common processing routine */ | 1265 | /* Build the svc_rqst used by the common processing routine */ |
1270 | rqstp->rq_xprt = serv->bc_xprt; | 1266 | rqstp->rq_xprt = serv->sv_bc_xprt; |
1271 | rqstp->rq_xid = req->rq_xid; | 1267 | rqstp->rq_xid = req->rq_xid; |
1272 | rqstp->rq_prot = req->rq_xprt->prot; | 1268 | rqstp->rq_prot = req->rq_xprt->prot; |
1273 | rqstp->rq_server = serv; | 1269 | rqstp->rq_server = serv; |
@@ -1292,12 +1288,15 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req, | |||
1292 | svc_getu32(argv); /* XID */ | 1288 | svc_getu32(argv); /* XID */ |
1293 | svc_getnl(argv); /* CALLDIR */ | 1289 | svc_getnl(argv); /* CALLDIR */ |
1294 | 1290 | ||
1295 | error = svc_process_common(rqstp, argv, resv); | 1291 | /* Returns 1 for send, 0 for drop */ |
1296 | if (error <= 0) | 1292 | if (svc_process_common(rqstp, argv, resv)) { |
1297 | return error; | 1293 | memcpy(&req->rq_snd_buf, &rqstp->rq_res, |
1298 | 1294 | sizeof(req->rq_snd_buf)); | |
1299 | memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf)); | 1295 | return bc_send(req); |
1300 | return bc_send(req); | 1296 | } else { |
1297 | /* Nothing to do to drop request */ | ||
1298 | return 0; | ||
1299 | } | ||
1301 | } | 1300 | } |
1302 | EXPORT_SYMBOL(bc_svc_process); | 1301 | EXPORT_SYMBOL(bc_svc_process); |
1303 | #endif /* CONFIG_NFS_V4_1 */ | 1302 | #endif /* CONFIG_NFS_V4_1 */ |
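The svc.c hunks above change how dropped requests are signalled: the dispatcher no longer compares *statp against rpc_drop_reply; instead svc_defer() (see the svc_xprt.c hunk below) sets rqstp->rq_dropme, svc_process_common() returns 1 when a reply should be sent and 0 when the request should be dropped, and the caller then performs svc_send(), svc_drop(), or (for the backchannel) nothing at all. A minimal standalone mock of that calling convention, in plain userspace C with made-up names, not the kernel code itself:

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_rqst {
        bool dropme;                      /* plays the role of rqstp->rq_dropme */
    };

    /* Same contract as svc_process_common(): 1 for send, 0 for drop. */
    static int fake_process_common(struct fake_rqst *rq)
    {
        /* ... decode arguments, call the procedure ... */
        if (rq->dropme)                   /* the procedure deferred the request */
            return 0;
        /* ... encode the reply ... */
        return 1;
    }

    static void fake_process(struct fake_rqst *rq)
    {
        if (fake_process_common(rq))
            printf("send reply\n");       /* svc_send() in the real code */
        else
            printf("drop request\n");     /* svc_drop() in the real code */
    }

    int main(void)
    {
        struct fake_rqst normal = { .dropme = false };
        struct fake_rqst deferred = { .dropme = true };

        fake_process(&normal);
        fake_process(&deferred);
        return 0;
    }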
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 3f2c5559ca1a..ab86b7927f84 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/sunrpc/stats.h> | 13 | #include <linux/sunrpc/stats.h> |
14 | #include <linux/sunrpc/svc_xprt.h> | 14 | #include <linux/sunrpc/svc_xprt.h> |
15 | #include <linux/sunrpc/svcsock.h> | 15 | #include <linux/sunrpc/svcsock.h> |
16 | #include <linux/sunrpc/xprt.h> | ||
16 | 17 | ||
17 | #define RPCDBG_FACILITY RPCDBG_SVCXPRT | 18 | #define RPCDBG_FACILITY RPCDBG_SVCXPRT |
18 | 19 | ||
@@ -128,6 +129,9 @@ static void svc_xprt_free(struct kref *kref) | |||
128 | if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) | 129 | if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) |
129 | svcauth_unix_info_release(xprt); | 130 | svcauth_unix_info_release(xprt); |
130 | put_net(xprt->xpt_net); | 131 | put_net(xprt->xpt_net); |
132 | /* See comment on corresponding get in xs_setup_bc_tcp(): */ | ||
133 | if (xprt->xpt_bc_xprt) | ||
134 | xprt_put(xprt->xpt_bc_xprt); | ||
131 | xprt->xpt_ops->xpo_free(xprt); | 135 | xprt->xpt_ops->xpo_free(xprt); |
132 | module_put(owner); | 136 | module_put(owner); |
133 | } | 137 | } |
@@ -303,6 +307,15 @@ static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp) | |||
303 | list_del(&rqstp->rq_list); | 307 | list_del(&rqstp->rq_list); |
304 | } | 308 | } |
305 | 309 | ||
310 | static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt) | ||
311 | { | ||
312 | if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE))) | ||
313 | return true; | ||
314 | if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED))) | ||
315 | return xprt->xpt_ops->xpo_has_wspace(xprt); | ||
316 | return false; | ||
317 | } | ||
318 | |||
306 | /* | 319 | /* |
307 | * Queue up a transport with data pending. If there are idle nfsd | 320 | * Queue up a transport with data pending. If there are idle nfsd |
308 | * processes, wake 'em up. | 321 | * processes, wake 'em up. |
@@ -315,8 +328,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) | |||
315 | struct svc_rqst *rqstp; | 328 | struct svc_rqst *rqstp; |
316 | int cpu; | 329 | int cpu; |
317 | 330 | ||
318 | if (!(xprt->xpt_flags & | 331 | if (!svc_xprt_has_something_to_do(xprt)) |
319 | ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED)))) | ||
320 | return; | 332 | return; |
321 | 333 | ||
322 | cpu = get_cpu(); | 334 | cpu = get_cpu(); |
@@ -343,28 +355,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) | |||
343 | dprintk("svc: transport %p busy, not enqueued\n", xprt); | 355 | dprintk("svc: transport %p busy, not enqueued\n", xprt); |
344 | goto out_unlock; | 356 | goto out_unlock; |
345 | } | 357 | } |
346 | BUG_ON(xprt->xpt_pool != NULL); | ||
347 | xprt->xpt_pool = pool; | ||
348 | |||
349 | /* Handle pending connection */ | ||
350 | if (test_bit(XPT_CONN, &xprt->xpt_flags)) | ||
351 | goto process; | ||
352 | |||
353 | /* Handle close in-progress */ | ||
354 | if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) | ||
355 | goto process; | ||
356 | |||
357 | /* Check if we have space to reply to a request */ | ||
358 | if (!xprt->xpt_ops->xpo_has_wspace(xprt)) { | ||
359 | /* Don't enqueue while not enough space for reply */ | ||
360 | dprintk("svc: no write space, transport %p not enqueued\n", | ||
361 | xprt); | ||
362 | xprt->xpt_pool = NULL; | ||
363 | clear_bit(XPT_BUSY, &xprt->xpt_flags); | ||
364 | goto out_unlock; | ||
365 | } | ||
366 | 358 | ||
367 | process: | ||
368 | if (!list_empty(&pool->sp_threads)) { | 359 | if (!list_empty(&pool->sp_threads)) { |
369 | rqstp = list_entry(pool->sp_threads.next, | 360 | rqstp = list_entry(pool->sp_threads.next, |
370 | struct svc_rqst, | 361 | struct svc_rqst, |
@@ -381,13 +372,11 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) | |||
381 | rqstp->rq_reserved = serv->sv_max_mesg; | 372 | rqstp->rq_reserved = serv->sv_max_mesg; |
382 | atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); | 373 | atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); |
383 | pool->sp_stats.threads_woken++; | 374 | pool->sp_stats.threads_woken++; |
384 | BUG_ON(xprt->xpt_pool != pool); | ||
385 | wake_up(&rqstp->rq_wait); | 375 | wake_up(&rqstp->rq_wait); |
386 | } else { | 376 | } else { |
387 | dprintk("svc: transport %p put into queue\n", xprt); | 377 | dprintk("svc: transport %p put into queue\n", xprt); |
388 | list_add_tail(&xprt->xpt_ready, &pool->sp_sockets); | 378 | list_add_tail(&xprt->xpt_ready, &pool->sp_sockets); |
389 | pool->sp_stats.sockets_queued++; | 379 | pool->sp_stats.sockets_queued++; |
390 | BUG_ON(xprt->xpt_pool != pool); | ||
391 | } | 380 | } |
392 | 381 | ||
393 | out_unlock: | 382 | out_unlock: |
@@ -426,7 +415,6 @@ static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool) | |||
426 | void svc_xprt_received(struct svc_xprt *xprt) | 415 | void svc_xprt_received(struct svc_xprt *xprt) |
427 | { | 416 | { |
428 | BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags)); | 417 | BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags)); |
429 | xprt->xpt_pool = NULL; | ||
430 | /* As soon as we clear busy, the xprt could be closed and | 418 | /* As soon as we clear busy, the xprt could be closed and |
431 | * 'put', so we need a reference to call svc_xprt_enqueue with: | 419 | * 'put', so we need a reference to call svc_xprt_enqueue with: |
432 | */ | 420 | */ |
@@ -722,7 +710,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
722 | if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { | 710 | if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { |
723 | dprintk("svc_recv: found XPT_CLOSE\n"); | 711 | dprintk("svc_recv: found XPT_CLOSE\n"); |
724 | svc_delete_xprt(xprt); | 712 | svc_delete_xprt(xprt); |
725 | } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { | 713 | /* Leave XPT_BUSY set on the dead xprt: */ |
714 | goto out; | ||
715 | } | ||
716 | if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { | ||
726 | struct svc_xprt *newxpt; | 717 | struct svc_xprt *newxpt; |
727 | newxpt = xprt->xpt_ops->xpo_accept(xprt); | 718 | newxpt = xprt->xpt_ops->xpo_accept(xprt); |
728 | if (newxpt) { | 719 | if (newxpt) { |
@@ -747,28 +738,23 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
747 | spin_unlock_bh(&serv->sv_lock); | 738 | spin_unlock_bh(&serv->sv_lock); |
748 | svc_xprt_received(newxpt); | 739 | svc_xprt_received(newxpt); |
749 | } | 740 | } |
750 | svc_xprt_received(xprt); | 741 | } else if (xprt->xpt_ops->xpo_has_wspace(xprt)) { |
751 | } else { | ||
752 | dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", | 742 | dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", |
753 | rqstp, pool->sp_id, xprt, | 743 | rqstp, pool->sp_id, xprt, |
754 | atomic_read(&xprt->xpt_ref.refcount)); | 744 | atomic_read(&xprt->xpt_ref.refcount)); |
755 | rqstp->rq_deferred = svc_deferred_dequeue(xprt); | 745 | rqstp->rq_deferred = svc_deferred_dequeue(xprt); |
756 | if (rqstp->rq_deferred) { | 746 | if (rqstp->rq_deferred) |
757 | svc_xprt_received(xprt); | ||
758 | len = svc_deferred_recv(rqstp); | 747 | len = svc_deferred_recv(rqstp); |
759 | } else { | 748 | else |
760 | len = xprt->xpt_ops->xpo_recvfrom(rqstp); | 749 | len = xprt->xpt_ops->xpo_recvfrom(rqstp); |
761 | svc_xprt_received(xprt); | ||
762 | } | ||
763 | dprintk("svc: got len=%d\n", len); | 750 | dprintk("svc: got len=%d\n", len); |
764 | } | 751 | } |
752 | svc_xprt_received(xprt); | ||
765 | 753 | ||
766 | /* No data, incomplete (TCP) read, or accept() */ | 754 | /* No data, incomplete (TCP) read, or accept() */ |
767 | if (len == 0 || len == -EAGAIN) { | 755 | if (len == 0 || len == -EAGAIN) |
768 | rqstp->rq_res.len = 0; | 756 | goto out; |
769 | svc_xprt_release(rqstp); | 757 | |
770 | return -EAGAIN; | ||
771 | } | ||
772 | clear_bit(XPT_OLD, &xprt->xpt_flags); | 758 | clear_bit(XPT_OLD, &xprt->xpt_flags); |
773 | 759 | ||
774 | rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp)); | 760 | rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp)); |
@@ -777,6 +763,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
777 | if (serv->sv_stats) | 763 | if (serv->sv_stats) |
778 | serv->sv_stats->netcnt++; | 764 | serv->sv_stats->netcnt++; |
779 | return len; | 765 | return len; |
766 | out: | ||
767 | rqstp->rq_res.len = 0; | ||
768 | svc_xprt_release(rqstp); | ||
769 | return -EAGAIN; | ||
780 | } | 770 | } |
781 | EXPORT_SYMBOL_GPL(svc_recv); | 771 | EXPORT_SYMBOL_GPL(svc_recv); |
782 | 772 | ||
@@ -935,7 +925,12 @@ void svc_close_xprt(struct svc_xprt *xprt) | |||
935 | if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) | 925 | if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) |
936 | /* someone else will have to effect the close */ | 926 | /* someone else will have to effect the close */ |
937 | return; | 927 | return; |
938 | 928 | /* | |
929 | * We expect svc_close_xprt() to work even when no threads are | ||
930 | * running (e.g., while configuring the server before starting | ||
931 | * any threads), so if the transport isn't busy, we delete | ||
932 | * it ourselves: | ||
933 | */ | ||
939 | svc_delete_xprt(xprt); | 934 | svc_delete_xprt(xprt); |
940 | } | 935 | } |
941 | EXPORT_SYMBOL_GPL(svc_close_xprt); | 936 | EXPORT_SYMBOL_GPL(svc_close_xprt); |
@@ -945,16 +940,16 @@ void svc_close_all(struct list_head *xprt_list) | |||
945 | struct svc_xprt *xprt; | 940 | struct svc_xprt *xprt; |
946 | struct svc_xprt *tmp; | 941 | struct svc_xprt *tmp; |
947 | 942 | ||
943 | /* | ||
944 | * The server is shutting down, and no more threads are running. | ||
945 | * svc_xprt_enqueue() might still be running, but at worst it | ||
946 | * will re-add the xprt to sp_sockets, which will soon get | ||
947 | * freed. So we don't bother with any more locking, and don't | ||
948 | * leave the close to the (nonexistent) server threads: | ||
949 | */ | ||
948 | list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) { | 950 | list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) { |
949 | set_bit(XPT_CLOSE, &xprt->xpt_flags); | 951 | set_bit(XPT_CLOSE, &xprt->xpt_flags); |
950 | if (test_bit(XPT_BUSY, &xprt->xpt_flags)) { | 952 | svc_delete_xprt(xprt); |
951 | /* Waiting to be processed, but no threads left, | ||
952 | * So just remove it from the waiting list | ||
953 | */ | ||
954 | list_del_init(&xprt->xpt_ready); | ||
955 | clear_bit(XPT_BUSY, &xprt->xpt_flags); | ||
956 | } | ||
957 | svc_close_xprt(xprt); | ||
958 | } | 953 | } |
959 | } | 954 | } |
960 | 955 | ||
@@ -1028,6 +1023,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req) | |||
1028 | } | 1023 | } |
1029 | svc_xprt_get(rqstp->rq_xprt); | 1024 | svc_xprt_get(rqstp->rq_xprt); |
1030 | dr->xprt = rqstp->rq_xprt; | 1025 | dr->xprt = rqstp->rq_xprt; |
1026 | rqstp->rq_dropme = true; | ||
1031 | 1027 | ||
1032 | dr->handle.revisit = svc_revisit; | 1028 | dr->handle.revisit = svc_revisit; |
1033 | return &dr->handle; | 1029 | return &dr->handle; |
@@ -1065,14 +1061,13 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt) | |||
1065 | if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags)) | 1061 | if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags)) |
1066 | return NULL; | 1062 | return NULL; |
1067 | spin_lock(&xprt->xpt_lock); | 1063 | spin_lock(&xprt->xpt_lock); |
1068 | clear_bit(XPT_DEFERRED, &xprt->xpt_flags); | ||
1069 | if (!list_empty(&xprt->xpt_deferred)) { | 1064 | if (!list_empty(&xprt->xpt_deferred)) { |
1070 | dr = list_entry(xprt->xpt_deferred.next, | 1065 | dr = list_entry(xprt->xpt_deferred.next, |
1071 | struct svc_deferred_req, | 1066 | struct svc_deferred_req, |
1072 | handle.recent); | 1067 | handle.recent); |
1073 | list_del_init(&dr->handle.recent); | 1068 | list_del_init(&dr->handle.recent); |
1074 | set_bit(XPT_DEFERRED, &xprt->xpt_flags); | 1069 | } else |
1075 | } | 1070 | clear_bit(XPT_DEFERRED, &xprt->xpt_flags); |
1076 | spin_unlock(&xprt->xpt_lock); | 1071 | spin_unlock(&xprt->xpt_lock); |
1077 | return dr; | 1072 | return dr; |
1078 | } | 1073 | } |
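The svc_deferred_dequeue() hunk above reworks the flag handling: XPT_DEFERRED is no longer cleared and immediately re-set on every dequeue; one entry is popped under xpt_lock and the flag is cleared only once the list has actually drained. A standalone sketch of that "clear the pending flag only when the queue is empty" shape, using a pthread mutex in place of xpt_lock (names are illustrative, not kernel code):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct deferred {
        struct deferred *next;
    };

    struct xprt_queue {
        pthread_mutex_t lock;             /* stands in for xpt_lock */
        struct deferred *head;
        bool deferred_pending;            /* stands in for XPT_DEFERRED */
    };

    struct deferred *deferred_dequeue(struct xprt_queue *q)
    {
        struct deferred *dr = NULL;

        if (!q->deferred_pending)         /* unlocked fast-path check, mirroring test_bit() */
            return NULL;
        pthread_mutex_lock(&q->lock);
        if (q->head != NULL) {
            dr = q->head;                 /* pop one entry; the flag stays set */
            q->head = dr->next;
        } else {
            q->deferred_pending = false;  /* list drained: only now clear the flag */
        }
        pthread_mutex_unlock(&q->lock);
        return dr;
    }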
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c index 4e9393c24687..7963569fc04f 100644 --- a/net/sunrpc/svcauth.c +++ b/net/sunrpc/svcauth.c | |||
@@ -118,7 +118,6 @@ EXPORT_SYMBOL_GPL(svc_auth_unregister); | |||
118 | 118 | ||
119 | #define DN_HASHBITS 6 | 119 | #define DN_HASHBITS 6 |
120 | #define DN_HASHMAX (1<<DN_HASHBITS) | 120 | #define DN_HASHMAX (1<<DN_HASHBITS) |
121 | #define DN_HASHMASK (DN_HASHMAX-1) | ||
122 | 121 | ||
123 | static struct hlist_head auth_domain_table[DN_HASHMAX]; | 122 | static struct hlist_head auth_domain_table[DN_HASHMAX]; |
124 | static spinlock_t auth_domain_lock = | 123 | static spinlock_t auth_domain_lock = |
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 560677d187f1..30916b06c12b 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c | |||
@@ -30,7 +30,9 @@ | |||
30 | 30 | ||
31 | struct unix_domain { | 31 | struct unix_domain { |
32 | struct auth_domain h; | 32 | struct auth_domain h; |
33 | #ifdef CONFIG_NFSD_DEPRECATED | ||
33 | int addr_changes; | 34 | int addr_changes; |
35 | #endif /* CONFIG_NFSD_DEPRECATED */ | ||
34 | /* other stuff later */ | 36 | /* other stuff later */ |
35 | }; | 37 | }; |
36 | 38 | ||
@@ -64,7 +66,9 @@ struct auth_domain *unix_domain_find(char *name) | |||
64 | return NULL; | 66 | return NULL; |
65 | } | 67 | } |
66 | new->h.flavour = &svcauth_unix; | 68 | new->h.flavour = &svcauth_unix; |
69 | #ifdef CONFIG_NFSD_DEPRECATED | ||
67 | new->addr_changes = 0; | 70 | new->addr_changes = 0; |
71 | #endif /* CONFIG_NFSD_DEPRECATED */ | ||
68 | rv = auth_domain_lookup(name, &new->h); | 72 | rv = auth_domain_lookup(name, &new->h); |
69 | } | 73 | } |
70 | } | 74 | } |
@@ -85,14 +89,15 @@ static void svcauth_unix_domain_release(struct auth_domain *dom) | |||
85 | */ | 89 | */ |
86 | #define IP_HASHBITS 8 | 90 | #define IP_HASHBITS 8 |
87 | #define IP_HASHMAX (1<<IP_HASHBITS) | 91 | #define IP_HASHMAX (1<<IP_HASHBITS) |
88 | #define IP_HASHMASK (IP_HASHMAX-1) | ||
89 | 92 | ||
90 | struct ip_map { | 93 | struct ip_map { |
91 | struct cache_head h; | 94 | struct cache_head h; |
92 | char m_class[8]; /* e.g. "nfsd" */ | 95 | char m_class[8]; /* e.g. "nfsd" */ |
93 | struct in6_addr m_addr; | 96 | struct in6_addr m_addr; |
94 | struct unix_domain *m_client; | 97 | struct unix_domain *m_client; |
98 | #ifdef CONFIG_NFSD_DEPRECATED | ||
95 | int m_add_change; | 99 | int m_add_change; |
100 | #endif /* CONFIG_NFSD_DEPRECATED */ | ||
96 | }; | 101 | }; |
97 | 102 | ||
98 | static void ip_map_put(struct kref *kref) | 103 | static void ip_map_put(struct kref *kref) |
@@ -146,7 +151,9 @@ static void update(struct cache_head *cnew, struct cache_head *citem) | |||
146 | 151 | ||
147 | kref_get(&item->m_client->h.ref); | 152 | kref_get(&item->m_client->h.ref); |
148 | new->m_client = item->m_client; | 153 | new->m_client = item->m_client; |
154 | #ifdef CONFIG_NFSD_DEPRECATED | ||
149 | new->m_add_change = item->m_add_change; | 155 | new->m_add_change = item->m_add_change; |
156 | #endif /* CONFIG_NFSD_DEPRECATED */ | ||
150 | } | 157 | } |
151 | static struct cache_head *ip_map_alloc(void) | 158 | static struct cache_head *ip_map_alloc(void) |
152 | { | 159 | { |
@@ -331,6 +338,7 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, | |||
331 | ip.h.flags = 0; | 338 | ip.h.flags = 0; |
332 | if (!udom) | 339 | if (!udom) |
333 | set_bit(CACHE_NEGATIVE, &ip.h.flags); | 340 | set_bit(CACHE_NEGATIVE, &ip.h.flags); |
341 | #ifdef CONFIG_NFSD_DEPRECATED | ||
334 | else { | 342 | else { |
335 | ip.m_add_change = udom->addr_changes; | 343 | ip.m_add_change = udom->addr_changes; |
336 | /* if this is from the legacy set_client system call, | 344 | /* if this is from the legacy set_client system call, |
@@ -339,6 +347,7 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, | |||
339 | if (expiry == NEVER) | 347 | if (expiry == NEVER) |
340 | ip.m_add_change++; | 348 | ip.m_add_change++; |
341 | } | 349 | } |
350 | #endif /* CONFIG_NFSD_DEPRECATED */ | ||
342 | ip.h.expiry_time = expiry; | 351 | ip.h.expiry_time = expiry; |
343 | ch = sunrpc_cache_update(cd, &ip.h, &ipm->h, | 352 | ch = sunrpc_cache_update(cd, &ip.h, &ipm->h, |
344 | hash_str(ipm->m_class, IP_HASHBITS) ^ | 353 | hash_str(ipm->m_class, IP_HASHBITS) ^ |
@@ -358,6 +367,7 @@ static inline int ip_map_update(struct net *net, struct ip_map *ipm, | |||
358 | return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry); | 367 | return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry); |
359 | } | 368 | } |
360 | 369 | ||
370 | #ifdef CONFIG_NFSD_DEPRECATED | ||
361 | int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom) | 371 | int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom) |
362 | { | 372 | { |
363 | struct unix_domain *udom; | 373 | struct unix_domain *udom; |
@@ -402,8 +412,7 @@ struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr) | |||
402 | return NULL; | 412 | return NULL; |
403 | 413 | ||
404 | if ((ipm->m_client->addr_changes - ipm->m_add_change) >0) { | 414 | if ((ipm->m_client->addr_changes - ipm->m_add_change) >0) { |
405 | if (test_and_set_bit(CACHE_NEGATIVE, &ipm->h.flags) == 0) | 415 | sunrpc_invalidate(&ipm->h, sn->ip_map_cache); |
406 | auth_domain_put(&ipm->m_client->h); | ||
407 | rv = NULL; | 416 | rv = NULL; |
408 | } else { | 417 | } else { |
409 | rv = &ipm->m_client->h; | 418 | rv = &ipm->m_client->h; |
@@ -413,6 +422,7 @@ struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr) | |||
413 | return rv; | 422 | return rv; |
414 | } | 423 | } |
415 | EXPORT_SYMBOL_GPL(auth_unix_lookup); | 424 | EXPORT_SYMBOL_GPL(auth_unix_lookup); |
425 | #endif /* CONFIG_NFSD_DEPRECATED */ | ||
416 | 426 | ||
417 | void svcauth_unix_purge(void) | 427 | void svcauth_unix_purge(void) |
418 | { | 428 | { |
@@ -497,7 +507,6 @@ svcauth_unix_info_release(struct svc_xprt *xpt) | |||
497 | */ | 507 | */ |
498 | #define GID_HASHBITS 8 | 508 | #define GID_HASHBITS 8 |
499 | #define GID_HASHMAX (1<<GID_HASHBITS) | 509 | #define GID_HASHMAX (1<<GID_HASHBITS) |
500 | #define GID_HASHMASK (GID_HASHMAX - 1) | ||
501 | 510 | ||
502 | struct unix_gid { | 511 | struct unix_gid { |
503 | struct cache_head h; | 512 | struct cache_head h; |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 07919e16be3e..7bd3bbba4710 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -66,6 +66,13 @@ static void svc_sock_free(struct svc_xprt *); | |||
66 | static struct svc_xprt *svc_create_socket(struct svc_serv *, int, | 66 | static struct svc_xprt *svc_create_socket(struct svc_serv *, int, |
67 | struct net *, struct sockaddr *, | 67 | struct net *, struct sockaddr *, |
68 | int, int); | 68 | int, int); |
69 | #if defined(CONFIG_NFS_V4_1) | ||
70 | static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int, | ||
71 | struct net *, struct sockaddr *, | ||
72 | int, int); | ||
73 | static void svc_bc_sock_free(struct svc_xprt *xprt); | ||
74 | #endif /* CONFIG_NFS_V4_1 */ | ||
75 | |||
69 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 76 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
70 | static struct lock_class_key svc_key[2]; | 77 | static struct lock_class_key svc_key[2]; |
71 | static struct lock_class_key svc_slock_key[2]; | 78 | static struct lock_class_key svc_slock_key[2]; |
@@ -324,19 +331,21 @@ int svc_sock_names(struct svc_serv *serv, char *buf, const size_t buflen, | |||
324 | len = onelen; | 331 | len = onelen; |
325 | break; | 332 | break; |
326 | } | 333 | } |
327 | if (toclose && strcmp(toclose, buf + len) == 0) | 334 | if (toclose && strcmp(toclose, buf + len) == 0) { |
328 | closesk = svsk; | 335 | closesk = svsk; |
329 | else | 336 | svc_xprt_get(&closesk->sk_xprt); |
337 | } else | ||
330 | len += onelen; | 338 | len += onelen; |
331 | } | 339 | } |
332 | spin_unlock_bh(&serv->sv_lock); | 340 | spin_unlock_bh(&serv->sv_lock); |
333 | 341 | ||
334 | if (closesk) | 342 | if (closesk) { |
335 | /* Should unregister with portmap, but you cannot | 343 | /* Should unregister with portmap, but you cannot |
336 | * unregister just one protocol... | 344 | * unregister just one protocol... |
337 | */ | 345 | */ |
338 | svc_close_xprt(&closesk->sk_xprt); | 346 | svc_close_xprt(&closesk->sk_xprt); |
339 | else if (toclose) | 347 | svc_xprt_put(&closesk->sk_xprt); |
348 | } else if (toclose) | ||
340 | return -ENOENT; | 349 | return -ENOENT; |
341 | return len; | 350 | return len; |
342 | } | 351 | } |
@@ -985,15 +994,17 @@ static int svc_process_calldir(struct svc_sock *svsk, struct svc_rqst *rqstp, | |||
985 | vec[0] = rqstp->rq_arg.head[0]; | 994 | vec[0] = rqstp->rq_arg.head[0]; |
986 | } else { | 995 | } else { |
987 | /* REPLY */ | 996 | /* REPLY */ |
988 | if (svsk->sk_bc_xprt) | 997 | struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt; |
989 | req = xprt_lookup_rqst(svsk->sk_bc_xprt, xid); | 998 | |
999 | if (bc_xprt) | ||
1000 | req = xprt_lookup_rqst(bc_xprt, xid); | ||
990 | 1001 | ||
991 | if (!req) { | 1002 | if (!req) { |
992 | printk(KERN_NOTICE | 1003 | printk(KERN_NOTICE |
993 | "%s: Got unrecognized reply: " | 1004 | "%s: Got unrecognized reply: " |
994 | "calldir 0x%x sk_bc_xprt %p xid %08x\n", | 1005 | "calldir 0x%x xpt_bc_xprt %p xid %08x\n", |
995 | __func__, ntohl(calldir), | 1006 | __func__, ntohl(calldir), |
996 | svsk->sk_bc_xprt, xid); | 1007 | bc_xprt, xid); |
997 | vec[0] = rqstp->rq_arg.head[0]; | 1008 | vec[0] = rqstp->rq_arg.head[0]; |
998 | goto out; | 1009 | goto out; |
999 | } | 1010 | } |
@@ -1184,6 +1195,57 @@ static struct svc_xprt *svc_tcp_create(struct svc_serv *serv, | |||
1184 | return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags); | 1195 | return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags); |
1185 | } | 1196 | } |
1186 | 1197 | ||
1198 | #if defined(CONFIG_NFS_V4_1) | ||
1199 | static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int, | ||
1200 | struct net *, struct sockaddr *, | ||
1201 | int, int); | ||
1202 | static void svc_bc_sock_free(struct svc_xprt *xprt); | ||
1203 | |||
1204 | static struct svc_xprt *svc_bc_tcp_create(struct svc_serv *serv, | ||
1205 | struct net *net, | ||
1206 | struct sockaddr *sa, int salen, | ||
1207 | int flags) | ||
1208 | { | ||
1209 | return svc_bc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags); | ||
1210 | } | ||
1211 | |||
1212 | static void svc_bc_tcp_sock_detach(struct svc_xprt *xprt) | ||
1213 | { | ||
1214 | } | ||
1215 | |||
1216 | static struct svc_xprt_ops svc_tcp_bc_ops = { | ||
1217 | .xpo_create = svc_bc_tcp_create, | ||
1218 | .xpo_detach = svc_bc_tcp_sock_detach, | ||
1219 | .xpo_free = svc_bc_sock_free, | ||
1220 | .xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr, | ||
1221 | }; | ||
1222 | |||
1223 | static struct svc_xprt_class svc_tcp_bc_class = { | ||
1224 | .xcl_name = "tcp-bc", | ||
1225 | .xcl_owner = THIS_MODULE, | ||
1226 | .xcl_ops = &svc_tcp_bc_ops, | ||
1227 | .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP, | ||
1228 | }; | ||
1229 | |||
1230 | static void svc_init_bc_xprt_sock(void) | ||
1231 | { | ||
1232 | svc_reg_xprt_class(&svc_tcp_bc_class); | ||
1233 | } | ||
1234 | |||
1235 | static void svc_cleanup_bc_xprt_sock(void) | ||
1236 | { | ||
1237 | svc_unreg_xprt_class(&svc_tcp_bc_class); | ||
1238 | } | ||
1239 | #else /* CONFIG_NFS_V4_1 */ | ||
1240 | static void svc_init_bc_xprt_sock(void) | ||
1241 | { | ||
1242 | } | ||
1243 | |||
1244 | static void svc_cleanup_bc_xprt_sock(void) | ||
1245 | { | ||
1246 | } | ||
1247 | #endif /* CONFIG_NFS_V4_1 */ | ||
1248 | |||
1187 | static struct svc_xprt_ops svc_tcp_ops = { | 1249 | static struct svc_xprt_ops svc_tcp_ops = { |
1188 | .xpo_create = svc_tcp_create, | 1250 | .xpo_create = svc_tcp_create, |
1189 | .xpo_recvfrom = svc_tcp_recvfrom, | 1251 | .xpo_recvfrom = svc_tcp_recvfrom, |
@@ -1207,12 +1269,14 @@ void svc_init_xprt_sock(void) | |||
1207 | { | 1269 | { |
1208 | svc_reg_xprt_class(&svc_tcp_class); | 1270 | svc_reg_xprt_class(&svc_tcp_class); |
1209 | svc_reg_xprt_class(&svc_udp_class); | 1271 | svc_reg_xprt_class(&svc_udp_class); |
1272 | svc_init_bc_xprt_sock(); | ||
1210 | } | 1273 | } |
1211 | 1274 | ||
1212 | void svc_cleanup_xprt_sock(void) | 1275 | void svc_cleanup_xprt_sock(void) |
1213 | { | 1276 | { |
1214 | svc_unreg_xprt_class(&svc_tcp_class); | 1277 | svc_unreg_xprt_class(&svc_tcp_class); |
1215 | svc_unreg_xprt_class(&svc_udp_class); | 1278 | svc_unreg_xprt_class(&svc_udp_class); |
1279 | svc_cleanup_bc_xprt_sock(); | ||
1216 | } | 1280 | } |
1217 | 1281 | ||
1218 | static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv) | 1282 | static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv) |
@@ -1509,41 +1573,45 @@ static void svc_sock_free(struct svc_xprt *xprt) | |||
1509 | kfree(svsk); | 1573 | kfree(svsk); |
1510 | } | 1574 | } |
1511 | 1575 | ||
1576 | #if defined(CONFIG_NFS_V4_1) | ||
1512 | /* | 1577 | /* |
1513 | * Create a svc_xprt. | 1578 | * Create a back channel svc_xprt which shares the fore channel socket. |
1514 | * | ||
1515 | * For internal use only (e.g. nfsv4.1 backchannel). | ||
1516 | * Callers should typically use the xpo_create() method. | ||
1517 | */ | 1579 | */ |
1518 | struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot) | 1580 | static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv, |
1581 | int protocol, | ||
1582 | struct net *net, | ||
1583 | struct sockaddr *sin, int len, | ||
1584 | int flags) | ||
1519 | { | 1585 | { |
1520 | struct svc_sock *svsk; | 1586 | struct svc_sock *svsk; |
1521 | struct svc_xprt *xprt = NULL; | 1587 | struct svc_xprt *xprt; |
1588 | |||
1589 | if (protocol != IPPROTO_TCP) { | ||
1590 | printk(KERN_WARNING "svc: only TCP sockets" | ||
1591 | " supported on shared back channel\n"); | ||
1592 | return ERR_PTR(-EINVAL); | ||
1593 | } | ||
1522 | 1594 | ||
1523 | dprintk("svc: %s\n", __func__); | ||
1524 | svsk = kzalloc(sizeof(*svsk), GFP_KERNEL); | 1595 | svsk = kzalloc(sizeof(*svsk), GFP_KERNEL); |
1525 | if (!svsk) | 1596 | if (!svsk) |
1526 | goto out; | 1597 | return ERR_PTR(-ENOMEM); |
1527 | 1598 | ||
1528 | xprt = &svsk->sk_xprt; | 1599 | xprt = &svsk->sk_xprt; |
1529 | if (prot == IPPROTO_TCP) | 1600 | svc_xprt_init(&svc_tcp_bc_class, xprt, serv); |
1530 | svc_xprt_init(&svc_tcp_class, xprt, serv); | 1601 | |
1531 | else if (prot == IPPROTO_UDP) | 1602 | serv->sv_bc_xprt = xprt; |
1532 | svc_xprt_init(&svc_udp_class, xprt, serv); | 1603 | |
1533 | else | ||
1534 | BUG(); | ||
1535 | out: | ||
1536 | dprintk("svc: %s return %p\n", __func__, xprt); | ||
1537 | return xprt; | 1604 | return xprt; |
1538 | } | 1605 | } |
1539 | EXPORT_SYMBOL_GPL(svc_sock_create); | ||
1540 | 1606 | ||
1541 | /* | 1607 | /* |
1542 | * Destroy a svc_sock. | 1608 | * Free a back channel svc_sock. |
1543 | */ | 1609 | */ |
1544 | void svc_sock_destroy(struct svc_xprt *xprt) | 1610 | static void svc_bc_sock_free(struct svc_xprt *xprt) |
1545 | { | 1611 | { |
1546 | if (xprt) | 1612 | if (xprt) { |
1613 | kfree(xprt->xpt_bc_sid); | ||
1547 | kfree(container_of(xprt, struct svc_sock, sk_xprt)); | 1614 | kfree(container_of(xprt, struct svc_sock, sk_xprt)); |
1615 | } | ||
1548 | } | 1616 | } |
1549 | EXPORT_SYMBOL_GPL(svc_sock_destroy); | 1617 | #endif /* CONFIG_NFS_V4_1 */ |
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index cd9e841e7492..679cd674b81d 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c | |||
@@ -552,6 +552,74 @@ void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int b | |||
552 | } | 552 | } |
553 | EXPORT_SYMBOL_GPL(xdr_write_pages); | 553 | EXPORT_SYMBOL_GPL(xdr_write_pages); |
554 | 554 | ||
555 | static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov, | ||
556 | __be32 *p, unsigned int len) | ||
557 | { | ||
558 | if (len > iov->iov_len) | ||
559 | len = iov->iov_len; | ||
560 | if (p == NULL) | ||
561 | p = (__be32*)iov->iov_base; | ||
562 | xdr->p = p; | ||
563 | xdr->end = (__be32*)(iov->iov_base + len); | ||
564 | xdr->iov = iov; | ||
565 | xdr->page_ptr = NULL; | ||
566 | } | ||
567 | |||
568 | static int xdr_set_page_base(struct xdr_stream *xdr, | ||
569 | unsigned int base, unsigned int len) | ||
570 | { | ||
571 | unsigned int pgnr; | ||
572 | unsigned int maxlen; | ||
573 | unsigned int pgoff; | ||
574 | unsigned int pgend; | ||
575 | void *kaddr; | ||
576 | |||
577 | maxlen = xdr->buf->page_len; | ||
578 | if (base >= maxlen) | ||
579 | return -EINVAL; | ||
580 | maxlen -= base; | ||
581 | if (len > maxlen) | ||
582 | len = maxlen; | ||
583 | |||
584 | base += xdr->buf->page_base; | ||
585 | |||
586 | pgnr = base >> PAGE_SHIFT; | ||
587 | xdr->page_ptr = &xdr->buf->pages[pgnr]; | ||
588 | kaddr = page_address(*xdr->page_ptr); | ||
589 | |||
590 | pgoff = base & ~PAGE_MASK; | ||
591 | xdr->p = (__be32*)(kaddr + pgoff); | ||
592 | |||
593 | pgend = pgoff + len; | ||
594 | if (pgend > PAGE_SIZE) | ||
595 | pgend = PAGE_SIZE; | ||
596 | xdr->end = (__be32*)(kaddr + pgend); | ||
597 | xdr->iov = NULL; | ||
598 | return 0; | ||
599 | } | ||
600 | |||
601 | static void xdr_set_next_page(struct xdr_stream *xdr) | ||
602 | { | ||
603 | unsigned int newbase; | ||
604 | |||
605 | newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT; | ||
606 | newbase -= xdr->buf->page_base; | ||
607 | |||
608 | if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0) | ||
609 | xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len); | ||
610 | } | ||
611 | |||
612 | static bool xdr_set_next_buffer(struct xdr_stream *xdr) | ||
613 | { | ||
614 | if (xdr->page_ptr != NULL) | ||
615 | xdr_set_next_page(xdr); | ||
616 | else if (xdr->iov == xdr->buf->head) { | ||
617 | if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0) | ||
618 | xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len); | ||
619 | } | ||
620 | return xdr->p != xdr->end; | ||
621 | } | ||
622 | |||
555 | /** | 623 | /** |
556 | * xdr_init_decode - Initialize an xdr_stream for decoding data. | 624 | * xdr_init_decode - Initialize an xdr_stream for decoding data. |
557 | * @xdr: pointer to xdr_stream struct | 625 | * @xdr: pointer to xdr_stream struct |
@@ -560,41 +628,67 @@ EXPORT_SYMBOL_GPL(xdr_write_pages); | |||
560 | */ | 628 | */ |
561 | void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p) | 629 | void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p) |
562 | { | 630 | { |
563 | struct kvec *iov = buf->head; | ||
564 | unsigned int len = iov->iov_len; | ||
565 | |||
566 | if (len > buf->len) | ||
567 | len = buf->len; | ||
568 | xdr->buf = buf; | 631 | xdr->buf = buf; |
569 | xdr->iov = iov; | 632 | xdr->scratch.iov_base = NULL; |
570 | xdr->p = p; | 633 | xdr->scratch.iov_len = 0; |
571 | xdr->end = (__be32 *)((char *)iov->iov_base + len); | 634 | if (buf->head[0].iov_len != 0) |
635 | xdr_set_iov(xdr, buf->head, p, buf->len); | ||
636 | else if (buf->page_len != 0) | ||
637 | xdr_set_page_base(xdr, 0, buf->len); | ||
572 | } | 638 | } |
573 | EXPORT_SYMBOL_GPL(xdr_init_decode); | 639 | EXPORT_SYMBOL_GPL(xdr_init_decode); |
574 | 640 | ||
575 | /** | 641 | static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes) |
576 | * xdr_inline_peek - Allow read-ahead in the XDR data stream | ||
577 | * @xdr: pointer to xdr_stream struct | ||
578 | * @nbytes: number of bytes of data to decode | ||
579 | * | ||
580 | * Check if the input buffer is long enough to enable us to decode | ||
581 | * 'nbytes' more bytes of data starting at the current position. | ||
582 | * If so return the current pointer without updating the current | ||
583 | * pointer position. | ||
584 | */ | ||
585 | __be32 * xdr_inline_peek(struct xdr_stream *xdr, size_t nbytes) | ||
586 | { | 642 | { |
587 | __be32 *p = xdr->p; | 643 | __be32 *p = xdr->p; |
588 | __be32 *q = p + XDR_QUADLEN(nbytes); | 644 | __be32 *q = p + XDR_QUADLEN(nbytes); |
589 | 645 | ||
590 | if (unlikely(q > xdr->end || q < p)) | 646 | if (unlikely(q > xdr->end || q < p)) |
591 | return NULL; | 647 | return NULL; |
648 | xdr->p = q; | ||
592 | return p; | 649 | return p; |
593 | } | 650 | } |
594 | EXPORT_SYMBOL_GPL(xdr_inline_peek); | ||
595 | 651 | ||
596 | /** | 652 | /** |
597 | * xdr_inline_decode - Retrieve non-page XDR data to decode | 653 | * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data. |
654 | * @xdr: pointer to xdr_stream struct | ||
655 | * @buf: pointer to an empty buffer | ||
656 | * @buflen: size of 'buf' | ||
657 | * | ||
658 | * The scratch buffer is used when decoding from an array of pages. | ||
659 | * If an xdr_inline_decode() call spans across page boundaries, then | ||
660 | * we copy the data into the scratch buffer in order to allow linear | ||
661 | * access. | ||
662 | */ | ||
663 | void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen) | ||
664 | { | ||
665 | xdr->scratch.iov_base = buf; | ||
666 | xdr->scratch.iov_len = buflen; | ||
667 | } | ||
668 | EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer); | ||
669 | |||
670 | static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes) | ||
671 | { | ||
672 | __be32 *p; | ||
673 | void *cpdest = xdr->scratch.iov_base; | ||
674 | size_t cplen = (char *)xdr->end - (char *)xdr->p; | ||
675 | |||
676 | if (nbytes > xdr->scratch.iov_len) | ||
677 | return NULL; | ||
678 | memcpy(cpdest, xdr->p, cplen); | ||
679 | cpdest += cplen; | ||
680 | nbytes -= cplen; | ||
681 | if (!xdr_set_next_buffer(xdr)) | ||
682 | return NULL; | ||
683 | p = __xdr_inline_decode(xdr, nbytes); | ||
684 | if (p == NULL) | ||
685 | return NULL; | ||
686 | memcpy(cpdest, p, nbytes); | ||
687 | return xdr->scratch.iov_base; | ||
688 | } | ||
689 | |||
690 | /** | ||
691 | * xdr_inline_decode - Retrieve XDR data to decode | ||
598 | * @xdr: pointer to xdr_stream struct | 692 | * @xdr: pointer to xdr_stream struct |
599 | * @nbytes: number of bytes of data to decode | 693 | * @nbytes: number of bytes of data to decode |
600 | * | 694 | * |
@@ -605,13 +699,16 @@ EXPORT_SYMBOL_GPL(xdr_inline_peek); | |||
605 | */ | 699 | */ |
606 | __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes) | 700 | __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes) |
607 | { | 701 | { |
608 | __be32 *p = xdr->p; | 702 | __be32 *p; |
609 | __be32 *q = p + XDR_QUADLEN(nbytes); | ||
610 | 703 | ||
611 | if (unlikely(q > xdr->end || q < p)) | 704 | if (nbytes == 0) |
705 | return xdr->p; | ||
706 | if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr)) | ||
612 | return NULL; | 707 | return NULL; |
613 | xdr->p = q; | 708 | p = __xdr_inline_decode(xdr, nbytes); |
614 | return p; | 709 | if (p != NULL) |
710 | return p; | ||
711 | return xdr_copy_to_scratch(xdr, nbytes); | ||
615 | } | 712 | } |
616 | EXPORT_SYMBOL_GPL(xdr_inline_decode); | 713 | EXPORT_SYMBOL_GPL(xdr_inline_decode); |
617 | 714 | ||
@@ -671,16 +768,12 @@ EXPORT_SYMBOL_GPL(xdr_read_pages); | |||
671 | */ | 768 | */ |
672 | void xdr_enter_page(struct xdr_stream *xdr, unsigned int len) | 769 | void xdr_enter_page(struct xdr_stream *xdr, unsigned int len) |
673 | { | 770 | { |
674 | char * kaddr = page_address(xdr->buf->pages[0]); | ||
675 | xdr_read_pages(xdr, len); | 771 | xdr_read_pages(xdr, len); |
676 | /* | 772 | /* |
677 | * Position current pointer at beginning of tail, and | 773 | * Position current pointer at beginning of tail, and |
678 | * set remaining message length. | 774 | * set remaining message length. |
679 | */ | 775 | */ |
680 | if (len > PAGE_CACHE_SIZE - xdr->buf->page_base) | 776 | xdr_set_page_base(xdr, 0, len); |
681 | len = PAGE_CACHE_SIZE - xdr->buf->page_base; | ||
682 | xdr->p = (__be32 *)(kaddr + xdr->buf->page_base); | ||
683 | xdr->end = (__be32 *)((char *)xdr->p + len); | ||
684 | } | 777 | } |
685 | EXPORT_SYMBOL_GPL(xdr_enter_page); | 778 | EXPORT_SYMBOL_GPL(xdr_enter_page); |
686 | 779 | ||
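The xdr.c changes above let xdr_init_decode() start in either the head kvec or the page array, and let xdr_inline_decode() walk across buffer boundaries, copying through a caller-supplied scratch buffer when an item straddles two pages. A hedged sketch of how a decoder would use the new xdr_set_scratch_buffer() interface; decode_opaque_string(), FOO_MAXLEN and the error values are illustrative, only the xdr_* calls come from the code above:

    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <linux/sunrpc/xdr.h>

    #define FOO_MAXLEN 64                 /* illustrative upper bound */

    /* 'out' is assumed to have room for FOO_MAXLEN + 1 bytes. */
    static int decode_opaque_string(struct xdr_buf *buf, __be32 *p, char *out)
    {
        struct xdr_stream xdr;
        char scratch[FOO_MAXLEN];
        __be32 *q;
        u32 len;

        xdr_init_decode(&xdr, buf, p);
        /*
         * Without a scratch buffer, an xdr_inline_decode() that crosses
         * a page boundary in buf->pages returns NULL.
         */
        xdr_set_scratch_buffer(&xdr, scratch, sizeof(scratch));

        q = xdr_inline_decode(&xdr, 4);
        if (q == NULL)
            return -EIO;
        len = be32_to_cpup(q);
        if (len > FOO_MAXLEN)
            return -EINVAL;

        /* q may point into the buffer itself or into 'scratch'. */
        q = xdr_inline_decode(&xdr, len);
        if (q == NULL)
            return -EIO;
        memcpy(out, q, len);
        out[len] = '\0';
        return 0;
    }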
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 4c8f18aff7c3..856274d7e85c 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -965,6 +965,7 @@ struct rpc_xprt *xprt_alloc(struct net *net, int size, int max_req) | |||
965 | xprt = kzalloc(size, GFP_KERNEL); | 965 | xprt = kzalloc(size, GFP_KERNEL); |
966 | if (xprt == NULL) | 966 | if (xprt == NULL) |
967 | goto out; | 967 | goto out; |
968 | kref_init(&xprt->kref); | ||
968 | 969 | ||
969 | xprt->max_reqs = max_req; | 970 | xprt->max_reqs = max_req; |
970 | xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL); | 971 | xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL); |
@@ -1101,8 +1102,10 @@ found: | |||
1101 | -PTR_ERR(xprt)); | 1102 | -PTR_ERR(xprt)); |
1102 | return xprt; | 1103 | return xprt; |
1103 | } | 1104 | } |
1105 | if (test_and_set_bit(XPRT_INITIALIZED, &xprt->state)) | ||
1106 | /* ->setup returned a pre-initialized xprt: */ | ||
1107 | return xprt; | ||
1104 | 1108 | ||
1105 | kref_init(&xprt->kref); | ||
1106 | spin_lock_init(&xprt->transport_lock); | 1109 | spin_lock_init(&xprt->transport_lock); |
1107 | spin_lock_init(&xprt->reserve_lock); | 1110 | spin_lock_init(&xprt->reserve_lock); |
1108 | 1111 | ||
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 96549df836ee..c431f5a57960 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -2359,6 +2359,15 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
2359 | struct svc_sock *bc_sock; | 2359 | struct svc_sock *bc_sock; |
2360 | struct rpc_xprt *ret; | 2360 | struct rpc_xprt *ret; |
2361 | 2361 | ||
2362 | if (args->bc_xprt->xpt_bc_xprt) { | ||
2363 | /* | ||
2364 | * This server connection already has a backchannel | ||
2365 | * export; we can't create a new one, as we wouldn't be | ||
2366 | * able to match replies based on xid any more. So, | ||
2367 | * reuse the already-existing one: | ||
2368 | */ | ||
2369 | return args->bc_xprt->xpt_bc_xprt; | ||
2370 | } | ||
2362 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); | 2371 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); |
2363 | if (IS_ERR(xprt)) | 2372 | if (IS_ERR(xprt)) |
2364 | return xprt; | 2373 | return xprt; |
@@ -2375,16 +2384,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
2375 | xprt->reestablish_timeout = 0; | 2384 | xprt->reestablish_timeout = 0; |
2376 | xprt->idle_timeout = 0; | 2385 | xprt->idle_timeout = 0; |
2377 | 2386 | ||
2378 | /* | ||
2379 | * The backchannel uses the same socket connection as the | ||
2380 | * forechannel | ||
2381 | */ | ||
2382 | xprt->bc_xprt = args->bc_xprt; | ||
2383 | bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt); | ||
2384 | bc_sock->sk_bc_xprt = xprt; | ||
2385 | transport->sock = bc_sock->sk_sock; | ||
2386 | transport->inet = bc_sock->sk_sk; | ||
2387 | |||
2388 | xprt->ops = &bc_tcp_ops; | 2387 | xprt->ops = &bc_tcp_ops; |
2389 | 2388 | ||
2390 | switch (addr->sa_family) { | 2389 | switch (addr->sa_family) { |
@@ -2407,6 +2406,20 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
2407 | xprt->address_strings[RPC_DISPLAY_PROTO]); | 2406 | xprt->address_strings[RPC_DISPLAY_PROTO]); |
2408 | 2407 | ||
2409 | /* | 2408 | /* |
2409 | * Once we've associated a backchannel xprt with a connection, | ||
2410 | * we want to keep it around as long as the connection | ||
2411 | * lasts, in case we need to start using it for a backchannel | ||
2412 | * again; this reference won't be dropped until bc_xprt is | ||
2413 | * destroyed. | ||
2414 | */ | ||
2415 | xprt_get(xprt); | ||
2416 | args->bc_xprt->xpt_bc_xprt = xprt; | ||
2417 | xprt->bc_xprt = args->bc_xprt; | ||
2418 | bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt); | ||
2419 | transport->sock = bc_sock->sk_sock; | ||
2420 | transport->inet = bc_sock->sk_sk; | ||
2421 | |||
2422 | /* | ||
2410 | * Since we don't want connections for the backchannel, we set | 2423 | * Since we don't want connections for the backchannel, we set |
2411 | * the xprt status to connected | 2424 | * the xprt status to connected |
2412 | */ | 2425 | */ |
@@ -2415,6 +2428,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
2415 | 2428 | ||
2416 | if (try_module_get(THIS_MODULE)) | 2429 | if (try_module_get(THIS_MODULE)) |
2417 | return xprt; | 2430 | return xprt; |
2431 | xprt_put(xprt); | ||
2418 | ret = ERR_PTR(-EINVAL); | 2432 | ret = ERR_PTR(-EINVAL); |
2419 | out_err: | 2433 | out_err: |
2420 | xprt_free(xprt); | 2434 | xprt_free(xprt); |
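The xs_setup_bc_tcp() changes above tie the back-channel rpc_xprt's lifetime to the server-side connection: the first setup caches the xprt in args->bc_xprt->xpt_bc_xprt and pins it with xprt_get(), later callers get the cached one back (the XPRT_INITIALIZED test added in xprt.c keeps it from being re-initialized), and the pinning reference is dropped by the xprt_put() added to svc_xprt_free() earlier in this series. A loose sketch of that "create once, cache on the owner, release in the owner's destructor" pattern with a plain kref; the owner/child structs are invented for illustration only:

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    /* 'owner' stands in for the svc_xprt, 'child' for the cached rpc_xprt. */
    struct child {
        struct kref ref;
    };

    struct owner {
        struct child *cached;             /* plays the role of xpt_bc_xprt */
    };

    static void child_release(struct kref *ref)
    {
        kfree(container_of(ref, struct child, ref));
    }

    static struct child *owner_get_child(struct owner *o)
    {
        if (o->cached)                    /* already set up once: reuse it */
            return o->cached;
        o->cached = kzalloc(sizeof(*o->cached), GFP_KERNEL);
        if (o->cached)
            kref_init(&o->cached->ref);   /* this reference is held by the cache */
        return o->cached;
    }

    static void owner_destroy(struct owner *o)
    {
        /* mirrors the xprt_put() added to svc_xprt_free() */
        if (o->cached)
            kref_put(&o->cached->ref, child_release);
    }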
diff --git a/net/wireless/core.c b/net/wireless/core.c index e9a5f8ca4c27..fe01de29bfe8 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -718,13 +718,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | |||
718 | wdev->ps = false; | 718 | wdev->ps = false; |
719 | /* allow mac80211 to determine the timeout */ | 719 | /* allow mac80211 to determine the timeout */ |
720 | wdev->ps_timeout = -1; | 720 | wdev->ps_timeout = -1; |
721 | if (rdev->ops->set_power_mgmt) | ||
722 | if (rdev->ops->set_power_mgmt(wdev->wiphy, dev, | ||
723 | wdev->ps, | ||
724 | wdev->ps_timeout)) { | ||
725 | /* assume this means it's off */ | ||
726 | wdev->ps = false; | ||
727 | } | ||
728 | 721 | ||
729 | if (!dev->ethtool_ops) | 722 | if (!dev->ethtool_ops) |
730 | dev->ethtool_ops = &cfg80211_ethtool_ops; | 723 | dev->ethtool_ops = &cfg80211_ethtool_ops; |
@@ -813,6 +806,19 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | |||
813 | rdev->opencount++; | 806 | rdev->opencount++; |
814 | mutex_unlock(&rdev->devlist_mtx); | 807 | mutex_unlock(&rdev->devlist_mtx); |
815 | cfg80211_unlock_rdev(rdev); | 808 | cfg80211_unlock_rdev(rdev); |
809 | |||
810 | /* | ||
811 | * Configure power management to the driver here so that it's | ||
812 | * correctly set also after interface type changes etc. | ||
813 | */ | ||
814 | if (wdev->iftype == NL80211_IFTYPE_STATION && | ||
815 | rdev->ops->set_power_mgmt) | ||
816 | if (rdev->ops->set_power_mgmt(wdev->wiphy, dev, | ||
817 | wdev->ps, | ||
818 | wdev->ps_timeout)) { | ||
819 | /* assume this means it's off */ | ||
820 | wdev->ps = false; | ||
821 | } | ||
816 | break; | 822 | break; |
817 | case NETDEV_UNREGISTER: | 823 | case NETDEV_UNREGISTER: |
818 | /* | 824 | /* |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 9b62710891a2..864ddfbeff2f 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -2718,7 +2718,7 @@ static int nl80211_get_mesh_config(struct sk_buff *skb, | |||
2718 | hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, | 2718 | hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, |
2719 | NL80211_CMD_GET_MESH_CONFIG); | 2719 | NL80211_CMD_GET_MESH_CONFIG); |
2720 | if (!hdr) | 2720 | if (!hdr) |
2721 | goto nla_put_failure; | 2721 | goto out; |
2722 | pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_CONFIG); | 2722 | pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_CONFIG); |
2723 | if (!pinfoattr) | 2723 | if (!pinfoattr) |
2724 | goto nla_put_failure; | 2724 | goto nla_put_failure; |
@@ -2759,6 +2759,7 @@ static int nl80211_get_mesh_config(struct sk_buff *skb, | |||
2759 | 2759 | ||
2760 | nla_put_failure: | 2760 | nla_put_failure: |
2761 | genlmsg_cancel(msg, hdr); | 2761 | genlmsg_cancel(msg, hdr); |
2762 | out: | ||
2762 | nlmsg_free(msg); | 2763 | nlmsg_free(msg); |
2763 | return -ENOBUFS; | 2764 | return -ENOBUFS; |
2764 | } | 2765 | } |
@@ -2954,7 +2955,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info) | |||
2954 | hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, | 2955 | hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, |
2955 | NL80211_CMD_GET_REG); | 2956 | NL80211_CMD_GET_REG); |
2956 | if (!hdr) | 2957 | if (!hdr) |
2957 | goto nla_put_failure; | 2958 | goto put_failure; |
2958 | 2959 | ||
2959 | NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2, | 2960 | NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2, |
2960 | cfg80211_regdomain->alpha2); | 2961 | cfg80211_regdomain->alpha2); |
@@ -3001,6 +3002,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info) | |||
3001 | 3002 | ||
3002 | nla_put_failure: | 3003 | nla_put_failure: |
3003 | genlmsg_cancel(msg, hdr); | 3004 | genlmsg_cancel(msg, hdr); |
3005 | put_failure: | ||
3004 | nlmsg_free(msg); | 3006 | nlmsg_free(msg); |
3005 | err = -EMSGSIZE; | 3007 | err = -EMSGSIZE; |
3006 | out: | 3008 | out: |
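Both nl80211 fixes above address the same error-path shape: genlmsg_cancel() is only valid once the genetlink header has actually been written, so a failed nl80211hdr_put() must jump past the cancel and only free the message. A generic, hedged sketch of that layout; example_family and the attribute numbers are placeholders, only the genetlink/netlink helpers are real:

    #include <net/genetlink.h>

    extern struct genl_family example_family;   /* placeholder family */
    #define EXAMPLE_ATTR_NEST 1                 /* placeholder attributes */
    #define EXAMPLE_ATTR_VAL  2

    static int example_get(struct genl_info *info, u32 value)
    {
        struct sk_buff *msg;
        struct nlattr *nest;
        void *hdr;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
            return -ENOMEM;

        hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
                          &example_family, 0, 0 /* cmd */);
        if (!hdr)
            goto out;                     /* header never written: nothing to cancel */

        nest = nla_nest_start(msg, EXAMPLE_ATTR_NEST);
        if (!nest)
            goto nla_put_failure;
        if (nla_put_u32(msg, EXAMPLE_ATTR_VAL, value))
            goto nla_put_failure;
        nla_nest_end(msg, nest);
        genlmsg_end(msg, hdr);

        return genlmsg_reply(msg, info);

    nla_put_failure:
        genlmsg_cancel(msg, hdr);         /* header exists: roll it back */
    out:
        nlmsg_free(msg);                  /* either way, drop the message */
        return -ENOBUFS;
    }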
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 37693b6ef23a..c565689f0b9f 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -1801,9 +1801,9 @@ void regulatory_hint_disconnect(void) | |||
1801 | 1801 | ||
1802 | static bool freq_is_chan_12_13_14(u16 freq) | 1802 | static bool freq_is_chan_12_13_14(u16 freq) |
1803 | { | 1803 | { |
1804 | if (freq == ieee80211_channel_to_frequency(12) || | 1804 | if (freq == ieee80211_channel_to_frequency(12, IEEE80211_BAND_2GHZ) || |
1805 | freq == ieee80211_channel_to_frequency(13) || | 1805 | freq == ieee80211_channel_to_frequency(13, IEEE80211_BAND_2GHZ) || |
1806 | freq == ieee80211_channel_to_frequency(14)) | 1806 | freq == ieee80211_channel_to_frequency(14, IEEE80211_BAND_2GHZ)) |
1807 | return true; | 1807 | return true; |
1808 | return false; | 1808 | return false; |
1809 | } | 1809 | } |
diff --git a/net/wireless/util.c b/net/wireless/util.c index 7620ae2fcf18..6a750bc6bcfe 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
@@ -29,29 +29,37 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband, | |||
29 | } | 29 | } |
30 | EXPORT_SYMBOL(ieee80211_get_response_rate); | 30 | EXPORT_SYMBOL(ieee80211_get_response_rate); |
31 | 31 | ||
32 | int ieee80211_channel_to_frequency(int chan) | 32 | int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band) |
33 | { | 33 | { |
34 | if (chan < 14) | 34 | /* see 802.11 17.3.8.3.2 and Annex J |
35 | return 2407 + chan * 5; | 35 | * there are overlapping channel numbers in 5GHz and 2GHz bands */ |
36 | 36 | if (band == IEEE80211_BAND_5GHZ) { | |
37 | if (chan == 14) | 37 | if (chan >= 182 && chan <= 196) |
38 | return 2484; | 38 | return 4000 + chan * 5; |
39 | 39 | else | |
40 | /* FIXME: 802.11j 17.3.8.3.2 */ | 40 | return 5000 + chan * 5; |
41 | return (chan + 1000) * 5; | 41 | } else { /* IEEE80211_BAND_2GHZ */ |
42 | if (chan == 14) | ||
43 | return 2484; | ||
44 | else if (chan < 14) | ||
45 | return 2407 + chan * 5; | ||
46 | else | ||
47 | return 0; /* not supported */ | ||
48 | } | ||
42 | } | 49 | } |
43 | EXPORT_SYMBOL(ieee80211_channel_to_frequency); | 50 | EXPORT_SYMBOL(ieee80211_channel_to_frequency); |
44 | 51 | ||
45 | int ieee80211_frequency_to_channel(int freq) | 52 | int ieee80211_frequency_to_channel(int freq) |
46 | { | 53 | { |
54 | /* see 802.11 17.3.8.3.2 and Annex J */ | ||
47 | if (freq == 2484) | 55 | if (freq == 2484) |
48 | return 14; | 56 | return 14; |
49 | 57 | else if (freq < 2484) | |
50 | if (freq < 2484) | ||
51 | return (freq - 2407) / 5; | 58 | return (freq - 2407) / 5; |
52 | 59 | else if (freq >= 4910 && freq <= 4980) | |
53 | /* FIXME: 802.11j 17.3.8.3.2 */ | 60 | return (freq - 4000) / 5; |
54 | return freq/5 - 1000; | 61 | else |
62 | return (freq - 5000) / 5; | ||
55 | } | 63 | } |
56 | EXPORT_SYMBOL(ieee80211_frequency_to_channel); | 64 | EXPORT_SYMBOL(ieee80211_frequency_to_channel); |
57 | 65 | ||
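Once the 4.9/5 GHz ranges are handled, channel numbers are no longer globally unique, which is why the band argument is needed to disambiguate them. A quick standalone mirror of the mapping in this hunk, useful as a sanity check; the enum and function below are local stand-ins, not the exported ieee80211_* helpers:

    #include <stdio.h>

    enum band { BAND_2GHZ, BAND_5GHZ };

    /* Mirrors the hunk above: 2.4 GHz channels 1-13 are 2407 + 5*chan MHz,
     * channel 14 is 2484 MHz; 5 GHz channels are 5000 + 5*chan MHz, except
     * 182-196 which sit at 4000 + 5*chan MHz. */
    static int chan_to_freq(int chan, enum band band)
    {
        if (band == BAND_5GHZ)
            return (chan >= 182 && chan <= 196) ? 4000 + chan * 5
                                                : 5000 + chan * 5;
        if (chan == 14)
            return 2484;
        return chan < 14 ? 2407 + chan * 5 : 0;   /* 0: not supported */
    }

    int main(void)
    {
        printf("ch 1,  2.4 GHz -> %d MHz\n", chan_to_freq(1, BAND_2GHZ));    /* 2412 */
        printf("ch 14, 2.4 GHz -> %d MHz\n", chan_to_freq(14, BAND_2GHZ));   /* 2484 */
        printf("ch 36, 5 GHz   -> %d MHz\n", chan_to_freq(36, BAND_5GHZ));   /* 5180 */
        printf("ch 184, 5 GHz  -> %d MHz\n", chan_to_freq(184, BAND_5GHZ));  /* 4920 */
        return 0;
    }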
@@ -159,12 +167,15 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, | |||
159 | 167 | ||
160 | /* | 168 | /* |
161 | * Disallow pairwise keys with non-zero index unless it's WEP | 169 | * Disallow pairwise keys with non-zero index unless it's WEP |
162 | * (because current deployments use pairwise WEP keys with | 170 | * or a vendor specific cipher (because current deployments use |
163 | * non-zero indizes but 802.11i clearly specifies to use zero) | 171 | * pairwise WEP keys with non-zero indices and for vendor specific |
172 | * ciphers this should be validated in the driver or hardware level | ||
173 | * - but 802.11i clearly specifies to use zero) | ||
164 | */ | 174 | */ |
165 | if (pairwise && key_idx && | 175 | if (pairwise && key_idx && |
166 | params->cipher != WLAN_CIPHER_SUITE_WEP40 && | 176 | ((params->cipher == WLAN_CIPHER_SUITE_TKIP) || |
167 | params->cipher != WLAN_CIPHER_SUITE_WEP104) | 177 | (params->cipher == WLAN_CIPHER_SUITE_CCMP) || |
178 | (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC))) | ||
168 | return -EINVAL; | 179 | return -EINVAL; |
169 | 180 | ||
170 | switch (params->cipher) { | 181 | switch (params->cipher) { |
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 3e5dbd4e4cd5..7f1f4ec49041 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c | |||
@@ -267,9 +267,12 @@ int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq) | |||
267 | * -EINVAL for impossible things. | 267 | * -EINVAL for impossible things. |
268 | */ | 268 | */ |
269 | if (freq->e == 0) { | 269 | if (freq->e == 0) { |
270 | enum ieee80211_band band = IEEE80211_BAND_2GHZ; | ||
270 | if (freq->m < 0) | 271 | if (freq->m < 0) |
271 | return 0; | 272 | return 0; |
272 | return ieee80211_channel_to_frequency(freq->m); | 273 | if (freq->m > 14) |
274 | band = IEEE80211_BAND_5GHZ; | ||
275 | return ieee80211_channel_to_frequency(freq->m, band); | ||
273 | } else { | 276 | } else { |
274 | int i, div = 1000000; | 277 | int i, div = 1000000; |
275 | for (i = 0; i < freq->e; i++) | 278 | for (i = 0; i < freq->e; i++) |
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 6a8da81ff66f..d5e1e0b08890 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <net/sock.h> | 26 | #include <net/sock.h> |
27 | #include <net/xfrm.h> | 27 | #include <net/xfrm.h> |
28 | #include <net/netlink.h> | 28 | #include <net/netlink.h> |
29 | #include <net/ah.h> | ||
29 | #include <asm/uaccess.h> | 30 | #include <asm/uaccess.h> |
30 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 31 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
31 | #include <linux/in6.h> | 32 | #include <linux/in6.h> |
@@ -302,7 +303,8 @@ static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props, | |||
302 | algo = xfrm_aalg_get_byname(ualg->alg_name, 1); | 303 | algo = xfrm_aalg_get_byname(ualg->alg_name, 1); |
303 | if (!algo) | 304 | if (!algo) |
304 | return -ENOSYS; | 305 | return -ENOSYS; |
305 | if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) | 306 | if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN || |
307 | ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) | ||
306 | return -EINVAL; | 308 | return -EINVAL; |
307 | *props = algo->desc.sadb_alg_id; | 309 | *props = algo->desc.sadb_alg_id; |
308 | 310 | ||
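The extra condition in attach_auth_trunc() bounds the requested truncated-ICV length by the size of the AH authenticator buffer (MAX_AH_AUTH_LEN bytes, from the newly included <net/ah.h>) as well as by the algorithm's full ICV width. A standalone mirror of that arithmetic; the 64-byte figure passed below is only an illustrative stand-in for MAX_AH_AUTH_LEN:

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors the check: reject a truncation that does not fit the AH
     * ICV buffer, or that exceeds the algorithm's full ICV width. */
    static bool auth_trunc_len_ok(unsigned int trunc_bits,
                                  unsigned int icv_fullbits,
                                  unsigned int max_ah_auth_len)
    {
        if (trunc_bits / 8 > max_ah_auth_len)
            return false;
        return trunc_bits <= icv_fullbits;
    }

    int main(void)
    {
        /* hmac(sha1): 160-bit ICV, RFC 2404 truncates it to 96 bits */
        printf("sha1/96:   %s\n", auth_trunc_len_ok(96, 160, 64) ? "ok" : "rejected");
        /* an oversized request is refused before it can overrun the buffer */
        printf("sha1/1024: %s\n", auth_trunc_len_ok(1024, 160, 64) ? "ok" : "rejected");
        return 0;
    }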