diff options
Diffstat (limited to 'net')
160 files changed, 2948 insertions, 1952 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 3fe4fc86055f..1037748c14db 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
| @@ -747,6 +747,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg) | |||
| 747 | vlan_dev_set_ingress_priority(dev, | 747 | vlan_dev_set_ingress_priority(dev, |
| 748 | args.u.skb_priority, | 748 | args.u.skb_priority, |
| 749 | args.vlan_qos); | 749 | args.vlan_qos); |
| 750 | err = 0; | ||
| 750 | break; | 751 | break; |
| 751 | 752 | ||
| 752 | case SET_VLAN_EGRESS_PRIORITY_CMD: | 753 | case SET_VLAN_EGRESS_PRIORITY_CMD: |
diff --git a/net/9p/Kconfig b/net/9p/Kconfig index 71bc110aebf8..bafc50c9e6ff 100644 --- a/net/9p/Kconfig +++ b/net/9p/Kconfig | |||
| @@ -23,6 +23,13 @@ config NET_9P_FD | |||
| 23 | file descriptors. TCP/IP is the default transport for 9p, | 23 | file descriptors. TCP/IP is the default transport for 9p, |
| 24 | so if you are going to use 9p, you'll likely want this. | 24 | so if you are going to use 9p, you'll likely want this. |
| 25 | 25 | ||
| 26 | config NET_9P_VIRTIO | ||
| 27 | depends on NET_9P && EXPERIMENTAL && VIRTIO | ||
| 28 | tristate "9P Virtio Transport (Experimental)" | ||
| 29 | help | ||
| 30 | This builds support for a transports between | ||
| 31 | guest partitions and a host partition. | ||
| 32 | |||
| 26 | config NET_9P_DEBUG | 33 | config NET_9P_DEBUG |
| 27 | bool "Debug information" | 34 | bool "Debug information" |
| 28 | depends on NET_9P | 35 | depends on NET_9P |
diff --git a/net/9p/Makefile b/net/9p/Makefile index 5059bc06f8f3..d3abb246ccab 100644 --- a/net/9p/Makefile +++ b/net/9p/Makefile | |||
| @@ -1,5 +1,6 @@ | |||
| 1 | obj-$(CONFIG_NET_9P) := 9pnet.o | 1 | obj-$(CONFIG_NET_9P) := 9pnet.o |
| 2 | obj-$(CONFIG_NET_9P_FD) += 9pnet_fd.o | 2 | obj-$(CONFIG_NET_9P_FD) += 9pnet_fd.o |
| 3 | obj-$(CONFIG_NET_9P_VIRTIO) += 9pnet_virtio.o | ||
| 3 | 4 | ||
| 4 | 9pnet-objs := \ | 5 | 9pnet-objs := \ |
| 5 | mod.o \ | 6 | mod.o \ |
| @@ -12,3 +13,6 @@ obj-$(CONFIG_NET_9P_FD) += 9pnet_fd.o | |||
| 12 | 13 | ||
| 13 | 9pnet_fd-objs := \ | 14 | 9pnet_fd-objs := \ |
| 14 | trans_fd.o \ | 15 | trans_fd.o \ |
| 16 | |||
| 17 | 9pnet_virtio-objs := \ | ||
| 18 | trans_virtio.o \ | ||
diff --git a/net/9p/mod.c b/net/9p/mod.c index 41d70f47375d..8f9763a9dc12 100644 --- a/net/9p/mod.c +++ b/net/9p/mod.c | |||
| @@ -76,9 +76,9 @@ struct p9_trans_module *v9fs_match_trans(const substring_t *name) | |||
| 76 | list_for_each(p, &v9fs_trans_list) { | 76 | list_for_each(p, &v9fs_trans_list) { |
| 77 | t = list_entry(p, struct p9_trans_module, list); | 77 | t = list_entry(p, struct p9_trans_module, list); |
| 78 | if (strncmp(t->name, name->from, name->to-name->from) == 0) | 78 | if (strncmp(t->name, name->from, name->to-name->from) == 0) |
| 79 | break; | 79 | return t; |
| 80 | } | 80 | } |
| 81 | return t; | 81 | return NULL; |
| 82 | } | 82 | } |
| 83 | EXPORT_SYMBOL(v9fs_match_trans); | 83 | EXPORT_SYMBOL(v9fs_match_trans); |
| 84 | 84 | ||
diff --git a/net/9p/mux.c b/net/9p/mux.c index f14014793bed..c9f0805048e4 100644 --- a/net/9p/mux.c +++ b/net/9p/mux.c | |||
| @@ -222,8 +222,10 @@ static int p9_mux_poll_start(struct p9_conn *m) | |||
| 222 | } | 222 | } |
| 223 | 223 | ||
| 224 | if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) { | 224 | if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) { |
| 225 | if (vptlast == NULL) | 225 | if (vptlast == NULL) { |
| 226 | mutex_unlock(&p9_mux_task_lock); | ||
| 226 | return -ENOMEM; | 227 | return -ENOMEM; |
| 228 | } | ||
| 227 | 229 | ||
| 228 | P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i); | 230 | P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i); |
| 229 | list_add(&m->mux_list, &vptlast->mux_list); | 231 | list_add(&m->mux_list, &vptlast->mux_list); |
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 30269a4ff22a..62332ed9da4a 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c | |||
| @@ -62,13 +62,14 @@ struct p9_trans_fd { | |||
| 62 | 62 | ||
| 63 | enum { | 63 | enum { |
| 64 | /* Options that take integer arguments */ | 64 | /* Options that take integer arguments */ |
| 65 | Opt_port, Opt_rfdno, Opt_wfdno, | 65 | Opt_port, Opt_rfdno, Opt_wfdno, Opt_err, |
| 66 | }; | 66 | }; |
| 67 | 67 | ||
| 68 | static match_table_t tokens = { | 68 | static match_table_t tokens = { |
| 69 | {Opt_port, "port=%u"}, | 69 | {Opt_port, "port=%u"}, |
| 70 | {Opt_rfdno, "rfdno=%u"}, | 70 | {Opt_rfdno, "rfdno=%u"}, |
| 71 | {Opt_wfdno, "wfdno=%u"}, | 71 | {Opt_wfdno, "wfdno=%u"}, |
| 72 | {Opt_err, NULL}, | ||
| 72 | }; | 73 | }; |
| 73 | 74 | ||
| 74 | /** | 75 | /** |
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c new file mode 100644 index 000000000000..40b71a29fc3f --- /dev/null +++ b/net/9p/trans_virtio.c | |||
| @@ -0,0 +1,353 @@ | |||
| 1 | /* | ||
| 2 | * The Guest 9p transport driver | ||
| 3 | * | ||
| 4 | * This is a trivial pipe-based transport driver based on the lguest console | ||
| 5 | * code: we use lguest's DMA mechanism to send bytes out, and register a | ||
| 6 | * DMA buffer to receive bytes in. It is assumed to be present and available | ||
| 7 | * from the very beginning of boot. | ||
| 8 | * | ||
| 9 | * This may be have been done by just instaniating another HVC console, | ||
| 10 | * but HVC's blocksize of 16 bytes is annoying and painful to performance. | ||
| 11 | * | ||
| 12 | * A more efficient transport could be built based on the virtio block driver | ||
| 13 | * but it requires some changes in the 9p transport model (which are in | ||
| 14 | * progress) | ||
| 15 | * | ||
| 16 | */ | ||
| 17 | /* | ||
| 18 | * Copyright (C) 2007 Eric Van Hensbergen, IBM Corporation | ||
| 19 | * | ||
| 20 | * Based on virtio console driver | ||
| 21 | * Copyright (C) 2006, 2007 Rusty Russell, IBM Corporation | ||
| 22 | * | ||
| 23 | * This program is free software; you can redistribute it and/or modify | ||
| 24 | * it under the terms of the GNU General Public License version 2 | ||
| 25 | * as published by the Free Software Foundation. | ||
| 26 | * | ||
| 27 | * This program is distributed in the hope that it will be useful, | ||
| 28 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 29 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 30 | * GNU General Public License for more details. | ||
| 31 | * | ||
| 32 | * You should have received a copy of the GNU General Public License | ||
| 33 | * along with this program; if not, write to: | ||
| 34 | * Free Software Foundation | ||
| 35 | * 51 Franklin Street, Fifth Floor | ||
| 36 | * Boston, MA 02111-1301 USA | ||
| 37 | * | ||
| 38 | */ | ||
| 39 | |||
| 40 | #include <linux/in.h> | ||
| 41 | #include <linux/module.h> | ||
| 42 | #include <linux/net.h> | ||
| 43 | #include <linux/ipv6.h> | ||
| 44 | #include <linux/errno.h> | ||
| 45 | #include <linux/kernel.h> | ||
| 46 | #include <linux/un.h> | ||
| 47 | #include <linux/uaccess.h> | ||
| 48 | #include <linux/inet.h> | ||
| 49 | #include <linux/idr.h> | ||
| 50 | #include <linux/file.h> | ||
| 51 | #include <net/9p/9p.h> | ||
| 52 | #include <linux/parser.h> | ||
| 53 | #include <net/9p/transport.h> | ||
| 54 | #include <linux/scatterlist.h> | ||
| 55 | #include <linux/virtio.h> | ||
| 56 | #include <linux/virtio_9p.h> | ||
| 57 | |||
| 58 | /* a single mutex to manage channel initialization and attachment */ | ||
| 59 | static DECLARE_MUTEX(virtio_9p_lock); | ||
| 60 | /* global which tracks highest initialized channel */ | ||
| 61 | static int chan_index; | ||
| 62 | |||
| 63 | /* We keep all per-channel information in a structure. | ||
| 64 | * This structure is allocated within the devices dev->mem space. | ||
| 65 | * A pointer to the structure will get put in the transport private. | ||
| 66 | */ | ||
| 67 | static struct virtio_chan { | ||
| 68 | bool initialized; /* channel is initialized */ | ||
| 69 | bool inuse; /* channel is in use */ | ||
| 70 | |||
| 71 | struct virtqueue *in_vq, *out_vq; | ||
| 72 | struct virtio_device *vdev; | ||
| 73 | |||
| 74 | /* This is our input buffer, and how much data is left in it. */ | ||
| 75 | unsigned int in_len; | ||
| 76 | char *in, *inbuf; | ||
| 77 | |||
| 78 | wait_queue_head_t wq; /* waitq for buffer */ | ||
| 79 | } channels[MAX_9P_CHAN]; | ||
| 80 | |||
| 81 | /* How many bytes left in this page. */ | ||
| 82 | static unsigned int rest_of_page(void *data) | ||
| 83 | { | ||
| 84 | return PAGE_SIZE - ((unsigned long)data % PAGE_SIZE); | ||
| 85 | } | ||
| 86 | |||
| 87 | static int p9_virtio_write(struct p9_trans *trans, void *buf, int count) | ||
| 88 | { | ||
| 89 | struct virtio_chan *chan = (struct virtio_chan *) trans->priv; | ||
| 90 | struct virtqueue *out_vq = chan->out_vq; | ||
| 91 | struct scatterlist sg[1]; | ||
| 92 | unsigned int len; | ||
| 93 | |||
| 94 | P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio write (%d)\n", count); | ||
| 95 | |||
| 96 | /* keep it simple - make sure we don't overflow a page */ | ||
| 97 | if (rest_of_page(buf) < count) | ||
| 98 | count = rest_of_page(buf); | ||
| 99 | |||
| 100 | sg_init_one(sg, buf, count); | ||
| 101 | |||
| 102 | /* add_buf wants a token to identify this buffer: we hand it any | ||
| 103 | * non-NULL pointer, since there's only ever one buffer. */ | ||
| 104 | if (out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, (void *)1) == 0) { | ||
| 105 | /* Tell Host to go! */ | ||
| 106 | out_vq->vq_ops->kick(out_vq); | ||
| 107 | /* Chill out until it's done with the buffer. */ | ||
| 108 | while (!out_vq->vq_ops->get_buf(out_vq, &len)) | ||
| 109 | cpu_relax(); | ||
| 110 | } | ||
| 111 | |||
| 112 | P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio wrote (%d)\n", count); | ||
| 113 | |||
| 114 | /* We're expected to return the amount of data we wrote: all of it. */ | ||
| 115 | return count; | ||
| 116 | } | ||
| 117 | |||
| 118 | /* Create a scatter-gather list representing our input buffer and put it in the | ||
| 119 | * queue. */ | ||
| 120 | static void add_inbuf(struct virtio_chan *chan) | ||
| 121 | { | ||
| 122 | struct scatterlist sg[1]; | ||
| 123 | |||
| 124 | sg_init_one(sg, chan->inbuf, PAGE_SIZE); | ||
| 125 | |||
| 126 | /* We should always be able to add one buffer to an empty queue. */ | ||
| 127 | if (chan->in_vq->vq_ops->add_buf(chan->in_vq, sg, 0, 1, chan->inbuf)) | ||
| 128 | BUG(); | ||
| 129 | chan->in_vq->vq_ops->kick(chan->in_vq); | ||
| 130 | } | ||
| 131 | |||
| 132 | static int p9_virtio_read(struct p9_trans *trans, void *buf, int count) | ||
| 133 | { | ||
| 134 | struct virtio_chan *chan = (struct virtio_chan *) trans->priv; | ||
| 135 | struct virtqueue *in_vq = chan->in_vq; | ||
| 136 | |||
| 137 | P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio read (%d)\n", count); | ||
| 138 | |||
| 139 | /* If we don't have an input queue yet, we can't get input. */ | ||
| 140 | BUG_ON(!in_vq); | ||
| 141 | |||
| 142 | /* No buffer? Try to get one. */ | ||
| 143 | if (!chan->in_len) { | ||
| 144 | chan->in = in_vq->vq_ops->get_buf(in_vq, &chan->in_len); | ||
| 145 | if (!chan->in) | ||
| 146 | return 0; | ||
| 147 | } | ||
| 148 | |||
| 149 | /* You want more than we have to give? Well, try wanting less! */ | ||
| 150 | if (chan->in_len < count) | ||
| 151 | count = chan->in_len; | ||
| 152 | |||
| 153 | /* Copy across to their buffer and increment offset. */ | ||
| 154 | memcpy(buf, chan->in, count); | ||
| 155 | chan->in += count; | ||
| 156 | chan->in_len -= count; | ||
| 157 | |||
| 158 | /* Finished? Re-register buffer so Host will use it again. */ | ||
| 159 | if (chan->in_len == 0) | ||
| 160 | add_inbuf(chan); | ||
| 161 | |||
| 162 | P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio finished read (%d)\n", | ||
| 163 | count); | ||
| 164 | |||
| 165 | return count; | ||
| 166 | } | ||
| 167 | |||
| 168 | /* The poll function is used by 9p transports to determine if there | ||
| 169 | * is there is activity available on a particular channel. In our case | ||
| 170 | * we use it to wait for a callback from the input routines. | ||
| 171 | */ | ||
| 172 | static unsigned int | ||
| 173 | p9_virtio_poll(struct p9_trans *trans, struct poll_table_struct *pt) | ||
| 174 | { | ||
| 175 | struct virtio_chan *chan = (struct virtio_chan *)trans->priv; | ||
| 176 | struct virtqueue *in_vq = chan->in_vq; | ||
| 177 | int ret = POLLOUT; /* we can always handle more output */ | ||
| 178 | |||
| 179 | poll_wait(NULL, &chan->wq, pt); | ||
| 180 | |||
| 181 | /* No buffer? Try to get one. */ | ||
| 182 | if (!chan->in_len) | ||
| 183 | chan->in = in_vq->vq_ops->get_buf(in_vq, &chan->in_len); | ||
| 184 | |||
| 185 | if (chan->in_len) | ||
| 186 | ret |= POLLIN; | ||
| 187 | |||
| 188 | return ret; | ||
| 189 | } | ||
| 190 | |||
| 191 | static void p9_virtio_close(struct p9_trans *trans) | ||
| 192 | { | ||
| 193 | struct virtio_chan *chan = trans->priv; | ||
| 194 | |||
| 195 | down(&virtio_9p_lock); | ||
| 196 | chan->inuse = false; | ||
| 197 | up(&virtio_9p_lock); | ||
| 198 | |||
| 199 | kfree(trans); | ||
| 200 | } | ||
| 201 | |||
| 202 | static bool p9_virtio_intr(struct virtqueue *q) | ||
| 203 | { | ||
| 204 | struct virtio_chan *chan = q->vdev->priv; | ||
| 205 | |||
| 206 | P9_DPRINTK(P9_DEBUG_TRANS, "9p poll_wakeup: %p\n", &chan->wq); | ||
| 207 | wake_up_interruptible(&chan->wq); | ||
| 208 | |||
| 209 | return true; | ||
| 210 | } | ||
| 211 | |||
| 212 | static int p9_virtio_probe(struct virtio_device *dev) | ||
| 213 | { | ||
| 214 | int err; | ||
| 215 | struct virtio_chan *chan; | ||
| 216 | int index; | ||
| 217 | |||
| 218 | down(&virtio_9p_lock); | ||
| 219 | index = chan_index++; | ||
| 220 | chan = &channels[index]; | ||
| 221 | up(&virtio_9p_lock); | ||
| 222 | |||
| 223 | if (chan_index > MAX_9P_CHAN) { | ||
| 224 | printk(KERN_ERR "9p: virtio: Maximum channels exceeded\n"); | ||
| 225 | BUG(); | ||
| 226 | } | ||
| 227 | |||
| 228 | chan->vdev = dev; | ||
| 229 | |||
| 230 | /* This is the scratch page we use to receive console input */ | ||
| 231 | chan->inbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
| 232 | if (!chan->inbuf) { | ||
| 233 | err = -ENOMEM; | ||
| 234 | goto fail; | ||
| 235 | } | ||
| 236 | |||
| 237 | /* Find the input queue. */ | ||
| 238 | dev->priv = chan; | ||
| 239 | chan->in_vq = dev->config->find_vq(dev, p9_virtio_intr); | ||
| 240 | if (IS_ERR(chan->in_vq)) { | ||
| 241 | err = PTR_ERR(chan->in_vq); | ||
| 242 | goto free; | ||
| 243 | } | ||
| 244 | |||
| 245 | chan->out_vq = dev->config->find_vq(dev, NULL); | ||
| 246 | if (IS_ERR(chan->out_vq)) { | ||
| 247 | err = PTR_ERR(chan->out_vq); | ||
| 248 | goto free_in_vq; | ||
| 249 | } | ||
| 250 | |||
| 251 | init_waitqueue_head(&chan->wq); | ||
| 252 | |||
| 253 | /* Register the input buffer the first time. */ | ||
| 254 | add_inbuf(chan); | ||
| 255 | chan->inuse = false; | ||
| 256 | chan->initialized = true; | ||
| 257 | |||
| 258 | return 0; | ||
| 259 | |||
| 260 | free_in_vq: | ||
| 261 | dev->config->del_vq(chan->in_vq); | ||
| 262 | free: | ||
| 263 | kfree(chan->inbuf); | ||
| 264 | fail: | ||
| 265 | down(&virtio_9p_lock); | ||
| 266 | chan_index--; | ||
| 267 | up(&virtio_9p_lock); | ||
| 268 | return err; | ||
| 269 | } | ||
| 270 | |||
| 271 | /* This sets up a transport channel for 9p communication. Right now | ||
| 272 | * we only match the first available channel, but eventually we couldlook up | ||
| 273 | * alternate channels by matching devname versus a virtio_config entry. | ||
| 274 | * We use a simple reference count mechanism to ensure that only a single | ||
| 275 | * mount has a channel open at a time. */ | ||
| 276 | static struct p9_trans *p9_virtio_create(const char *devname, char *args) | ||
| 277 | { | ||
| 278 | struct p9_trans *trans; | ||
| 279 | int index = 0; | ||
| 280 | struct virtio_chan *chan = channels; | ||
| 281 | |||
| 282 | down(&virtio_9p_lock); | ||
| 283 | while (index < MAX_9P_CHAN) { | ||
| 284 | if (chan->initialized && !chan->inuse) { | ||
| 285 | chan->inuse = true; | ||
| 286 | break; | ||
| 287 | } else { | ||
| 288 | index++; | ||
| 289 | chan = &channels[index]; | ||
| 290 | } | ||
| 291 | } | ||
| 292 | up(&virtio_9p_lock); | ||
| 293 | |||
| 294 | if (index >= MAX_9P_CHAN) { | ||
| 295 | printk(KERN_ERR "9p: virtio: couldn't find a free channel\n"); | ||
| 296 | return NULL; | ||
| 297 | } | ||
| 298 | |||
| 299 | trans = kmalloc(sizeof(struct p9_trans), GFP_KERNEL); | ||
| 300 | if (!trans) { | ||
| 301 | printk(KERN_ERR "9p: couldn't allocate transport\n"); | ||
| 302 | return ERR_PTR(-ENOMEM); | ||
| 303 | } | ||
| 304 | |||
| 305 | trans->write = p9_virtio_write; | ||
| 306 | trans->read = p9_virtio_read; | ||
| 307 | trans->close = p9_virtio_close; | ||
| 308 | trans->poll = p9_virtio_poll; | ||
| 309 | trans->priv = chan; | ||
| 310 | |||
| 311 | return trans; | ||
| 312 | } | ||
| 313 | |||
| 314 | #define VIRTIO_ID_9P 9 | ||
| 315 | |||
| 316 | static struct virtio_device_id id_table[] = { | ||
| 317 | { VIRTIO_ID_9P, VIRTIO_DEV_ANY_ID }, | ||
| 318 | { 0 }, | ||
| 319 | }; | ||
| 320 | |||
| 321 | /* The standard "struct lguest_driver": */ | ||
| 322 | static struct virtio_driver p9_virtio_drv = { | ||
| 323 | .driver.name = KBUILD_MODNAME, | ||
| 324 | .driver.owner = THIS_MODULE, | ||
| 325 | .id_table = id_table, | ||
| 326 | .probe = p9_virtio_probe, | ||
| 327 | }; | ||
| 328 | |||
| 329 | static struct p9_trans_module p9_virtio_trans = { | ||
| 330 | .name = "virtio", | ||
| 331 | .create = p9_virtio_create, | ||
| 332 | .maxsize = PAGE_SIZE, | ||
| 333 | .def = 0, | ||
| 334 | }; | ||
| 335 | |||
| 336 | /* The standard init function */ | ||
| 337 | static int __init p9_virtio_init(void) | ||
| 338 | { | ||
| 339 | int count; | ||
| 340 | |||
| 341 | for (count = 0; count < MAX_9P_CHAN; count++) | ||
| 342 | channels[count].initialized = false; | ||
| 343 | |||
| 344 | v9fs_register_trans(&p9_virtio_trans); | ||
| 345 | return register_virtio_driver(&p9_virtio_drv); | ||
| 346 | } | ||
| 347 | |||
| 348 | module_init(p9_virtio_init); | ||
| 349 | |||
| 350 | MODULE_DEVICE_TABLE(virtio, id_table); | ||
| 351 | MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>"); | ||
| 352 | MODULE_DESCRIPTION("Virtio 9p Transport"); | ||
| 353 | MODULE_LICENSE("GPL"); | ||
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 7c0b5151d526..e0d37d6dc1f8 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c | |||
| @@ -1044,7 +1044,7 @@ static int atalk_create(struct net *net, struct socket *sock, int protocol) | |||
| 1044 | if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM) | 1044 | if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM) |
| 1045 | goto out; | 1045 | goto out; |
| 1046 | rc = -ENOMEM; | 1046 | rc = -ENOMEM; |
| 1047 | sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto, 1); | 1047 | sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto); |
| 1048 | if (!sk) | 1048 | if (!sk) |
| 1049 | goto out; | 1049 | goto out; |
| 1050 | rc = 0; | 1050 | rc = 0; |
diff --git a/net/atm/common.c b/net/atm/common.c index e166d9e0ffd9..eba09a04f6bf 100644 --- a/net/atm/common.c +++ b/net/atm/common.c | |||
| @@ -133,7 +133,7 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family) | |||
| 133 | sock->sk = NULL; | 133 | sock->sk = NULL; |
| 134 | if (sock->type == SOCK_STREAM) | 134 | if (sock->type == SOCK_STREAM) |
| 135 | return -EINVAL; | 135 | return -EINVAL; |
| 136 | sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto, 1); | 136 | sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto); |
| 137 | if (!sk) | 137 | if (!sk) |
| 138 | return -ENOMEM; | 138 | return -ENOMEM; |
| 139 | sock_init_data(sock, sk); | 139 | sock_init_data(sock, sk); |
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 993e5c75e909..8378afd54b30 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c | |||
| @@ -836,7 +836,8 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol) | |||
| 836 | return -ESOCKTNOSUPPORT; | 836 | return -ESOCKTNOSUPPORT; |
| 837 | } | 837 | } |
| 838 | 838 | ||
| 839 | if ((sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto, 1)) == NULL) | 839 | sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto); |
| 840 | if (sk == NULL) | ||
| 840 | return -ENOMEM; | 841 | return -ENOMEM; |
| 841 | 842 | ||
| 842 | ax25 = sk->sk_protinfo = ax25_create_cb(); | 843 | ax25 = sk->sk_protinfo = ax25_create_cb(); |
| @@ -861,7 +862,8 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev) | |||
| 861 | struct sock *sk; | 862 | struct sock *sk; |
| 862 | ax25_cb *ax25, *oax25; | 863 | ax25_cb *ax25, *oax25; |
| 863 | 864 | ||
| 864 | if ((sk = sk_alloc(osk->sk_net, PF_AX25, GFP_ATOMIC, osk->sk_prot, 1)) == NULL) | 865 | sk = sk_alloc(osk->sk_net, PF_AX25, GFP_ATOMIC, osk->sk_prot); |
| 866 | if (sk == NULL) | ||
| 865 | return NULL; | 867 | return NULL; |
| 866 | 868 | ||
| 867 | if ((ax25 = ax25_create_cb()) == NULL) { | 869 | if ((ax25 = ax25_create_cb()) == NULL) { |
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c index f718965f296c..9ebd3c64474d 100644 --- a/net/bluetooth/bnep/sock.c +++ b/net/bluetooth/bnep/sock.c | |||
| @@ -213,7 +213,7 @@ static int bnep_sock_create(struct net *net, struct socket *sock, int protocol) | |||
| 213 | if (sock->type != SOCK_RAW) | 213 | if (sock->type != SOCK_RAW) |
| 214 | return -ESOCKTNOSUPPORT; | 214 | return -ESOCKTNOSUPPORT; |
| 215 | 215 | ||
| 216 | sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto, 1); | 216 | sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto); |
| 217 | if (!sk) | 217 | if (!sk) |
| 218 | return -ENOMEM; | 218 | return -ENOMEM; |
| 219 | 219 | ||
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c index cf700c20d11e..783edab12ce8 100644 --- a/net/bluetooth/cmtp/sock.c +++ b/net/bluetooth/cmtp/sock.c | |||
| @@ -204,7 +204,7 @@ static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol) | |||
| 204 | if (sock->type != SOCK_RAW) | 204 | if (sock->type != SOCK_RAW) |
| 205 | return -ESOCKTNOSUPPORT; | 205 | return -ESOCKTNOSUPPORT; |
| 206 | 206 | ||
| 207 | sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto, 1); | 207 | sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto); |
| 208 | if (!sk) | 208 | if (!sk) |
| 209 | return -ENOMEM; | 209 | return -ENOMEM; |
| 210 | 210 | ||
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 5fdfc9a67d39..9483320f6dad 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c | |||
| @@ -78,11 +78,11 @@ void hci_acl_connect(struct hci_conn *conn) | |||
| 78 | 78 | ||
| 79 | cp.pkt_type = cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK); | 79 | cp.pkt_type = cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK); |
| 80 | if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER)) | 80 | if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER)) |
| 81 | cp.role_switch = 0x01; | 81 | cp.role_switch = 0x01; |
| 82 | else | 82 | else |
| 83 | cp.role_switch = 0x00; | 83 | cp.role_switch = 0x00; |
| 84 | 84 | ||
| 85 | hci_send_cmd(hdev, OGF_LINK_CTL, OCF_CREATE_CONN, sizeof(cp), &cp); | 85 | hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp); |
| 86 | } | 86 | } |
| 87 | 87 | ||
| 88 | static void hci_acl_connect_cancel(struct hci_conn *conn) | 88 | static void hci_acl_connect_cancel(struct hci_conn *conn) |
| @@ -95,8 +95,7 @@ static void hci_acl_connect_cancel(struct hci_conn *conn) | |||
| 95 | return; | 95 | return; |
| 96 | 96 | ||
| 97 | bacpy(&cp.bdaddr, &conn->dst); | 97 | bacpy(&cp.bdaddr, &conn->dst); |
| 98 | hci_send_cmd(conn->hdev, OGF_LINK_CTL, | 98 | hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp); |
| 99 | OCF_CREATE_CONN_CANCEL, sizeof(cp), &cp); | ||
| 100 | } | 99 | } |
| 101 | 100 | ||
| 102 | void hci_acl_disconn(struct hci_conn *conn, __u8 reason) | 101 | void hci_acl_disconn(struct hci_conn *conn, __u8 reason) |
| @@ -109,8 +108,7 @@ void hci_acl_disconn(struct hci_conn *conn, __u8 reason) | |||
| 109 | 108 | ||
| 110 | cp.handle = cpu_to_le16(conn->handle); | 109 | cp.handle = cpu_to_le16(conn->handle); |
| 111 | cp.reason = reason; | 110 | cp.reason = reason; |
| 112 | hci_send_cmd(conn->hdev, OGF_LINK_CTL, | 111 | hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp); |
| 113 | OCF_DISCONNECT, sizeof(cp), &cp); | ||
| 114 | } | 112 | } |
| 115 | 113 | ||
| 116 | void hci_add_sco(struct hci_conn *conn, __u16 handle) | 114 | void hci_add_sco(struct hci_conn *conn, __u16 handle) |
| @@ -126,7 +124,29 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle) | |||
| 126 | cp.handle = cpu_to_le16(handle); | 124 | cp.handle = cpu_to_le16(handle); |
| 127 | cp.pkt_type = cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK); | 125 | cp.pkt_type = cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK); |
| 128 | 126 | ||
| 129 | hci_send_cmd(hdev, OGF_LINK_CTL, OCF_ADD_SCO, sizeof(cp), &cp); | 127 | hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp); |
| 128 | } | ||
| 129 | |||
| 130 | void hci_setup_sync(struct hci_conn *conn, __u16 handle) | ||
| 131 | { | ||
| 132 | struct hci_dev *hdev = conn->hdev; | ||
| 133 | struct hci_cp_setup_sync_conn cp; | ||
| 134 | |||
| 135 | BT_DBG("%p", conn); | ||
| 136 | |||
| 137 | conn->state = BT_CONNECT; | ||
| 138 | conn->out = 1; | ||
| 139 | |||
| 140 | cp.handle = cpu_to_le16(handle); | ||
| 141 | cp.pkt_type = cpu_to_le16(hdev->esco_type); | ||
| 142 | |||
| 143 | cp.tx_bandwidth = cpu_to_le32(0x00001f40); | ||
| 144 | cp.rx_bandwidth = cpu_to_le32(0x00001f40); | ||
| 145 | cp.max_latency = cpu_to_le16(0xffff); | ||
| 146 | cp.voice_setting = cpu_to_le16(hdev->voice_setting); | ||
| 147 | cp.retrans_effort = 0xff; | ||
| 148 | |||
| 149 | hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp); | ||
| 130 | } | 150 | } |
| 131 | 151 | ||
| 132 | static void hci_conn_timeout(unsigned long arg) | 152 | static void hci_conn_timeout(unsigned long arg) |
| @@ -143,7 +163,10 @@ static void hci_conn_timeout(unsigned long arg) | |||
| 143 | 163 | ||
| 144 | switch (conn->state) { | 164 | switch (conn->state) { |
| 145 | case BT_CONNECT: | 165 | case BT_CONNECT: |
| 146 | hci_acl_connect_cancel(conn); | 166 | if (conn->type == ACL_LINK) |
| 167 | hci_acl_connect_cancel(conn); | ||
| 168 | else | ||
| 169 | hci_acl_disconn(conn, 0x13); | ||
| 147 | break; | 170 | break; |
| 148 | case BT_CONNECTED: | 171 | case BT_CONNECTED: |
| 149 | hci_acl_disconn(conn, 0x13); | 172 | hci_acl_disconn(conn, 0x13); |
| @@ -330,8 +353,12 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst) | |||
| 330 | hci_conn_hold(sco); | 353 | hci_conn_hold(sco); |
| 331 | 354 | ||
| 332 | if (acl->state == BT_CONNECTED && | 355 | if (acl->state == BT_CONNECTED && |
| 333 | (sco->state == BT_OPEN || sco->state == BT_CLOSED)) | 356 | (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { |
| 334 | hci_add_sco(sco, acl->handle); | 357 | if (lmp_esco_capable(hdev)) |
| 358 | hci_setup_sync(sco, acl->handle); | ||
| 359 | else | ||
| 360 | hci_add_sco(sco, acl->handle); | ||
| 361 | } | ||
| 335 | 362 | ||
| 336 | return sco; | 363 | return sco; |
| 337 | } | 364 | } |
| @@ -348,7 +375,7 @@ int hci_conn_auth(struct hci_conn *conn) | |||
| 348 | if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { | 375 | if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { |
| 349 | struct hci_cp_auth_requested cp; | 376 | struct hci_cp_auth_requested cp; |
| 350 | cp.handle = cpu_to_le16(conn->handle); | 377 | cp.handle = cpu_to_le16(conn->handle); |
| 351 | hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_AUTH_REQUESTED, sizeof(cp), &cp); | 378 | hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); |
| 352 | } | 379 | } |
| 353 | return 0; | 380 | return 0; |
| 354 | } | 381 | } |
| @@ -369,7 +396,7 @@ int hci_conn_encrypt(struct hci_conn *conn) | |||
| 369 | struct hci_cp_set_conn_encrypt cp; | 396 | struct hci_cp_set_conn_encrypt cp; |
| 370 | cp.handle = cpu_to_le16(conn->handle); | 397 | cp.handle = cpu_to_le16(conn->handle); |
| 371 | cp.encrypt = 1; | 398 | cp.encrypt = 1; |
| 372 | hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_SET_CONN_ENCRYPT, sizeof(cp), &cp); | 399 | hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), &cp); |
| 373 | } | 400 | } |
| 374 | return 0; | 401 | return 0; |
| 375 | } | 402 | } |
| @@ -383,7 +410,7 @@ int hci_conn_change_link_key(struct hci_conn *conn) | |||
| 383 | if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { | 410 | if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { |
| 384 | struct hci_cp_change_conn_link_key cp; | 411 | struct hci_cp_change_conn_link_key cp; |
| 385 | cp.handle = cpu_to_le16(conn->handle); | 412 | cp.handle = cpu_to_le16(conn->handle); |
| 386 | hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_CHANGE_CONN_LINK_KEY, sizeof(cp), &cp); | 413 | hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY, sizeof(cp), &cp); |
| 387 | } | 414 | } |
| 388 | return 0; | 415 | return 0; |
| 389 | } | 416 | } |
| @@ -401,7 +428,7 @@ int hci_conn_switch_role(struct hci_conn *conn, uint8_t role) | |||
| 401 | struct hci_cp_switch_role cp; | 428 | struct hci_cp_switch_role cp; |
| 402 | bacpy(&cp.bdaddr, &conn->dst); | 429 | bacpy(&cp.bdaddr, &conn->dst); |
| 403 | cp.role = role; | 430 | cp.role = role; |
| 404 | hci_send_cmd(conn->hdev, OGF_LINK_POLICY, OCF_SWITCH_ROLE, sizeof(cp), &cp); | 431 | hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp); |
| 405 | } | 432 | } |
| 406 | return 0; | 433 | return 0; |
| 407 | } | 434 | } |
| @@ -423,8 +450,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn) | |||
| 423 | if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { | 450 | if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { |
| 424 | struct hci_cp_exit_sniff_mode cp; | 451 | struct hci_cp_exit_sniff_mode cp; |
| 425 | cp.handle = cpu_to_le16(conn->handle); | 452 | cp.handle = cpu_to_le16(conn->handle); |
| 426 | hci_send_cmd(hdev, OGF_LINK_POLICY, | 453 | hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp); |
| 427 | OCF_EXIT_SNIFF_MODE, sizeof(cp), &cp); | ||
| 428 | } | 454 | } |
| 429 | 455 | ||
| 430 | timer: | 456 | timer: |
| @@ -455,8 +481,7 @@ void hci_conn_enter_sniff_mode(struct hci_conn *conn) | |||
| 455 | cp.max_latency = cpu_to_le16(0); | 481 | cp.max_latency = cpu_to_le16(0); |
| 456 | cp.min_remote_timeout = cpu_to_le16(0); | 482 | cp.min_remote_timeout = cpu_to_le16(0); |
| 457 | cp.min_local_timeout = cpu_to_le16(0); | 483 | cp.min_local_timeout = cpu_to_le16(0); |
| 458 | hci_send_cmd(hdev, OGF_LINK_POLICY, | 484 | hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp); |
| 459 | OCF_SNIFF_SUBRATE, sizeof(cp), &cp); | ||
| 460 | } | 485 | } |
| 461 | 486 | ||
| 462 | if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { | 487 | if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { |
| @@ -466,8 +491,7 @@ void hci_conn_enter_sniff_mode(struct hci_conn *conn) | |||
| 466 | cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); | 491 | cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); |
| 467 | cp.attempt = cpu_to_le16(4); | 492 | cp.attempt = cpu_to_le16(4); |
| 468 | cp.timeout = cpu_to_le16(1); | 493 | cp.timeout = cpu_to_le16(1); |
| 469 | hci_send_cmd(hdev, OGF_LINK_POLICY, | 494 | hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp); |
| 470 | OCF_SNIFF_MODE, sizeof(cp), &cp); | ||
| 471 | } | 495 | } |
| 472 | } | 496 | } |
| 473 | 497 | ||
| @@ -493,6 +517,22 @@ void hci_conn_hash_flush(struct hci_dev *hdev) | |||
| 493 | } | 517 | } |
| 494 | } | 518 | } |
| 495 | 519 | ||
| 520 | /* Check pending connect attempts */ | ||
| 521 | void hci_conn_check_pending(struct hci_dev *hdev) | ||
| 522 | { | ||
| 523 | struct hci_conn *conn; | ||
| 524 | |||
| 525 | BT_DBG("hdev %s", hdev->name); | ||
| 526 | |||
| 527 | hci_dev_lock(hdev); | ||
| 528 | |||
| 529 | conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2); | ||
| 530 | if (conn) | ||
| 531 | hci_acl_connect(conn); | ||
| 532 | |||
| 533 | hci_dev_unlock(hdev); | ||
| 534 | } | ||
| 535 | |||
| 496 | int hci_get_conn_list(void __user *arg) | 536 | int hci_get_conn_list(void __user *arg) |
| 497 | { | 537 | { |
| 498 | struct hci_conn_list_req req, *cl; | 538 | struct hci_conn_list_req req, *cl; |
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 18e3afc964df..372b0d3b75a8 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c | |||
| @@ -176,7 +176,7 @@ static void hci_reset_req(struct hci_dev *hdev, unsigned long opt) | |||
| 176 | BT_DBG("%s %ld", hdev->name, opt); | 176 | BT_DBG("%s %ld", hdev->name, opt); |
| 177 | 177 | ||
| 178 | /* Reset device */ | 178 | /* Reset device */ |
| 179 | hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL); | 179 | hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); |
| 180 | } | 180 | } |
| 181 | 181 | ||
| 182 | static void hci_init_req(struct hci_dev *hdev, unsigned long opt) | 182 | static void hci_init_req(struct hci_dev *hdev, unsigned long opt) |
| @@ -202,16 +202,16 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt) | |||
| 202 | 202 | ||
| 203 | /* Reset */ | 203 | /* Reset */ |
| 204 | if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks)) | 204 | if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks)) |
| 205 | hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL); | 205 | hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); |
| 206 | 206 | ||
| 207 | /* Read Local Supported Features */ | 207 | /* Read Local Supported Features */ |
| 208 | hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL); | 208 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); |
| 209 | 209 | ||
| 210 | /* Read Local Version */ | 210 | /* Read Local Version */ |
| 211 | hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_VERSION, 0, NULL); | 211 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL); |
| 212 | 212 | ||
| 213 | /* Read Buffer Size (ACL mtu, max pkt, etc.) */ | 213 | /* Read Buffer Size (ACL mtu, max pkt, etc.) */ |
| 214 | hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL); | 214 | hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL); |
| 215 | 215 | ||
| 216 | #if 0 | 216 | #if 0 |
| 217 | /* Host buffer size */ | 217 | /* Host buffer size */ |
| @@ -221,29 +221,35 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt) | |||
| 221 | cp.sco_mtu = HCI_MAX_SCO_SIZE; | 221 | cp.sco_mtu = HCI_MAX_SCO_SIZE; |
| 222 | cp.acl_max_pkt = cpu_to_le16(0xffff); | 222 | cp.acl_max_pkt = cpu_to_le16(0xffff); |
| 223 | cp.sco_max_pkt = cpu_to_le16(0xffff); | 223 | cp.sco_max_pkt = cpu_to_le16(0xffff); |
| 224 | hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp); | 224 | hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp); |
| 225 | } | 225 | } |
| 226 | #endif | 226 | #endif |
| 227 | 227 | ||
| 228 | /* Read BD Address */ | 228 | /* Read BD Address */ |
| 229 | hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL); | 229 | hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL); |
| 230 | |||
| 231 | /* Read Class of Device */ | ||
| 232 | hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL); | ||
| 233 | |||
| 234 | /* Read Local Name */ | ||
| 235 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL); | ||
| 230 | 236 | ||
| 231 | /* Read Voice Setting */ | 237 | /* Read Voice Setting */ |
| 232 | hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL); | 238 | hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL); |
| 233 | 239 | ||
| 234 | /* Optional initialization */ | 240 | /* Optional initialization */ |
| 235 | 241 | ||
| 236 | /* Clear Event Filters */ | 242 | /* Clear Event Filters */ |
| 237 | flt_type = HCI_FLT_CLEAR_ALL; | 243 | flt_type = HCI_FLT_CLEAR_ALL; |
| 238 | hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, 1, &flt_type); | 244 | hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type); |
| 239 | 245 | ||
| 240 | /* Page timeout ~20 secs */ | 246 | /* Page timeout ~20 secs */ |
| 241 | param = cpu_to_le16(0x8000); | 247 | param = cpu_to_le16(0x8000); |
| 242 | hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, ¶m); | 248 | hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, ¶m); |
| 243 | 249 | ||
| 244 | /* Connection accept timeout ~20 secs */ | 250 | /* Connection accept timeout ~20 secs */ |
| 245 | param = cpu_to_le16(0x7d00); | 251 | param = cpu_to_le16(0x7d00); |
| 246 | hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, ¶m); | 252 | hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m); |
| 247 | } | 253 | } |
| 248 | 254 | ||
| 249 | static void hci_scan_req(struct hci_dev *hdev, unsigned long opt) | 255 | static void hci_scan_req(struct hci_dev *hdev, unsigned long opt) |
| @@ -253,7 +259,7 @@ static void hci_scan_req(struct hci_dev *hdev, unsigned long opt) | |||
| 253 | BT_DBG("%s %x", hdev->name, scan); | 259 | BT_DBG("%s %x", hdev->name, scan); |
| 254 | 260 | ||
| 255 | /* Inquiry and Page scans */ | 261 | /* Inquiry and Page scans */ |
| 256 | hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan); | 262 | hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); |
| 257 | } | 263 | } |
| 258 | 264 | ||
| 259 | static void hci_auth_req(struct hci_dev *hdev, unsigned long opt) | 265 | static void hci_auth_req(struct hci_dev *hdev, unsigned long opt) |
| @@ -263,7 +269,7 @@ static void hci_auth_req(struct hci_dev *hdev, unsigned long opt) | |||
| 263 | BT_DBG("%s %x", hdev->name, auth); | 269 | BT_DBG("%s %x", hdev->name, auth); |
| 264 | 270 | ||
| 265 | /* Authentication */ | 271 | /* Authentication */ |
| 266 | hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth); | 272 | hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth); |
| 267 | } | 273 | } |
| 268 | 274 | ||
| 269 | static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt) | 275 | static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt) |
| @@ -273,7 +279,7 @@ static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt) | |||
| 273 | BT_DBG("%s %x", hdev->name, encrypt); | 279 | BT_DBG("%s %x", hdev->name, encrypt); |
| 274 | 280 | ||
| 275 | /* Authentication */ | 281 | /* Authentication */ |
| 276 | hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt); | 282 | hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt); |
| 277 | } | 283 | } |
| 278 | 284 | ||
| 279 | /* Get HCI device by index. | 285 | /* Get HCI device by index. |
| @@ -384,7 +390,7 @@ static void hci_inq_req(struct hci_dev *hdev, unsigned long opt) | |||
| 384 | memcpy(&cp.lap, &ir->lap, 3); | 390 | memcpy(&cp.lap, &ir->lap, 3); |
| 385 | cp.length = ir->length; | 391 | cp.length = ir->length; |
| 386 | cp.num_rsp = ir->num_rsp; | 392 | cp.num_rsp = ir->num_rsp; |
| 387 | hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp); | 393 | hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp); |
| 388 | } | 394 | } |
| 389 | 395 | ||
| 390 | int hci_inquiry(void __user *arg) | 396 | int hci_inquiry(void __user *arg) |
| @@ -1111,13 +1117,13 @@ static int hci_send_frame(struct sk_buff *skb) | |||
| 1111 | } | 1117 | } |
| 1112 | 1118 | ||
| 1113 | /* Send HCI command */ | 1119 | /* Send HCI command */ |
| 1114 | int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param) | 1120 | int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param) |
| 1115 | { | 1121 | { |
| 1116 | int len = HCI_COMMAND_HDR_SIZE + plen; | 1122 | int len = HCI_COMMAND_HDR_SIZE + plen; |
| 1117 | struct hci_command_hdr *hdr; | 1123 | struct hci_command_hdr *hdr; |
| 1118 | struct sk_buff *skb; | 1124 | struct sk_buff *skb; |
| 1119 | 1125 | ||
| 1120 | BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen); | 1126 | BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen); |
| 1121 | 1127 | ||
| 1122 | skb = bt_skb_alloc(len, GFP_ATOMIC); | 1128 | skb = bt_skb_alloc(len, GFP_ATOMIC); |
| 1123 | if (!skb) { | 1129 | if (!skb) { |
| @@ -1126,7 +1132,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *p | |||
| 1126 | } | 1132 | } |
| 1127 | 1133 | ||
| 1128 | hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE); | 1134 | hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE); |
| 1129 | hdr->opcode = cpu_to_le16(hci_opcode_pack(ogf, ocf)); | 1135 | hdr->opcode = cpu_to_le16(opcode); |
| 1130 | hdr->plen = plen; | 1136 | hdr->plen = plen; |
| 1131 | 1137 | ||
| 1132 | if (plen) | 1138 | if (plen) |
| @@ -1143,7 +1149,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *p | |||
| 1143 | } | 1149 | } |
| 1144 | 1150 | ||
| 1145 | /* Get data from the previously sent command */ | 1151 | /* Get data from the previously sent command */ |
| 1146 | void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf) | 1152 | void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) |
| 1147 | { | 1153 | { |
| 1148 | struct hci_command_hdr *hdr; | 1154 | struct hci_command_hdr *hdr; |
| 1149 | 1155 | ||
| @@ -1152,10 +1158,10 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf) | |||
| 1152 | 1158 | ||
| 1153 | hdr = (void *) hdev->sent_cmd->data; | 1159 | hdr = (void *) hdev->sent_cmd->data; |
| 1154 | 1160 | ||
| 1155 | if (hdr->opcode != cpu_to_le16(hci_opcode_pack(ogf, ocf))) | 1161 | if (hdr->opcode != cpu_to_le16(opcode)) |
| 1156 | return NULL; | 1162 | return NULL; |
| 1157 | 1163 | ||
| 1158 | BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf); | 1164 | BT_DBG("%s opcode 0x%x", hdev->name, opcode); |
| 1159 | 1165 | ||
| 1160 | return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; | 1166 | return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; |
| 1161 | } | 1167 | } |
| @@ -1355,6 +1361,26 @@ static inline void hci_sched_sco(struct hci_dev *hdev) | |||
| 1355 | } | 1361 | } |
| 1356 | } | 1362 | } |
| 1357 | 1363 | ||
| 1364 | static inline void hci_sched_esco(struct hci_dev *hdev) | ||
| 1365 | { | ||
| 1366 | struct hci_conn *conn; | ||
| 1367 | struct sk_buff *skb; | ||
| 1368 | int quote; | ||
| 1369 | |||
| 1370 | BT_DBG("%s", hdev->name); | ||
| 1371 | |||
| 1372 | while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, "e))) { | ||
| 1373 | while (quote-- && (skb = skb_dequeue(&conn->data_q))) { | ||
| 1374 | BT_DBG("skb %p len %d", skb, skb->len); | ||
| 1375 | hci_send_frame(skb); | ||
| 1376 | |||
| 1377 | conn->sent++; | ||
| 1378 | if (conn->sent == ~0) | ||
| 1379 | conn->sent = 0; | ||
| 1380 | } | ||
| 1381 | } | ||
| 1382 | } | ||
| 1383 | |||
| 1358 | static void hci_tx_task(unsigned long arg) | 1384 | static void hci_tx_task(unsigned long arg) |
| 1359 | { | 1385 | { |
| 1360 | struct hci_dev *hdev = (struct hci_dev *) arg; | 1386 | struct hci_dev *hdev = (struct hci_dev *) arg; |
| @@ -1370,6 +1396,8 @@ static void hci_tx_task(unsigned long arg) | |||
| 1370 | 1396 | ||
| 1371 | hci_sched_sco(hdev); | 1397 | hci_sched_sco(hdev); |
| 1372 | 1398 | ||
| 1399 | hci_sched_esco(hdev); | ||
| 1400 | |||
| 1373 | /* Send next queued raw (unknown type) packet */ | 1401 | /* Send next queued raw (unknown type) packet */ |
| 1374 | while ((skb = skb_dequeue(&hdev->raw_q))) | 1402 | while ((skb = skb_dequeue(&hdev->raw_q))) |
| 1375 | hci_send_frame(skb); | 1403 | hci_send_frame(skb); |
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 4baea1e38652..46df2e403df8 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c | |||
| @@ -52,234 +52,273 @@ | |||
| 52 | 52 | ||
| 53 | /* Handle HCI Event packets */ | 53 | /* Handle HCI Event packets */ |
| 54 | 54 | ||
| 55 | /* Command Complete OGF LINK_CTL */ | 55 | static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) |
| 56 | static void hci_cc_link_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb) | ||
| 57 | { | 56 | { |
| 58 | __u8 status; | 57 | __u8 status = *((__u8 *) skb->data); |
| 59 | struct hci_conn *pend; | ||
| 60 | 58 | ||
| 61 | BT_DBG("%s ocf 0x%x", hdev->name, ocf); | 59 | BT_DBG("%s status 0x%x", hdev->name, status); |
| 62 | 60 | ||
| 63 | switch (ocf) { | 61 | if (status) |
| 64 | case OCF_INQUIRY_CANCEL: | 62 | return; |
| 65 | case OCF_EXIT_PERIODIC_INQ: | ||
| 66 | status = *((__u8 *) skb->data); | ||
| 67 | 63 | ||
| 68 | if (status) { | 64 | clear_bit(HCI_INQUIRY, &hdev->flags); |
| 69 | BT_DBG("%s Inquiry cancel error: status 0x%x", hdev->name, status); | ||
| 70 | } else { | ||
| 71 | clear_bit(HCI_INQUIRY, &hdev->flags); | ||
| 72 | hci_req_complete(hdev, status); | ||
| 73 | } | ||
| 74 | 65 | ||
| 75 | hci_dev_lock(hdev); | 66 | hci_req_complete(hdev, status); |
| 76 | 67 | ||
| 77 | pend = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2); | 68 | hci_conn_check_pending(hdev); |
| 78 | if (pend) | 69 | } |
| 79 | hci_acl_connect(pend); | ||
| 80 | 70 | ||
| 81 | hci_dev_unlock(hdev); | 71 | static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) |
| 72 | { | ||
| 73 | __u8 status = *((__u8 *) skb->data); | ||
| 82 | 74 | ||
| 83 | break; | 75 | BT_DBG("%s status 0x%x", hdev->name, status); |
| 84 | 76 | ||
| 85 | default: | 77 | if (status) |
| 86 | BT_DBG("%s Command complete: ogf LINK_CTL ocf %x", hdev->name, ocf); | 78 | return; |
| 87 | break; | 79 | |
| 80 | clear_bit(HCI_INQUIRY, &hdev->flags); | ||
| 81 | |||
| 82 | hci_conn_check_pending(hdev); | ||
| 83 | } | ||
| 84 | |||
| 85 | static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 86 | { | ||
| 87 | BT_DBG("%s", hdev->name); | ||
| 88 | } | ||
| 89 | |||
| 90 | static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 91 | { | ||
| 92 | struct hci_rp_role_discovery *rp = (void *) skb->data; | ||
| 93 | struct hci_conn *conn; | ||
| 94 | |||
| 95 | BT_DBG("%s status 0x%x", hdev->name, rp->status); | ||
| 96 | |||
| 97 | if (rp->status) | ||
| 98 | return; | ||
| 99 | |||
| 100 | hci_dev_lock(hdev); | ||
| 101 | |||
| 102 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); | ||
| 103 | if (conn) { | ||
| 104 | if (rp->role) | ||
| 105 | conn->link_mode &= ~HCI_LM_MASTER; | ||
| 106 | else | ||
| 107 | conn->link_mode |= HCI_LM_MASTER; | ||
| 88 | } | 108 | } |
| 109 | |||
| 110 | hci_dev_unlock(hdev); | ||
| 89 | } | 111 | } |
| 90 | 112 | ||
| 91 | /* Command Complete OGF LINK_POLICY */ | 113 | static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb) |
| 92 | static void hci_cc_link_policy(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb) | ||
| 93 | { | 114 | { |
| 115 | struct hci_rp_write_link_policy *rp = (void *) skb->data; | ||
| 94 | struct hci_conn *conn; | 116 | struct hci_conn *conn; |
| 95 | struct hci_rp_role_discovery *rd; | ||
| 96 | struct hci_rp_write_link_policy *lp; | ||
| 97 | void *sent; | 117 | void *sent; |
| 98 | 118 | ||
| 99 | BT_DBG("%s ocf 0x%x", hdev->name, ocf); | 119 | BT_DBG("%s status 0x%x", hdev->name, rp->status); |
| 100 | 120 | ||
| 101 | switch (ocf) { | 121 | if (rp->status) |
| 102 | case OCF_ROLE_DISCOVERY: | 122 | return; |
| 103 | rd = (void *) skb->data; | ||
| 104 | 123 | ||
| 105 | if (rd->status) | 124 | sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY); |
| 106 | break; | 125 | if (!sent) |
| 126 | return; | ||
| 107 | 127 | ||
| 108 | hci_dev_lock(hdev); | 128 | hci_dev_lock(hdev); |
| 109 | 129 | ||
| 110 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rd->handle)); | 130 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); |
| 111 | if (conn) { | 131 | if (conn) { |
| 112 | if (rd->role) | 132 | __le16 policy = get_unaligned((__le16 *) (sent + 2)); |
| 113 | conn->link_mode &= ~HCI_LM_MASTER; | 133 | conn->link_policy = __le16_to_cpu(policy); |
| 114 | else | 134 | } |
| 115 | conn->link_mode |= HCI_LM_MASTER; | ||
| 116 | } | ||
| 117 | 135 | ||
| 118 | hci_dev_unlock(hdev); | 136 | hci_dev_unlock(hdev); |
| 119 | break; | 137 | } |
| 120 | 138 | ||
| 121 | case OCF_WRITE_LINK_POLICY: | 139 | static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) |
| 122 | sent = hci_sent_cmd_data(hdev, OGF_LINK_POLICY, OCF_WRITE_LINK_POLICY); | 140 | { |
| 123 | if (!sent) | 141 | __u8 status = *((__u8 *) skb->data); |
| 124 | break; | ||
| 125 | 142 | ||
| 126 | lp = (struct hci_rp_write_link_policy *) skb->data; | 143 | BT_DBG("%s status 0x%x", hdev->name, status); |
| 127 | 144 | ||
| 128 | if (lp->status) | 145 | hci_req_complete(hdev, status); |
| 129 | break; | 146 | } |
| 130 | 147 | ||
| 131 | hci_dev_lock(hdev); | 148 | static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) |
| 149 | { | ||
| 150 | __u8 status = *((__u8 *) skb->data); | ||
| 151 | void *sent; | ||
| 132 | 152 | ||
| 133 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(lp->handle)); | 153 | BT_DBG("%s status 0x%x", hdev->name, status); |
| 134 | if (conn) { | ||
| 135 | __le16 policy = get_unaligned((__le16 *) (sent + 2)); | ||
| 136 | conn->link_policy = __le16_to_cpu(policy); | ||
| 137 | } | ||
| 138 | 154 | ||
| 139 | hci_dev_unlock(hdev); | 155 | sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME); |
| 140 | break; | 156 | if (!sent) |
| 157 | return; | ||
| 141 | 158 | ||
| 142 | default: | 159 | if (!status) |
| 143 | BT_DBG("%s: Command complete: ogf LINK_POLICY ocf %x", | 160 | memcpy(hdev->dev_name, sent, 248); |
| 144 | hdev->name, ocf); | 161 | } |
| 145 | break; | 162 | |
| 163 | static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 164 | { | ||
| 165 | struct hci_rp_read_local_name *rp = (void *) skb->data; | ||
| 166 | |||
| 167 | BT_DBG("%s status 0x%x", hdev->name, rp->status); | ||
| 168 | |||
| 169 | if (rp->status) | ||
| 170 | return; | ||
| 171 | |||
| 172 | memcpy(hdev->dev_name, rp->name, 248); | ||
| 173 | } | ||
| 174 | |||
| 175 | static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 176 | { | ||
| 177 | __u8 status = *((__u8 *) skb->data); | ||
| 178 | void *sent; | ||
| 179 | |||
| 180 | BT_DBG("%s status 0x%x", hdev->name, status); | ||
| 181 | |||
| 182 | sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE); | ||
| 183 | if (!sent) | ||
| 184 | return; | ||
| 185 | |||
| 186 | if (!status) { | ||
| 187 | __u8 param = *((__u8 *) sent); | ||
| 188 | |||
| 189 | if (param == AUTH_ENABLED) | ||
| 190 | set_bit(HCI_AUTH, &hdev->flags); | ||
| 191 | else | ||
| 192 | clear_bit(HCI_AUTH, &hdev->flags); | ||
| 146 | } | 193 | } |
| 194 | |||
| 195 | hci_req_complete(hdev, status); | ||
| 147 | } | 196 | } |
| 148 | 197 | ||
| 149 | /* Command Complete OGF HOST_CTL */ | 198 | static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb) |
| 150 | static void hci_cc_host_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb) | ||
| 151 | { | 199 | { |
| 152 | __u8 status, param; | 200 | __u8 status = *((__u8 *) skb->data); |
| 153 | __u16 setting; | ||
| 154 | struct hci_rp_read_voice_setting *vs; | ||
| 155 | void *sent; | 201 | void *sent; |
| 156 | 202 | ||
| 157 | BT_DBG("%s ocf 0x%x", hdev->name, ocf); | 203 | BT_DBG("%s status 0x%x", hdev->name, status); |
| 158 | 204 | ||
| 159 | switch (ocf) { | 205 | sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE); |
| 160 | case OCF_RESET: | 206 | if (!sent) |
| 161 | status = *((__u8 *) skb->data); | 207 | return; |
| 162 | hci_req_complete(hdev, status); | ||
| 163 | break; | ||
| 164 | 208 | ||
| 165 | case OCF_SET_EVENT_FLT: | 209 | if (!status) { |
| 166 | status = *((__u8 *) skb->data); | 210 | __u8 param = *((__u8 *) sent); |
| 167 | if (status) { | ||
| 168 | BT_DBG("%s SET_EVENT_FLT failed %d", hdev->name, status); | ||
| 169 | } else { | ||
| 170 | BT_DBG("%s SET_EVENT_FLT succeseful", hdev->name); | ||
| 171 | } | ||
| 172 | break; | ||
| 173 | 211 | ||
| 174 | case OCF_WRITE_AUTH_ENABLE: | 212 | if (param) |
| 175 | sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE); | 213 | set_bit(HCI_ENCRYPT, &hdev->flags); |
| 176 | if (!sent) | 214 | else |
| 177 | break; | 215 | clear_bit(HCI_ENCRYPT, &hdev->flags); |
| 216 | } | ||
| 178 | 217 | ||
| 179 | status = *((__u8 *) skb->data); | 218 | hci_req_complete(hdev, status); |
| 180 | param = *((__u8 *) sent); | 219 | } |
| 181 | 220 | ||
| 182 | if (!status) { | 221 | static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) |
| 183 | if (param == AUTH_ENABLED) | 222 | { |
| 184 | set_bit(HCI_AUTH, &hdev->flags); | 223 | __u8 status = *((__u8 *) skb->data); |
| 185 | else | 224 | void *sent; |
| 186 | clear_bit(HCI_AUTH, &hdev->flags); | ||
| 187 | } | ||
| 188 | hci_req_complete(hdev, status); | ||
| 189 | break; | ||
| 190 | 225 | ||
| 191 | case OCF_WRITE_ENCRYPT_MODE: | 226 | BT_DBG("%s status 0x%x", hdev->name, status); |
| 192 | sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE); | ||
| 193 | if (!sent) | ||
| 194 | break; | ||
| 195 | 227 | ||
| 196 | status = *((__u8 *) skb->data); | 228 | sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE); |
| 197 | param = *((__u8 *) sent); | 229 | if (!sent) |
| 230 | return; | ||
| 198 | 231 | ||
| 199 | if (!status) { | 232 | if (!status) { |
| 200 | if (param) | 233 | __u8 param = *((__u8 *) sent); |
| 201 | set_bit(HCI_ENCRYPT, &hdev->flags); | ||
| 202 | else | ||
| 203 | clear_bit(HCI_ENCRYPT, &hdev->flags); | ||
| 204 | } | ||
| 205 | hci_req_complete(hdev, status); | ||
| 206 | break; | ||
| 207 | 234 | ||
| 208 | case OCF_WRITE_CA_TIMEOUT: | 235 | clear_bit(HCI_PSCAN, &hdev->flags); |
| 209 | status = *((__u8 *) skb->data); | 236 | clear_bit(HCI_ISCAN, &hdev->flags); |
| 210 | if (status) { | ||
| 211 | BT_DBG("%s OCF_WRITE_CA_TIMEOUT failed %d", hdev->name, status); | ||
| 212 | } else { | ||
| 213 | BT_DBG("%s OCF_WRITE_CA_TIMEOUT succeseful", hdev->name); | ||
| 214 | } | ||
| 215 | break; | ||
| 216 | 237 | ||
| 217 | case OCF_WRITE_PG_TIMEOUT: | 238 | if (param & SCAN_INQUIRY) |
| 218 | status = *((__u8 *) skb->data); | 239 | set_bit(HCI_ISCAN, &hdev->flags); |
| 219 | if (status) { | ||
| 220 | BT_DBG("%s OCF_WRITE_PG_TIMEOUT failed %d", hdev->name, status); | ||
| 221 | } else { | ||
| 222 | BT_DBG("%s: OCF_WRITE_PG_TIMEOUT succeseful", hdev->name); | ||
| 223 | } | ||
| 224 | break; | ||
| 225 | 240 | ||
| 226 | case OCF_WRITE_SCAN_ENABLE: | 241 | if (param & SCAN_PAGE) |
| 227 | sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE); | 242 | set_bit(HCI_PSCAN, &hdev->flags); |
| 228 | if (!sent) | 243 | } |
| 229 | break; | ||
| 230 | 244 | ||
| 231 | status = *((__u8 *) skb->data); | 245 | hci_req_complete(hdev, status); |
| 232 | param = *((__u8 *) sent); | 246 | } |
| 233 | 247 | ||
| 234 | BT_DBG("param 0x%x", param); | 248 | static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) |
| 249 | { | ||
| 250 | struct hci_rp_read_class_of_dev *rp = (void *) skb->data; | ||
| 235 | 251 | ||
| 236 | if (!status) { | 252 | BT_DBG("%s status 0x%x", hdev->name, rp->status); |
| 237 | clear_bit(HCI_PSCAN, &hdev->flags); | ||
| 238 | clear_bit(HCI_ISCAN, &hdev->flags); | ||
| 239 | if (param & SCAN_INQUIRY) | ||
| 240 | set_bit(HCI_ISCAN, &hdev->flags); | ||
| 241 | 253 | ||
| 242 | if (param & SCAN_PAGE) | 254 | if (rp->status) |
| 243 | set_bit(HCI_PSCAN, &hdev->flags); | 255 | return; |
| 244 | } | ||
| 245 | hci_req_complete(hdev, status); | ||
| 246 | break; | ||
| 247 | 256 | ||
| 248 | case OCF_READ_VOICE_SETTING: | 257 | memcpy(hdev->dev_class, rp->dev_class, 3); |
| 249 | vs = (struct hci_rp_read_voice_setting *) skb->data; | ||
| 250 | 258 | ||
| 251 | if (vs->status) { | 259 | BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name, |
| 252 | BT_DBG("%s READ_VOICE_SETTING failed %d", hdev->name, vs->status); | 260 | hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); |
| 253 | break; | 261 | } |
| 254 | } | ||
| 255 | 262 | ||
| 256 | setting = __le16_to_cpu(vs->voice_setting); | 263 | static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) |
| 264 | { | ||
| 265 | __u8 status = *((__u8 *) skb->data); | ||
| 266 | void *sent; | ||
| 257 | 267 | ||
| 258 | if (hdev->voice_setting != setting ) { | 268 | BT_DBG("%s status 0x%x", hdev->name, status); |
| 259 | hdev->voice_setting = setting; | ||
| 260 | 269 | ||
| 261 | BT_DBG("%s: voice setting 0x%04x", hdev->name, setting); | 270 | sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV); |
| 271 | if (!sent) | ||
| 272 | return; | ||
| 262 | 273 | ||
| 263 | if (hdev->notify) { | 274 | if (!status) |
| 264 | tasklet_disable(&hdev->tx_task); | 275 | memcpy(hdev->dev_class, sent, 3); |
| 265 | hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); | 276 | } |
| 266 | tasklet_enable(&hdev->tx_task); | ||
| 267 | } | ||
| 268 | } | ||
| 269 | break; | ||
| 270 | 277 | ||
| 271 | case OCF_WRITE_VOICE_SETTING: | 278 | static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) |
| 272 | sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_VOICE_SETTING); | 279 | { |
| 273 | if (!sent) | 280 | struct hci_rp_read_voice_setting *rp = (void *) skb->data; |
| 274 | break; | 281 | __u16 setting; |
| 282 | |||
| 283 | BT_DBG("%s status 0x%x", hdev->name, rp->status); | ||
| 284 | |||
| 285 | if (rp->status) | ||
| 286 | return; | ||
| 287 | |||
| 288 | setting = __le16_to_cpu(rp->voice_setting); | ||
| 289 | |||
| 290 | if (hdev->voice_setting == setting ) | ||
| 291 | return; | ||
| 292 | |||
| 293 | hdev->voice_setting = setting; | ||
| 275 | 294 | ||
| 276 | status = *((__u8 *) skb->data); | 295 | BT_DBG("%s voice setting 0x%04x", hdev->name, setting); |
| 277 | setting = __le16_to_cpu(get_unaligned((__le16 *) sent)); | ||
| 278 | 296 | ||
| 279 | if (!status && hdev->voice_setting != setting) { | 297 | if (hdev->notify) { |
| 298 | tasklet_disable(&hdev->tx_task); | ||
| 299 | hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); | ||
| 300 | tasklet_enable(&hdev->tx_task); | ||
| 301 | } | ||
| 302 | } | ||
| 303 | |||
| 304 | static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 305 | { | ||
| 306 | __u8 status = *((__u8 *) skb->data); | ||
| 307 | void *sent; | ||
| 308 | |||
| 309 | BT_DBG("%s status 0x%x", hdev->name, status); | ||
| 310 | |||
| 311 | sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING); | ||
| 312 | if (!sent) | ||
| 313 | return; | ||
| 314 | |||
| 315 | if (!status) { | ||
| 316 | __u16 setting = __le16_to_cpu(get_unaligned((__le16 *) sent)); | ||
| 317 | |||
| 318 | if (hdev->voice_setting != setting) { | ||
| 280 | hdev->voice_setting = setting; | 319 | hdev->voice_setting = setting; |
| 281 | 320 | ||
| 282 | BT_DBG("%s: voice setting 0x%04x", hdev->name, setting); | 321 | BT_DBG("%s voice setting 0x%04x", hdev->name, setting); |
| 283 | 322 | ||
| 284 | if (hdev->notify) { | 323 | if (hdev->notify) { |
| 285 | tasklet_disable(&hdev->tx_task); | 324 | tasklet_disable(&hdev->tx_task); |
| @@ -287,143 +326,153 @@ static void hci_cc_host_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb | |||
| 287 | tasklet_enable(&hdev->tx_task); | 326 | tasklet_enable(&hdev->tx_task); |
| 288 | } | 327 | } |
| 289 | } | 328 | } |
| 290 | hci_req_complete(hdev, status); | ||
| 291 | break; | ||
| 292 | |||
| 293 | case OCF_HOST_BUFFER_SIZE: | ||
| 294 | status = *((__u8 *) skb->data); | ||
| 295 | if (status) { | ||
| 296 | BT_DBG("%s OCF_BUFFER_SIZE failed %d", hdev->name, status); | ||
| 297 | hci_req_complete(hdev, status); | ||
| 298 | } | ||
| 299 | break; | ||
| 300 | |||
| 301 | default: | ||
| 302 | BT_DBG("%s Command complete: ogf HOST_CTL ocf %x", hdev->name, ocf); | ||
| 303 | break; | ||
| 304 | } | 329 | } |
| 305 | } | 330 | } |
| 306 | 331 | ||
| 307 | /* Command Complete OGF INFO_PARAM */ | 332 | static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) |
| 308 | static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb) | ||
| 309 | { | 333 | { |
| 310 | struct hci_rp_read_loc_version *lv; | 334 | __u8 status = *((__u8 *) skb->data); |
| 311 | struct hci_rp_read_local_features *lf; | ||
| 312 | struct hci_rp_read_buffer_size *bs; | ||
| 313 | struct hci_rp_read_bd_addr *ba; | ||
| 314 | 335 | ||
| 315 | BT_DBG("%s ocf 0x%x", hdev->name, ocf); | 336 | BT_DBG("%s status 0x%x", hdev->name, status); |
| 316 | 337 | ||
| 317 | switch (ocf) { | 338 | hci_req_complete(hdev, status); |
| 318 | case OCF_READ_LOCAL_VERSION: | 339 | } |
| 319 | lv = (struct hci_rp_read_loc_version *) skb->data; | ||
| 320 | 340 | ||
| 321 | if (lv->status) { | 341 | static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) |
| 322 | BT_DBG("%s READ_LOCAL_VERSION failed %d", hdev->name, lf->status); | 342 | { |
| 323 | break; | 343 | struct hci_rp_read_local_version *rp = (void *) skb->data; |
| 324 | } | ||
| 325 | 344 | ||
| 326 | hdev->hci_ver = lv->hci_ver; | 345 | BT_DBG("%s status 0x%x", hdev->name, rp->status); |
| 327 | hdev->hci_rev = btohs(lv->hci_rev); | ||
| 328 | hdev->manufacturer = btohs(lv->manufacturer); | ||
| 329 | 346 | ||
| 330 | BT_DBG("%s: manufacturer %d hci_ver %d hci_rev %d", hdev->name, | 347 | if (rp->status) |
| 331 | hdev->manufacturer, hdev->hci_ver, hdev->hci_rev); | 348 | return; |
| 332 | 349 | ||
| 333 | break; | 350 | hdev->hci_ver = rp->hci_ver; |
| 351 | hdev->hci_rev = btohs(rp->hci_rev); | ||
| 352 | hdev->manufacturer = btohs(rp->manufacturer); | ||
| 334 | 353 | ||
| 335 | case OCF_READ_LOCAL_FEATURES: | 354 | BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name, |
| 336 | lf = (struct hci_rp_read_local_features *) skb->data; | 355 | hdev->manufacturer, |
| 356 | hdev->hci_ver, hdev->hci_rev); | ||
| 357 | } | ||
| 337 | 358 | ||
| 338 | if (lf->status) { | 359 | static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb) |
| 339 | BT_DBG("%s READ_LOCAL_FEATURES failed %d", hdev->name, lf->status); | 360 | { |
| 340 | break; | 361 | struct hci_rp_read_local_commands *rp = (void *) skb->data; |
| 341 | } | ||
| 342 | 362 | ||
| 343 | memcpy(hdev->features, lf->features, sizeof(hdev->features)); | 363 | BT_DBG("%s status 0x%x", hdev->name, rp->status); |
| 344 | 364 | ||
| 345 | /* Adjust default settings according to features | 365 | if (rp->status) |
| 346 | * supported by device. */ | 366 | return; |
| 347 | if (hdev->features[0] & LMP_3SLOT) | ||
| 348 | hdev->pkt_type |= (HCI_DM3 | HCI_DH3); | ||
| 349 | 367 | ||
| 350 | if (hdev->features[0] & LMP_5SLOT) | 368 | memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); |
| 351 | hdev->pkt_type |= (HCI_DM5 | HCI_DH5); | 369 | } |
| 352 | 370 | ||
| 353 | if (hdev->features[1] & LMP_HV2) { | 371 | static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb) |
| 354 | hdev->pkt_type |= (HCI_HV2); | 372 | { |
| 355 | hdev->esco_type |= (ESCO_HV2); | 373 | struct hci_rp_read_local_features *rp = (void *) skb->data; |
| 356 | } | ||
| 357 | 374 | ||
| 358 | if (hdev->features[1] & LMP_HV3) { | 375 | BT_DBG("%s status 0x%x", hdev->name, rp->status); |
| 359 | hdev->pkt_type |= (HCI_HV3); | ||
| 360 | hdev->esco_type |= (ESCO_HV3); | ||
| 361 | } | ||
| 362 | 376 | ||
| 363 | if (hdev->features[3] & LMP_ESCO) | 377 | if (rp->status) |
| 364 | hdev->esco_type |= (ESCO_EV3); | 378 | return; |
| 365 | 379 | ||
| 366 | if (hdev->features[4] & LMP_EV4) | 380 | memcpy(hdev->features, rp->features, 8); |
| 367 | hdev->esco_type |= (ESCO_EV4); | ||
| 368 | 381 | ||
| 369 | if (hdev->features[4] & LMP_EV5) | 382 | /* Adjust default settings according to features |
| 370 | hdev->esco_type |= (ESCO_EV5); | 383 | * supported by device. */ |
| 371 | 384 | ||
| 372 | BT_DBG("%s: features 0x%x 0x%x 0x%x", hdev->name, | 385 | if (hdev->features[0] & LMP_3SLOT) |
| 373 | lf->features[0], lf->features[1], lf->features[2]); | 386 | hdev->pkt_type |= (HCI_DM3 | HCI_DH3); |
| 374 | 387 | ||
| 375 | break; | 388 | if (hdev->features[0] & LMP_5SLOT) |
| 389 | hdev->pkt_type |= (HCI_DM5 | HCI_DH5); | ||
| 376 | 390 | ||
| 377 | case OCF_READ_BUFFER_SIZE: | 391 | if (hdev->features[1] & LMP_HV2) { |
| 378 | bs = (struct hci_rp_read_buffer_size *) skb->data; | 392 | hdev->pkt_type |= (HCI_HV2); |
| 393 | hdev->esco_type |= (ESCO_HV2); | ||
| 394 | } | ||
| 379 | 395 | ||
| 380 | if (bs->status) { | 396 | if (hdev->features[1] & LMP_HV3) { |
| 381 | BT_DBG("%s READ_BUFFER_SIZE failed %d", hdev->name, bs->status); | 397 | hdev->pkt_type |= (HCI_HV3); |
| 382 | hci_req_complete(hdev, bs->status); | 398 | hdev->esco_type |= (ESCO_HV3); |
| 383 | break; | 399 | } |
| 384 | } | ||
| 385 | 400 | ||
| 386 | hdev->acl_mtu = __le16_to_cpu(bs->acl_mtu); | 401 | if (hdev->features[3] & LMP_ESCO) |
| 387 | hdev->sco_mtu = bs->sco_mtu; | 402 | hdev->esco_type |= (ESCO_EV3); |
| 388 | hdev->acl_pkts = __le16_to_cpu(bs->acl_max_pkt); | ||
| 389 | hdev->sco_pkts = __le16_to_cpu(bs->sco_max_pkt); | ||
| 390 | 403 | ||
| 391 | if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) { | 404 | if (hdev->features[4] & LMP_EV4) |
| 392 | hdev->sco_mtu = 64; | 405 | hdev->esco_type |= (ESCO_EV4); |
| 393 | hdev->sco_pkts = 8; | ||
| 394 | } | ||
| 395 | 406 | ||
| 396 | hdev->acl_cnt = hdev->acl_pkts; | 407 | if (hdev->features[4] & LMP_EV5) |
| 397 | hdev->sco_cnt = hdev->sco_pkts; | 408 | hdev->esco_type |= (ESCO_EV5); |
| 398 | 409 | ||
| 399 | BT_DBG("%s mtu: acl %d, sco %d max_pkt: acl %d, sco %d", hdev->name, | 410 | BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name, |
| 400 | hdev->acl_mtu, hdev->sco_mtu, hdev->acl_pkts, hdev->sco_pkts); | 411 | hdev->features[0], hdev->features[1], |
| 401 | break; | 412 | hdev->features[2], hdev->features[3], |
| 413 | hdev->features[4], hdev->features[5], | ||
| 414 | hdev->features[6], hdev->features[7]); | ||
| 415 | } | ||
| 402 | 416 | ||
| 403 | case OCF_READ_BD_ADDR: | 417 | static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) |
| 404 | ba = (struct hci_rp_read_bd_addr *) skb->data; | 418 | { |
| 419 | struct hci_rp_read_buffer_size *rp = (void *) skb->data; | ||
| 405 | 420 | ||
| 406 | if (!ba->status) { | 421 | BT_DBG("%s status 0x%x", hdev->name, rp->status); |
| 407 | bacpy(&hdev->bdaddr, &ba->bdaddr); | ||
| 408 | } else { | ||
| 409 | BT_DBG("%s: READ_BD_ADDR failed %d", hdev->name, ba->status); | ||
| 410 | } | ||
| 411 | 422 | ||
| 412 | hci_req_complete(hdev, ba->status); | 423 | if (rp->status) |
| 413 | break; | 424 | return; |
| 414 | 425 | ||
| 415 | default: | 426 | hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu); |
| 416 | BT_DBG("%s Command complete: ogf INFO_PARAM ocf %x", hdev->name, ocf); | 427 | hdev->sco_mtu = rp->sco_mtu; |
| 417 | break; | 428 | hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt); |
| 429 | hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt); | ||
| 430 | |||
| 431 | if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) { | ||
| 432 | hdev->sco_mtu = 64; | ||
| 433 | hdev->sco_pkts = 8; | ||
| 418 | } | 434 | } |
| 435 | |||
| 436 | hdev->acl_cnt = hdev->acl_pkts; | ||
| 437 | hdev->sco_cnt = hdev->sco_pkts; | ||
| 438 | |||
| 439 | BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, | ||
| 440 | hdev->acl_mtu, hdev->acl_pkts, | ||
| 441 | hdev->sco_mtu, hdev->sco_pkts); | ||
| 442 | } | ||
| 443 | |||
| 444 | static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 445 | { | ||
| 446 | struct hci_rp_read_bd_addr *rp = (void *) skb->data; | ||
| 447 | |||
| 448 | BT_DBG("%s status 0x%x", hdev->name, rp->status); | ||
| 449 | |||
| 450 | if (!rp->status) | ||
| 451 | bacpy(&hdev->bdaddr, &rp->bdaddr); | ||
| 452 | |||
| 453 | hci_req_complete(hdev, rp->status); | ||
| 454 | } | ||
| 455 | |||
| 456 | static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) | ||
| 457 | { | ||
| 458 | BT_DBG("%s status 0x%x", hdev->name, status); | ||
| 459 | |||
| 460 | if (status) { | ||
| 461 | hci_req_complete(hdev, status); | ||
| 462 | |||
| 463 | hci_conn_check_pending(hdev); | ||
| 464 | } else | ||
| 465 | set_bit(HCI_INQUIRY, &hdev->flags); | ||
| 419 | } | 466 | } |
| 420 | 467 | ||
| 421 | /* Command Status OGF LINK_CTL */ | ||
| 422 | static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) | 468 | static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) |
| 423 | { | 469 | { |
| 470 | struct hci_cp_create_conn *cp; | ||
| 424 | struct hci_conn *conn; | 471 | struct hci_conn *conn; |
| 425 | struct hci_cp_create_conn *cp = hci_sent_cmd_data(hdev, OGF_LINK_CTL, OCF_CREATE_CONN); | ||
| 426 | 472 | ||
| 473 | BT_DBG("%s status 0x%x", hdev->name, status); | ||
| 474 | |||
| 475 | cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN); | ||
| 427 | if (!cp) | 476 | if (!cp) |
| 428 | return; | 477 | return; |
| 429 | 478 | ||
| @@ -431,8 +480,7 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) | |||
| 431 | 480 | ||
| 432 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); | 481 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); |
| 433 | 482 | ||
| 434 | BT_DBG("%s status 0x%x bdaddr %s conn %p", hdev->name, | 483 | BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn); |
| 435 | status, batostr(&cp->bdaddr), conn); | ||
| 436 | 484 | ||
| 437 | if (status) { | 485 | if (status) { |
| 438 | if (conn && conn->state == BT_CONNECT) { | 486 | if (conn && conn->state == BT_CONNECT) { |
| @@ -457,234 +505,138 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) | |||
| 457 | hci_dev_unlock(hdev); | 505 | hci_dev_unlock(hdev); |
| 458 | } | 506 | } |
| 459 | 507 | ||
| 460 | static void hci_cs_link_ctl(struct hci_dev *hdev, __u16 ocf, __u8 status) | 508 | static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) |
| 461 | { | 509 | { |
| 462 | BT_DBG("%s ocf 0x%x", hdev->name, ocf); | 510 | struct hci_cp_add_sco *cp; |
| 511 | struct hci_conn *acl, *sco; | ||
| 512 | __u16 handle; | ||
| 463 | 513 | ||
| 464 | switch (ocf) { | 514 | BT_DBG("%s status 0x%x", hdev->name, status); |
| 465 | case OCF_CREATE_CONN: | ||
| 466 | hci_cs_create_conn(hdev, status); | ||
| 467 | break; | ||
| 468 | |||
| 469 | case OCF_ADD_SCO: | ||
| 470 | if (status) { | ||
| 471 | struct hci_conn *acl, *sco; | ||
| 472 | struct hci_cp_add_sco *cp = hci_sent_cmd_data(hdev, OGF_LINK_CTL, OCF_ADD_SCO); | ||
| 473 | __u16 handle; | ||
| 474 | |||
| 475 | if (!cp) | ||
| 476 | break; | ||
| 477 | 515 | ||
| 478 | handle = __le16_to_cpu(cp->handle); | 516 | if (!status) |
| 479 | 517 | return; | |
| 480 | BT_DBG("%s Add SCO error: handle %d status 0x%x", hdev->name, handle, status); | ||
| 481 | 518 | ||
| 482 | hci_dev_lock(hdev); | 519 | cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO); |
| 520 | if (!cp) | ||
| 521 | return; | ||
| 483 | 522 | ||
| 484 | acl = hci_conn_hash_lookup_handle(hdev, handle); | 523 | handle = __le16_to_cpu(cp->handle); |
| 485 | if (acl && (sco = acl->link)) { | ||
| 486 | sco->state = BT_CLOSED; | ||
| 487 | 524 | ||
| 488 | hci_proto_connect_cfm(sco, status); | 525 | BT_DBG("%s handle %d", hdev->name, handle); |
| 489 | hci_conn_del(sco); | ||
| 490 | } | ||
| 491 | 526 | ||
| 492 | hci_dev_unlock(hdev); | 527 | hci_dev_lock(hdev); |
| 493 | } | ||
| 494 | break; | ||
| 495 | 528 | ||
| 496 | case OCF_INQUIRY: | 529 | acl = hci_conn_hash_lookup_handle(hdev, handle); |
| 497 | if (status) { | 530 | if (acl && (sco = acl->link)) { |
| 498 | BT_DBG("%s Inquiry error: status 0x%x", hdev->name, status); | 531 | sco->state = BT_CLOSED; |
| 499 | hci_req_complete(hdev, status); | ||
| 500 | } else { | ||
| 501 | set_bit(HCI_INQUIRY, &hdev->flags); | ||
| 502 | } | ||
| 503 | break; | ||
| 504 | 532 | ||
| 505 | default: | 533 | hci_proto_connect_cfm(sco, status); |
| 506 | BT_DBG("%s Command status: ogf LINK_CTL ocf %x status %d", | 534 | hci_conn_del(sco); |
| 507 | hdev->name, ocf, status); | ||
| 508 | break; | ||
| 509 | } | 535 | } |
| 536 | |||
| 537 | hci_dev_unlock(hdev); | ||
| 510 | } | 538 | } |
| 511 | 539 | ||
| 512 | /* Command Status OGF LINK_POLICY */ | 540 | static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) |
| 513 | static void hci_cs_link_policy(struct hci_dev *hdev, __u16 ocf, __u8 status) | ||
| 514 | { | 541 | { |
| 515 | BT_DBG("%s ocf 0x%x", hdev->name, ocf); | 542 | BT_DBG("%s status 0x%x", hdev->name, status); |
| 516 | 543 | } | |
| 517 | switch (ocf) { | ||
| 518 | case OCF_SNIFF_MODE: | ||
| 519 | if (status) { | ||
| 520 | struct hci_conn *conn; | ||
| 521 | struct hci_cp_sniff_mode *cp = hci_sent_cmd_data(hdev, OGF_LINK_POLICY, OCF_SNIFF_MODE); | ||
| 522 | 544 | ||
| 523 | if (!cp) | 545 | static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) |
| 524 | break; | 546 | { |
| 547 | struct hci_cp_setup_sync_conn *cp; | ||
| 548 | struct hci_conn *acl, *sco; | ||
| 549 | __u16 handle; | ||
| 525 | 550 | ||
| 526 | hci_dev_lock(hdev); | 551 | BT_DBG("%s status 0x%x", hdev->name, status); |
| 527 | 552 | ||
| 528 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); | 553 | if (!status) |
| 529 | if (conn) { | 554 | return; |
| 530 | clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); | ||
| 531 | } | ||
| 532 | |||
| 533 | hci_dev_unlock(hdev); | ||
| 534 | } | ||
| 535 | break; | ||
| 536 | 555 | ||
| 537 | case OCF_EXIT_SNIFF_MODE: | 556 | cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); |
| 538 | if (status) { | 557 | if (!cp) |
| 539 | struct hci_conn *conn; | 558 | return; |
| 540 | struct hci_cp_exit_sniff_mode *cp = hci_sent_cmd_data(hdev, OGF_LINK_POLICY, OCF_EXIT_SNIFF_MODE); | ||
| 541 | 559 | ||
| 542 | if (!cp) | 560 | handle = __le16_to_cpu(cp->handle); |
| 543 | break; | ||
| 544 | 561 | ||
| 545 | hci_dev_lock(hdev); | 562 | BT_DBG("%s handle %d", hdev->name, handle); |
| 546 | 563 | ||
| 547 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); | 564 | hci_dev_lock(hdev); |
| 548 | if (conn) { | ||
| 549 | clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); | ||
| 550 | } | ||
| 551 | 565 | ||
| 552 | hci_dev_unlock(hdev); | 566 | acl = hci_conn_hash_lookup_handle(hdev, handle); |
| 553 | } | 567 | if (acl && (sco = acl->link)) { |
| 554 | break; | 568 | sco->state = BT_CLOSED; |
| 555 | 569 | ||
| 556 | default: | 570 | hci_proto_connect_cfm(sco, status); |
| 557 | BT_DBG("%s Command status: ogf LINK_POLICY ocf %x", hdev->name, ocf); | 571 | hci_conn_del(sco); |
| 558 | break; | ||
| 559 | } | 572 | } |
| 560 | } | ||
| 561 | 573 | ||
| 562 | /* Command Status OGF HOST_CTL */ | 574 | hci_dev_unlock(hdev); |
| 563 | static void hci_cs_host_ctl(struct hci_dev *hdev, __u16 ocf, __u8 status) | ||
| 564 | { | ||
| 565 | BT_DBG("%s ocf 0x%x", hdev->name, ocf); | ||
| 566 | |||
| 567 | switch (ocf) { | ||
| 568 | default: | ||
| 569 | BT_DBG("%s Command status: ogf HOST_CTL ocf %x", hdev->name, ocf); | ||
| 570 | break; | ||
| 571 | } | ||
| 572 | } | 575 | } |
| 573 | 576 | ||
| 574 | /* Command Status OGF INFO_PARAM */ | 577 | static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) |
| 575 | static void hci_cs_info_param(struct hci_dev *hdev, __u16 ocf, __u8 status) | ||
| 576 | { | 578 | { |
| 577 | BT_DBG("%s: hci_cs_info_param: ocf 0x%x", hdev->name, ocf); | 579 | struct hci_cp_sniff_mode *cp; |
| 578 | 580 | struct hci_conn *conn; | |
| 579 | switch (ocf) { | ||
| 580 | default: | ||
| 581 | BT_DBG("%s Command status: ogf INFO_PARAM ocf %x", hdev->name, ocf); | ||
| 582 | break; | ||
| 583 | } | ||
| 584 | } | ||
| 585 | 581 | ||
| 586 | /* Inquiry Complete */ | 582 | BT_DBG("%s status 0x%x", hdev->name, status); |
| 587 | static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 588 | { | ||
| 589 | __u8 status = *((__u8 *) skb->data); | ||
| 590 | struct hci_conn *pend; | ||
| 591 | 583 | ||
| 592 | BT_DBG("%s status %d", hdev->name, status); | 584 | if (!status) |
| 585 | return; | ||
| 593 | 586 | ||
| 594 | clear_bit(HCI_INQUIRY, &hdev->flags); | 587 | cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE); |
| 595 | hci_req_complete(hdev, status); | 588 | if (!cp) |
| 589 | return; | ||
| 596 | 590 | ||
| 597 | hci_dev_lock(hdev); | 591 | hci_dev_lock(hdev); |
| 598 | 592 | ||
| 599 | pend = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2); | 593 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); |
| 600 | if (pend) | 594 | if (conn) |
| 601 | hci_acl_connect(pend); | 595 | clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); |
| 602 | 596 | ||
| 603 | hci_dev_unlock(hdev); | 597 | hci_dev_unlock(hdev); |
| 604 | } | 598 | } |
| 605 | 599 | ||
| 606 | /* Inquiry Result */ | 600 | static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) |
| 607 | static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 608 | { | 601 | { |
| 609 | struct inquiry_data data; | 602 | struct hci_cp_exit_sniff_mode *cp; |
| 610 | struct inquiry_info *info = (struct inquiry_info *) (skb->data + 1); | 603 | struct hci_conn *conn; |
| 611 | int num_rsp = *((__u8 *) skb->data); | ||
| 612 | 604 | ||
| 613 | BT_DBG("%s num_rsp %d", hdev->name, num_rsp); | 605 | BT_DBG("%s status 0x%x", hdev->name, status); |
| 614 | 606 | ||
| 615 | if (!num_rsp) | 607 | if (!status) |
| 608 | return; | ||
| 609 | |||
| 610 | cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE); | ||
| 611 | if (!cp) | ||
| 616 | return; | 612 | return; |
| 617 | 613 | ||
| 618 | hci_dev_lock(hdev); | 614 | hci_dev_lock(hdev); |
| 619 | 615 | ||
| 620 | for (; num_rsp; num_rsp--) { | 616 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); |
| 621 | bacpy(&data.bdaddr, &info->bdaddr); | 617 | if (conn) |
| 622 | data.pscan_rep_mode = info->pscan_rep_mode; | 618 | clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); |
| 623 | data.pscan_period_mode = info->pscan_period_mode; | ||
| 624 | data.pscan_mode = info->pscan_mode; | ||
| 625 | memcpy(data.dev_class, info->dev_class, 3); | ||
| 626 | data.clock_offset = info->clock_offset; | ||
| 627 | data.rssi = 0x00; | ||
| 628 | info++; | ||
| 629 | hci_inquiry_cache_update(hdev, &data); | ||
| 630 | } | ||
| 631 | 619 | ||
| 632 | hci_dev_unlock(hdev); | 620 | hci_dev_unlock(hdev); |
| 633 | } | 621 | } |
| 634 | 622 | ||
| 635 | /* Inquiry Result With RSSI */ | 623 | static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
| 636 | static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 637 | { | 624 | { |
| 638 | struct inquiry_data data; | 625 | __u8 status = *((__u8 *) skb->data); |
| 639 | int num_rsp = *((__u8 *) skb->data); | ||
| 640 | |||
| 641 | BT_DBG("%s num_rsp %d", hdev->name, num_rsp); | ||
| 642 | |||
| 643 | if (!num_rsp) | ||
| 644 | return; | ||
| 645 | |||
| 646 | hci_dev_lock(hdev); | ||
| 647 | 626 | ||
| 648 | if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { | 627 | BT_DBG("%s status %d", hdev->name, status); |
| 649 | struct inquiry_info_with_rssi_and_pscan_mode *info = | ||
| 650 | (struct inquiry_info_with_rssi_and_pscan_mode *) (skb->data + 1); | ||
| 651 | 628 | ||
| 652 | for (; num_rsp; num_rsp--) { | 629 | clear_bit(HCI_INQUIRY, &hdev->flags); |
| 653 | bacpy(&data.bdaddr, &info->bdaddr); | ||
| 654 | data.pscan_rep_mode = info->pscan_rep_mode; | ||
| 655 | data.pscan_period_mode = info->pscan_period_mode; | ||
| 656 | data.pscan_mode = info->pscan_mode; | ||
| 657 | memcpy(data.dev_class, info->dev_class, 3); | ||
| 658 | data.clock_offset = info->clock_offset; | ||
| 659 | data.rssi = info->rssi; | ||
| 660 | info++; | ||
| 661 | hci_inquiry_cache_update(hdev, &data); | ||
| 662 | } | ||
| 663 | } else { | ||
| 664 | struct inquiry_info_with_rssi *info = | ||
| 665 | (struct inquiry_info_with_rssi *) (skb->data + 1); | ||
| 666 | 630 | ||
| 667 | for (; num_rsp; num_rsp--) { | 631 | hci_req_complete(hdev, status); |
| 668 | bacpy(&data.bdaddr, &info->bdaddr); | ||
| 669 | data.pscan_rep_mode = info->pscan_rep_mode; | ||
| 670 | data.pscan_period_mode = info->pscan_period_mode; | ||
| 671 | data.pscan_mode = 0x00; | ||
| 672 | memcpy(data.dev_class, info->dev_class, 3); | ||
| 673 | data.clock_offset = info->clock_offset; | ||
| 674 | data.rssi = info->rssi; | ||
| 675 | info++; | ||
| 676 | hci_inquiry_cache_update(hdev, &data); | ||
| 677 | } | ||
| 678 | } | ||
| 679 | 632 | ||
| 680 | hci_dev_unlock(hdev); | 633 | hci_conn_check_pending(hdev); |
| 681 | } | 634 | } |
| 682 | 635 | ||
| 683 | /* Extended Inquiry Result */ | 636 | static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) |
| 684 | static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 685 | { | 637 | { |
| 686 | struct inquiry_data data; | 638 | struct inquiry_data data; |
| 687 | struct extended_inquiry_info *info = (struct extended_inquiry_info *) (skb->data + 1); | 639 | struct inquiry_info *info = (void *) (skb->data + 1); |
| 688 | int num_rsp = *((__u8 *) skb->data); | 640 | int num_rsp = *((__u8 *) skb->data); |
| 689 | 641 | ||
| 690 | BT_DBG("%s num_rsp %d", hdev->name, num_rsp); | 642 | BT_DBG("%s num_rsp %d", hdev->name, num_rsp); |
| @@ -696,12 +648,12 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct | |||
| 696 | 648 | ||
| 697 | for (; num_rsp; num_rsp--) { | 649 | for (; num_rsp; num_rsp--) { |
| 698 | bacpy(&data.bdaddr, &info->bdaddr); | 650 | bacpy(&data.bdaddr, &info->bdaddr); |
| 699 | data.pscan_rep_mode = info->pscan_rep_mode; | 651 | data.pscan_rep_mode = info->pscan_rep_mode; |
| 700 | data.pscan_period_mode = info->pscan_period_mode; | 652 | data.pscan_period_mode = info->pscan_period_mode; |
| 701 | data.pscan_mode = 0x00; | 653 | data.pscan_mode = info->pscan_mode; |
| 702 | memcpy(data.dev_class, info->dev_class, 3); | 654 | memcpy(data.dev_class, info->dev_class, 3); |
| 703 | data.clock_offset = info->clock_offset; | 655 | data.clock_offset = info->clock_offset; |
| 704 | data.rssi = info->rssi; | 656 | data.rssi = 0x00; |
| 705 | info++; | 657 | info++; |
| 706 | hci_inquiry_cache_update(hdev, &data); | 658 | hci_inquiry_cache_update(hdev, &data); |
| 707 | } | 659 | } |
| @@ -709,70 +661,18 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct | |||
| 709 | hci_dev_unlock(hdev); | 661 | hci_dev_unlock(hdev); |
| 710 | } | 662 | } |
| 711 | 663 | ||
| 712 | /* Connect Request */ | ||
| 713 | static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 714 | { | ||
| 715 | struct hci_ev_conn_request *ev = (struct hci_ev_conn_request *) skb->data; | ||
| 716 | int mask = hdev->link_mode; | ||
| 717 | |||
| 718 | BT_DBG("%s Connection request: %s type 0x%x", hdev->name, | ||
| 719 | batostr(&ev->bdaddr), ev->link_type); | ||
| 720 | |||
| 721 | mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); | ||
| 722 | |||
| 723 | if (mask & HCI_LM_ACCEPT) { | ||
| 724 | /* Connection accepted */ | ||
| 725 | struct hci_conn *conn; | ||
| 726 | struct hci_cp_accept_conn_req cp; | ||
| 727 | |||
| 728 | hci_dev_lock(hdev); | ||
| 729 | conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); | ||
| 730 | if (!conn) { | ||
| 731 | if (!(conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr))) { | ||
| 732 | BT_ERR("No memmory for new connection"); | ||
| 733 | hci_dev_unlock(hdev); | ||
| 734 | return; | ||
| 735 | } | ||
| 736 | } | ||
| 737 | memcpy(conn->dev_class, ev->dev_class, 3); | ||
| 738 | conn->state = BT_CONNECT; | ||
| 739 | hci_dev_unlock(hdev); | ||
| 740 | |||
| 741 | bacpy(&cp.bdaddr, &ev->bdaddr); | ||
| 742 | |||
| 743 | if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) | ||
| 744 | cp.role = 0x00; /* Become master */ | ||
| 745 | else | ||
| 746 | cp.role = 0x01; /* Remain slave */ | ||
| 747 | |||
| 748 | hci_send_cmd(hdev, OGF_LINK_CTL, | ||
| 749 | OCF_ACCEPT_CONN_REQ, sizeof(cp), &cp); | ||
| 750 | } else { | ||
| 751 | /* Connection rejected */ | ||
| 752 | struct hci_cp_reject_conn_req cp; | ||
| 753 | |||
| 754 | bacpy(&cp.bdaddr, &ev->bdaddr); | ||
| 755 | cp.reason = 0x0f; | ||
| 756 | hci_send_cmd(hdev, OGF_LINK_CTL, | ||
| 757 | OCF_REJECT_CONN_REQ, sizeof(cp), &cp); | ||
| 758 | } | ||
| 759 | } | ||
| 760 | |||
| 761 | /* Connect Complete */ | ||
| 762 | static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 664 | static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
| 763 | { | 665 | { |
| 764 | struct hci_ev_conn_complete *ev = (struct hci_ev_conn_complete *) skb->data; | 666 | struct hci_ev_conn_complete *ev = (void *) skb->data; |
| 765 | struct hci_conn *conn, *pend; | 667 | struct hci_conn *conn; |
| 766 | 668 | ||
| 767 | BT_DBG("%s", hdev->name); | 669 | BT_DBG("%s", hdev->name); |
| 768 | 670 | ||
| 769 | hci_dev_lock(hdev); | 671 | hci_dev_lock(hdev); |
| 770 | 672 | ||
| 771 | conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); | 673 | conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); |
| 772 | if (!conn) { | 674 | if (!conn) |
| 773 | hci_dev_unlock(hdev); | 675 | goto unlock; |
| 774 | return; | ||
| 775 | } | ||
| 776 | 676 | ||
| 777 | if (!ev->status) { | 677 | if (!ev->status) { |
| 778 | conn->handle = __le16_to_cpu(ev->handle); | 678 | conn->handle = __le16_to_cpu(ev->handle); |
| @@ -788,8 +688,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
| 788 | if (conn->type == ACL_LINK) { | 688 | if (conn->type == ACL_LINK) { |
| 789 | struct hci_cp_read_remote_features cp; | 689 | struct hci_cp_read_remote_features cp; |
| 790 | cp.handle = ev->handle; | 690 | cp.handle = ev->handle; |
| 791 | hci_send_cmd(hdev, OGF_LINK_CTL, | 691 | hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, sizeof(cp), &cp); |
| 792 | OCF_READ_REMOTE_FEATURES, sizeof(cp), &cp); | ||
| 793 | } | 692 | } |
| 794 | 693 | ||
| 795 | /* Set link policy */ | 694 | /* Set link policy */ |
| @@ -797,8 +696,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
| 797 | struct hci_cp_write_link_policy cp; | 696 | struct hci_cp_write_link_policy cp; |
| 798 | cp.handle = ev->handle; | 697 | cp.handle = ev->handle; |
| 799 | cp.policy = cpu_to_le16(hdev->link_policy); | 698 | cp.policy = cpu_to_le16(hdev->link_policy); |
| 800 | hci_send_cmd(hdev, OGF_LINK_POLICY, | 699 | hci_send_cmd(hdev, HCI_OP_WRITE_LINK_POLICY, sizeof(cp), &cp); |
| 801 | OCF_WRITE_LINK_POLICY, sizeof(cp), &cp); | ||
| 802 | } | 700 | } |
| 803 | 701 | ||
| 804 | /* Set packet type for incoming connection */ | 702 | /* Set packet type for incoming connection */ |
| @@ -809,8 +707,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
| 809 | cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK): | 707 | cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK): |
| 810 | cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK); | 708 | cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK); |
| 811 | 709 | ||
| 812 | hci_send_cmd(hdev, OGF_LINK_CTL, | 710 | hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), &cp); |
| 813 | OCF_CHANGE_CONN_PTYPE, sizeof(cp), &cp); | ||
| 814 | } else { | 711 | } else { |
| 815 | /* Update disconnect timer */ | 712 | /* Update disconnect timer */ |
| 816 | hci_conn_hold(conn); | 713 | hci_conn_hold(conn); |
| @@ -822,9 +719,12 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
| 822 | if (conn->type == ACL_LINK) { | 719 | if (conn->type == ACL_LINK) { |
| 823 | struct hci_conn *sco = conn->link; | 720 | struct hci_conn *sco = conn->link; |
| 824 | if (sco) { | 721 | if (sco) { |
| 825 | if (!ev->status) | 722 | if (!ev->status) { |
| 826 | hci_add_sco(sco, conn->handle); | 723 | if (lmp_esco_capable(hdev)) |
| 827 | else { | 724 | hci_setup_sync(sco, conn->handle); |
| 725 | else | ||
| 726 | hci_add_sco(sco, conn->handle); | ||
| 727 | } else { | ||
| 828 | hci_proto_connect_cfm(sco, ev->status); | 728 | hci_proto_connect_cfm(sco, ev->status); |
| 829 | hci_conn_del(sco); | 729 | hci_conn_del(sco); |
| 830 | } | 730 | } |
| @@ -835,136 +735,104 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
| 835 | if (ev->status) | 735 | if (ev->status) |
| 836 | hci_conn_del(conn); | 736 | hci_conn_del(conn); |
| 837 | 737 | ||
| 838 | pend = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2); | 738 | unlock: |
| 839 | if (pend) | ||
| 840 | hci_acl_connect(pend); | ||
| 841 | |||
| 842 | hci_dev_unlock(hdev); | 739 | hci_dev_unlock(hdev); |
| 843 | } | ||
| 844 | |||
| 845 | /* Disconnect Complete */ | ||
| 846 | static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 847 | { | ||
| 848 | struct hci_ev_disconn_complete *ev = (struct hci_ev_disconn_complete *) skb->data; | ||
| 849 | struct hci_conn *conn; | ||
| 850 | |||
| 851 | BT_DBG("%s status %d", hdev->name, ev->status); | ||
| 852 | |||
| 853 | if (ev->status) | ||
| 854 | return; | ||
| 855 | 740 | ||
| 856 | hci_dev_lock(hdev); | 741 | hci_conn_check_pending(hdev); |
| 857 | |||
| 858 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); | ||
| 859 | if (conn) { | ||
| 860 | conn->state = BT_CLOSED; | ||
| 861 | hci_proto_disconn_ind(conn, ev->reason); | ||
| 862 | hci_conn_del(conn); | ||
| 863 | } | ||
| 864 | |||
| 865 | hci_dev_unlock(hdev); | ||
| 866 | } | 742 | } |
| 867 | 743 | ||
| 868 | /* Number of completed packets */ | 744 | static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) |
| 869 | static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 870 | { | 745 | { |
| 871 | struct hci_ev_num_comp_pkts *ev = (struct hci_ev_num_comp_pkts *) skb->data; | 746 | struct hci_ev_conn_request *ev = (void *) skb->data; |
| 872 | __le16 *ptr; | 747 | int mask = hdev->link_mode; |
| 873 | int i; | ||
| 874 | |||
| 875 | skb_pull(skb, sizeof(*ev)); | ||
| 876 | |||
| 877 | BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl); | ||
| 878 | 748 | ||
| 879 | if (skb->len < ev->num_hndl * 4) { | 749 | BT_DBG("%s bdaddr %s type 0x%x", hdev->name, |
| 880 | BT_DBG("%s bad parameters", hdev->name); | 750 | batostr(&ev->bdaddr), ev->link_type); |
| 881 | return; | ||
| 882 | } | ||
| 883 | 751 | ||
| 884 | tasklet_disable(&hdev->tx_task); | 752 | mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); |
| 885 | 753 | ||
| 886 | for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) { | 754 | if (mask & HCI_LM_ACCEPT) { |
| 755 | /* Connection accepted */ | ||
| 887 | struct hci_conn *conn; | 756 | struct hci_conn *conn; |
| 888 | __u16 handle, count; | ||
| 889 | |||
| 890 | handle = __le16_to_cpu(get_unaligned(ptr++)); | ||
| 891 | count = __le16_to_cpu(get_unaligned(ptr++)); | ||
| 892 | 757 | ||
| 893 | conn = hci_conn_hash_lookup_handle(hdev, handle); | 758 | hci_dev_lock(hdev); |
| 894 | if (conn) { | ||
| 895 | conn->sent -= count; | ||
| 896 | 759 | ||
| 897 | if (conn->type == ACL_LINK) { | 760 | conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); |
| 898 | if ((hdev->acl_cnt += count) > hdev->acl_pkts) | 761 | if (!conn) { |
| 899 | hdev->acl_cnt = hdev->acl_pkts; | 762 | if (!(conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr))) { |
| 900 | } else { | 763 | BT_ERR("No memmory for new connection"); |
| 901 | if ((hdev->sco_cnt += count) > hdev->sco_pkts) | 764 | hci_dev_unlock(hdev); |
| 902 | hdev->sco_cnt = hdev->sco_pkts; | 765 | return; |
| 903 | } | 766 | } |
| 904 | } | 767 | } |
| 905 | } | ||
| 906 | hci_sched_tx(hdev); | ||
| 907 | 768 | ||
| 908 | tasklet_enable(&hdev->tx_task); | 769 | memcpy(conn->dev_class, ev->dev_class, 3); |
| 909 | } | 770 | conn->state = BT_CONNECT; |
| 910 | 771 | ||
| 911 | /* Role Change */ | 772 | hci_dev_unlock(hdev); |
| 912 | static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 913 | { | ||
| 914 | struct hci_ev_role_change *ev = (struct hci_ev_role_change *) skb->data; | ||
| 915 | struct hci_conn *conn; | ||
| 916 | 773 | ||
| 917 | BT_DBG("%s status %d", hdev->name, ev->status); | 774 | if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) { |
| 775 | struct hci_cp_accept_conn_req cp; | ||
| 918 | 776 | ||
| 919 | hci_dev_lock(hdev); | 777 | bacpy(&cp.bdaddr, &ev->bdaddr); |
| 920 | 778 | ||
| 921 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); | 779 | if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) |
| 922 | if (conn) { | 780 | cp.role = 0x00; /* Become master */ |
| 923 | if (!ev->status) { | ||
| 924 | if (ev->role) | ||
| 925 | conn->link_mode &= ~HCI_LM_MASTER; | ||
| 926 | else | 781 | else |
| 927 | conn->link_mode |= HCI_LM_MASTER; | 782 | cp.role = 0x01; /* Remain slave */ |
| 928 | } | ||
| 929 | 783 | ||
| 930 | clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend); | 784 | hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, |
| 785 | sizeof(cp), &cp); | ||
| 786 | } else { | ||
| 787 | struct hci_cp_accept_sync_conn_req cp; | ||
| 931 | 788 | ||
| 932 | hci_role_switch_cfm(conn, ev->status, ev->role); | 789 | bacpy(&cp.bdaddr, &ev->bdaddr); |
| 933 | } | 790 | cp.pkt_type = cpu_to_le16(hdev->esco_type); |
| 934 | 791 | ||
| 935 | hci_dev_unlock(hdev); | 792 | cp.tx_bandwidth = cpu_to_le32(0x00001f40); |
| 793 | cp.rx_bandwidth = cpu_to_le32(0x00001f40); | ||
| 794 | cp.max_latency = cpu_to_le16(0xffff); | ||
| 795 | cp.content_format = cpu_to_le16(hdev->voice_setting); | ||
| 796 | cp.retrans_effort = 0xff; | ||
| 797 | |||
| 798 | hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, | ||
| 799 | sizeof(cp), &cp); | ||
| 800 | } | ||
| 801 | } else { | ||
| 802 | /* Connection rejected */ | ||
| 803 | struct hci_cp_reject_conn_req cp; | ||
| 804 | |||
| 805 | bacpy(&cp.bdaddr, &ev->bdaddr); | ||
| 806 | cp.reason = 0x0f; | ||
| 807 | hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); | ||
| 808 | } | ||
| 936 | } | 809 | } |
| 937 | 810 | ||
| 938 | /* Mode Change */ | 811 | static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
| 939 | static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 940 | { | 812 | { |
| 941 | struct hci_ev_mode_change *ev = (struct hci_ev_mode_change *) skb->data; | 813 | struct hci_ev_disconn_complete *ev = (void *) skb->data; |
| 942 | struct hci_conn *conn; | 814 | struct hci_conn *conn; |
| 943 | 815 | ||
| 944 | BT_DBG("%s status %d", hdev->name, ev->status); | 816 | BT_DBG("%s status %d", hdev->name, ev->status); |
| 945 | 817 | ||
| 818 | if (ev->status) | ||
| 819 | return; | ||
| 820 | |||
| 946 | hci_dev_lock(hdev); | 821 | hci_dev_lock(hdev); |
| 947 | 822 | ||
| 948 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); | 823 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); |
| 949 | if (conn) { | 824 | if (conn) { |
| 950 | conn->mode = ev->mode; | 825 | conn->state = BT_CLOSED; |
| 951 | conn->interval = __le16_to_cpu(ev->interval); | 826 | hci_proto_disconn_ind(conn, ev->reason); |
| 952 | 827 | hci_conn_del(conn); | |
| 953 | if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { | ||
| 954 | if (conn->mode == HCI_CM_ACTIVE) | ||
| 955 | conn->power_save = 1; | ||
| 956 | else | ||
| 957 | conn->power_save = 0; | ||
| 958 | } | ||
| 959 | } | 828 | } |
| 960 | 829 | ||
| 961 | hci_dev_unlock(hdev); | 830 | hci_dev_unlock(hdev); |
| 962 | } | 831 | } |
| 963 | 832 | ||
| 964 | /* Authentication Complete */ | ||
| 965 | static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 833 | static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
| 966 | { | 834 | { |
| 967 | struct hci_ev_auth_complete *ev = (struct hci_ev_auth_complete *) skb->data; | 835 | struct hci_ev_auth_complete *ev = (void *) skb->data; |
| 968 | struct hci_conn *conn; | 836 | struct hci_conn *conn; |
| 969 | 837 | ||
| 970 | BT_DBG("%s status %d", hdev->name, ev->status); | 838 | BT_DBG("%s status %d", hdev->name, ev->status); |
| @@ -985,8 +853,8 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
| 985 | struct hci_cp_set_conn_encrypt cp; | 853 | struct hci_cp_set_conn_encrypt cp; |
| 986 | cp.handle = cpu_to_le16(conn->handle); | 854 | cp.handle = cpu_to_le16(conn->handle); |
| 987 | cp.encrypt = 1; | 855 | cp.encrypt = 1; |
| 988 | hci_send_cmd(conn->hdev, OGF_LINK_CTL, | 856 | hci_send_cmd(conn->hdev, |
| 989 | OCF_SET_CONN_ENCRYPT, sizeof(cp), &cp); | 857 | HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), &cp); |
| 990 | } else { | 858 | } else { |
| 991 | clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); | 859 | clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); |
| 992 | hci_encrypt_cfm(conn, ev->status, 0x00); | 860 | hci_encrypt_cfm(conn, ev->status, 0x00); |
| @@ -997,10 +865,16 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
| 997 | hci_dev_unlock(hdev); | 865 | hci_dev_unlock(hdev); |
| 998 | } | 866 | } |
| 999 | 867 | ||
| 1000 | /* Encryption Change */ | 868 | static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) |
| 869 | { | ||
| 870 | BT_DBG("%s", hdev->name); | ||
| 871 | |||
| 872 | hci_conn_check_pending(hdev); | ||
| 873 | } | ||
| 874 | |||
| 1001 | static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) | 875 | static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) |
| 1002 | { | 876 | { |
| 1003 | struct hci_ev_encrypt_change *ev = (struct hci_ev_encrypt_change *) skb->data; | 877 | struct hci_ev_encrypt_change *ev = (void *) skb->data; |
| 1004 | struct hci_conn *conn; | 878 | struct hci_conn *conn; |
| 1005 | 879 | ||
| 1006 | BT_DBG("%s status %d", hdev->name, ev->status); | 880 | BT_DBG("%s status %d", hdev->name, ev->status); |
| @@ -1024,10 +898,9 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff * | |||
| 1024 | hci_dev_unlock(hdev); | 898 | hci_dev_unlock(hdev); |
| 1025 | } | 899 | } |
| 1026 | 900 | ||
| 1027 | /* Change Connection Link Key Complete */ | 901 | static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
| 1028 | static inline void hci_change_conn_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 1029 | { | 902 | { |
| 1030 | struct hci_ev_change_conn_link_key_complete *ev = (struct hci_ev_change_conn_link_key_complete *) skb->data; | 903 | struct hci_ev_change_link_key_complete *ev = (void *) skb->data; |
| 1031 | struct hci_conn *conn; | 904 | struct hci_conn *conn; |
| 1032 | 905 | ||
| 1033 | BT_DBG("%s status %d", hdev->name, ev->status); | 906 | BT_DBG("%s status %d", hdev->name, ev->status); |
| @@ -1047,25 +920,263 @@ static inline void hci_change_conn_link_key_complete_evt(struct hci_dev *hdev, s | |||
| 1047 | hci_dev_unlock(hdev); | 920 | hci_dev_unlock(hdev); |
| 1048 | } | 921 | } |
| 1049 | 922 | ||
| 1050 | /* Pin Code Request*/ | 923 | static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb) |
| 1051 | static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 1052 | { | 924 | { |
| 925 | struct hci_ev_remote_features *ev = (void *) skb->data; | ||
| 926 | struct hci_conn *conn; | ||
| 927 | |||
| 928 | BT_DBG("%s status %d", hdev->name, ev->status); | ||
| 929 | |||
| 930 | if (ev->status) | ||
| 931 | return; | ||
| 932 | |||
| 933 | hci_dev_lock(hdev); | ||
| 934 | |||
| 935 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); | ||
| 936 | if (conn) | ||
| 937 | memcpy(conn->features, ev->features, 8); | ||
| 938 | |||
| 939 | hci_dev_unlock(hdev); | ||
| 1053 | } | 940 | } |
| 1054 | 941 | ||
| 1055 | /* Link Key Request */ | 942 | static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb) |
| 1056 | static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 1057 | { | 943 | { |
| 944 | BT_DBG("%s", hdev->name); | ||
| 1058 | } | 945 | } |
| 1059 | 946 | ||
| 1060 | /* Link Key Notification */ | 947 | static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
| 1061 | static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 1062 | { | 948 | { |
| 949 | BT_DBG("%s", hdev->name); | ||
| 1063 | } | 950 | } |
| 1064 | 951 | ||
| 1065 | /* Remote Features */ | 952 | static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
| 1066 | static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 1067 | { | 953 | { |
| 1068 | struct hci_ev_remote_features *ev = (struct hci_ev_remote_features *) skb->data; | 954 | struct hci_ev_cmd_complete *ev = (void *) skb->data; |
| 955 | __u16 opcode; | ||
| 956 | |||
| 957 | skb_pull(skb, sizeof(*ev)); | ||
| 958 | |||
| 959 | opcode = __le16_to_cpu(ev->opcode); | ||
| 960 | |||
| 961 | switch (opcode) { | ||
| 962 | case HCI_OP_INQUIRY_CANCEL: | ||
| 963 | hci_cc_inquiry_cancel(hdev, skb); | ||
| 964 | break; | ||
| 965 | |||
| 966 | case HCI_OP_EXIT_PERIODIC_INQ: | ||
| 967 | hci_cc_exit_periodic_inq(hdev, skb); | ||
| 968 | break; | ||
| 969 | |||
| 970 | case HCI_OP_REMOTE_NAME_REQ_CANCEL: | ||
| 971 | hci_cc_remote_name_req_cancel(hdev, skb); | ||
| 972 | break; | ||
| 973 | |||
| 974 | case HCI_OP_ROLE_DISCOVERY: | ||
| 975 | hci_cc_role_discovery(hdev, skb); | ||
| 976 | break; | ||
| 977 | |||
| 978 | case HCI_OP_WRITE_LINK_POLICY: | ||
| 979 | hci_cc_write_link_policy(hdev, skb); | ||
| 980 | break; | ||
| 981 | |||
| 982 | case HCI_OP_RESET: | ||
| 983 | hci_cc_reset(hdev, skb); | ||
| 984 | break; | ||
| 985 | |||
| 986 | case HCI_OP_WRITE_LOCAL_NAME: | ||
| 987 | hci_cc_write_local_name(hdev, skb); | ||
| 988 | break; | ||
| 989 | |||
| 990 | case HCI_OP_READ_LOCAL_NAME: | ||
| 991 | hci_cc_read_local_name(hdev, skb); | ||
| 992 | break; | ||
| 993 | |||
| 994 | case HCI_OP_WRITE_AUTH_ENABLE: | ||
| 995 | hci_cc_write_auth_enable(hdev, skb); | ||
| 996 | break; | ||
| 997 | |||
| 998 | case HCI_OP_WRITE_ENCRYPT_MODE: | ||
| 999 | hci_cc_write_encrypt_mode(hdev, skb); | ||
| 1000 | break; | ||
| 1001 | |||
| 1002 | case HCI_OP_WRITE_SCAN_ENABLE: | ||
| 1003 | hci_cc_write_scan_enable(hdev, skb); | ||
| 1004 | break; | ||
| 1005 | |||
| 1006 | case HCI_OP_READ_CLASS_OF_DEV: | ||
| 1007 | hci_cc_read_class_of_dev(hdev, skb); | ||
| 1008 | break; | ||
| 1009 | |||
| 1010 | case HCI_OP_WRITE_CLASS_OF_DEV: | ||
| 1011 | hci_cc_write_class_of_dev(hdev, skb); | ||
| 1012 | break; | ||
| 1013 | |||
| 1014 | case HCI_OP_READ_VOICE_SETTING: | ||
| 1015 | hci_cc_read_voice_setting(hdev, skb); | ||
| 1016 | break; | ||
| 1017 | |||
| 1018 | case HCI_OP_WRITE_VOICE_SETTING: | ||
| 1019 | hci_cc_write_voice_setting(hdev, skb); | ||
| 1020 | break; | ||
| 1021 | |||
| 1022 | case HCI_OP_HOST_BUFFER_SIZE: | ||
| 1023 | hci_cc_host_buffer_size(hdev, skb); | ||
| 1024 | break; | ||
| 1025 | |||
| 1026 | case HCI_OP_READ_LOCAL_VERSION: | ||
| 1027 | hci_cc_read_local_version(hdev, skb); | ||
| 1028 | break; | ||
| 1029 | |||
| 1030 | case HCI_OP_READ_LOCAL_COMMANDS: | ||
| 1031 | hci_cc_read_local_commands(hdev, skb); | ||
| 1032 | break; | ||
| 1033 | |||
| 1034 | case HCI_OP_READ_LOCAL_FEATURES: | ||
| 1035 | hci_cc_read_local_features(hdev, skb); | ||
| 1036 | break; | ||
| 1037 | |||
| 1038 | case HCI_OP_READ_BUFFER_SIZE: | ||
| 1039 | hci_cc_read_buffer_size(hdev, skb); | ||
| 1040 | break; | ||
| 1041 | |||
| 1042 | case HCI_OP_READ_BD_ADDR: | ||
| 1043 | hci_cc_read_bd_addr(hdev, skb); | ||
| 1044 | break; | ||
| 1045 | |||
| 1046 | default: | ||
| 1047 | BT_DBG("%s opcode 0x%x", hdev->name, opcode); | ||
| 1048 | break; | ||
| 1049 | } | ||
| 1050 | |||
| 1051 | if (ev->ncmd) { | ||
| 1052 | atomic_set(&hdev->cmd_cnt, 1); | ||
| 1053 | if (!skb_queue_empty(&hdev->cmd_q)) | ||
| 1054 | hci_sched_cmd(hdev); | ||
| 1055 | } | ||
| 1056 | } | ||
| 1057 | |||
| 1058 | static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 1059 | { | ||
| 1060 | struct hci_ev_cmd_status *ev = (void *) skb->data; | ||
| 1061 | __u16 opcode; | ||
| 1062 | |||
| 1063 | skb_pull(skb, sizeof(*ev)); | ||
| 1064 | |||
| 1065 | opcode = __le16_to_cpu(ev->opcode); | ||
| 1066 | |||
| 1067 | switch (opcode) { | ||
| 1068 | case HCI_OP_INQUIRY: | ||
| 1069 | hci_cs_inquiry(hdev, ev->status); | ||
| 1070 | break; | ||
| 1071 | |||
| 1072 | case HCI_OP_CREATE_CONN: | ||
| 1073 | hci_cs_create_conn(hdev, ev->status); | ||
| 1074 | break; | ||
| 1075 | |||
| 1076 | case HCI_OP_ADD_SCO: | ||
| 1077 | hci_cs_add_sco(hdev, ev->status); | ||
| 1078 | break; | ||
| 1079 | |||
| 1080 | case HCI_OP_REMOTE_NAME_REQ: | ||
| 1081 | hci_cs_remote_name_req(hdev, ev->status); | ||
| 1082 | break; | ||
| 1083 | |||
| 1084 | case HCI_OP_SETUP_SYNC_CONN: | ||
| 1085 | hci_cs_setup_sync_conn(hdev, ev->status); | ||
| 1086 | break; | ||
| 1087 | |||
| 1088 | case HCI_OP_SNIFF_MODE: | ||
| 1089 | hci_cs_sniff_mode(hdev, ev->status); | ||
| 1090 | break; | ||
| 1091 | |||
| 1092 | case HCI_OP_EXIT_SNIFF_MODE: | ||
| 1093 | hci_cs_exit_sniff_mode(hdev, ev->status); | ||
| 1094 | break; | ||
| 1095 | |||
| 1096 | default: | ||
| 1097 | BT_DBG("%s opcode 0x%x", hdev->name, opcode); | ||
| 1098 | break; | ||
| 1099 | } | ||
| 1100 | |||
| 1101 | if (ev->ncmd) { | ||
| 1102 | atomic_set(&hdev->cmd_cnt, 1); | ||
| 1103 | if (!skb_queue_empty(&hdev->cmd_q)) | ||
| 1104 | hci_sched_cmd(hdev); | ||
| 1105 | } | ||
| 1106 | } | ||
| 1107 | |||
| 1108 | static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 1109 | { | ||
| 1110 | struct hci_ev_role_change *ev = (void *) skb->data; | ||
| 1111 | struct hci_conn *conn; | ||
| 1112 | |||
| 1113 | BT_DBG("%s status %d", hdev->name, ev->status); | ||
| 1114 | |||
| 1115 | hci_dev_lock(hdev); | ||
| 1116 | |||
| 1117 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); | ||
| 1118 | if (conn) { | ||
| 1119 | if (!ev->status) { | ||
| 1120 | if (ev->role) | ||
| 1121 | conn->link_mode &= ~HCI_LM_MASTER; | ||
| 1122 | else | ||
| 1123 | conn->link_mode |= HCI_LM_MASTER; | ||
| 1124 | } | ||
| 1125 | |||
| 1126 | clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend); | ||
| 1127 | |||
| 1128 | hci_role_switch_cfm(conn, ev->status, ev->role); | ||
| 1129 | } | ||
| 1130 | |||
| 1131 | hci_dev_unlock(hdev); | ||
| 1132 | } | ||
| 1133 | |||
| 1134 | static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 1135 | { | ||
| 1136 | struct hci_ev_num_comp_pkts *ev = (void *) skb->data; | ||
| 1137 | __le16 *ptr; | ||
| 1138 | int i; | ||
| 1139 | |||
| 1140 | skb_pull(skb, sizeof(*ev)); | ||
| 1141 | |||
| 1142 | BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl); | ||
| 1143 | |||
| 1144 | if (skb->len < ev->num_hndl * 4) { | ||
| 1145 | BT_DBG("%s bad parameters", hdev->name); | ||
| 1146 | return; | ||
| 1147 | } | ||
| 1148 | |||
| 1149 | tasklet_disable(&hdev->tx_task); | ||
| 1150 | |||
| 1151 | for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) { | ||
| 1152 | struct hci_conn *conn; | ||
| 1153 | __u16 handle, count; | ||
| 1154 | |||
| 1155 | handle = __le16_to_cpu(get_unaligned(ptr++)); | ||
| 1156 | count = __le16_to_cpu(get_unaligned(ptr++)); | ||
| 1157 | |||
| 1158 | conn = hci_conn_hash_lookup_handle(hdev, handle); | ||
| 1159 | if (conn) { | ||
| 1160 | conn->sent -= count; | ||
| 1161 | |||
| 1162 | if (conn->type == ACL_LINK) { | ||
| 1163 | if ((hdev->acl_cnt += count) > hdev->acl_pkts) | ||
| 1164 | hdev->acl_cnt = hdev->acl_pkts; | ||
| 1165 | } else { | ||
| 1166 | if ((hdev->sco_cnt += count) > hdev->sco_pkts) | ||
| 1167 | hdev->sco_cnt = hdev->sco_pkts; | ||
| 1168 | } | ||
| 1169 | } | ||
| 1170 | } | ||
| 1171 | |||
| 1172 | hci_sched_tx(hdev); | ||
| 1173 | |||
| 1174 | tasklet_enable(&hdev->tx_task); | ||
| 1175 | } | ||
| 1176 | |||
| 1177 | static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 1178 | { | ||
| 1179 | struct hci_ev_mode_change *ev = (void *) skb->data; | ||
| 1069 | struct hci_conn *conn; | 1180 | struct hci_conn *conn; |
| 1070 | 1181 | ||
| 1071 | BT_DBG("%s status %d", hdev->name, ev->status); | 1182 | BT_DBG("%s status %d", hdev->name, ev->status); |
| @@ -1073,17 +1184,39 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff | |||
| 1073 | hci_dev_lock(hdev); | 1184 | hci_dev_lock(hdev); |
| 1074 | 1185 | ||
| 1075 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); | 1186 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); |
| 1076 | if (conn && !ev->status) { | 1187 | if (conn) { |
| 1077 | memcpy(conn->features, ev->features, sizeof(conn->features)); | 1188 | conn->mode = ev->mode; |
| 1189 | conn->interval = __le16_to_cpu(ev->interval); | ||
| 1190 | |||
| 1191 | if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { | ||
| 1192 | if (conn->mode == HCI_CM_ACTIVE) | ||
| 1193 | conn->power_save = 1; | ||
| 1194 | else | ||
| 1195 | conn->power_save = 0; | ||
| 1196 | } | ||
| 1078 | } | 1197 | } |
| 1079 | 1198 | ||
| 1080 | hci_dev_unlock(hdev); | 1199 | hci_dev_unlock(hdev); |
| 1081 | } | 1200 | } |
| 1082 | 1201 | ||
| 1083 | /* Clock Offset */ | 1202 | static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) |
| 1203 | { | ||
| 1204 | BT_DBG("%s", hdev->name); | ||
| 1205 | } | ||
| 1206 | |||
| 1207 | static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 1208 | { | ||
| 1209 | BT_DBG("%s", hdev->name); | ||
| 1210 | } | ||
| 1211 | |||
| 1212 | static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 1213 | { | ||
| 1214 | BT_DBG("%s", hdev->name); | ||
| 1215 | } | ||
| 1216 | |||
| 1084 | static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) | 1217 | static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) |
| 1085 | { | 1218 | { |
| 1086 | struct hci_ev_clock_offset *ev = (struct hci_ev_clock_offset *) skb->data; | 1219 | struct hci_ev_clock_offset *ev = (void *) skb->data; |
| 1087 | struct hci_conn *conn; | 1220 | struct hci_conn *conn; |
| 1088 | 1221 | ||
| 1089 | BT_DBG("%s status %d", hdev->name, ev->status); | 1222 | BT_DBG("%s status %d", hdev->name, ev->status); |
| @@ -1103,10 +1236,9 @@ static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *sk | |||
| 1103 | hci_dev_unlock(hdev); | 1236 | hci_dev_unlock(hdev); |
| 1104 | } | 1237 | } |
| 1105 | 1238 | ||
| 1106 | /* Page Scan Repetition Mode */ | ||
| 1107 | static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) | 1239 | static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) |
| 1108 | { | 1240 | { |
| 1109 | struct hci_ev_pscan_rep_mode *ev = (struct hci_ev_pscan_rep_mode *) skb->data; | 1241 | struct hci_ev_pscan_rep_mode *ev = (void *) skb->data; |
| 1110 | struct inquiry_entry *ie; | 1242 | struct inquiry_entry *ie; |
| 1111 | 1243 | ||
| 1112 | BT_DBG("%s", hdev->name); | 1244 | BT_DBG("%s", hdev->name); |
| @@ -1121,10 +1253,91 @@ static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff * | |||
| 1121 | hci_dev_unlock(hdev); | 1253 | hci_dev_unlock(hdev); |
| 1122 | } | 1254 | } |
| 1123 | 1255 | ||
| 1124 | /* Sniff Subrate */ | 1256 | static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb) |
| 1257 | { | ||
| 1258 | struct inquiry_data data; | ||
| 1259 | int num_rsp = *((__u8 *) skb->data); | ||
| 1260 | |||
| 1261 | BT_DBG("%s num_rsp %d", hdev->name, num_rsp); | ||
| 1262 | |||
| 1263 | if (!num_rsp) | ||
| 1264 | return; | ||
| 1265 | |||
| 1266 | hci_dev_lock(hdev); | ||
| 1267 | |||
| 1268 | if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { | ||
| 1269 | struct inquiry_info_with_rssi_and_pscan_mode *info = (void *) (skb->data + 1); | ||
| 1270 | |||
| 1271 | for (; num_rsp; num_rsp--) { | ||
| 1272 | bacpy(&data.bdaddr, &info->bdaddr); | ||
| 1273 | data.pscan_rep_mode = info->pscan_rep_mode; | ||
| 1274 | data.pscan_period_mode = info->pscan_period_mode; | ||
| 1275 | data.pscan_mode = info->pscan_mode; | ||
| 1276 | memcpy(data.dev_class, info->dev_class, 3); | ||
| 1277 | data.clock_offset = info->clock_offset; | ||
| 1278 | data.rssi = info->rssi; | ||
| 1279 | info++; | ||
| 1280 | hci_inquiry_cache_update(hdev, &data); | ||
| 1281 | } | ||
| 1282 | } else { | ||
| 1283 | struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); | ||
| 1284 | |||
| 1285 | for (; num_rsp; num_rsp--) { | ||
| 1286 | bacpy(&data.bdaddr, &info->bdaddr); | ||
| 1287 | data.pscan_rep_mode = info->pscan_rep_mode; | ||
| 1288 | data.pscan_period_mode = info->pscan_period_mode; | ||
| 1289 | data.pscan_mode = 0x00; | ||
| 1290 | memcpy(data.dev_class, info->dev_class, 3); | ||
| 1291 | data.clock_offset = info->clock_offset; | ||
| 1292 | data.rssi = info->rssi; | ||
| 1293 | info++; | ||
| 1294 | hci_inquiry_cache_update(hdev, &data); | ||
| 1295 | } | ||
| 1296 | } | ||
| 1297 | |||
| 1298 | hci_dev_unlock(hdev); | ||
| 1299 | } | ||
| 1300 | |||
| 1301 | static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 1302 | { | ||
| 1303 | BT_DBG("%s", hdev->name); | ||
| 1304 | } | ||
| 1305 | |||
| 1306 | static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 1307 | { | ||
| 1308 | struct hci_ev_sync_conn_complete *ev = (void *) skb->data; | ||
| 1309 | struct hci_conn *conn; | ||
| 1310 | |||
| 1311 | BT_DBG("%s status %d", hdev->name, ev->status); | ||
| 1312 | |||
| 1313 | hci_dev_lock(hdev); | ||
| 1314 | |||
| 1315 | conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); | ||
| 1316 | if (!conn) | ||
| 1317 | goto unlock; | ||
| 1318 | |||
| 1319 | if (!ev->status) { | ||
| 1320 | conn->handle = __le16_to_cpu(ev->handle); | ||
| 1321 | conn->state = BT_CONNECTED; | ||
| 1322 | } else | ||
| 1323 | conn->state = BT_CLOSED; | ||
| 1324 | |||
| 1325 | hci_proto_connect_cfm(conn, ev->status); | ||
| 1326 | if (ev->status) | ||
| 1327 | hci_conn_del(conn); | ||
| 1328 | |||
| 1329 | unlock: | ||
| 1330 | hci_dev_unlock(hdev); | ||
| 1331 | } | ||
| 1332 | |||
| 1333 | static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 1334 | { | ||
| 1335 | BT_DBG("%s", hdev->name); | ||
| 1336 | } | ||
| 1337 | |||
| 1125 | static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb) | 1338 | static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb) |
| 1126 | { | 1339 | { |
| 1127 | struct hci_ev_sniff_subrate *ev = (struct hci_ev_sniff_subrate *) skb->data; | 1340 | struct hci_ev_sniff_subrate *ev = (void *) skb->data; |
| 1128 | struct hci_conn *conn; | 1341 | struct hci_conn *conn; |
| 1129 | 1342 | ||
| 1130 | BT_DBG("%s status %d", hdev->name, ev->status); | 1343 | BT_DBG("%s status %d", hdev->name, ev->status); |
| @@ -1138,22 +1351,42 @@ static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *s | |||
| 1138 | hci_dev_unlock(hdev); | 1351 | hci_dev_unlock(hdev); |
| 1139 | } | 1352 | } |
| 1140 | 1353 | ||
| 1141 | void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | 1354 | static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) |
| 1142 | { | 1355 | { |
| 1143 | struct hci_event_hdr *hdr = (struct hci_event_hdr *) skb->data; | 1356 | struct inquiry_data data; |
| 1144 | struct hci_ev_cmd_complete *ec; | 1357 | struct extended_inquiry_info *info = (void *) (skb->data + 1); |
| 1145 | struct hci_ev_cmd_status *cs; | 1358 | int num_rsp = *((__u8 *) skb->data); |
| 1146 | u16 opcode, ocf, ogf; | ||
| 1147 | 1359 | ||
| 1148 | skb_pull(skb, HCI_EVENT_HDR_SIZE); | 1360 | BT_DBG("%s num_rsp %d", hdev->name, num_rsp); |
| 1149 | 1361 | ||
| 1150 | BT_DBG("%s evt 0x%x", hdev->name, hdr->evt); | 1362 | if (!num_rsp) |
| 1363 | return; | ||
| 1151 | 1364 | ||
| 1152 | switch (hdr->evt) { | 1365 | hci_dev_lock(hdev); |
| 1153 | case HCI_EV_NUM_COMP_PKTS: | 1366 | |
| 1154 | hci_num_comp_pkts_evt(hdev, skb); | 1367 | for (; num_rsp; num_rsp--) { |
| 1155 | break; | 1368 | bacpy(&data.bdaddr, &info->bdaddr); |
| 1369 | data.pscan_rep_mode = info->pscan_rep_mode; | ||
| 1370 | data.pscan_period_mode = info->pscan_period_mode; | ||
| 1371 | data.pscan_mode = 0x00; | ||
| 1372 | memcpy(data.dev_class, info->dev_class, 3); | ||
| 1373 | data.clock_offset = info->clock_offset; | ||
| 1374 | data.rssi = info->rssi; | ||
| 1375 | info++; | ||
| 1376 | hci_inquiry_cache_update(hdev, &data); | ||
| 1377 | } | ||
| 1156 | 1378 | ||
| 1379 | hci_dev_unlock(hdev); | ||
| 1380 | } | ||
| 1381 | |||
| 1382 | void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | ||
| 1383 | { | ||
| 1384 | struct hci_event_hdr *hdr = (void *) skb->data; | ||
| 1385 | __u8 event = hdr->evt; | ||
| 1386 | |||
| 1387 | skb_pull(skb, HCI_EVENT_HDR_SIZE); | ||
| 1388 | |||
| 1389 | switch (event) { | ||
| 1157 | case HCI_EV_INQUIRY_COMPLETE: | 1390 | case HCI_EV_INQUIRY_COMPLETE: |
| 1158 | hci_inquiry_complete_evt(hdev, skb); | 1391 | hci_inquiry_complete_evt(hdev, skb); |
| 1159 | break; | 1392 | break; |
| @@ -1162,44 +1395,64 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
| 1162 | hci_inquiry_result_evt(hdev, skb); | 1395 | hci_inquiry_result_evt(hdev, skb); |
| 1163 | break; | 1396 | break; |
| 1164 | 1397 | ||
| 1165 | case HCI_EV_INQUIRY_RESULT_WITH_RSSI: | 1398 | case HCI_EV_CONN_COMPLETE: |
| 1166 | hci_inquiry_result_with_rssi_evt(hdev, skb); | 1399 | hci_conn_complete_evt(hdev, skb); |
| 1167 | break; | ||
| 1168 | |||
| 1169 | case HCI_EV_EXTENDED_INQUIRY_RESULT: | ||
| 1170 | hci_extended_inquiry_result_evt(hdev, skb); | ||
| 1171 | break; | 1400 | break; |
| 1172 | 1401 | ||
| 1173 | case HCI_EV_CONN_REQUEST: | 1402 | case HCI_EV_CONN_REQUEST: |
| 1174 | hci_conn_request_evt(hdev, skb); | 1403 | hci_conn_request_evt(hdev, skb); |
| 1175 | break; | 1404 | break; |
| 1176 | 1405 | ||
| 1177 | case HCI_EV_CONN_COMPLETE: | ||
| 1178 | hci_conn_complete_evt(hdev, skb); | ||
| 1179 | break; | ||
| 1180 | |||
| 1181 | case HCI_EV_DISCONN_COMPLETE: | 1406 | case HCI_EV_DISCONN_COMPLETE: |
| 1182 | hci_disconn_complete_evt(hdev, skb); | 1407 | hci_disconn_complete_evt(hdev, skb); |
| 1183 | break; | 1408 | break; |
| 1184 | 1409 | ||
| 1185 | case HCI_EV_ROLE_CHANGE: | ||
| 1186 | hci_role_change_evt(hdev, skb); | ||
| 1187 | break; | ||
| 1188 | |||
| 1189 | case HCI_EV_MODE_CHANGE: | ||
| 1190 | hci_mode_change_evt(hdev, skb); | ||
| 1191 | break; | ||
| 1192 | |||
| 1193 | case HCI_EV_AUTH_COMPLETE: | 1410 | case HCI_EV_AUTH_COMPLETE: |
| 1194 | hci_auth_complete_evt(hdev, skb); | 1411 | hci_auth_complete_evt(hdev, skb); |
| 1195 | break; | 1412 | break; |
| 1196 | 1413 | ||
| 1414 | case HCI_EV_REMOTE_NAME: | ||
| 1415 | hci_remote_name_evt(hdev, skb); | ||
| 1416 | break; | ||
| 1417 | |||
| 1197 | case HCI_EV_ENCRYPT_CHANGE: | 1418 | case HCI_EV_ENCRYPT_CHANGE: |
| 1198 | hci_encrypt_change_evt(hdev, skb); | 1419 | hci_encrypt_change_evt(hdev, skb); |
| 1199 | break; | 1420 | break; |
| 1200 | 1421 | ||
| 1201 | case HCI_EV_CHANGE_CONN_LINK_KEY_COMPLETE: | 1422 | case HCI_EV_CHANGE_LINK_KEY_COMPLETE: |
| 1202 | hci_change_conn_link_key_complete_evt(hdev, skb); | 1423 | hci_change_link_key_complete_evt(hdev, skb); |
| 1424 | break; | ||
| 1425 | |||
| 1426 | case HCI_EV_REMOTE_FEATURES: | ||
| 1427 | hci_remote_features_evt(hdev, skb); | ||
| 1428 | break; | ||
| 1429 | |||
| 1430 | case HCI_EV_REMOTE_VERSION: | ||
| 1431 | hci_remote_version_evt(hdev, skb); | ||
| 1432 | break; | ||
| 1433 | |||
| 1434 | case HCI_EV_QOS_SETUP_COMPLETE: | ||
| 1435 | hci_qos_setup_complete_evt(hdev, skb); | ||
| 1436 | break; | ||
| 1437 | |||
| 1438 | case HCI_EV_CMD_COMPLETE: | ||
| 1439 | hci_cmd_complete_evt(hdev, skb); | ||
| 1440 | break; | ||
| 1441 | |||
| 1442 | case HCI_EV_CMD_STATUS: | ||
| 1443 | hci_cmd_status_evt(hdev, skb); | ||
| 1444 | break; | ||
| 1445 | |||
| 1446 | case HCI_EV_ROLE_CHANGE: | ||
| 1447 | hci_role_change_evt(hdev, skb); | ||
| 1448 | break; | ||
| 1449 | |||
| 1450 | case HCI_EV_NUM_COMP_PKTS: | ||
| 1451 | hci_num_comp_pkts_evt(hdev, skb); | ||
| 1452 | break; | ||
| 1453 | |||
| 1454 | case HCI_EV_MODE_CHANGE: | ||
| 1455 | hci_mode_change_evt(hdev, skb); | ||
| 1203 | break; | 1456 | break; |
| 1204 | 1457 | ||
| 1205 | case HCI_EV_PIN_CODE_REQ: | 1458 | case HCI_EV_PIN_CODE_REQ: |
| @@ -1214,10 +1467,6 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
| 1214 | hci_link_key_notify_evt(hdev, skb); | 1467 | hci_link_key_notify_evt(hdev, skb); |
| 1215 | break; | 1468 | break; |
| 1216 | 1469 | ||
| 1217 | case HCI_EV_REMOTE_FEATURES: | ||
| 1218 | hci_remote_features_evt(hdev, skb); | ||
| 1219 | break; | ||
| 1220 | |||
| 1221 | case HCI_EV_CLOCK_OFFSET: | 1470 | case HCI_EV_CLOCK_OFFSET: |
| 1222 | hci_clock_offset_evt(hdev, skb); | 1471 | hci_clock_offset_evt(hdev, skb); |
| 1223 | break; | 1472 | break; |
| @@ -1226,82 +1475,32 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
| 1226 | hci_pscan_rep_mode_evt(hdev, skb); | 1475 | hci_pscan_rep_mode_evt(hdev, skb); |
| 1227 | break; | 1476 | break; |
| 1228 | 1477 | ||
| 1229 | case HCI_EV_SNIFF_SUBRATE: | 1478 | case HCI_EV_INQUIRY_RESULT_WITH_RSSI: |
| 1230 | hci_sniff_subrate_evt(hdev, skb); | 1479 | hci_inquiry_result_with_rssi_evt(hdev, skb); |
| 1231 | break; | 1480 | break; |
| 1232 | 1481 | ||
| 1233 | case HCI_EV_CMD_STATUS: | 1482 | case HCI_EV_REMOTE_EXT_FEATURES: |
| 1234 | cs = (struct hci_ev_cmd_status *) skb->data; | 1483 | hci_remote_ext_features_evt(hdev, skb); |
| 1235 | skb_pull(skb, sizeof(cs)); | ||
| 1236 | |||
| 1237 | opcode = __le16_to_cpu(cs->opcode); | ||
| 1238 | ogf = hci_opcode_ogf(opcode); | ||
| 1239 | ocf = hci_opcode_ocf(opcode); | ||
| 1240 | |||
| 1241 | switch (ogf) { | ||
| 1242 | case OGF_INFO_PARAM: | ||
| 1243 | hci_cs_info_param(hdev, ocf, cs->status); | ||
| 1244 | break; | ||
| 1245 | |||
| 1246 | case OGF_HOST_CTL: | ||
| 1247 | hci_cs_host_ctl(hdev, ocf, cs->status); | ||
| 1248 | break; | ||
| 1249 | |||
| 1250 | case OGF_LINK_CTL: | ||
| 1251 | hci_cs_link_ctl(hdev, ocf, cs->status); | ||
| 1252 | break; | ||
| 1253 | |||
| 1254 | case OGF_LINK_POLICY: | ||
| 1255 | hci_cs_link_policy(hdev, ocf, cs->status); | ||
| 1256 | break; | ||
| 1257 | |||
| 1258 | default: | ||
| 1259 | BT_DBG("%s Command Status OGF %x", hdev->name, ogf); | ||
| 1260 | break; | ||
| 1261 | } | ||
| 1262 | |||
| 1263 | if (cs->ncmd) { | ||
| 1264 | atomic_set(&hdev->cmd_cnt, 1); | ||
| 1265 | if (!skb_queue_empty(&hdev->cmd_q)) | ||
| 1266 | hci_sched_cmd(hdev); | ||
| 1267 | } | ||
| 1268 | break; | 1484 | break; |
| 1269 | 1485 | ||
| 1270 | case HCI_EV_CMD_COMPLETE: | 1486 | case HCI_EV_SYNC_CONN_COMPLETE: |
| 1271 | ec = (struct hci_ev_cmd_complete *) skb->data; | 1487 | hci_sync_conn_complete_evt(hdev, skb); |
| 1272 | skb_pull(skb, sizeof(*ec)); | 1488 | break; |
| 1273 | |||
| 1274 | opcode = __le16_to_cpu(ec->opcode); | ||
| 1275 | ogf = hci_opcode_ogf(opcode); | ||
| 1276 | ocf = hci_opcode_ocf(opcode); | ||
| 1277 | |||
| 1278 | switch (ogf) { | ||
| 1279 | case OGF_INFO_PARAM: | ||
| 1280 | hci_cc_info_param(hdev, ocf, skb); | ||
| 1281 | break; | ||
| 1282 | |||
| 1283 | case OGF_HOST_CTL: | ||
| 1284 | hci_cc_host_ctl(hdev, ocf, skb); | ||
| 1285 | break; | ||
| 1286 | 1489 | ||
| 1287 | case OGF_LINK_CTL: | 1490 | case HCI_EV_SYNC_CONN_CHANGED: |
| 1288 | hci_cc_link_ctl(hdev, ocf, skb); | 1491 | hci_sync_conn_changed_evt(hdev, skb); |
| 1289 | break; | 1492 | break; |
| 1290 | 1493 | ||
| 1291 | case OGF_LINK_POLICY: | 1494 | case HCI_EV_SNIFF_SUBRATE: |
| 1292 | hci_cc_link_policy(hdev, ocf, skb); | 1495 | hci_sniff_subrate_evt(hdev, skb); |
| 1293 | break; | 1496 | break; |
| 1294 | 1497 | ||
| 1295 | default: | 1498 | case HCI_EV_EXTENDED_INQUIRY_RESULT: |
| 1296 | BT_DBG("%s Command Completed OGF %x", hdev->name, ogf); | 1499 | hci_extended_inquiry_result_evt(hdev, skb); |
| 1297 | break; | 1500 | break; |
| 1298 | } | ||
| 1299 | 1501 | ||
| 1300 | if (ec->ncmd) { | 1502 | default: |
| 1301 | atomic_set(&hdev->cmd_cnt, 1); | 1503 | BT_DBG("%s event 0x%x", hdev->name, event); |
| 1302 | if (!skb_queue_empty(&hdev->cmd_q)) | ||
| 1303 | hci_sched_cmd(hdev); | ||
| 1304 | } | ||
| 1305 | break; | 1504 | break; |
| 1306 | } | 1505 | } |
| 1307 | 1506 | ||
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 43dd6373bff9..14991323c273 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c | |||
| @@ -451,7 +451,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
| 451 | goto drop; | 451 | goto drop; |
| 452 | } | 452 | } |
| 453 | 453 | ||
| 454 | if (test_bit(HCI_RAW, &hdev->flags) || (ogf == OGF_VENDOR_CMD)) { | 454 | if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) { |
| 455 | skb_queue_tail(&hdev->raw_q, skb); | 455 | skb_queue_tail(&hdev->raw_q, skb); |
| 456 | hci_sched_tx(hdev); | 456 | hci_sched_tx(hdev); |
| 457 | } else { | 457 | } else { |
| @@ -645,7 +645,7 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol) | |||
| 645 | 645 | ||
| 646 | sock->ops = &hci_sock_ops; | 646 | sock->ops = &hci_sock_ops; |
| 647 | 647 | ||
| 648 | sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, 1); | 648 | sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto); |
| 649 | if (!sk) | 649 | if (!sk) |
| 650 | return -ENOMEM; | 650 | return -ENOMEM; |
| 651 | 651 | ||
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index 25835403d659..cef1e3e1881c 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c | |||
| @@ -41,6 +41,26 @@ static ssize_t show_type(struct device *dev, struct device_attribute *attr, char | |||
| 41 | return sprintf(buf, "%s\n", typetostr(hdev->type)); | 41 | return sprintf(buf, "%s\n", typetostr(hdev->type)); |
| 42 | } | 42 | } |
| 43 | 43 | ||
| 44 | static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) | ||
| 45 | { | ||
| 46 | struct hci_dev *hdev = dev_get_drvdata(dev); | ||
| 47 | char name[249]; | ||
| 48 | int i; | ||
| 49 | |||
| 50 | for (i = 0; i < 248; i++) | ||
| 51 | name[i] = hdev->dev_name[i]; | ||
| 52 | |||
| 53 | name[248] = '\0'; | ||
| 54 | return sprintf(buf, "%s\n", name); | ||
| 55 | } | ||
| 56 | |||
| 57 | static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf) | ||
| 58 | { | ||
| 59 | struct hci_dev *hdev = dev_get_drvdata(dev); | ||
| 60 | return sprintf(buf, "0x%.2x%.2x%.2x\n", | ||
| 61 | hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); | ||
| 62 | } | ||
| 63 | |||
| 44 | static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf) | 64 | static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf) |
| 45 | { | 65 | { |
| 46 | struct hci_dev *hdev = dev_get_drvdata(dev); | 66 | struct hci_dev *hdev = dev_get_drvdata(dev); |
| @@ -49,6 +69,17 @@ static ssize_t show_address(struct device *dev, struct device_attribute *attr, c | |||
| 49 | return sprintf(buf, "%s\n", batostr(&bdaddr)); | 69 | return sprintf(buf, "%s\n", batostr(&bdaddr)); |
| 50 | } | 70 | } |
| 51 | 71 | ||
| 72 | static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf) | ||
| 73 | { | ||
| 74 | struct hci_dev *hdev = dev_get_drvdata(dev); | ||
| 75 | |||
| 76 | return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", | ||
| 77 | hdev->features[0], hdev->features[1], | ||
| 78 | hdev->features[2], hdev->features[3], | ||
| 79 | hdev->features[4], hdev->features[5], | ||
| 80 | hdev->features[6], hdev->features[7]); | ||
| 81 | } | ||
| 82 | |||
| 52 | static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf) | 83 | static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf) |
| 53 | { | 84 | { |
| 54 | struct hci_dev *hdev = dev_get_drvdata(dev); | 85 | struct hci_dev *hdev = dev_get_drvdata(dev); |
| @@ -170,7 +201,10 @@ static ssize_t store_sniff_min_interval(struct device *dev, struct device_attrib | |||
| 170 | } | 201 | } |
| 171 | 202 | ||
| 172 | static DEVICE_ATTR(type, S_IRUGO, show_type, NULL); | 203 | static DEVICE_ATTR(type, S_IRUGO, show_type, NULL); |
| 204 | static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); | ||
| 205 | static DEVICE_ATTR(class, S_IRUGO, show_class, NULL); | ||
| 173 | static DEVICE_ATTR(address, S_IRUGO, show_address, NULL); | 206 | static DEVICE_ATTR(address, S_IRUGO, show_address, NULL); |
| 207 | static DEVICE_ATTR(features, S_IRUGO, show_features, NULL); | ||
| 174 | static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL); | 208 | static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL); |
| 175 | static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL); | 209 | static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL); |
| 176 | static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL); | 210 | static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL); |
| @@ -185,7 +219,10 @@ static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR, | |||
| 185 | 219 | ||
| 186 | static struct device_attribute *bt_attrs[] = { | 220 | static struct device_attribute *bt_attrs[] = { |
| 187 | &dev_attr_type, | 221 | &dev_attr_type, |
| 222 | &dev_attr_name, | ||
| 223 | &dev_attr_class, | ||
| 188 | &dev_attr_address, | 224 | &dev_attr_address, |
| 225 | &dev_attr_features, | ||
| 189 | &dev_attr_manufacturer, | 226 | &dev_attr_manufacturer, |
| 190 | &dev_attr_hci_version, | 227 | &dev_attr_hci_version, |
| 191 | &dev_attr_hci_revision, | 228 | &dev_attr_hci_revision, |
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 66c736953cfe..4bbacddeb49d 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c | |||
| @@ -247,7 +247,7 @@ static inline int hidp_queue_report(struct hidp_session *session, unsigned char | |||
| 247 | { | 247 | { |
| 248 | struct sk_buff *skb; | 248 | struct sk_buff *skb; |
| 249 | 249 | ||
| 250 | BT_DBG("session %p hid %p data %p size %d", session, device, data, size); | 250 | BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size); |
| 251 | 251 | ||
| 252 | if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) { | 252 | if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) { |
| 253 | BT_ERR("Can't allocate memory for new frame"); | 253 | BT_ERR("Can't allocate memory for new frame"); |
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c index 1de2b6fbcac0..3292b956a7c4 100644 --- a/net/bluetooth/hidp/sock.c +++ b/net/bluetooth/hidp/sock.c | |||
| @@ -255,7 +255,7 @@ static int hidp_sock_create(struct net *net, struct socket *sock, int protocol) | |||
| 255 | if (sock->type != SOCK_RAW) | 255 | if (sock->type != SOCK_RAW) |
| 256 | return -ESOCKTNOSUPPORT; | 256 | return -ESOCKTNOSUPPORT; |
| 257 | 257 | ||
| 258 | sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto, 1); | 258 | sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto); |
| 259 | if (!sk) | 259 | if (!sk) |
| 260 | return -ENOMEM; | 260 | return -ENOMEM; |
| 261 | 261 | ||
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 36ef27b625db..477e052b17b5 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c | |||
| @@ -55,7 +55,9 @@ | |||
| 55 | #define BT_DBG(D...) | 55 | #define BT_DBG(D...) |
| 56 | #endif | 56 | #endif |
| 57 | 57 | ||
| 58 | #define VERSION "2.8" | 58 | #define VERSION "2.9" |
| 59 | |||
| 60 | static u32 l2cap_feat_mask = 0x0000; | ||
| 59 | 61 | ||
| 60 | static const struct proto_ops l2cap_sock_ops; | 62 | static const struct proto_ops l2cap_sock_ops; |
| 61 | 63 | ||
| @@ -258,7 +260,119 @@ static void l2cap_chan_del(struct sock *sk, int err) | |||
| 258 | sk->sk_state_change(sk); | 260 | sk->sk_state_change(sk); |
| 259 | } | 261 | } |
| 260 | 262 | ||
| 263 | static inline u8 l2cap_get_ident(struct l2cap_conn *conn) | ||
| 264 | { | ||
| 265 | u8 id; | ||
| 266 | |||
| 267 | /* Get next available identificator. | ||
| 268 | * 1 - 128 are used by kernel. | ||
| 269 | * 129 - 199 are reserved. | ||
| 270 | * 200 - 254 are used by utilities like l2ping, etc. | ||
| 271 | */ | ||
| 272 | |||
| 273 | spin_lock_bh(&conn->lock); | ||
| 274 | |||
| 275 | if (++conn->tx_ident > 128) | ||
| 276 | conn->tx_ident = 1; | ||
| 277 | |||
| 278 | id = conn->tx_ident; | ||
| 279 | |||
| 280 | spin_unlock_bh(&conn->lock); | ||
| 281 | |||
| 282 | return id; | ||
| 283 | } | ||
| 284 | |||
| 285 | static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data) | ||
| 286 | { | ||
| 287 | struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); | ||
| 288 | |||
| 289 | BT_DBG("code 0x%2.2x", code); | ||
| 290 | |||
| 291 | if (!skb) | ||
| 292 | return -ENOMEM; | ||
| 293 | |||
| 294 | return hci_send_acl(conn->hcon, skb, 0); | ||
| 295 | } | ||
| 296 | |||
| 261 | /* ---- L2CAP connections ---- */ | 297 | /* ---- L2CAP connections ---- */ |
| 298 | static void l2cap_conn_start(struct l2cap_conn *conn) | ||
| 299 | { | ||
| 300 | struct l2cap_chan_list *l = &conn->chan_list; | ||
| 301 | struct sock *sk; | ||
| 302 | |||
| 303 | BT_DBG("conn %p", conn); | ||
| 304 | |||
| 305 | read_lock(&l->lock); | ||
| 306 | |||
| 307 | for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { | ||
| 308 | bh_lock_sock(sk); | ||
| 309 | |||
| 310 | if (sk->sk_type != SOCK_SEQPACKET) { | ||
| 311 | l2cap_sock_clear_timer(sk); | ||
| 312 | sk->sk_state = BT_CONNECTED; | ||
| 313 | sk->sk_state_change(sk); | ||
| 314 | } else if (sk->sk_state == BT_CONNECT) { | ||
| 315 | struct l2cap_conn_req req; | ||
| 316 | l2cap_pi(sk)->ident = l2cap_get_ident(conn); | ||
| 317 | req.scid = cpu_to_le16(l2cap_pi(sk)->scid); | ||
| 318 | req.psm = l2cap_pi(sk)->psm; | ||
| 319 | l2cap_send_cmd(conn, l2cap_pi(sk)->ident, | ||
| 320 | L2CAP_CONN_REQ, sizeof(req), &req); | ||
| 321 | } | ||
| 322 | |||
| 323 | bh_unlock_sock(sk); | ||
| 324 | } | ||
| 325 | |||
| 326 | read_unlock(&l->lock); | ||
| 327 | } | ||
| 328 | |||
| 329 | static void l2cap_conn_ready(struct l2cap_conn *conn) | ||
| 330 | { | ||
| 331 | BT_DBG("conn %p", conn); | ||
| 332 | |||
| 333 | if (conn->chan_list.head || !hlist_empty(&l2cap_sk_list.head)) { | ||
| 334 | struct l2cap_info_req req; | ||
| 335 | |||
| 336 | req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); | ||
| 337 | |||
| 338 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; | ||
| 339 | conn->info_ident = l2cap_get_ident(conn); | ||
| 340 | |||
| 341 | mod_timer(&conn->info_timer, | ||
| 342 | jiffies + msecs_to_jiffies(L2CAP_INFO_TIMEOUT)); | ||
| 343 | |||
| 344 | l2cap_send_cmd(conn, conn->info_ident, | ||
| 345 | L2CAP_INFO_REQ, sizeof(req), &req); | ||
| 346 | } | ||
| 347 | } | ||
| 348 | |||
| 349 | /* Notify sockets that we cannot guaranty reliability anymore */ | ||
| 350 | static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err) | ||
| 351 | { | ||
| 352 | struct l2cap_chan_list *l = &conn->chan_list; | ||
| 353 | struct sock *sk; | ||
| 354 | |||
| 355 | BT_DBG("conn %p", conn); | ||
| 356 | |||
| 357 | read_lock(&l->lock); | ||
| 358 | |||
| 359 | for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { | ||
| 360 | if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE) | ||
| 361 | sk->sk_err = err; | ||
| 362 | } | ||
| 363 | |||
| 364 | read_unlock(&l->lock); | ||
| 365 | } | ||
| 366 | |||
| 367 | static void l2cap_info_timeout(unsigned long arg) | ||
| 368 | { | ||
| 369 | struct l2cap_conn *conn = (void *) arg; | ||
| 370 | |||
| 371 | conn->info_ident = 0; | ||
| 372 | |||
| 373 | l2cap_conn_start(conn); | ||
| 374 | } | ||
| 375 | |||
| 262 | static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) | 376 | static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) |
| 263 | { | 377 | { |
| 264 | struct l2cap_conn *conn = hcon->l2cap_data; | 378 | struct l2cap_conn *conn = hcon->l2cap_data; |
| @@ -279,6 +393,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) | |||
| 279 | conn->src = &hcon->hdev->bdaddr; | 393 | conn->src = &hcon->hdev->bdaddr; |
| 280 | conn->dst = &hcon->dst; | 394 | conn->dst = &hcon->dst; |
| 281 | 395 | ||
| 396 | conn->feat_mask = 0; | ||
| 397 | |||
| 398 | init_timer(&conn->info_timer); | ||
| 399 | conn->info_timer.function = l2cap_info_timeout; | ||
| 400 | conn->info_timer.data = (unsigned long) conn; | ||
| 401 | |||
| 282 | spin_lock_init(&conn->lock); | 402 | spin_lock_init(&conn->lock); |
| 283 | rwlock_init(&conn->chan_list.lock); | 403 | rwlock_init(&conn->chan_list.lock); |
| 284 | 404 | ||
| @@ -318,40 +438,6 @@ static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, stru | |||
| 318 | write_unlock_bh(&l->lock); | 438 | write_unlock_bh(&l->lock); |
| 319 | } | 439 | } |
| 320 | 440 | ||
| 321 | static inline u8 l2cap_get_ident(struct l2cap_conn *conn) | ||
| 322 | { | ||
| 323 | u8 id; | ||
| 324 | |||
| 325 | /* Get next available identificator. | ||
| 326 | * 1 - 128 are used by kernel. | ||
| 327 | * 129 - 199 are reserved. | ||
| 328 | * 200 - 254 are used by utilities like l2ping, etc. | ||
| 329 | */ | ||
| 330 | |||
| 331 | spin_lock_bh(&conn->lock); | ||
| 332 | |||
| 333 | if (++conn->tx_ident > 128) | ||
| 334 | conn->tx_ident = 1; | ||
| 335 | |||
| 336 | id = conn->tx_ident; | ||
| 337 | |||
| 338 | spin_unlock_bh(&conn->lock); | ||
| 339 | |||
| 340 | return id; | ||
| 341 | } | ||
| 342 | |||
| 343 | static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data) | ||
| 344 | { | ||
| 345 | struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); | ||
| 346 | |||
| 347 | BT_DBG("code 0x%2.2x", code); | ||
| 348 | |||
| 349 | if (!skb) | ||
| 350 | return -ENOMEM; | ||
| 351 | |||
| 352 | return hci_send_acl(conn->hcon, skb, 0); | ||
| 353 | } | ||
| 354 | |||
| 355 | /* ---- Socket interface ---- */ | 441 | /* ---- Socket interface ---- */ |
| 356 | static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src) | 442 | static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src) |
| 357 | { | 443 | { |
| @@ -508,7 +594,6 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent) | |||
| 508 | 594 | ||
| 509 | /* Default config options */ | 595 | /* Default config options */ |
| 510 | pi->conf_len = 0; | 596 | pi->conf_len = 0; |
| 511 | pi->conf_mtu = L2CAP_DEFAULT_MTU; | ||
| 512 | pi->flush_to = L2CAP_DEFAULT_FLUSH_TO; | 597 | pi->flush_to = L2CAP_DEFAULT_FLUSH_TO; |
| 513 | } | 598 | } |
| 514 | 599 | ||
| @@ -522,7 +607,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p | |||
| 522 | { | 607 | { |
| 523 | struct sock *sk; | 608 | struct sock *sk; |
| 524 | 609 | ||
| 525 | sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto, 1); | 610 | sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto); |
| 526 | if (!sk) | 611 | if (!sk) |
| 527 | return NULL; | 612 | return NULL; |
| 528 | 613 | ||
| @@ -530,7 +615,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p | |||
| 530 | INIT_LIST_HEAD(&bt_sk(sk)->accept_q); | 615 | INIT_LIST_HEAD(&bt_sk(sk)->accept_q); |
| 531 | 616 | ||
| 532 | sk->sk_destruct = l2cap_sock_destruct; | 617 | sk->sk_destruct = l2cap_sock_destruct; |
| 533 | sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT; | 618 | sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT); |
| 534 | 619 | ||
| 535 | sock_reset_flag(sk, SOCK_ZAPPED); | 620 | sock_reset_flag(sk, SOCK_ZAPPED); |
| 536 | 621 | ||
| @@ -650,6 +735,11 @@ static int l2cap_do_connect(struct sock *sk) | |||
| 650 | l2cap_sock_set_timer(sk, sk->sk_sndtimeo); | 735 | l2cap_sock_set_timer(sk, sk->sk_sndtimeo); |
| 651 | 736 | ||
| 652 | if (hcon->state == BT_CONNECTED) { | 737 | if (hcon->state == BT_CONNECTED) { |
| 738 | if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) { | ||
| 739 | l2cap_conn_ready(conn); | ||
| 740 | goto done; | ||
| 741 | } | ||
| 742 | |||
| 653 | if (sk->sk_type == SOCK_SEQPACKET) { | 743 | if (sk->sk_type == SOCK_SEQPACKET) { |
| 654 | struct l2cap_conn_req req; | 744 | struct l2cap_conn_req req; |
| 655 | l2cap_pi(sk)->ident = l2cap_get_ident(conn); | 745 | l2cap_pi(sk)->ident = l2cap_get_ident(conn); |
| @@ -958,7 +1048,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch | |||
| 958 | opts.imtu = l2cap_pi(sk)->imtu; | 1048 | opts.imtu = l2cap_pi(sk)->imtu; |
| 959 | opts.omtu = l2cap_pi(sk)->omtu; | 1049 | opts.omtu = l2cap_pi(sk)->omtu; |
| 960 | opts.flush_to = l2cap_pi(sk)->flush_to; | 1050 | opts.flush_to = l2cap_pi(sk)->flush_to; |
| 961 | opts.mode = 0x00; | 1051 | opts.mode = L2CAP_MODE_BASIC; |
| 962 | 1052 | ||
| 963 | len = min_t(unsigned int, sizeof(opts), optlen); | 1053 | len = min_t(unsigned int, sizeof(opts), optlen); |
| 964 | if (copy_from_user((char *) &opts, optval, len)) { | 1054 | if (copy_from_user((char *) &opts, optval, len)) { |
| @@ -1007,7 +1097,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch | |||
| 1007 | opts.imtu = l2cap_pi(sk)->imtu; | 1097 | opts.imtu = l2cap_pi(sk)->imtu; |
| 1008 | opts.omtu = l2cap_pi(sk)->omtu; | 1098 | opts.omtu = l2cap_pi(sk)->omtu; |
| 1009 | opts.flush_to = l2cap_pi(sk)->flush_to; | 1099 | opts.flush_to = l2cap_pi(sk)->flush_to; |
| 1010 | opts.mode = 0x00; | 1100 | opts.mode = L2CAP_MODE_BASIC; |
| 1011 | 1101 | ||
| 1012 | len = min_t(unsigned int, len, sizeof(opts)); | 1102 | len = min_t(unsigned int, len, sizeof(opts)); |
| 1013 | if (copy_to_user(optval, (char *) &opts, len)) | 1103 | if (copy_to_user(optval, (char *) &opts, len)) |
| @@ -1084,52 +1174,6 @@ static int l2cap_sock_release(struct socket *sock) | |||
| 1084 | return err; | 1174 | return err; |
| 1085 | } | 1175 | } |
| 1086 | 1176 | ||
| 1087 | static void l2cap_conn_ready(struct l2cap_conn *conn) | ||
| 1088 | { | ||
| 1089 | struct l2cap_chan_list *l = &conn->chan_list; | ||
| 1090 | struct sock *sk; | ||
| 1091 | |||
| 1092 | BT_DBG("conn %p", conn); | ||
| 1093 | |||
| 1094 | read_lock(&l->lock); | ||
| 1095 | |||
| 1096 | for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { | ||
| 1097 | bh_lock_sock(sk); | ||
| 1098 | |||
| 1099 | if (sk->sk_type != SOCK_SEQPACKET) { | ||
| 1100 | l2cap_sock_clear_timer(sk); | ||
| 1101 | sk->sk_state = BT_CONNECTED; | ||
| 1102 | sk->sk_state_change(sk); | ||
| 1103 | } else if (sk->sk_state == BT_CONNECT) { | ||
| 1104 | struct l2cap_conn_req req; | ||
| 1105 | l2cap_pi(sk)->ident = l2cap_get_ident(conn); | ||
| 1106 | req.scid = cpu_to_le16(l2cap_pi(sk)->scid); | ||
| 1107 | req.psm = l2cap_pi(sk)->psm; | ||
| 1108 | l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req); | ||
| 1109 | } | ||
| 1110 | |||
| 1111 | bh_unlock_sock(sk); | ||
| 1112 | } | ||
| 1113 | |||
| 1114 | read_unlock(&l->lock); | ||
| 1115 | } | ||
| 1116 | |||
| 1117 | /* Notify sockets that we cannot guaranty reliability anymore */ | ||
| 1118 | static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err) | ||
| 1119 | { | ||
| 1120 | struct l2cap_chan_list *l = &conn->chan_list; | ||
| 1121 | struct sock *sk; | ||
| 1122 | |||
| 1123 | BT_DBG("conn %p", conn); | ||
| 1124 | |||
| 1125 | read_lock(&l->lock); | ||
| 1126 | for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { | ||
| 1127 | if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE) | ||
| 1128 | sk->sk_err = err; | ||
| 1129 | } | ||
| 1130 | read_unlock(&l->lock); | ||
| 1131 | } | ||
| 1132 | |||
| 1133 | static void l2cap_chan_ready(struct sock *sk) | 1177 | static void l2cap_chan_ready(struct sock *sk) |
| 1134 | { | 1178 | { |
| 1135 | struct sock *parent = bt_sk(sk)->parent; | 1179 | struct sock *parent = bt_sk(sk)->parent; |
| @@ -1256,11 +1300,11 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned | |||
| 1256 | break; | 1300 | break; |
| 1257 | 1301 | ||
| 1258 | case 2: | 1302 | case 2: |
| 1259 | *val = __le16_to_cpu(*((__le16 *)opt->val)); | 1303 | *val = __le16_to_cpu(*((__le16 *) opt->val)); |
| 1260 | break; | 1304 | break; |
| 1261 | 1305 | ||
| 1262 | case 4: | 1306 | case 4: |
| 1263 | *val = __le32_to_cpu(*((__le32 *)opt->val)); | 1307 | *val = __le32_to_cpu(*((__le32 *) opt->val)); |
| 1264 | break; | 1308 | break; |
| 1265 | 1309 | ||
| 1266 | default: | 1310 | default: |
| @@ -1332,6 +1376,8 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data) | |||
| 1332 | int len = pi->conf_len; | 1376 | int len = pi->conf_len; |
| 1333 | int type, hint, olen; | 1377 | int type, hint, olen; |
| 1334 | unsigned long val; | 1378 | unsigned long val; |
| 1379 | struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; | ||
| 1380 | u16 mtu = L2CAP_DEFAULT_MTU; | ||
| 1335 | u16 result = L2CAP_CONF_SUCCESS; | 1381 | u16 result = L2CAP_CONF_SUCCESS; |
| 1336 | 1382 | ||
| 1337 | BT_DBG("sk %p", sk); | 1383 | BT_DBG("sk %p", sk); |
| @@ -1344,7 +1390,7 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data) | |||
| 1344 | 1390 | ||
| 1345 | switch (type) { | 1391 | switch (type) { |
| 1346 | case L2CAP_CONF_MTU: | 1392 | case L2CAP_CONF_MTU: |
| 1347 | pi->conf_mtu = val; | 1393 | mtu = val; |
| 1348 | break; | 1394 | break; |
| 1349 | 1395 | ||
| 1350 | case L2CAP_CONF_FLUSH_TO: | 1396 | case L2CAP_CONF_FLUSH_TO: |
| @@ -1354,6 +1400,11 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data) | |||
| 1354 | case L2CAP_CONF_QOS: | 1400 | case L2CAP_CONF_QOS: |
| 1355 | break; | 1401 | break; |
| 1356 | 1402 | ||
| 1403 | case L2CAP_CONF_RFC: | ||
| 1404 | if (olen == sizeof(rfc)) | ||
| 1405 | memcpy(&rfc, (void *) val, olen); | ||
| 1406 | break; | ||
| 1407 | |||
| 1357 | default: | 1408 | default: |
| 1358 | if (hint) | 1409 | if (hint) |
| 1359 | break; | 1410 | break; |
| @@ -1368,12 +1419,24 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data) | |||
| 1368 | /* Configure output options and let the other side know | 1419 | /* Configure output options and let the other side know |
| 1369 | * which ones we don't like. */ | 1420 | * which ones we don't like. */ |
| 1370 | 1421 | ||
| 1371 | if (pi->conf_mtu < pi->omtu) | 1422 | if (rfc.mode == L2CAP_MODE_BASIC) { |
| 1423 | if (mtu < pi->omtu) | ||
| 1424 | result = L2CAP_CONF_UNACCEPT; | ||
| 1425 | else { | ||
| 1426 | pi->omtu = mtu; | ||
| 1427 | pi->conf_state |= L2CAP_CONF_OUTPUT_DONE; | ||
| 1428 | } | ||
| 1429 | |||
| 1430 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu); | ||
| 1431 | } else { | ||
| 1372 | result = L2CAP_CONF_UNACCEPT; | 1432 | result = L2CAP_CONF_UNACCEPT; |
| 1373 | else | ||
| 1374 | pi->omtu = pi->conf_mtu; | ||
| 1375 | 1433 | ||
| 1376 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu); | 1434 | memset(&rfc, 0, sizeof(rfc)); |
| 1435 | rfc.mode = L2CAP_MODE_BASIC; | ||
| 1436 | |||
| 1437 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, | ||
| 1438 | sizeof(rfc), (unsigned long) &rfc); | ||
| 1439 | } | ||
| 1377 | } | 1440 | } |
| 1378 | 1441 | ||
| 1379 | rsp->scid = cpu_to_le16(pi->dcid); | 1442 | rsp->scid = cpu_to_le16(pi->dcid); |
| @@ -1397,6 +1460,23 @@ static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 fla | |||
| 1397 | return ptr - data; | 1460 | return ptr - data; |
| 1398 | } | 1461 | } |
| 1399 | 1462 | ||
| 1463 | static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) | ||
| 1464 | { | ||
| 1465 | struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data; | ||
| 1466 | |||
| 1467 | if (rej->reason != 0x0000) | ||
| 1468 | return 0; | ||
| 1469 | |||
| 1470 | if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) && | ||
| 1471 | cmd->ident == conn->info_ident) { | ||
| 1472 | conn->info_ident = 0; | ||
| 1473 | del_timer(&conn->info_timer); | ||
| 1474 | l2cap_conn_start(conn); | ||
| 1475 | } | ||
| 1476 | |||
| 1477 | return 0; | ||
| 1478 | } | ||
| 1479 | |||
| 1400 | static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) | 1480 | static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) |
| 1401 | { | 1481 | { |
| 1402 | struct l2cap_chan_list *list = &conn->chan_list; | 1482 | struct l2cap_chan_list *list = &conn->chan_list; |
| @@ -1577,16 +1657,19 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
| 1577 | 1657 | ||
| 1578 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); | 1658 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); |
| 1579 | 1659 | ||
| 1580 | /* Output config done. */ | ||
| 1581 | l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE; | ||
| 1582 | |||
| 1583 | /* Reset config buffer. */ | 1660 | /* Reset config buffer. */ |
| 1584 | l2cap_pi(sk)->conf_len = 0; | 1661 | l2cap_pi(sk)->conf_len = 0; |
| 1585 | 1662 | ||
| 1663 | if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE)) | ||
| 1664 | goto unlock; | ||
| 1665 | |||
| 1586 | if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) { | 1666 | if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) { |
| 1587 | sk->sk_state = BT_CONNECTED; | 1667 | sk->sk_state = BT_CONNECTED; |
| 1588 | l2cap_chan_ready(sk); | 1668 | l2cap_chan_ready(sk); |
| 1589 | } else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) { | 1669 | goto unlock; |
| 1670 | } | ||
| 1671 | |||
| 1672 | if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) { | ||
| 1590 | u8 req[64]; | 1673 | u8 req[64]; |
| 1591 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, | 1674 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, |
| 1592 | l2cap_build_conf_req(sk, req), req); | 1675 | l2cap_build_conf_req(sk, req), req); |
| @@ -1646,7 +1729,6 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
| 1646 | if (flags & 0x01) | 1729 | if (flags & 0x01) |
| 1647 | goto done; | 1730 | goto done; |
| 1648 | 1731 | ||
| 1649 | /* Input config done */ | ||
| 1650 | l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE; | 1732 | l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE; |
| 1651 | 1733 | ||
| 1652 | if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) { | 1734 | if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) { |
| @@ -1711,16 +1793,27 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd | |||
| 1711 | static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) | 1793 | static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) |
| 1712 | { | 1794 | { |
| 1713 | struct l2cap_info_req *req = (struct l2cap_info_req *) data; | 1795 | struct l2cap_info_req *req = (struct l2cap_info_req *) data; |
| 1714 | struct l2cap_info_rsp rsp; | ||
| 1715 | u16 type; | 1796 | u16 type; |
| 1716 | 1797 | ||
| 1717 | type = __le16_to_cpu(req->type); | 1798 | type = __le16_to_cpu(req->type); |
| 1718 | 1799 | ||
| 1719 | BT_DBG("type 0x%4.4x", type); | 1800 | BT_DBG("type 0x%4.4x", type); |
| 1720 | 1801 | ||
| 1721 | rsp.type = cpu_to_le16(type); | 1802 | if (type == L2CAP_IT_FEAT_MASK) { |
| 1722 | rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP); | 1803 | u8 buf[8]; |
| 1723 | l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp); | 1804 | struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; |
| 1805 | rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); | ||
| 1806 | rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); | ||
| 1807 | put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data); | ||
| 1808 | l2cap_send_cmd(conn, cmd->ident, | ||
| 1809 | L2CAP_INFO_RSP, sizeof(buf), buf); | ||
| 1810 | } else { | ||
| 1811 | struct l2cap_info_rsp rsp; | ||
| 1812 | rsp.type = cpu_to_le16(type); | ||
| 1813 | rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP); | ||
| 1814 | l2cap_send_cmd(conn, cmd->ident, | ||
| 1815 | L2CAP_INFO_RSP, sizeof(rsp), &rsp); | ||
| 1816 | } | ||
| 1724 | 1817 | ||
| 1725 | return 0; | 1818 | return 0; |
| 1726 | } | 1819 | } |
| @@ -1735,6 +1828,15 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm | |||
| 1735 | 1828 | ||
| 1736 | BT_DBG("type 0x%4.4x result 0x%2.2x", type, result); | 1829 | BT_DBG("type 0x%4.4x result 0x%2.2x", type, result); |
| 1737 | 1830 | ||
| 1831 | conn->info_ident = 0; | ||
| 1832 | |||
| 1833 | del_timer(&conn->info_timer); | ||
| 1834 | |||
| 1835 | if (type == L2CAP_IT_FEAT_MASK) | ||
| 1836 | conn->feat_mask = __le32_to_cpu(get_unaligned((__le32 *) rsp->data)); | ||
| 1837 | |||
| 1838 | l2cap_conn_start(conn); | ||
| 1839 | |||
| 1738 | return 0; | 1840 | return 0; |
| 1739 | } | 1841 | } |
| 1740 | 1842 | ||
| @@ -1764,7 +1866,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk | |||
| 1764 | 1866 | ||
| 1765 | switch (cmd.code) { | 1867 | switch (cmd.code) { |
| 1766 | case L2CAP_COMMAND_REJ: | 1868 | case L2CAP_COMMAND_REJ: |
| 1767 | /* FIXME: We should process this */ | 1869 | l2cap_command_rej(conn, &cmd, data); |
| 1768 | break; | 1870 | break; |
| 1769 | 1871 | ||
| 1770 | case L2CAP_CONN_REQ: | 1872 | case L2CAP_CONN_REQ: |
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index bb7220770f2c..e7ac6ba7ecab 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c | |||
| @@ -33,11 +33,11 @@ | |||
| 33 | #include <linux/sched.h> | 33 | #include <linux/sched.h> |
| 34 | #include <linux/signal.h> | 34 | #include <linux/signal.h> |
| 35 | #include <linux/init.h> | 35 | #include <linux/init.h> |
| 36 | #include <linux/freezer.h> | ||
| 37 | #include <linux/wait.h> | 36 | #include <linux/wait.h> |
| 38 | #include <linux/device.h> | 37 | #include <linux/device.h> |
| 39 | #include <linux/net.h> | 38 | #include <linux/net.h> |
| 40 | #include <linux/mutex.h> | 39 | #include <linux/mutex.h> |
| 40 | #include <linux/kthread.h> | ||
| 41 | 41 | ||
| 42 | #include <net/sock.h> | 42 | #include <net/sock.h> |
| 43 | #include <asm/uaccess.h> | 43 | #include <asm/uaccess.h> |
| @@ -68,7 +68,6 @@ static DEFINE_MUTEX(rfcomm_mutex); | |||
| 68 | static unsigned long rfcomm_event; | 68 | static unsigned long rfcomm_event; |
| 69 | 69 | ||
| 70 | static LIST_HEAD(session_list); | 70 | static LIST_HEAD(session_list); |
| 71 | static atomic_t terminate, running; | ||
| 72 | 71 | ||
| 73 | static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len); | 72 | static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len); |
| 74 | static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci); | 73 | static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci); |
| @@ -1850,26 +1849,6 @@ static inline void rfcomm_process_sessions(void) | |||
| 1850 | rfcomm_unlock(); | 1849 | rfcomm_unlock(); |
| 1851 | } | 1850 | } |
| 1852 | 1851 | ||
| 1853 | static void rfcomm_worker(void) | ||
| 1854 | { | ||
| 1855 | BT_DBG(""); | ||
| 1856 | |||
| 1857 | while (!atomic_read(&terminate)) { | ||
| 1858 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 1859 | if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) { | ||
| 1860 | /* No pending events. Let's sleep. | ||
| 1861 | * Incoming connections and data will wake us up. */ | ||
| 1862 | schedule(); | ||
| 1863 | } | ||
| 1864 | set_current_state(TASK_RUNNING); | ||
| 1865 | |||
| 1866 | /* Process stuff */ | ||
| 1867 | clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event); | ||
| 1868 | rfcomm_process_sessions(); | ||
| 1869 | } | ||
| 1870 | return; | ||
| 1871 | } | ||
| 1872 | |||
| 1873 | static int rfcomm_add_listener(bdaddr_t *ba) | 1852 | static int rfcomm_add_listener(bdaddr_t *ba) |
| 1874 | { | 1853 | { |
| 1875 | struct sockaddr_l2 addr; | 1854 | struct sockaddr_l2 addr; |
| @@ -1935,22 +1914,28 @@ static void rfcomm_kill_listener(void) | |||
| 1935 | 1914 | ||
| 1936 | static int rfcomm_run(void *unused) | 1915 | static int rfcomm_run(void *unused) |
| 1937 | { | 1916 | { |
| 1938 | rfcomm_thread = current; | 1917 | BT_DBG(""); |
| 1939 | |||
| 1940 | atomic_inc(&running); | ||
| 1941 | 1918 | ||
| 1942 | daemonize("krfcommd"); | ||
| 1943 | set_user_nice(current, -10); | 1919 | set_user_nice(current, -10); |
| 1944 | 1920 | ||
| 1945 | BT_DBG(""); | ||
| 1946 | |||
| 1947 | rfcomm_add_listener(BDADDR_ANY); | 1921 | rfcomm_add_listener(BDADDR_ANY); |
| 1948 | 1922 | ||
| 1949 | rfcomm_worker(); | 1923 | while (!kthread_should_stop()) { |
| 1924 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 1925 | if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) { | ||
| 1926 | /* No pending events. Let's sleep. | ||
| 1927 | * Incoming connections and data will wake us up. */ | ||
| 1928 | schedule(); | ||
| 1929 | } | ||
| 1930 | set_current_state(TASK_RUNNING); | ||
| 1931 | |||
| 1932 | /* Process stuff */ | ||
| 1933 | clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event); | ||
| 1934 | rfcomm_process_sessions(); | ||
| 1935 | } | ||
| 1950 | 1936 | ||
| 1951 | rfcomm_kill_listener(); | 1937 | rfcomm_kill_listener(); |
| 1952 | 1938 | ||
| 1953 | atomic_dec(&running); | ||
| 1954 | return 0; | 1939 | return 0; |
| 1955 | } | 1940 | } |
| 1956 | 1941 | ||
| @@ -2059,7 +2044,11 @@ static int __init rfcomm_init(void) | |||
| 2059 | 2044 | ||
| 2060 | hci_register_cb(&rfcomm_cb); | 2045 | hci_register_cb(&rfcomm_cb); |
| 2061 | 2046 | ||
| 2062 | kernel_thread(rfcomm_run, NULL, CLONE_KERNEL); | 2047 | rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd"); |
| 2048 | if (IS_ERR(rfcomm_thread)) { | ||
| 2049 | hci_unregister_cb(&rfcomm_cb); | ||
| 2050 | return PTR_ERR(rfcomm_thread); | ||
| 2051 | } | ||
| 2063 | 2052 | ||
| 2064 | if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) | 2053 | if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) |
| 2065 | BT_ERR("Failed to create RFCOMM info file"); | 2054 | BT_ERR("Failed to create RFCOMM info file"); |
| @@ -2081,14 +2070,7 @@ static void __exit rfcomm_exit(void) | |||
| 2081 | 2070 | ||
| 2082 | hci_unregister_cb(&rfcomm_cb); | 2071 | hci_unregister_cb(&rfcomm_cb); |
| 2083 | 2072 | ||
| 2084 | /* Terminate working thread. | 2073 | kthread_stop(rfcomm_thread); |
| 2085 | * ie. Set terminate flag and wake it up */ | ||
| 2086 | atomic_inc(&terminate); | ||
| 2087 | rfcomm_schedule(RFCOMM_SCHED_STATE); | ||
| 2088 | |||
| 2089 | /* Wait until thread is running */ | ||
| 2090 | while (atomic_read(&running)) | ||
| 2091 | schedule(); | ||
| 2092 | 2074 | ||
| 2093 | #ifdef CONFIG_BT_RFCOMM_TTY | 2075 | #ifdef CONFIG_BT_RFCOMM_TTY |
| 2094 | rfcomm_cleanup_ttys(); | 2076 | rfcomm_cleanup_ttys(); |
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 266b6972667d..c46d51035e77 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c | |||
| @@ -287,7 +287,7 @@ static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int | |||
| 287 | struct rfcomm_dlc *d; | 287 | struct rfcomm_dlc *d; |
| 288 | struct sock *sk; | 288 | struct sock *sk; |
| 289 | 289 | ||
| 290 | sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto, 1); | 290 | sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto); |
| 291 | if (!sk) | 291 | if (!sk) |
| 292 | return NULL; | 292 | return NULL; |
| 293 | 293 | ||
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 22a832098d44..e447651a2dbe 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c | |||
| @@ -189,6 +189,23 @@ static struct device *rfcomm_get_device(struct rfcomm_dev *dev) | |||
| 189 | return conn ? &conn->dev : NULL; | 189 | return conn ? &conn->dev : NULL; |
| 190 | } | 190 | } |
| 191 | 191 | ||
| 192 | static ssize_t show_address(struct device *tty_dev, struct device_attribute *attr, char *buf) | ||
| 193 | { | ||
| 194 | struct rfcomm_dev *dev = dev_get_drvdata(tty_dev); | ||
| 195 | bdaddr_t bdaddr; | ||
| 196 | baswap(&bdaddr, &dev->dst); | ||
| 197 | return sprintf(buf, "%s\n", batostr(&bdaddr)); | ||
| 198 | } | ||
| 199 | |||
| 200 | static ssize_t show_channel(struct device *tty_dev, struct device_attribute *attr, char *buf) | ||
| 201 | { | ||
| 202 | struct rfcomm_dev *dev = dev_get_drvdata(tty_dev); | ||
| 203 | return sprintf(buf, "%d\n", dev->channel); | ||
| 204 | } | ||
| 205 | |||
| 206 | static DEVICE_ATTR(address, S_IRUGO, show_address, NULL); | ||
| 207 | static DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL); | ||
| 208 | |||
| 192 | static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) | 209 | static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) |
| 193 | { | 210 | { |
| 194 | struct rfcomm_dev *dev; | 211 | struct rfcomm_dev *dev; |
| @@ -281,6 +298,14 @@ out: | |||
| 281 | return err; | 298 | return err; |
| 282 | } | 299 | } |
| 283 | 300 | ||
| 301 | dev_set_drvdata(dev->tty_dev, dev); | ||
| 302 | |||
| 303 | if (device_create_file(dev->tty_dev, &dev_attr_address) < 0) | ||
| 304 | BT_ERR("Failed to create address attribute"); | ||
| 305 | |||
| 306 | if (device_create_file(dev->tty_dev, &dev_attr_channel) < 0) | ||
| 307 | BT_ERR("Failed to create channel attribute"); | ||
| 308 | |||
| 284 | return dev->id; | 309 | return dev->id; |
| 285 | } | 310 | } |
| 286 | 311 | ||
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 65b6fb1c4154..93ad1aae3f38 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
| @@ -189,7 +189,7 @@ static int sco_connect(struct sock *sk) | |||
| 189 | struct sco_conn *conn; | 189 | struct sco_conn *conn; |
| 190 | struct hci_conn *hcon; | 190 | struct hci_conn *hcon; |
| 191 | struct hci_dev *hdev; | 191 | struct hci_dev *hdev; |
| 192 | int err = 0; | 192 | int err, type; |
| 193 | 193 | ||
| 194 | BT_DBG("%s -> %s", batostr(src), batostr(dst)); | 194 | BT_DBG("%s -> %s", batostr(src), batostr(dst)); |
| 195 | 195 | ||
| @@ -200,7 +200,9 @@ static int sco_connect(struct sock *sk) | |||
| 200 | 200 | ||
| 201 | err = -ENOMEM; | 201 | err = -ENOMEM; |
| 202 | 202 | ||
| 203 | hcon = hci_connect(hdev, SCO_LINK, dst); | 203 | type = lmp_esco_capable(hdev) ? ESCO_LINK : SCO_LINK; |
| 204 | |||
| 205 | hcon = hci_connect(hdev, type, dst); | ||
| 204 | if (!hcon) | 206 | if (!hcon) |
| 205 | goto done; | 207 | goto done; |
| 206 | 208 | ||
| @@ -224,6 +226,7 @@ static int sco_connect(struct sock *sk) | |||
| 224 | sk->sk_state = BT_CONNECT; | 226 | sk->sk_state = BT_CONNECT; |
| 225 | sco_sock_set_timer(sk, sk->sk_sndtimeo); | 227 | sco_sock_set_timer(sk, sk->sk_sndtimeo); |
| 226 | } | 228 | } |
| 229 | |||
| 227 | done: | 230 | done: |
| 228 | hci_dev_unlock_bh(hdev); | 231 | hci_dev_unlock_bh(hdev); |
| 229 | hci_dev_put(hdev); | 232 | hci_dev_put(hdev); |
| @@ -418,7 +421,7 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int pro | |||
| 418 | { | 421 | { |
| 419 | struct sock *sk; | 422 | struct sock *sk; |
| 420 | 423 | ||
| 421 | sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto, 1); | 424 | sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto); |
| 422 | if (!sk) | 425 | if (!sk) |
| 423 | return NULL; | 426 | return NULL; |
| 424 | 427 | ||
| @@ -846,7 +849,7 @@ static int sco_connect_cfm(struct hci_conn *hcon, __u8 status) | |||
| 846 | { | 849 | { |
| 847 | BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); | 850 | BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); |
| 848 | 851 | ||
| 849 | if (hcon->type != SCO_LINK) | 852 | if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) |
| 850 | return 0; | 853 | return 0; |
| 851 | 854 | ||
| 852 | if (!status) { | 855 | if (!status) { |
| @@ -865,10 +868,11 @@ static int sco_disconn_ind(struct hci_conn *hcon, __u8 reason) | |||
| 865 | { | 868 | { |
| 866 | BT_DBG("hcon %p reason %d", hcon, reason); | 869 | BT_DBG("hcon %p reason %d", hcon, reason); |
| 867 | 870 | ||
| 868 | if (hcon->type != SCO_LINK) | 871 | if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) |
| 869 | return 0; | 872 | return 0; |
| 870 | 873 | ||
| 871 | sco_conn_del(hcon, bt_err(reason)); | 874 | sco_conn_del(hcon, bt_err(reason)); |
| 875 | |||
| 872 | return 0; | 876 | return 0; |
| 873 | } | 877 | } |
| 874 | 878 | ||
diff --git a/net/bridge/netfilter/ebt_arp.c b/net/bridge/netfilter/ebt_arp.c index 1a46952a56d9..18141392a9b4 100644 --- a/net/bridge/netfilter/ebt_arp.c +++ b/net/bridge/netfilter/ebt_arp.c | |||
| @@ -34,7 +34,7 @@ static int ebt_filter_arp(const struct sk_buff *skb, const struct net_device *in | |||
| 34 | ah->ar_pro, EBT_ARP_PTYPE)) | 34 | ah->ar_pro, EBT_ARP_PTYPE)) |
| 35 | return EBT_NOMATCH; | 35 | return EBT_NOMATCH; |
| 36 | 36 | ||
| 37 | if (info->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP)) { | 37 | if (info->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP | EBT_ARP_GRAT)) { |
| 38 | __be32 saddr, daddr, *sap, *dap; | 38 | __be32 saddr, daddr, *sap, *dap; |
| 39 | 39 | ||
| 40 | if (ah->ar_pln != sizeof(__be32) || ah->ar_pro != htons(ETH_P_IP)) | 40 | if (ah->ar_pln != sizeof(__be32) || ah->ar_pro != htons(ETH_P_IP)) |
diff --git a/net/core/dev.c b/net/core/dev.c index 38b03da5c1ca..be6cedab5aa8 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -120,6 +120,8 @@ | |||
| 120 | #include <linux/ctype.h> | 120 | #include <linux/ctype.h> |
| 121 | #include <linux/if_arp.h> | 121 | #include <linux/if_arp.h> |
| 122 | 122 | ||
| 123 | #include "net-sysfs.h" | ||
| 124 | |||
| 123 | /* | 125 | /* |
| 124 | * The list of packet types we will receive (as opposed to discard) | 126 | * The list of packet types we will receive (as opposed to discard) |
| 125 | * and the routines to invoke. | 127 | * and the routines to invoke. |
| @@ -249,10 +251,6 @@ static RAW_NOTIFIER_HEAD(netdev_chain); | |||
| 249 | 251 | ||
| 250 | DEFINE_PER_CPU(struct softnet_data, softnet_data); | 252 | DEFINE_PER_CPU(struct softnet_data, softnet_data); |
| 251 | 253 | ||
| 252 | extern int netdev_kobject_init(void); | ||
| 253 | extern int netdev_register_kobject(struct net_device *); | ||
| 254 | extern void netdev_unregister_kobject(struct net_device *); | ||
| 255 | |||
| 256 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 254 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 257 | /* | 255 | /* |
| 258 | * register_netdevice() inits dev->_xmit_lock and sets lockdep class | 256 | * register_netdevice() inits dev->_xmit_lock and sets lockdep class |
| @@ -885,6 +883,9 @@ int dev_change_name(struct net_device *dev, char *newname) | |||
| 885 | if (!dev_valid_name(newname)) | 883 | if (!dev_valid_name(newname)) |
| 886 | return -EINVAL; | 884 | return -EINVAL; |
| 887 | 885 | ||
| 886 | if (strncmp(newname, dev->name, IFNAMSIZ) == 0) | ||
| 887 | return 0; | ||
| 888 | |||
| 888 | memcpy(oldname, dev->name, IFNAMSIZ); | 889 | memcpy(oldname, dev->name, IFNAMSIZ); |
| 889 | 890 | ||
| 890 | if (strchr(newname, '%')) { | 891 | if (strchr(newname, '%')) { |
| @@ -1007,17 +1008,20 @@ int dev_open(struct net_device *dev) | |||
| 1007 | * Call device private open method | 1008 | * Call device private open method |
| 1008 | */ | 1009 | */ |
| 1009 | set_bit(__LINK_STATE_START, &dev->state); | 1010 | set_bit(__LINK_STATE_START, &dev->state); |
| 1010 | if (dev->open) { | 1011 | |
| 1012 | if (dev->validate_addr) | ||
| 1013 | ret = dev->validate_addr(dev); | ||
| 1014 | |||
| 1015 | if (!ret && dev->open) | ||
| 1011 | ret = dev->open(dev); | 1016 | ret = dev->open(dev); |
| 1012 | if (ret) | ||
| 1013 | clear_bit(__LINK_STATE_START, &dev->state); | ||
| 1014 | } | ||
| 1015 | 1017 | ||
| 1016 | /* | 1018 | /* |
| 1017 | * If it went open OK then: | 1019 | * If it went open OK then: |
| 1018 | */ | 1020 | */ |
| 1019 | 1021 | ||
| 1020 | if (!ret) { | 1022 | if (ret) |
| 1023 | clear_bit(__LINK_STATE_START, &dev->state); | ||
| 1024 | else { | ||
| 1021 | /* | 1025 | /* |
| 1022 | * Set the flags. | 1026 | * Set the flags. |
| 1023 | */ | 1027 | */ |
| @@ -1038,6 +1042,7 @@ int dev_open(struct net_device *dev) | |||
| 1038 | */ | 1042 | */ |
| 1039 | call_netdevice_notifiers(NETDEV_UP, dev); | 1043 | call_netdevice_notifiers(NETDEV_UP, dev); |
| 1040 | } | 1044 | } |
| 1045 | |||
| 1041 | return ret; | 1046 | return ret; |
| 1042 | } | 1047 | } |
| 1043 | 1048 | ||
| @@ -1553,7 +1558,7 @@ gso: | |||
| 1553 | return rc; | 1558 | return rc; |
| 1554 | } | 1559 | } |
| 1555 | if (unlikely((netif_queue_stopped(dev) || | 1560 | if (unlikely((netif_queue_stopped(dev) || |
| 1556 | netif_subqueue_stopped(dev, skb->queue_mapping)) && | 1561 | netif_subqueue_stopped(dev, skb)) && |
| 1557 | skb->next)) | 1562 | skb->next)) |
| 1558 | return NETDEV_TX_BUSY; | 1563 | return NETDEV_TX_BUSY; |
| 1559 | } while (skb->next); | 1564 | } while (skb->next); |
| @@ -1661,7 +1666,7 @@ gso: | |||
| 1661 | q = dev->qdisc; | 1666 | q = dev->qdisc; |
| 1662 | if (q->enqueue) { | 1667 | if (q->enqueue) { |
| 1663 | /* reset queue_mapping to zero */ | 1668 | /* reset queue_mapping to zero */ |
| 1664 | skb->queue_mapping = 0; | 1669 | skb_set_queue_mapping(skb, 0); |
| 1665 | rc = q->enqueue(skb, q); | 1670 | rc = q->enqueue(skb, q); |
| 1666 | qdisc_run(dev); | 1671 | qdisc_run(dev); |
| 1667 | spin_unlock(&dev->queue_lock); | 1672 | spin_unlock(&dev->queue_lock); |
| @@ -1692,7 +1697,7 @@ gso: | |||
| 1692 | HARD_TX_LOCK(dev, cpu); | 1697 | HARD_TX_LOCK(dev, cpu); |
| 1693 | 1698 | ||
| 1694 | if (!netif_queue_stopped(dev) && | 1699 | if (!netif_queue_stopped(dev) && |
| 1695 | !netif_subqueue_stopped(dev, skb->queue_mapping)) { | 1700 | !netif_subqueue_stopped(dev, skb)) { |
| 1696 | rc = 0; | 1701 | rc = 0; |
| 1697 | if (!dev_hard_start_xmit(skb, dev)) { | 1702 | if (!dev_hard_start_xmit(skb, dev)) { |
| 1698 | HARD_TX_UNLOCK(dev); | 1703 | HARD_TX_UNLOCK(dev); |
| @@ -1746,9 +1751,6 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; | |||
| 1746 | * | 1751 | * |
| 1747 | * return values: | 1752 | * return values: |
| 1748 | * NET_RX_SUCCESS (no congestion) | 1753 | * NET_RX_SUCCESS (no congestion) |
| 1749 | * NET_RX_CN_LOW (low congestion) | ||
| 1750 | * NET_RX_CN_MOD (moderate congestion) | ||
| 1751 | * NET_RX_CN_HIGH (high congestion) | ||
| 1752 | * NET_RX_DROP (packet was dropped) | 1754 | * NET_RX_DROP (packet was dropped) |
| 1753 | * | 1755 | * |
| 1754 | */ | 1756 | */ |
| @@ -1996,6 +1998,21 @@ out: | |||
| 1996 | } | 1998 | } |
| 1997 | #endif | 1999 | #endif |
| 1998 | 2000 | ||
| 2001 | /** | ||
| 2002 | * netif_receive_skb - process receive buffer from network | ||
| 2003 | * @skb: buffer to process | ||
| 2004 | * | ||
| 2005 | * netif_receive_skb() is the main receive data processing function. | ||
| 2006 | * It always succeeds. The buffer may be dropped during processing | ||
| 2007 | * for congestion control or by the protocol layers. | ||
| 2008 | * | ||
| 2009 | * This function may only be called from softirq context and interrupts | ||
| 2010 | * should be enabled. | ||
| 2011 | * | ||
| 2012 | * Return values (usually ignored): | ||
| 2013 | * NET_RX_SUCCESS: no congestion | ||
| 2014 | * NET_RX_DROP: packet was dropped | ||
| 2015 | */ | ||
| 1999 | int netif_receive_skb(struct sk_buff *skb) | 2016 | int netif_receive_skb(struct sk_buff *skb) |
| 2000 | { | 2017 | { |
| 2001 | struct packet_type *ptype, *pt_prev; | 2018 | struct packet_type *ptype, *pt_prev; |
| @@ -2167,7 +2184,15 @@ static void net_rx_action(struct softirq_action *h) | |||
| 2167 | 2184 | ||
| 2168 | weight = n->weight; | 2185 | weight = n->weight; |
| 2169 | 2186 | ||
| 2170 | work = n->poll(n, weight); | 2187 | /* This NAPI_STATE_SCHED test is for avoiding a race |
| 2188 | * with netpoll's poll_napi(). Only the entity which | ||
| 2189 | * obtains the lock and sees NAPI_STATE_SCHED set will | ||
| 2190 | * actually make the ->poll() call. Therefore we avoid | ||
| 2191 | * accidently calling ->poll() when NAPI is not scheduled. | ||
| 2192 | */ | ||
| 2193 | work = 0; | ||
| 2194 | if (test_bit(NAPI_STATE_SCHED, &n->state)) | ||
| 2195 | work = n->poll(n, weight); | ||
| 2171 | 2196 | ||
| 2172 | WARN_ON_ONCE(work > weight); | 2197 | WARN_ON_ONCE(work > weight); |
| 2173 | 2198 | ||
| @@ -2663,7 +2688,7 @@ static void __net_exit dev_proc_net_exit(struct net *net) | |||
| 2663 | proc_net_remove(net, "dev"); | 2688 | proc_net_remove(net, "dev"); |
| 2664 | } | 2689 | } |
| 2665 | 2690 | ||
| 2666 | static struct pernet_operations __net_initdata dev_proc_ops = { | 2691 | static struct pernet_operations dev_proc_ops = { |
| 2667 | .init = dev_proc_net_init, | 2692 | .init = dev_proc_net_init, |
| 2668 | .exit = dev_proc_net_exit, | 2693 | .exit = dev_proc_net_exit, |
| 2669 | }; | 2694 | }; |
| @@ -3483,6 +3508,60 @@ static void net_set_todo(struct net_device *dev) | |||
| 3483 | spin_unlock(&net_todo_list_lock); | 3508 | spin_unlock(&net_todo_list_lock); |
| 3484 | } | 3509 | } |
| 3485 | 3510 | ||
| 3511 | static void rollback_registered(struct net_device *dev) | ||
| 3512 | { | ||
| 3513 | BUG_ON(dev_boot_phase); | ||
| 3514 | ASSERT_RTNL(); | ||
| 3515 | |||
| 3516 | /* Some devices call without registering for initialization unwind. */ | ||
| 3517 | if (dev->reg_state == NETREG_UNINITIALIZED) { | ||
| 3518 | printk(KERN_DEBUG "unregister_netdevice: device %s/%p never " | ||
| 3519 | "was registered\n", dev->name, dev); | ||
| 3520 | |||
| 3521 | WARN_ON(1); | ||
| 3522 | return; | ||
| 3523 | } | ||
| 3524 | |||
| 3525 | BUG_ON(dev->reg_state != NETREG_REGISTERED); | ||
| 3526 | |||
| 3527 | /* If device is running, close it first. */ | ||
| 3528 | dev_close(dev); | ||
| 3529 | |||
| 3530 | /* And unlink it from device chain. */ | ||
| 3531 | unlist_netdevice(dev); | ||
| 3532 | |||
| 3533 | dev->reg_state = NETREG_UNREGISTERING; | ||
| 3534 | |||
| 3535 | synchronize_net(); | ||
| 3536 | |||
| 3537 | /* Shutdown queueing discipline. */ | ||
| 3538 | dev_shutdown(dev); | ||
| 3539 | |||
| 3540 | |||
| 3541 | /* Notify protocols, that we are about to destroy | ||
| 3542 | this device. They should clean all the things. | ||
| 3543 | */ | ||
| 3544 | call_netdevice_notifiers(NETDEV_UNREGISTER, dev); | ||
| 3545 | |||
| 3546 | /* | ||
| 3547 | * Flush the unicast and multicast chains | ||
| 3548 | */ | ||
| 3549 | dev_addr_discard(dev); | ||
| 3550 | |||
| 3551 | if (dev->uninit) | ||
| 3552 | dev->uninit(dev); | ||
| 3553 | |||
| 3554 | /* Notifier chain MUST detach us from master device. */ | ||
| 3555 | BUG_TRAP(!dev->master); | ||
| 3556 | |||
| 3557 | /* Remove entries from kobject tree */ | ||
| 3558 | netdev_unregister_kobject(dev); | ||
| 3559 | |||
| 3560 | synchronize_net(); | ||
| 3561 | |||
| 3562 | dev_put(dev); | ||
| 3563 | } | ||
| 3564 | |||
| 3486 | /** | 3565 | /** |
| 3487 | * register_netdevice - register a network device | 3566 | * register_netdevice - register a network device |
| 3488 | * @dev: device to register | 3567 | * @dev: device to register |
| @@ -3620,8 +3699,10 @@ int register_netdevice(struct net_device *dev) | |||
| 3620 | /* Notify protocols, that a new device appeared. */ | 3699 | /* Notify protocols, that a new device appeared. */ |
| 3621 | ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); | 3700 | ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); |
| 3622 | ret = notifier_to_errno(ret); | 3701 | ret = notifier_to_errno(ret); |
| 3623 | if (ret) | 3702 | if (ret) { |
| 3624 | unregister_netdevice(dev); | 3703 | rollback_registered(dev); |
| 3704 | dev->reg_state = NETREG_UNREGISTERED; | ||
| 3705 | } | ||
| 3625 | 3706 | ||
| 3626 | out: | 3707 | out: |
| 3627 | return ret; | 3708 | return ret; |
| @@ -3898,59 +3979,9 @@ void synchronize_net(void) | |||
| 3898 | 3979 | ||
| 3899 | void unregister_netdevice(struct net_device *dev) | 3980 | void unregister_netdevice(struct net_device *dev) |
| 3900 | { | 3981 | { |
| 3901 | BUG_ON(dev_boot_phase); | 3982 | rollback_registered(dev); |
| 3902 | ASSERT_RTNL(); | ||
| 3903 | |||
| 3904 | /* Some devices call without registering for initialization unwind. */ | ||
| 3905 | if (dev->reg_state == NETREG_UNINITIALIZED) { | ||
| 3906 | printk(KERN_DEBUG "unregister_netdevice: device %s/%p never " | ||
| 3907 | "was registered\n", dev->name, dev); | ||
| 3908 | |||
| 3909 | WARN_ON(1); | ||
| 3910 | return; | ||
| 3911 | } | ||
| 3912 | |||
| 3913 | BUG_ON(dev->reg_state != NETREG_REGISTERED); | ||
| 3914 | |||
| 3915 | /* If device is running, close it first. */ | ||
| 3916 | dev_close(dev); | ||
| 3917 | |||
| 3918 | /* And unlink it from device chain. */ | ||
| 3919 | unlist_netdevice(dev); | ||
| 3920 | |||
| 3921 | dev->reg_state = NETREG_UNREGISTERING; | ||
| 3922 | |||
| 3923 | synchronize_net(); | ||
| 3924 | |||
| 3925 | /* Shutdown queueing discipline. */ | ||
| 3926 | dev_shutdown(dev); | ||
| 3927 | |||
| 3928 | |||
| 3929 | /* Notify protocols, that we are about to destroy | ||
| 3930 | this device. They should clean all the things. | ||
| 3931 | */ | ||
| 3932 | call_netdevice_notifiers(NETDEV_UNREGISTER, dev); | ||
| 3933 | |||
| 3934 | /* | ||
| 3935 | * Flush the unicast and multicast chains | ||
| 3936 | */ | ||
| 3937 | dev_addr_discard(dev); | ||
| 3938 | |||
| 3939 | if (dev->uninit) | ||
| 3940 | dev->uninit(dev); | ||
| 3941 | |||
| 3942 | /* Notifier chain MUST detach us from master device. */ | ||
| 3943 | BUG_TRAP(!dev->master); | ||
| 3944 | |||
| 3945 | /* Remove entries from kobject tree */ | ||
| 3946 | netdev_unregister_kobject(dev); | ||
| 3947 | |||
| 3948 | /* Finish processing unregister after unlock */ | 3983 | /* Finish processing unregister after unlock */ |
| 3949 | net_set_todo(dev); | 3984 | net_set_todo(dev); |
| 3950 | |||
| 3951 | synchronize_net(); | ||
| 3952 | |||
| 3953 | dev_put(dev); | ||
| 3954 | } | 3985 | } |
| 3955 | 3986 | ||
| 3956 | /** | 3987 | /** |
| @@ -4323,7 +4354,7 @@ static void __net_exit netdev_exit(struct net *net) | |||
| 4323 | kfree(net->dev_index_head); | 4354 | kfree(net->dev_index_head); |
| 4324 | } | 4355 | } |
| 4325 | 4356 | ||
| 4326 | static struct pernet_operations __net_initdata netdev_net_ops = { | 4357 | static struct pernet_operations netdev_net_ops = { |
| 4327 | .init = netdev_init, | 4358 | .init = netdev_init, |
| 4328 | .exit = netdev_exit, | 4359 | .exit = netdev_exit, |
| 4329 | }; | 4360 | }; |
| @@ -4354,7 +4385,7 @@ static void __net_exit default_device_exit(struct net *net) | |||
| 4354 | rtnl_unlock(); | 4385 | rtnl_unlock(); |
| 4355 | } | 4386 | } |
| 4356 | 4387 | ||
| 4357 | static struct pernet_operations __net_initdata default_device_ops = { | 4388 | static struct pernet_operations default_device_ops = { |
| 4358 | .exit = default_device_exit, | 4389 | .exit = default_device_exit, |
| 4359 | }; | 4390 | }; |
| 4360 | 4391 | ||
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c index 15241cf48af8..ae354057d84c 100644 --- a/net/core/dev_mcast.c +++ b/net/core/dev_mcast.c | |||
| @@ -285,7 +285,7 @@ static void __net_exit dev_mc_net_exit(struct net *net) | |||
| 285 | proc_net_remove(net, "dev_mcast"); | 285 | proc_net_remove(net, "dev_mcast"); |
| 286 | } | 286 | } |
| 287 | 287 | ||
| 288 | static struct pernet_operations __net_initdata dev_mc_net_ops = { | 288 | static struct pernet_operations dev_mc_net_ops = { |
| 289 | .init = dev_mc_net_init, | 289 | .init = dev_mc_net_init, |
| 290 | .exit = dev_mc_net_exit, | 290 | .exit = dev_mc_net_exit, |
| 291 | }; | 291 | }; |
diff --git a/net/core/dst.c b/net/core/dst.c index 16958e64e577..03daead3592a 100644 --- a/net/core/dst.c +++ b/net/core/dst.c | |||
| @@ -18,7 +18,6 @@ | |||
| 18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
| 19 | #include <net/net_namespace.h> | 19 | #include <net/net_namespace.h> |
| 20 | 20 | ||
| 21 | #include <net/net_namespace.h> | ||
| 22 | #include <net/dst.h> | 21 | #include <net/dst.h> |
| 23 | 22 | ||
| 24 | /* | 23 | /* |
diff --git a/net/core/flow.c b/net/core/flow.c index 0ab5234b17d8..3ed2b4b1d6d4 100644 --- a/net/core/flow.c +++ b/net/core/flow.c | |||
| @@ -142,8 +142,6 @@ typedef u64 flow_compare_t; | |||
| 142 | typedef u32 flow_compare_t; | 142 | typedef u32 flow_compare_t; |
| 143 | #endif | 143 | #endif |
| 144 | 144 | ||
| 145 | extern void flowi_is_missized(void); | ||
| 146 | |||
| 147 | /* I hear what you're saying, use memcmp. But memcmp cannot make | 145 | /* I hear what you're saying, use memcmp. But memcmp cannot make |
| 148 | * important assumptions that we can here, such as alignment and | 146 | * important assumptions that we can here, such as alignment and |
| 149 | * constant size. | 147 | * constant size. |
| @@ -153,8 +151,7 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2) | |||
| 153 | flow_compare_t *k1, *k1_lim, *k2; | 151 | flow_compare_t *k1, *k1_lim, *k2; |
| 154 | const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t); | 152 | const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t); |
| 155 | 153 | ||
| 156 | if (sizeof(struct flowi) % sizeof(flow_compare_t)) | 154 | BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t)); |
| 157 | flowi_is_missized(); | ||
| 158 | 155 | ||
| 159 | k1 = (flow_compare_t *) key1; | 156 | k1 = (flow_compare_t *) key1; |
| 160 | k1_lim = k1 + n_elem; | 157 | k1_lim = k1 + n_elem; |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 67ba9914e52e..29b8ee4e35d6 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
| @@ -1435,9 +1435,14 @@ int neigh_table_clear(struct neigh_table *tbl) | |||
| 1435 | kfree(tbl->phash_buckets); | 1435 | kfree(tbl->phash_buckets); |
| 1436 | tbl->phash_buckets = NULL; | 1436 | tbl->phash_buckets = NULL; |
| 1437 | 1437 | ||
| 1438 | remove_proc_entry(tbl->id, init_net.proc_net_stat); | ||
| 1439 | |||
| 1438 | free_percpu(tbl->stats); | 1440 | free_percpu(tbl->stats); |
| 1439 | tbl->stats = NULL; | 1441 | tbl->stats = NULL; |
| 1440 | 1442 | ||
| 1443 | kmem_cache_destroy(tbl->kmem_cachep); | ||
| 1444 | tbl->kmem_cachep = NULL; | ||
| 1445 | |||
| 1441 | return 0; | 1446 | return 0; |
| 1442 | } | 1447 | } |
| 1443 | 1448 | ||
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 6628e457ddc0..61ead1d11132 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
| @@ -18,6 +18,8 @@ | |||
| 18 | #include <linux/wireless.h> | 18 | #include <linux/wireless.h> |
| 19 | #include <net/iw_handler.h> | 19 | #include <net/iw_handler.h> |
| 20 | 20 | ||
| 21 | #include "net-sysfs.h" | ||
| 22 | |||
| 21 | #ifdef CONFIG_SYSFS | 23 | #ifdef CONFIG_SYSFS |
| 22 | static const char fmt_hex[] = "%#x\n"; | 24 | static const char fmt_hex[] = "%#x\n"; |
| 23 | static const char fmt_long_hex[] = "%#lx\n"; | 25 | static const char fmt_long_hex[] = "%#lx\n"; |
diff --git a/net/core/net-sysfs.h b/net/core/net-sysfs.h new file mode 100644 index 000000000000..f5f108db3924 --- /dev/null +++ b/net/core/net-sysfs.h | |||
| @@ -0,0 +1,8 @@ | |||
| 1 | #ifndef __NET_SYSFS_H__ | ||
| 2 | #define __NET_SYSFS_H__ | ||
| 3 | |||
| 4 | int netdev_kobject_init(void); | ||
| 5 | int netdev_register_kobject(struct net_device *); | ||
| 6 | void netdev_unregister_kobject(struct net_device *); | ||
| 7 | |||
| 8 | #endif | ||
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 6f71db8c4428..3f6d37deac45 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
| @@ -17,74 +17,13 @@ static DEFINE_MUTEX(net_mutex); | |||
| 17 | 17 | ||
| 18 | LIST_HEAD(net_namespace_list); | 18 | LIST_HEAD(net_namespace_list); |
| 19 | 19 | ||
| 20 | static struct kmem_cache *net_cachep; | ||
| 21 | |||
| 22 | struct net init_net; | 20 | struct net init_net; |
| 23 | EXPORT_SYMBOL_GPL(init_net); | 21 | EXPORT_SYMBOL_GPL(init_net); |
| 24 | 22 | ||
| 25 | static struct net *net_alloc(void) | ||
| 26 | { | ||
| 27 | return kmem_cache_zalloc(net_cachep, GFP_KERNEL); | ||
| 28 | } | ||
| 29 | |||
| 30 | static void net_free(struct net *net) | ||
| 31 | { | ||
| 32 | if (!net) | ||
| 33 | return; | ||
| 34 | |||
| 35 | if (unlikely(atomic_read(&net->use_count) != 0)) { | ||
| 36 | printk(KERN_EMERG "network namespace not free! Usage: %d\n", | ||
| 37 | atomic_read(&net->use_count)); | ||
| 38 | return; | ||
| 39 | } | ||
| 40 | |||
| 41 | kmem_cache_free(net_cachep, net); | ||
| 42 | } | ||
| 43 | |||
| 44 | static void cleanup_net(struct work_struct *work) | ||
| 45 | { | ||
| 46 | struct pernet_operations *ops; | ||
| 47 | struct net *net; | ||
| 48 | |||
| 49 | net = container_of(work, struct net, work); | ||
| 50 | |||
| 51 | mutex_lock(&net_mutex); | ||
| 52 | |||
| 53 | /* Don't let anyone else find us. */ | ||
| 54 | rtnl_lock(); | ||
| 55 | list_del(&net->list); | ||
| 56 | rtnl_unlock(); | ||
| 57 | |||
| 58 | /* Run all of the network namespace exit methods */ | ||
| 59 | list_for_each_entry_reverse(ops, &pernet_list, list) { | ||
| 60 | if (ops->exit) | ||
| 61 | ops->exit(net); | ||
| 62 | } | ||
| 63 | |||
| 64 | mutex_unlock(&net_mutex); | ||
| 65 | |||
| 66 | /* Ensure there are no outstanding rcu callbacks using this | ||
| 67 | * network namespace. | ||
| 68 | */ | ||
| 69 | rcu_barrier(); | ||
| 70 | |||
| 71 | /* Finally it is safe to free my network namespace structure */ | ||
| 72 | net_free(net); | ||
| 73 | } | ||
| 74 | |||
| 75 | |||
| 76 | void __put_net(struct net *net) | ||
| 77 | { | ||
| 78 | /* Cleanup the network namespace in process context */ | ||
| 79 | INIT_WORK(&net->work, cleanup_net); | ||
| 80 | schedule_work(&net->work); | ||
| 81 | } | ||
| 82 | EXPORT_SYMBOL_GPL(__put_net); | ||
| 83 | |||
| 84 | /* | 23 | /* |
| 85 | * setup_net runs the initializers for the network namespace object. | 24 | * setup_net runs the initializers for the network namespace object. |
| 86 | */ | 25 | */ |
| 87 | static int setup_net(struct net *net) | 26 | static __net_init int setup_net(struct net *net) |
| 88 | { | 27 | { |
| 89 | /* Must be called with net_mutex held */ | 28 | /* Must be called with net_mutex held */ |
| 90 | struct pernet_operations *ops; | 29 | struct pernet_operations *ops; |
| @@ -112,9 +51,33 @@ out_undo: | |||
| 112 | if (ops->exit) | 51 | if (ops->exit) |
| 113 | ops->exit(net); | 52 | ops->exit(net); |
| 114 | } | 53 | } |
| 54 | |||
| 55 | rcu_barrier(); | ||
| 115 | goto out; | 56 | goto out; |
| 116 | } | 57 | } |
| 117 | 58 | ||
| 59 | #ifdef CONFIG_NET_NS | ||
| 60 | static struct kmem_cache *net_cachep; | ||
| 61 | |||
| 62 | static struct net *net_alloc(void) | ||
| 63 | { | ||
| 64 | return kmem_cache_zalloc(net_cachep, GFP_KERNEL); | ||
| 65 | } | ||
| 66 | |||
| 67 | static void net_free(struct net *net) | ||
| 68 | { | ||
| 69 | if (!net) | ||
| 70 | return; | ||
| 71 | |||
| 72 | if (unlikely(atomic_read(&net->use_count) != 0)) { | ||
| 73 | printk(KERN_EMERG "network namespace not free! Usage: %d\n", | ||
| 74 | atomic_read(&net->use_count)); | ||
| 75 | return; | ||
| 76 | } | ||
| 77 | |||
| 78 | kmem_cache_free(net_cachep, net); | ||
| 79 | } | ||
| 80 | |||
| 118 | struct net *copy_net_ns(unsigned long flags, struct net *old_net) | 81 | struct net *copy_net_ns(unsigned long flags, struct net *old_net) |
| 119 | { | 82 | { |
| 120 | struct net *new_net = NULL; | 83 | struct net *new_net = NULL; |
| @@ -125,10 +88,6 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net) | |||
| 125 | if (!(flags & CLONE_NEWNET)) | 88 | if (!(flags & CLONE_NEWNET)) |
| 126 | return old_net; | 89 | return old_net; |
| 127 | 90 | ||
| 128 | #ifndef CONFIG_NET_NS | ||
| 129 | return ERR_PTR(-EINVAL); | ||
| 130 | #endif | ||
| 131 | |||
| 132 | err = -ENOMEM; | 91 | err = -ENOMEM; |
| 133 | new_net = net_alloc(); | 92 | new_net = net_alloc(); |
| 134 | if (!new_net) | 93 | if (!new_net) |
| @@ -155,14 +114,64 @@ out: | |||
| 155 | return new_net; | 114 | return new_net; |
| 156 | } | 115 | } |
| 157 | 116 | ||
| 117 | static void cleanup_net(struct work_struct *work) | ||
| 118 | { | ||
| 119 | struct pernet_operations *ops; | ||
| 120 | struct net *net; | ||
| 121 | |||
| 122 | net = container_of(work, struct net, work); | ||
| 123 | |||
| 124 | mutex_lock(&net_mutex); | ||
| 125 | |||
| 126 | /* Don't let anyone else find us. */ | ||
| 127 | rtnl_lock(); | ||
| 128 | list_del(&net->list); | ||
| 129 | rtnl_unlock(); | ||
| 130 | |||
| 131 | /* Run all of the network namespace exit methods */ | ||
| 132 | list_for_each_entry_reverse(ops, &pernet_list, list) { | ||
| 133 | if (ops->exit) | ||
| 134 | ops->exit(net); | ||
| 135 | } | ||
| 136 | |||
| 137 | mutex_unlock(&net_mutex); | ||
| 138 | |||
| 139 | /* Ensure there are no outstanding rcu callbacks using this | ||
| 140 | * network namespace. | ||
| 141 | */ | ||
| 142 | rcu_barrier(); | ||
| 143 | |||
| 144 | /* Finally it is safe to free my network namespace structure */ | ||
| 145 | net_free(net); | ||
| 146 | } | ||
| 147 | |||
| 148 | void __put_net(struct net *net) | ||
| 149 | { | ||
| 150 | /* Cleanup the network namespace in process context */ | ||
| 151 | INIT_WORK(&net->work, cleanup_net); | ||
| 152 | schedule_work(&net->work); | ||
| 153 | } | ||
| 154 | EXPORT_SYMBOL_GPL(__put_net); | ||
| 155 | |||
| 156 | #else | ||
| 157 | struct net *copy_net_ns(unsigned long flags, struct net *old_net) | ||
| 158 | { | ||
| 159 | if (flags & CLONE_NEWNET) | ||
| 160 | return ERR_PTR(-EINVAL); | ||
| 161 | return old_net; | ||
| 162 | } | ||
| 163 | #endif | ||
| 164 | |||
| 158 | static int __init net_ns_init(void) | 165 | static int __init net_ns_init(void) |
| 159 | { | 166 | { |
| 160 | int err; | 167 | int err; |
| 161 | 168 | ||
| 162 | printk(KERN_INFO "net_namespace: %zd bytes\n", sizeof(struct net)); | 169 | printk(KERN_INFO "net_namespace: %zd bytes\n", sizeof(struct net)); |
| 170 | #ifdef CONFIG_NET_NS | ||
| 163 | net_cachep = kmem_cache_create("net_namespace", sizeof(struct net), | 171 | net_cachep = kmem_cache_create("net_namespace", sizeof(struct net), |
| 164 | SMP_CACHE_BYTES, | 172 | SMP_CACHE_BYTES, |
| 165 | SLAB_PANIC, NULL); | 173 | SLAB_PANIC, NULL); |
| 174 | #endif | ||
| 166 | mutex_lock(&net_mutex); | 175 | mutex_lock(&net_mutex); |
| 167 | err = setup_net(&init_net); | 176 | err = setup_net(&init_net); |
| 168 | 177 | ||
| @@ -185,29 +194,28 @@ static int register_pernet_operations(struct list_head *list, | |||
| 185 | struct net *net, *undo_net; | 194 | struct net *net, *undo_net; |
| 186 | int error; | 195 | int error; |
| 187 | 196 | ||
| 188 | error = 0; | ||
| 189 | list_add_tail(&ops->list, list); | 197 | list_add_tail(&ops->list, list); |
| 190 | for_each_net(net) { | 198 | if (ops->init) { |
| 191 | if (ops->init) { | 199 | for_each_net(net) { |
| 192 | error = ops->init(net); | 200 | error = ops->init(net); |
| 193 | if (error) | 201 | if (error) |
| 194 | goto out_undo; | 202 | goto out_undo; |
| 195 | } | 203 | } |
| 196 | } | 204 | } |
| 197 | out: | 205 | return 0; |
| 198 | return error; | ||
| 199 | 206 | ||
| 200 | out_undo: | 207 | out_undo: |
| 201 | /* If I have an error cleanup all namespaces I initialized */ | 208 | /* If I have an error cleanup all namespaces I initialized */ |
| 202 | list_del(&ops->list); | 209 | list_del(&ops->list); |
| 203 | for_each_net(undo_net) { | 210 | if (ops->exit) { |
| 204 | if (undo_net == net) | 211 | for_each_net(undo_net) { |
| 205 | goto undone; | 212 | if (undo_net == net) |
| 206 | if (ops->exit) | 213 | goto undone; |
| 207 | ops->exit(undo_net); | 214 | ops->exit(undo_net); |
| 215 | } | ||
| 208 | } | 216 | } |
| 209 | undone: | 217 | undone: |
| 210 | goto out; | 218 | return error; |
| 211 | } | 219 | } |
| 212 | 220 | ||
| 213 | static void unregister_pernet_operations(struct pernet_operations *ops) | 221 | static void unregister_pernet_operations(struct pernet_operations *ops) |
| @@ -215,8 +223,8 @@ static void unregister_pernet_operations(struct pernet_operations *ops) | |||
| 215 | struct net *net; | 223 | struct net *net; |
| 216 | 224 | ||
| 217 | list_del(&ops->list); | 225 | list_del(&ops->list); |
| 218 | for_each_net(net) | 226 | if (ops->exit) |
| 219 | if (ops->exit) | 227 | for_each_net(net) |
| 220 | ops->exit(net); | 228 | ops->exit(net); |
| 221 | } | 229 | } |
| 222 | 230 | ||
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 95daba624967..c499b5c69bed 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
| @@ -67,7 +67,7 @@ static void queue_process(struct work_struct *work) | |||
| 67 | local_irq_save(flags); | 67 | local_irq_save(flags); |
| 68 | netif_tx_lock(dev); | 68 | netif_tx_lock(dev); |
| 69 | if ((netif_queue_stopped(dev) || | 69 | if ((netif_queue_stopped(dev) || |
| 70 | netif_subqueue_stopped(dev, skb->queue_mapping)) || | 70 | netif_subqueue_stopped(dev, skb)) || |
| 71 | dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) { | 71 | dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) { |
| 72 | skb_queue_head(&npinfo->txq, skb); | 72 | skb_queue_head(&npinfo->txq, skb); |
| 73 | netif_tx_unlock(dev); | 73 | netif_tx_unlock(dev); |
| @@ -116,6 +116,29 @@ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh, | |||
| 116 | * network adapter, forcing superfluous retries and possibly timeouts. | 116 | * network adapter, forcing superfluous retries and possibly timeouts. |
| 117 | * Thus, we set our budget to greater than 1. | 117 | * Thus, we set our budget to greater than 1. |
| 118 | */ | 118 | */ |
| 119 | static int poll_one_napi(struct netpoll_info *npinfo, | ||
| 120 | struct napi_struct *napi, int budget) | ||
| 121 | { | ||
| 122 | int work; | ||
| 123 | |||
| 124 | /* net_rx_action's ->poll() invocations and our's are | ||
| 125 | * synchronized by this test which is only made while | ||
| 126 | * holding the napi->poll_lock. | ||
| 127 | */ | ||
| 128 | if (!test_bit(NAPI_STATE_SCHED, &napi->state)) | ||
| 129 | return budget; | ||
| 130 | |||
| 131 | npinfo->rx_flags |= NETPOLL_RX_DROP; | ||
| 132 | atomic_inc(&trapped); | ||
| 133 | |||
| 134 | work = napi->poll(napi, budget); | ||
| 135 | |||
| 136 | atomic_dec(&trapped); | ||
| 137 | npinfo->rx_flags &= ~NETPOLL_RX_DROP; | ||
| 138 | |||
| 139 | return budget - work; | ||
| 140 | } | ||
| 141 | |||
| 119 | static void poll_napi(struct netpoll *np) | 142 | static void poll_napi(struct netpoll *np) |
| 120 | { | 143 | { |
| 121 | struct netpoll_info *npinfo = np->dev->npinfo; | 144 | struct netpoll_info *npinfo = np->dev->npinfo; |
| @@ -123,17 +146,13 @@ static void poll_napi(struct netpoll *np) | |||
| 123 | int budget = 16; | 146 | int budget = 16; |
| 124 | 147 | ||
| 125 | list_for_each_entry(napi, &np->dev->napi_list, dev_list) { | 148 | list_for_each_entry(napi, &np->dev->napi_list, dev_list) { |
| 126 | if (test_bit(NAPI_STATE_SCHED, &napi->state) && | 149 | if (napi->poll_owner != smp_processor_id() && |
| 127 | napi->poll_owner != smp_processor_id() && | ||
| 128 | spin_trylock(&napi->poll_lock)) { | 150 | spin_trylock(&napi->poll_lock)) { |
| 129 | npinfo->rx_flags |= NETPOLL_RX_DROP; | 151 | budget = poll_one_napi(npinfo, napi, budget); |
| 130 | atomic_inc(&trapped); | ||
| 131 | |||
| 132 | napi->poll(napi, budget); | ||
| 133 | |||
| 134 | atomic_dec(&trapped); | ||
| 135 | npinfo->rx_flags &= ~NETPOLL_RX_DROP; | ||
| 136 | spin_unlock(&napi->poll_lock); | 152 | spin_unlock(&napi->poll_lock); |
| 153 | |||
| 154 | if (!budget) | ||
| 155 | break; | ||
| 137 | } | 156 | } |
| 138 | } | 157 | } |
| 139 | } | 158 | } |
| @@ -269,7 +288,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) | |||
| 269 | tries > 0; --tries) { | 288 | tries > 0; --tries) { |
| 270 | if (netif_tx_trylock(dev)) { | 289 | if (netif_tx_trylock(dev)) { |
| 271 | if (!netif_queue_stopped(dev) && | 290 | if (!netif_queue_stopped(dev) && |
| 272 | !netif_subqueue_stopped(dev, skb->queue_mapping)) | 291 | !netif_subqueue_stopped(dev, skb)) |
| 273 | status = dev->hard_start_xmit(skb, dev); | 292 | status = dev->hard_start_xmit(skb, dev); |
| 274 | netif_tx_unlock(dev); | 293 | netif_tx_unlock(dev); |
| 275 | 294 | ||
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index c4719edb55c0..de33f36947e9 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
| @@ -2603,8 +2603,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, | |||
| 2603 | skb->network_header = skb->tail; | 2603 | skb->network_header = skb->tail; |
| 2604 | skb->transport_header = skb->network_header + sizeof(struct iphdr); | 2604 | skb->transport_header = skb->network_header + sizeof(struct iphdr); |
| 2605 | skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr)); | 2605 | skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr)); |
| 2606 | skb->queue_mapping = pkt_dev->cur_queue_map; | 2606 | skb_set_queue_mapping(skb, pkt_dev->cur_queue_map); |
| 2607 | |||
| 2608 | iph = ip_hdr(skb); | 2607 | iph = ip_hdr(skb); |
| 2609 | udph = udp_hdr(skb); | 2608 | udph = udp_hdr(skb); |
| 2610 | 2609 | ||
| @@ -2941,8 +2940,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, | |||
| 2941 | skb->network_header = skb->tail; | 2940 | skb->network_header = skb->tail; |
| 2942 | skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); | 2941 | skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); |
| 2943 | skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr)); | 2942 | skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr)); |
| 2944 | skb->queue_mapping = pkt_dev->cur_queue_map; | 2943 | skb_set_queue_mapping(skb, pkt_dev->cur_queue_map); |
| 2945 | |||
| 2946 | iph = ipv6_hdr(skb); | 2944 | iph = ipv6_hdr(skb); |
| 2947 | udph = udp_hdr(skb); | 2945 | udph = udp_hdr(skb); |
| 2948 | 2946 | ||
| @@ -3385,7 +3383,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) | |||
| 3385 | 3383 | ||
| 3386 | if ((netif_queue_stopped(odev) || | 3384 | if ((netif_queue_stopped(odev) || |
| 3387 | (pkt_dev->skb && | 3385 | (pkt_dev->skb && |
| 3388 | netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping))) || | 3386 | netif_subqueue_stopped(odev, pkt_dev->skb))) || |
| 3389 | need_resched()) { | 3387 | need_resched()) { |
| 3390 | idle_start = getCurUs(); | 3388 | idle_start = getCurUs(); |
| 3391 | 3389 | ||
| @@ -3402,7 +3400,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) | |||
| 3402 | pkt_dev->idle_acc += getCurUs() - idle_start; | 3400 | pkt_dev->idle_acc += getCurUs() - idle_start; |
| 3403 | 3401 | ||
| 3404 | if (netif_queue_stopped(odev) || | 3402 | if (netif_queue_stopped(odev) || |
| 3405 | netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) { | 3403 | netif_subqueue_stopped(odev, pkt_dev->skb)) { |
| 3406 | pkt_dev->next_tx_us = getCurUs(); /* TODO */ | 3404 | pkt_dev->next_tx_us = getCurUs(); /* TODO */ |
| 3407 | pkt_dev->next_tx_ns = 0; | 3405 | pkt_dev->next_tx_ns = 0; |
| 3408 | goto out; /* Try the next interface */ | 3406 | goto out; /* Try the next interface */ |
| @@ -3431,7 +3429,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) | |||
| 3431 | 3429 | ||
| 3432 | netif_tx_lock_bh(odev); | 3430 | netif_tx_lock_bh(odev); |
| 3433 | if (!netif_queue_stopped(odev) && | 3431 | if (!netif_queue_stopped(odev) && |
| 3434 | !netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) { | 3432 | !netif_subqueue_stopped(odev, pkt_dev->skb)) { |
| 3435 | 3433 | ||
| 3436 | atomic_inc(&(pkt_dev->skb->users)); | 3434 | atomic_inc(&(pkt_dev->skb->users)); |
| 3437 | retry_now: | 3435 | retry_now: |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 4a2640d38261..e1ba26fb4bf2 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
| @@ -742,7 +742,7 @@ static struct net *get_net_ns_by_pid(pid_t pid) | |||
| 742 | /* Lookup the network namespace */ | 742 | /* Lookup the network namespace */ |
| 743 | net = ERR_PTR(-ESRCH); | 743 | net = ERR_PTR(-ESRCH); |
| 744 | rcu_read_lock(); | 744 | rcu_read_lock(); |
| 745 | tsk = find_task_by_pid(pid); | 745 | tsk = find_task_by_vpid(pid); |
| 746 | if (tsk) { | 746 | if (tsk) { |
| 747 | struct nsproxy *nsproxy; | 747 | struct nsproxy *nsproxy; |
| 748 | nsproxy = task_nsproxy(tsk); | 748 | nsproxy = task_nsproxy(tsk); |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 70d9b5da96ae..32d5826b7177 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
| @@ -415,13 +415,6 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) | |||
| 415 | n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; | 415 | n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; |
| 416 | n->nohdr = 0; | 416 | n->nohdr = 0; |
| 417 | n->destructor = NULL; | 417 | n->destructor = NULL; |
| 418 | #ifdef CONFIG_NET_CLS_ACT | ||
| 419 | /* FIXME What is this and why don't we do it in copy_skb_header? */ | ||
| 420 | n->tc_verd = SET_TC_VERD(n->tc_verd,0); | ||
| 421 | n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd); | ||
| 422 | n->tc_verd = CLR_TC_MUNGED(n->tc_verd); | ||
| 423 | C(iif); | ||
| 424 | #endif | ||
| 425 | C(truesize); | 418 | C(truesize); |
| 426 | atomic_set(&n->users, 1); | 419 | atomic_set(&n->users, 1); |
| 427 | C(head); | 420 | C(head); |
| @@ -2035,8 +2028,8 @@ void __init skb_init(void) | |||
| 2035 | * Fill the specified scatter-gather list with mappings/pointers into a | 2028 | * Fill the specified scatter-gather list with mappings/pointers into a |
| 2036 | * region of the buffer space attached to a socket buffer. | 2029 | * region of the buffer space attached to a socket buffer. |
| 2037 | */ | 2030 | */ |
| 2038 | int | 2031 | static int |
| 2039 | skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) | 2032 | __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) |
| 2040 | { | 2033 | { |
| 2041 | int start = skb_headlen(skb); | 2034 | int start = skb_headlen(skb); |
| 2042 | int i, copy = start - offset; | 2035 | int i, copy = start - offset; |
| @@ -2045,9 +2038,7 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) | |||
| 2045 | if (copy > 0) { | 2038 | if (copy > 0) { |
| 2046 | if (copy > len) | 2039 | if (copy > len) |
| 2047 | copy = len; | 2040 | copy = len; |
| 2048 | sg[elt].page = virt_to_page(skb->data + offset); | 2041 | sg_set_buf(sg, skb->data + offset, copy); |
| 2049 | sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE; | ||
| 2050 | sg[elt].length = copy; | ||
| 2051 | elt++; | 2042 | elt++; |
| 2052 | if ((len -= copy) == 0) | 2043 | if ((len -= copy) == 0) |
| 2053 | return elt; | 2044 | return elt; |
| @@ -2065,9 +2056,8 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) | |||
| 2065 | 2056 | ||
| 2066 | if (copy > len) | 2057 | if (copy > len) |
| 2067 | copy = len; | 2058 | copy = len; |
| 2068 | sg[elt].page = frag->page; | 2059 | sg_set_page(&sg[elt], frag->page, copy, |
| 2069 | sg[elt].offset = frag->page_offset+offset-start; | 2060 | frag->page_offset+offset-start); |
| 2070 | sg[elt].length = copy; | ||
| 2071 | elt++; | 2061 | elt++; |
| 2072 | if (!(len -= copy)) | 2062 | if (!(len -= copy)) |
| 2073 | return elt; | 2063 | return elt; |
| @@ -2088,7 +2078,8 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) | |||
| 2088 | if ((copy = end - offset) > 0) { | 2078 | if ((copy = end - offset) > 0) { |
| 2089 | if (copy > len) | 2079 | if (copy > len) |
| 2090 | copy = len; | 2080 | copy = len; |
| 2091 | elt += skb_to_sgvec(list, sg+elt, offset - start, copy); | 2081 | elt += __skb_to_sgvec(list, sg+elt, offset - start, |
| 2082 | copy); | ||
| 2092 | if ((len -= copy) == 0) | 2083 | if ((len -= copy) == 0) |
| 2093 | return elt; | 2084 | return elt; |
| 2094 | offset += copy; | 2085 | offset += copy; |
| @@ -2100,6 +2091,15 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) | |||
| 2100 | return elt; | 2091 | return elt; |
| 2101 | } | 2092 | } |
| 2102 | 2093 | ||
| 2094 | int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) | ||
| 2095 | { | ||
| 2096 | int nsg = __skb_to_sgvec(skb, sg, offset, len); | ||
| 2097 | |||
| 2098 | sg_mark_end(&sg[nsg - 1]); | ||
| 2099 | |||
| 2100 | return nsg; | ||
| 2101 | } | ||
| 2102 | |||
| 2103 | /** | 2103 | /** |
| 2104 | * skb_cow_data - Check that a socket buffer's data buffers are writable | 2104 | * skb_cow_data - Check that a socket buffer's data buffers are writable |
| 2105 | * @skb: The socket buffer to check. | 2105 | * @skb: The socket buffer to check. |
diff --git a/net/core/sock.c b/net/core/sock.c index febbcbcf8022..8fc2f84209e4 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
| @@ -857,46 +857,43 @@ static inline void sock_lock_init(struct sock *sk) | |||
| 857 | af_family_keys + sk->sk_family); | 857 | af_family_keys + sk->sk_family); |
| 858 | } | 858 | } |
| 859 | 859 | ||
| 860 | /** | 860 | static void sock_copy(struct sock *nsk, const struct sock *osk) |
| 861 | * sk_alloc - All socket objects are allocated here | ||
| 862 | * @net: the applicable net namespace | ||
| 863 | * @family: protocol family | ||
| 864 | * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) | ||
| 865 | * @prot: struct proto associated with this new sock instance | ||
| 866 | * @zero_it: if we should zero the newly allocated sock | ||
| 867 | */ | ||
| 868 | struct sock *sk_alloc(struct net *net, int family, gfp_t priority, | ||
| 869 | struct proto *prot, int zero_it) | ||
| 870 | { | 861 | { |
| 871 | struct sock *sk = NULL; | 862 | #ifdef CONFIG_SECURITY_NETWORK |
| 872 | struct kmem_cache *slab = prot->slab; | 863 | void *sptr = nsk->sk_security; |
| 864 | #endif | ||
| 873 | 865 | ||
| 866 | memcpy(nsk, osk, osk->sk_prot->obj_size); | ||
| 867 | #ifdef CONFIG_SECURITY_NETWORK | ||
| 868 | nsk->sk_security = sptr; | ||
| 869 | security_sk_clone(osk, nsk); | ||
| 870 | #endif | ||
| 871 | } | ||
| 872 | |||
| 873 | static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority, | ||
| 874 | int family) | ||
| 875 | { | ||
| 876 | struct sock *sk; | ||
| 877 | struct kmem_cache *slab; | ||
| 878 | |||
| 879 | slab = prot->slab; | ||
| 874 | if (slab != NULL) | 880 | if (slab != NULL) |
| 875 | sk = kmem_cache_alloc(slab, priority); | 881 | sk = kmem_cache_alloc(slab, priority); |
| 876 | else | 882 | else |
| 877 | sk = kmalloc(prot->obj_size, priority); | 883 | sk = kmalloc(prot->obj_size, priority); |
| 878 | 884 | ||
| 879 | if (sk) { | 885 | if (sk != NULL) { |
| 880 | if (zero_it) { | ||
| 881 | memset(sk, 0, prot->obj_size); | ||
| 882 | sk->sk_family = family; | ||
| 883 | /* | ||
| 884 | * See comment in struct sock definition to understand | ||
| 885 | * why we need sk_prot_creator -acme | ||
| 886 | */ | ||
| 887 | sk->sk_prot = sk->sk_prot_creator = prot; | ||
| 888 | sock_lock_init(sk); | ||
| 889 | sk->sk_net = get_net(net); | ||
| 890 | } | ||
| 891 | |||
| 892 | if (security_sk_alloc(sk, family, priority)) | 886 | if (security_sk_alloc(sk, family, priority)) |
| 893 | goto out_free; | 887 | goto out_free; |
| 894 | 888 | ||
| 895 | if (!try_module_get(prot->owner)) | 889 | if (!try_module_get(prot->owner)) |
| 896 | goto out_free; | 890 | goto out_free_sec; |
| 897 | } | 891 | } |
| 892 | |||
| 898 | return sk; | 893 | return sk; |
| 899 | 894 | ||
| 895 | out_free_sec: | ||
| 896 | security_sk_free(sk); | ||
| 900 | out_free: | 897 | out_free: |
| 901 | if (slab != NULL) | 898 | if (slab != NULL) |
| 902 | kmem_cache_free(slab, sk); | 899 | kmem_cache_free(slab, sk); |
| @@ -905,10 +902,53 @@ out_free: | |||
| 905 | return NULL; | 902 | return NULL; |
| 906 | } | 903 | } |
| 907 | 904 | ||
| 905 | static void sk_prot_free(struct proto *prot, struct sock *sk) | ||
| 906 | { | ||
| 907 | struct kmem_cache *slab; | ||
| 908 | struct module *owner; | ||
| 909 | |||
| 910 | owner = prot->owner; | ||
| 911 | slab = prot->slab; | ||
| 912 | |||
| 913 | security_sk_free(sk); | ||
| 914 | if (slab != NULL) | ||
| 915 | kmem_cache_free(slab, sk); | ||
| 916 | else | ||
| 917 | kfree(sk); | ||
| 918 | module_put(owner); | ||
| 919 | } | ||
| 920 | |||
| 921 | /** | ||
| 922 | * sk_alloc - All socket objects are allocated here | ||
| 923 | * @net: the applicable net namespace | ||
| 924 | * @family: protocol family | ||
| 925 | * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) | ||
| 926 | * @prot: struct proto associated with this new sock instance | ||
| 927 | * @zero_it: if we should zero the newly allocated sock | ||
| 928 | */ | ||
| 929 | struct sock *sk_alloc(struct net *net, int family, gfp_t priority, | ||
| 930 | struct proto *prot) | ||
| 931 | { | ||
| 932 | struct sock *sk; | ||
| 933 | |||
| 934 | sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family); | ||
| 935 | if (sk) { | ||
| 936 | sk->sk_family = family; | ||
| 937 | /* | ||
| 938 | * See comment in struct sock definition to understand | ||
| 939 | * why we need sk_prot_creator -acme | ||
| 940 | */ | ||
| 941 | sk->sk_prot = sk->sk_prot_creator = prot; | ||
| 942 | sock_lock_init(sk); | ||
| 943 | sk->sk_net = get_net(net); | ||
| 944 | } | ||
| 945 | |||
| 946 | return sk; | ||
| 947 | } | ||
| 948 | |||
| 908 | void sk_free(struct sock *sk) | 949 | void sk_free(struct sock *sk) |
| 909 | { | 950 | { |
| 910 | struct sk_filter *filter; | 951 | struct sk_filter *filter; |
| 911 | struct module *owner = sk->sk_prot_creator->owner; | ||
| 912 | 952 | ||
| 913 | if (sk->sk_destruct) | 953 | if (sk->sk_destruct) |
| 914 | sk->sk_destruct(sk); | 954 | sk->sk_destruct(sk); |
| @@ -925,25 +965,22 @@ void sk_free(struct sock *sk) | |||
| 925 | printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n", | 965 | printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n", |
| 926 | __FUNCTION__, atomic_read(&sk->sk_omem_alloc)); | 966 | __FUNCTION__, atomic_read(&sk->sk_omem_alloc)); |
| 927 | 967 | ||
| 928 | security_sk_free(sk); | ||
| 929 | put_net(sk->sk_net); | 968 | put_net(sk->sk_net); |
| 930 | if (sk->sk_prot_creator->slab != NULL) | 969 | sk_prot_free(sk->sk_prot_creator, sk); |
| 931 | kmem_cache_free(sk->sk_prot_creator->slab, sk); | ||
| 932 | else | ||
| 933 | kfree(sk); | ||
| 934 | module_put(owner); | ||
| 935 | } | 970 | } |
| 936 | 971 | ||
| 937 | struct sock *sk_clone(const struct sock *sk, const gfp_t priority) | 972 | struct sock *sk_clone(const struct sock *sk, const gfp_t priority) |
| 938 | { | 973 | { |
| 939 | struct sock *newsk = sk_alloc(sk->sk_net, sk->sk_family, priority, sk->sk_prot, 0); | 974 | struct sock *newsk; |
| 940 | 975 | ||
| 976 | newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family); | ||
| 941 | if (newsk != NULL) { | 977 | if (newsk != NULL) { |
| 942 | struct sk_filter *filter; | 978 | struct sk_filter *filter; |
| 943 | 979 | ||
| 944 | sock_copy(newsk, sk); | 980 | sock_copy(newsk, sk); |
| 945 | 981 | ||
| 946 | /* SANITY */ | 982 | /* SANITY */ |
| 983 | get_net(newsk->sk_net); | ||
| 947 | sk_node_init(&newsk->sk_node); | 984 | sk_node_init(&newsk->sk_node); |
| 948 | sock_lock_init(newsk); | 985 | sock_lock_init(newsk); |
| 949 | bh_lock_sock(newsk); | 986 | bh_lock_sock(newsk); |
| @@ -1649,7 +1686,6 @@ void sock_enable_timestamp(struct sock *sk) | |||
| 1649 | net_enable_timestamp(); | 1686 | net_enable_timestamp(); |
| 1650 | } | 1687 | } |
| 1651 | } | 1688 | } |
| 1652 | EXPORT_SYMBOL(sock_enable_timestamp); | ||
| 1653 | 1689 | ||
| 1654 | /* | 1690 | /* |
| 1655 | * Get a socket option on an socket. | 1691 | * Get a socket option on an socket. |
| @@ -1765,11 +1801,65 @@ EXPORT_SYMBOL(sk_common_release); | |||
| 1765 | static DEFINE_RWLOCK(proto_list_lock); | 1801 | static DEFINE_RWLOCK(proto_list_lock); |
| 1766 | static LIST_HEAD(proto_list); | 1802 | static LIST_HEAD(proto_list); |
| 1767 | 1803 | ||
| 1804 | #ifdef CONFIG_SMP | ||
| 1805 | /* | ||
| 1806 | * Define default functions to keep track of inuse sockets per protocol | ||
| 1807 | * Note that often used protocols use dedicated functions to get a speed increase. | ||
| 1808 | * (see DEFINE_PROTO_INUSE/REF_PROTO_INUSE) | ||
| 1809 | */ | ||
| 1810 | static void inuse_add(struct proto *prot, int inc) | ||
| 1811 | { | ||
| 1812 | per_cpu_ptr(prot->inuse_ptr, smp_processor_id())[0] += inc; | ||
| 1813 | } | ||
| 1814 | |||
| 1815 | static int inuse_get(const struct proto *prot) | ||
| 1816 | { | ||
| 1817 | int res = 0, cpu; | ||
| 1818 | for_each_possible_cpu(cpu) | ||
| 1819 | res += per_cpu_ptr(prot->inuse_ptr, cpu)[0]; | ||
| 1820 | return res; | ||
| 1821 | } | ||
| 1822 | |||
| 1823 | static int inuse_init(struct proto *prot) | ||
| 1824 | { | ||
| 1825 | if (!prot->inuse_getval || !prot->inuse_add) { | ||
| 1826 | prot->inuse_ptr = alloc_percpu(int); | ||
| 1827 | if (prot->inuse_ptr == NULL) | ||
| 1828 | return -ENOBUFS; | ||
| 1829 | |||
| 1830 | prot->inuse_getval = inuse_get; | ||
| 1831 | prot->inuse_add = inuse_add; | ||
| 1832 | } | ||
| 1833 | return 0; | ||
| 1834 | } | ||
| 1835 | |||
| 1836 | static void inuse_fini(struct proto *prot) | ||
| 1837 | { | ||
| 1838 | if (prot->inuse_ptr != NULL) { | ||
| 1839 | free_percpu(prot->inuse_ptr); | ||
| 1840 | prot->inuse_ptr = NULL; | ||
| 1841 | prot->inuse_getval = NULL; | ||
| 1842 | prot->inuse_add = NULL; | ||
| 1843 | } | ||
| 1844 | } | ||
| 1845 | #else | ||
| 1846 | static inline int inuse_init(struct proto *prot) | ||
| 1847 | { | ||
| 1848 | return 0; | ||
| 1849 | } | ||
| 1850 | |||
| 1851 | static inline void inuse_fini(struct proto *prot) | ||
| 1852 | { | ||
| 1853 | } | ||
| 1854 | #endif | ||
| 1855 | |||
| 1768 | int proto_register(struct proto *prot, int alloc_slab) | 1856 | int proto_register(struct proto *prot, int alloc_slab) |
| 1769 | { | 1857 | { |
| 1770 | char *request_sock_slab_name = NULL; | 1858 | char *request_sock_slab_name = NULL; |
| 1771 | char *timewait_sock_slab_name; | 1859 | char *timewait_sock_slab_name; |
| 1772 | int rc = -ENOBUFS; | 1860 | |
| 1861 | if (inuse_init(prot)) | ||
| 1862 | goto out; | ||
| 1773 | 1863 | ||
| 1774 | if (alloc_slab) { | 1864 | if (alloc_slab) { |
| 1775 | prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0, | 1865 | prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0, |
| @@ -1778,7 +1868,7 @@ int proto_register(struct proto *prot, int alloc_slab) | |||
| 1778 | if (prot->slab == NULL) { | 1868 | if (prot->slab == NULL) { |
| 1779 | printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n", | 1869 | printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n", |
| 1780 | prot->name); | 1870 | prot->name); |
| 1781 | goto out; | 1871 | goto out_free_inuse; |
| 1782 | } | 1872 | } |
| 1783 | 1873 | ||
| 1784 | if (prot->rsk_prot != NULL) { | 1874 | if (prot->rsk_prot != NULL) { |
| @@ -1822,9 +1912,8 @@ int proto_register(struct proto *prot, int alloc_slab) | |||
| 1822 | write_lock(&proto_list_lock); | 1912 | write_lock(&proto_list_lock); |
| 1823 | list_add(&prot->node, &proto_list); | 1913 | list_add(&prot->node, &proto_list); |
| 1824 | write_unlock(&proto_list_lock); | 1914 | write_unlock(&proto_list_lock); |
| 1825 | rc = 0; | 1915 | return 0; |
| 1826 | out: | 1916 | |
| 1827 | return rc; | ||
| 1828 | out_free_timewait_sock_slab_name: | 1917 | out_free_timewait_sock_slab_name: |
| 1829 | kfree(timewait_sock_slab_name); | 1918 | kfree(timewait_sock_slab_name); |
| 1830 | out_free_request_sock_slab: | 1919 | out_free_request_sock_slab: |
| @@ -1837,7 +1926,10 @@ out_free_request_sock_slab_name: | |||
| 1837 | out_free_sock_slab: | 1926 | out_free_sock_slab: |
| 1838 | kmem_cache_destroy(prot->slab); | 1927 | kmem_cache_destroy(prot->slab); |
| 1839 | prot->slab = NULL; | 1928 | prot->slab = NULL; |
| 1840 | goto out; | 1929 | out_free_inuse: |
| 1930 | inuse_fini(prot); | ||
| 1931 | out: | ||
| 1932 | return -ENOBUFS; | ||
| 1841 | } | 1933 | } |
| 1842 | 1934 | ||
| 1843 | EXPORT_SYMBOL(proto_register); | 1935 | EXPORT_SYMBOL(proto_register); |
| @@ -1848,6 +1940,7 @@ void proto_unregister(struct proto *prot) | |||
| 1848 | list_del(&prot->node); | 1940 | list_del(&prot->node); |
| 1849 | write_unlock(&proto_list_lock); | 1941 | write_unlock(&proto_list_lock); |
| 1850 | 1942 | ||
| 1943 | inuse_fini(prot); | ||
| 1851 | if (prot->slab != NULL) { | 1944 | if (prot->slab != NULL) { |
| 1852 | kmem_cache_destroy(prot->slab); | 1945 | kmem_cache_destroy(prot->slab); |
| 1853 | prot->slab = NULL; | 1946 | prot->slab = NULL; |
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 6d5ea9762040..113cc728dc31 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c | |||
| @@ -9,25 +9,12 @@ | |||
| 9 | #include <linux/sysctl.h> | 9 | #include <linux/sysctl.h> |
| 10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
| 11 | #include <linux/socket.h> | 11 | #include <linux/socket.h> |
| 12 | #include <linux/netdevice.h> | ||
| 12 | #include <net/sock.h> | 13 | #include <net/sock.h> |
| 14 | #include <net/xfrm.h> | ||
| 13 | 15 | ||
| 14 | #ifdef CONFIG_SYSCTL | 16 | #ifdef CONFIG_SYSCTL |
| 15 | 17 | ||
| 16 | extern int netdev_max_backlog; | ||
| 17 | extern int weight_p; | ||
| 18 | |||
| 19 | extern __u32 sysctl_wmem_max; | ||
| 20 | extern __u32 sysctl_rmem_max; | ||
| 21 | |||
| 22 | extern int sysctl_core_destroy_delay; | ||
| 23 | |||
| 24 | #ifdef CONFIG_XFRM | ||
| 25 | extern u32 sysctl_xfrm_aevent_etime; | ||
| 26 | extern u32 sysctl_xfrm_aevent_rseqth; | ||
| 27 | extern int sysctl_xfrm_larval_drop; | ||
| 28 | extern u32 sysctl_xfrm_acq_expires; | ||
| 29 | #endif | ||
| 30 | |||
| 31 | ctl_table core_table[] = { | 18 | ctl_table core_table[] = { |
| 32 | #ifdef CONFIG_NET | 19 | #ifdef CONFIG_NET |
| 33 | { | 20 | { |
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index 426008e3b7e3..d694656b8800 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c | |||
| @@ -750,20 +750,16 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) | |||
| 750 | */ | 750 | */ |
| 751 | hctx->ccid2hctx_ssthresh = ~0; | 751 | hctx->ccid2hctx_ssthresh = ~0; |
| 752 | hctx->ccid2hctx_numdupack = 3; | 752 | hctx->ccid2hctx_numdupack = 3; |
| 753 | hctx->ccid2hctx_seqbufc = 0; | ||
| 754 | 753 | ||
| 755 | /* XXX init ~ to window size... */ | 754 | /* XXX init ~ to window size... */ |
| 756 | if (ccid2_hc_tx_alloc_seq(hctx)) | 755 | if (ccid2_hc_tx_alloc_seq(hctx)) |
| 757 | return -ENOMEM; | 756 | return -ENOMEM; |
| 758 | 757 | ||
| 759 | hctx->ccid2hctx_sent = 0; | ||
| 760 | hctx->ccid2hctx_rto = 3 * HZ; | 758 | hctx->ccid2hctx_rto = 3 * HZ; |
| 761 | ccid2_change_srtt(hctx, -1); | 759 | ccid2_change_srtt(hctx, -1); |
| 762 | hctx->ccid2hctx_rttvar = -1; | 760 | hctx->ccid2hctx_rttvar = -1; |
| 763 | hctx->ccid2hctx_lastrtt = 0; | ||
| 764 | hctx->ccid2hctx_rpdupack = -1; | 761 | hctx->ccid2hctx_rpdupack = -1; |
| 765 | hctx->ccid2hctx_last_cong = jiffies; | 762 | hctx->ccid2hctx_last_cong = jiffies; |
| 766 | hctx->ccid2hctx_high_ack = 0; | ||
| 767 | 763 | ||
| 768 | hctx->ccid2hctx_rtotimer.function = &ccid2_hc_tx_rto_expire; | 764 | hctx->ccid2hctx_rtotimer.function = &ccid2_hc_tx_rto_expire; |
| 769 | hctx->ccid2hctx_rtotimer.data = (unsigned long)sk; | 765 | hctx->ccid2hctx_rtotimer.data = (unsigned long)sk; |
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index 25772c326172..19b33586333d 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c | |||
| @@ -40,6 +40,8 @@ | |||
| 40 | #include "lib/tfrc.h" | 40 | #include "lib/tfrc.h" |
| 41 | #include "ccid3.h" | 41 | #include "ccid3.h" |
| 42 | 42 | ||
| 43 | #include <asm/unaligned.h> | ||
| 44 | |||
| 43 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG | 45 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG |
| 44 | static int ccid3_debug; | 46 | static int ccid3_debug; |
| 45 | #define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a) | 47 | #define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a) |
| @@ -544,6 +546,7 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option, | |||
| 544 | const struct dccp_sock *dp = dccp_sk(sk); | 546 | const struct dccp_sock *dp = dccp_sk(sk); |
| 545 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 547 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); |
| 546 | struct ccid3_options_received *opt_recv; | 548 | struct ccid3_options_received *opt_recv; |
| 549 | __be32 opt_val; | ||
| 547 | 550 | ||
| 548 | opt_recv = &hctx->ccid3hctx_options_received; | 551 | opt_recv = &hctx->ccid3hctx_options_received; |
| 549 | 552 | ||
| @@ -563,8 +566,8 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option, | |||
| 563 | dccp_role(sk), sk, len); | 566 | dccp_role(sk), sk, len); |
| 564 | rc = -EINVAL; | 567 | rc = -EINVAL; |
| 565 | } else { | 568 | } else { |
| 566 | opt_recv->ccid3or_loss_event_rate = | 569 | opt_val = get_unaligned((__be32 *)value); |
| 567 | ntohl(*(__be32 *)value); | 570 | opt_recv->ccid3or_loss_event_rate = ntohl(opt_val); |
| 568 | ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n", | 571 | ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n", |
| 569 | dccp_role(sk), sk, | 572 | dccp_role(sk), sk, |
| 570 | opt_recv->ccid3or_loss_event_rate); | 573 | opt_recv->ccid3or_loss_event_rate); |
| @@ -585,8 +588,8 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option, | |||
| 585 | dccp_role(sk), sk, len); | 588 | dccp_role(sk), sk, len); |
| 586 | rc = -EINVAL; | 589 | rc = -EINVAL; |
| 587 | } else { | 590 | } else { |
| 588 | opt_recv->ccid3or_receive_rate = | 591 | opt_val = get_unaligned((__be32 *)value); |
| 589 | ntohl(*(__be32 *)value); | 592 | opt_recv->ccid3or_receive_rate = ntohl(opt_val); |
| 590 | ccid3_pr_debug("%s(%p), RECEIVE_RATE=%u\n", | 593 | ccid3_pr_debug("%s(%p), RECEIVE_RATE=%u\n", |
| 591 | dccp_role(sk), sk, | 594 | dccp_role(sk), sk, |
| 592 | opt_recv->ccid3or_receive_rate); | 595 | opt_recv->ccid3or_receive_rate); |
| @@ -601,8 +604,6 @@ static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk) | |||
| 601 | { | 604 | { |
| 602 | struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid); | 605 | struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid); |
| 603 | 606 | ||
| 604 | hctx->ccid3hctx_s = 0; | ||
| 605 | hctx->ccid3hctx_rtt = 0; | ||
| 606 | hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT; | 607 | hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT; |
| 607 | INIT_LIST_HEAD(&hctx->ccid3hctx_hist); | 608 | INIT_LIST_HEAD(&hctx->ccid3hctx_hist); |
| 608 | 609 | ||
| @@ -963,8 +964,6 @@ static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk) | |||
| 963 | INIT_LIST_HEAD(&hcrx->ccid3hcrx_li_hist); | 964 | INIT_LIST_HEAD(&hcrx->ccid3hcrx_li_hist); |
| 964 | hcrx->ccid3hcrx_tstamp_last_feedback = | 965 | hcrx->ccid3hcrx_tstamp_last_feedback = |
| 965 | hcrx->ccid3hcrx_tstamp_last_ack = ktime_get_real(); | 966 | hcrx->ccid3hcrx_tstamp_last_ack = ktime_get_real(); |
| 966 | hcrx->ccid3hcrx_s = 0; | ||
| 967 | hcrx->ccid3hcrx_rtt = 0; | ||
| 968 | return 0; | 967 | return 0; |
| 969 | } | 968 | } |
| 970 | 969 | ||
diff --git a/net/dccp/diag.c b/net/dccp/diag.c index 0f3745585a94..d8a3509b26f6 100644 --- a/net/dccp/diag.c +++ b/net/dccp/diag.c | |||
| @@ -68,3 +68,4 @@ module_exit(dccp_diag_fini); | |||
| 68 | MODULE_LICENSE("GPL"); | 68 | MODULE_LICENSE("GPL"); |
| 69 | MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>"); | 69 | MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>"); |
| 70 | MODULE_DESCRIPTION("DCCP inet_diag handler"); | 70 | MODULE_DESCRIPTION("DCCP inet_diag handler"); |
| 71 | MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_INET_DIAG, DCCPDIAG_GETSOCK); | ||
diff --git a/net/dccp/input.c b/net/dccp/input.c index 3560a2a875a0..1ce101062824 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c | |||
| @@ -58,6 +58,42 @@ static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb) | |||
| 58 | dccp_send_close(sk, 0); | 58 | dccp_send_close(sk, 0); |
| 59 | } | 59 | } |
| 60 | 60 | ||
| 61 | static u8 dccp_reset_code_convert(const u8 code) | ||
| 62 | { | ||
| 63 | const u8 error_code[] = { | ||
| 64 | [DCCP_RESET_CODE_CLOSED] = 0, /* normal termination */ | ||
| 65 | [DCCP_RESET_CODE_UNSPECIFIED] = 0, /* nothing known */ | ||
| 66 | [DCCP_RESET_CODE_ABORTED] = ECONNRESET, | ||
| 67 | |||
| 68 | [DCCP_RESET_CODE_NO_CONNECTION] = ECONNREFUSED, | ||
| 69 | [DCCP_RESET_CODE_CONNECTION_REFUSED] = ECONNREFUSED, | ||
| 70 | [DCCP_RESET_CODE_TOO_BUSY] = EUSERS, | ||
| 71 | [DCCP_RESET_CODE_AGGRESSION_PENALTY] = EDQUOT, | ||
| 72 | |||
| 73 | [DCCP_RESET_CODE_PACKET_ERROR] = ENOMSG, | ||
| 74 | [DCCP_RESET_CODE_BAD_INIT_COOKIE] = EBADR, | ||
| 75 | [DCCP_RESET_CODE_BAD_SERVICE_CODE] = EBADRQC, | ||
| 76 | [DCCP_RESET_CODE_OPTION_ERROR] = EILSEQ, | ||
| 77 | [DCCP_RESET_CODE_MANDATORY_ERROR] = EOPNOTSUPP, | ||
| 78 | }; | ||
| 79 | |||
| 80 | return code >= DCCP_MAX_RESET_CODES ? 0 : error_code[code]; | ||
| 81 | } | ||
| 82 | |||
| 83 | static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb) | ||
| 84 | { | ||
| 85 | u8 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code); | ||
| 86 | |||
| 87 | sk->sk_err = err; | ||
| 88 | |||
| 89 | /* Queue the equivalent of TCP fin so that dccp_recvmsg exits the loop */ | ||
| 90 | dccp_fin(sk, skb); | ||
| 91 | |||
| 92 | if (err && !sock_flag(sk, SOCK_DEAD)) | ||
| 93 | sk_wake_async(sk, 0, POLL_ERR); | ||
| 94 | dccp_time_wait(sk, DCCP_TIME_WAIT, 0); | ||
| 95 | } | ||
| 96 | |||
| 61 | static void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb) | 97 | static void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb) |
| 62 | { | 98 | { |
| 63 | struct dccp_sock *dp = dccp_sk(sk); | 99 | struct dccp_sock *dp = dccp_sk(sk); |
| @@ -191,9 +227,8 @@ static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
| 191 | * S.state := TIMEWAIT | 227 | * S.state := TIMEWAIT |
| 192 | * Set TIMEWAIT timer | 228 | * Set TIMEWAIT timer |
| 193 | * Drop packet and return | 229 | * Drop packet and return |
| 194 | */ | 230 | */ |
| 195 | dccp_fin(sk, skb); | 231 | dccp_rcv_reset(sk, skb); |
| 196 | dccp_time_wait(sk, DCCP_TIME_WAIT, 0); | ||
| 197 | return 0; | 232 | return 0; |
| 198 | case DCCP_PKT_CLOSEREQ: | 233 | case DCCP_PKT_CLOSEREQ: |
| 199 | dccp_rcv_closereq(sk, skb); | 234 | dccp_rcv_closereq(sk, skb); |
| @@ -521,12 +556,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
| 521 | * Drop packet and return | 556 | * Drop packet and return |
| 522 | */ | 557 | */ |
| 523 | if (dh->dccph_type == DCCP_PKT_RESET) { | 558 | if (dh->dccph_type == DCCP_PKT_RESET) { |
| 524 | /* | 559 | dccp_rcv_reset(sk, skb); |
| 525 | * Queue the equivalent of TCP fin so that dccp_recvmsg | ||
| 526 | * exits the loop | ||
| 527 | */ | ||
| 528 | dccp_fin(sk, skb); | ||
| 529 | dccp_time_wait(sk, DCCP_TIME_WAIT, 0); | ||
| 530 | return 0; | 560 | return 0; |
| 531 | /* | 561 | /* |
| 532 | * Step 7: Check for unexpected packet types | 562 | * Step 7: Check for unexpected packet types |
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 44f6e17e105f..db17b83e8d3e 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
| @@ -241,8 +241,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) | |||
| 241 | goto out; | 241 | goto out; |
| 242 | 242 | ||
| 243 | dp = dccp_sk(sk); | 243 | dp = dccp_sk(sk); |
| 244 | seq = dccp_hdr_seq(skb); | 244 | seq = dccp_hdr_seq(dh); |
| 245 | if (sk->sk_state != DCCP_LISTEN && | 245 | if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) && |
| 246 | !between48(seq, dp->dccps_swl, dp->dccps_swh)) { | 246 | !between48(seq, dp->dccps_swl, dp->dccps_swh)) { |
| 247 | NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); | 247 | NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); |
| 248 | goto out; | 248 | goto out; |
| @@ -795,7 +795,7 @@ static int dccp_v4_rcv(struct sk_buff *skb) | |||
| 795 | 795 | ||
| 796 | dh = dccp_hdr(skb); | 796 | dh = dccp_hdr(skb); |
| 797 | 797 | ||
| 798 | DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb); | 798 | DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh); |
| 799 | DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type; | 799 | DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type; |
| 800 | 800 | ||
| 801 | dccp_pr_debug("%8.8s " | 801 | dccp_pr_debug("%8.8s " |
| @@ -922,6 +922,8 @@ static struct timewait_sock_ops dccp_timewait_sock_ops = { | |||
| 922 | .twsk_obj_size = sizeof(struct inet_timewait_sock), | 922 | .twsk_obj_size = sizeof(struct inet_timewait_sock), |
| 923 | }; | 923 | }; |
| 924 | 924 | ||
| 925 | DEFINE_PROTO_INUSE(dccp_v4) | ||
| 926 | |||
| 925 | static struct proto dccp_v4_prot = { | 927 | static struct proto dccp_v4_prot = { |
| 926 | .name = "DCCP", | 928 | .name = "DCCP", |
| 927 | .owner = THIS_MODULE, | 929 | .owner = THIS_MODULE, |
| @@ -950,6 +952,7 @@ static struct proto dccp_v4_prot = { | |||
| 950 | .compat_setsockopt = compat_dccp_setsockopt, | 952 | .compat_setsockopt = compat_dccp_setsockopt, |
| 951 | .compat_getsockopt = compat_dccp_getsockopt, | 953 | .compat_getsockopt = compat_dccp_getsockopt, |
| 952 | #endif | 954 | #endif |
| 955 | REF_PROTO_INUSE(dccp_v4) | ||
| 953 | }; | 956 | }; |
| 954 | 957 | ||
| 955 | static struct net_protocol dccp_v4_protocol = { | 958 | static struct net_protocol dccp_v4_protocol = { |
| @@ -1037,8 +1040,8 @@ module_exit(dccp_v4_exit); | |||
| 1037 | * values directly, Also cover the case where the protocol is not specified, | 1040 | * values directly, Also cover the case where the protocol is not specified, |
| 1038 | * i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP | 1041 | * i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP |
| 1039 | */ | 1042 | */ |
| 1040 | MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-33-type-6"); | 1043 | MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 33, 6); |
| 1041 | MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-0-type-6"); | 1044 | MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 0, 6); |
| 1042 | MODULE_LICENSE("GPL"); | 1045 | MODULE_LICENSE("GPL"); |
| 1043 | MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>"); | 1046 | MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>"); |
| 1044 | MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol"); | 1047 | MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol"); |
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index cac53548c2d8..87c98fb86fa8 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
| @@ -173,7 +173,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
| 173 | 173 | ||
| 174 | icmpv6_err_convert(type, code, &err); | 174 | icmpv6_err_convert(type, code, &err); |
| 175 | 175 | ||
| 176 | seq = DCCP_SKB_CB(skb)->dccpd_seq; | 176 | seq = dccp_hdr_seq(dh); |
| 177 | /* Might be for an request_sock */ | 177 | /* Might be for an request_sock */ |
| 178 | switch (sk->sk_state) { | 178 | switch (sk->sk_state) { |
| 179 | struct request_sock *req, **prev; | 179 | struct request_sock *req, **prev; |
| @@ -787,7 +787,7 @@ static int dccp_v6_rcv(struct sk_buff *skb) | |||
| 787 | 787 | ||
| 788 | dh = dccp_hdr(skb); | 788 | dh = dccp_hdr(skb); |
| 789 | 789 | ||
| 790 | DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb); | 790 | DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh); |
| 791 | DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type; | 791 | DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type; |
| 792 | 792 | ||
| 793 | if (dccp_packet_without_ack(skb)) | 793 | if (dccp_packet_without_ack(skb)) |
| @@ -1107,6 +1107,8 @@ static struct timewait_sock_ops dccp6_timewait_sock_ops = { | |||
| 1107 | .twsk_obj_size = sizeof(struct dccp6_timewait_sock), | 1107 | .twsk_obj_size = sizeof(struct dccp6_timewait_sock), |
| 1108 | }; | 1108 | }; |
| 1109 | 1109 | ||
| 1110 | DEFINE_PROTO_INUSE(dccp_v6) | ||
| 1111 | |||
| 1110 | static struct proto dccp_v6_prot = { | 1112 | static struct proto dccp_v6_prot = { |
| 1111 | .name = "DCCPv6", | 1113 | .name = "DCCPv6", |
| 1112 | .owner = THIS_MODULE, | 1114 | .owner = THIS_MODULE, |
| @@ -1135,6 +1137,7 @@ static struct proto dccp_v6_prot = { | |||
| 1135 | .compat_setsockopt = compat_dccp_setsockopt, | 1137 | .compat_setsockopt = compat_dccp_setsockopt, |
| 1136 | .compat_getsockopt = compat_dccp_getsockopt, | 1138 | .compat_getsockopt = compat_dccp_getsockopt, |
| 1137 | #endif | 1139 | #endif |
| 1140 | REF_PROTO_INUSE(dccp_v6) | ||
| 1138 | }; | 1141 | }; |
| 1139 | 1142 | ||
| 1140 | static struct inet6_protocol dccp_v6_protocol = { | 1143 | static struct inet6_protocol dccp_v6_protocol = { |
| @@ -1219,8 +1222,8 @@ module_exit(dccp_v6_exit); | |||
| 1219 | * values directly, Also cover the case where the protocol is not specified, | 1222 | * values directly, Also cover the case where the protocol is not specified, |
| 1220 | * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP | 1223 | * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP |
| 1221 | */ | 1224 | */ |
| 1222 | MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-33-type-6"); | 1225 | MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6); |
| 1223 | MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-0-type-6"); | 1226 | MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6); |
| 1224 | MODULE_LICENSE("GPL"); | 1227 | MODULE_LICENSE("GPL"); |
| 1225 | MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>"); | 1228 | MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>"); |
| 1226 | MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol"); | 1229 | MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol"); |
diff --git a/net/dccp/options.c b/net/dccp/options.c index d361b5533309..d286cffe2c49 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/dccp.h> | 14 | #include <linux/dccp.h> |
| 15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
| 16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
| 17 | #include <asm/unaligned.h> | ||
| 17 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
| 18 | #include <linux/skbuff.h> | 19 | #include <linux/skbuff.h> |
| 19 | 20 | ||
| @@ -59,6 +60,7 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
| 59 | unsigned char opt, len; | 60 | unsigned char opt, len; |
| 60 | unsigned char *value; | 61 | unsigned char *value; |
| 61 | u32 elapsed_time; | 62 | u32 elapsed_time; |
| 63 | __be32 opt_val; | ||
| 62 | int rc; | 64 | int rc; |
| 63 | int mandatory = 0; | 65 | int mandatory = 0; |
| 64 | 66 | ||
| @@ -145,7 +147,8 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
| 145 | if (len != 4) | 147 | if (len != 4) |
| 146 | goto out_invalid_option; | 148 | goto out_invalid_option; |
| 147 | 149 | ||
| 148 | opt_recv->dccpor_timestamp = ntohl(*(__be32 *)value); | 150 | opt_val = get_unaligned((__be32 *)value); |
| 151 | opt_recv->dccpor_timestamp = ntohl(opt_val); | ||
| 149 | 152 | ||
| 150 | dp->dccps_timestamp_echo = opt_recv->dccpor_timestamp; | 153 | dp->dccps_timestamp_echo = opt_recv->dccpor_timestamp; |
| 151 | dp->dccps_timestamp_time = ktime_get_real(); | 154 | dp->dccps_timestamp_time = ktime_get_real(); |
| @@ -159,7 +162,8 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
| 159 | if (len != 4 && len != 6 && len != 8) | 162 | if (len != 4 && len != 6 && len != 8) |
| 160 | goto out_invalid_option; | 163 | goto out_invalid_option; |
| 161 | 164 | ||
| 162 | opt_recv->dccpor_timestamp_echo = ntohl(*(__be32 *)value); | 165 | opt_val = get_unaligned((__be32 *)value); |
| 166 | opt_recv->dccpor_timestamp_echo = ntohl(opt_val); | ||
| 163 | 167 | ||
| 164 | dccp_pr_debug("%s rx opt: TIMESTAMP_ECHO=%u, len=%d, " | 168 | dccp_pr_debug("%s rx opt: TIMESTAMP_ECHO=%u, len=%d, " |
| 165 | "ackno=%llu", dccp_role(sk), | 169 | "ackno=%llu", dccp_role(sk), |
| @@ -168,16 +172,20 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
| 168 | (unsigned long long) | 172 | (unsigned long long) |
| 169 | DCCP_SKB_CB(skb)->dccpd_ack_seq); | 173 | DCCP_SKB_CB(skb)->dccpd_ack_seq); |
| 170 | 174 | ||
| 175 | value += 4; | ||
| 171 | 176 | ||
| 172 | if (len == 4) { | 177 | if (len == 4) { /* no elapsed time included */ |
| 173 | dccp_pr_debug_cat("\n"); | 178 | dccp_pr_debug_cat("\n"); |
| 174 | break; | 179 | break; |
| 175 | } | 180 | } |
| 176 | 181 | ||
| 177 | if (len == 6) | 182 | if (len == 6) { /* 2-byte elapsed time */ |
| 178 | elapsed_time = ntohs(*(__be16 *)(value + 4)); | 183 | __be16 opt_val2 = get_unaligned((__be16 *)value); |
| 179 | else | 184 | elapsed_time = ntohs(opt_val2); |
| 180 | elapsed_time = ntohl(*(__be32 *)(value + 4)); | 185 | } else { /* 4-byte elapsed time */ |
| 186 | opt_val = get_unaligned((__be32 *)value); | ||
| 187 | elapsed_time = ntohl(opt_val); | ||
| 188 | } | ||
| 181 | 189 | ||
| 182 | dccp_pr_debug_cat(", ELAPSED_TIME=%u\n", elapsed_time); | 190 | dccp_pr_debug_cat(", ELAPSED_TIME=%u\n", elapsed_time); |
| 183 | 191 | ||
| @@ -192,10 +200,13 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
| 192 | if (pkt_type == DCCP_PKT_DATA) | 200 | if (pkt_type == DCCP_PKT_DATA) |
| 193 | continue; | 201 | continue; |
| 194 | 202 | ||
| 195 | if (len == 2) | 203 | if (len == 2) { |
| 196 | elapsed_time = ntohs(*(__be16 *)value); | 204 | __be16 opt_val2 = get_unaligned((__be16 *)value); |
| 197 | else | 205 | elapsed_time = ntohs(opt_val2); |
| 198 | elapsed_time = ntohl(*(__be32 *)value); | 206 | } else { |
| 207 | opt_val = get_unaligned((__be32 *)value); | ||
| 208 | elapsed_time = ntohl(opt_val); | ||
| 209 | } | ||
| 199 | 210 | ||
| 200 | if (elapsed_time > opt_recv->dccpor_elapsed_time) | 211 | if (elapsed_time > opt_recv->dccpor_elapsed_time) |
| 201 | opt_recv->dccpor_elapsed_time = elapsed_time; | 212 | opt_recv->dccpor_elapsed_time = elapsed_time; |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index cc9bf1cb2646..7a3bea9c28c1 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <net/sock.h> | 26 | #include <net/sock.h> |
| 27 | #include <net/xfrm.h> | 27 | #include <net/xfrm.h> |
| 28 | 28 | ||
| 29 | #include <asm/ioctls.h> | ||
| 29 | #include <asm/semaphore.h> | 30 | #include <asm/semaphore.h> |
| 30 | #include <linux/spinlock.h> | 31 | #include <linux/spinlock.h> |
| 31 | #include <linux/timer.h> | 32 | #include <linux/timer.h> |
| @@ -378,8 +379,36 @@ EXPORT_SYMBOL_GPL(dccp_poll); | |||
| 378 | 379 | ||
| 379 | int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg) | 380 | int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg) |
| 380 | { | 381 | { |
| 381 | dccp_pr_debug("entry\n"); | 382 | int rc = -ENOTCONN; |
| 382 | return -ENOIOCTLCMD; | 383 | |
| 384 | lock_sock(sk); | ||
| 385 | |||
| 386 | if (sk->sk_state == DCCP_LISTEN) | ||
| 387 | goto out; | ||
| 388 | |||
| 389 | switch (cmd) { | ||
| 390 | case SIOCINQ: { | ||
| 391 | struct sk_buff *skb; | ||
| 392 | unsigned long amount = 0; | ||
| 393 | |||
| 394 | skb = skb_peek(&sk->sk_receive_queue); | ||
| 395 | if (skb != NULL) { | ||
| 396 | /* | ||
| 397 | * We will only return the amount of this packet since | ||
| 398 | * that is all that will be read. | ||
| 399 | */ | ||
| 400 | amount = skb->len; | ||
| 401 | } | ||
| 402 | rc = put_user(amount, (int __user *)arg); | ||
| 403 | } | ||
| 404 | break; | ||
| 405 | default: | ||
| 406 | rc = -ENOIOCTLCMD; | ||
| 407 | break; | ||
| 408 | } | ||
| 409 | out: | ||
| 410 | release_sock(sk); | ||
| 411 | return rc; | ||
| 383 | } | 412 | } |
| 384 | 413 | ||
| 385 | EXPORT_SYMBOL_GPL(dccp_ioctl); | 414 | EXPORT_SYMBOL_GPL(dccp_ioctl); |
| @@ -1043,11 +1072,13 @@ static int __init dccp_init(void) | |||
| 1043 | } | 1072 | } |
| 1044 | 1073 | ||
| 1045 | for (i = 0; i < dccp_hashinfo.ehash_size; i++) { | 1074 | for (i = 0; i < dccp_hashinfo.ehash_size; i++) { |
| 1046 | rwlock_init(&dccp_hashinfo.ehash[i].lock); | ||
| 1047 | INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain); | 1075 | INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain); |
| 1048 | INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].twchain); | 1076 | INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].twchain); |
| 1049 | } | 1077 | } |
| 1050 | 1078 | ||
| 1079 | if (inet_ehash_locks_alloc(&dccp_hashinfo)) | ||
| 1080 | goto out_free_dccp_ehash; | ||
| 1081 | |||
| 1051 | bhash_order = ehash_order; | 1082 | bhash_order = ehash_order; |
| 1052 | 1083 | ||
| 1053 | do { | 1084 | do { |
| @@ -1062,7 +1093,7 @@ static int __init dccp_init(void) | |||
| 1062 | 1093 | ||
| 1063 | if (!dccp_hashinfo.bhash) { | 1094 | if (!dccp_hashinfo.bhash) { |
| 1064 | DCCP_CRIT("Failed to allocate DCCP bind hash table"); | 1095 | DCCP_CRIT("Failed to allocate DCCP bind hash table"); |
| 1065 | goto out_free_dccp_ehash; | 1096 | goto out_free_dccp_locks; |
| 1066 | } | 1097 | } |
| 1067 | 1098 | ||
| 1068 | for (i = 0; i < dccp_hashinfo.bhash_size; i++) { | 1099 | for (i = 0; i < dccp_hashinfo.bhash_size; i++) { |
| @@ -1092,6 +1123,8 @@ out_free_dccp_mib: | |||
| 1092 | out_free_dccp_bhash: | 1123 | out_free_dccp_bhash: |
| 1093 | free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order); | 1124 | free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order); |
| 1094 | dccp_hashinfo.bhash = NULL; | 1125 | dccp_hashinfo.bhash = NULL; |
| 1126 | out_free_dccp_locks: | ||
| 1127 | inet_ehash_locks_free(&dccp_hashinfo); | ||
| 1095 | out_free_dccp_ehash: | 1128 | out_free_dccp_ehash: |
| 1096 | free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order); | 1129 | free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order); |
| 1097 | dccp_hashinfo.ehash = NULL; | 1130 | dccp_hashinfo.ehash = NULL; |
| @@ -1110,6 +1143,7 @@ static void __exit dccp_fini(void) | |||
| 1110 | free_pages((unsigned long)dccp_hashinfo.ehash, | 1143 | free_pages((unsigned long)dccp_hashinfo.ehash, |
| 1111 | get_order(dccp_hashinfo.ehash_size * | 1144 | get_order(dccp_hashinfo.ehash_size * |
| 1112 | sizeof(struct inet_ehash_bucket))); | 1145 | sizeof(struct inet_ehash_bucket))); |
| 1146 | inet_ehash_locks_free(&dccp_hashinfo); | ||
| 1113 | kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); | 1147 | kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); |
| 1114 | dccp_ackvec_exit(); | 1148 | dccp_ackvec_exit(); |
| 1115 | dccp_sysctl_exit(); | 1149 | dccp_sysctl_exit(); |
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index aabe98d9402f..57d574951838 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c | |||
| @@ -474,7 +474,7 @@ static struct proto dn_proto = { | |||
| 474 | static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp) | 474 | static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp) |
| 475 | { | 475 | { |
| 476 | struct dn_scp *scp; | 476 | struct dn_scp *scp; |
| 477 | struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto, 1); | 477 | struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto); |
| 478 | 478 | ||
| 479 | if (!sk) | 479 | if (!sk) |
| 480 | goto out; | 480 | goto out; |
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 26130afd8029..66e266fb5908 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c | |||
| @@ -1439,7 +1439,7 @@ static const struct file_operations dn_dev_seq_fops = { | |||
| 1439 | 1439 | ||
| 1440 | #endif /* CONFIG_PROC_FS */ | 1440 | #endif /* CONFIG_PROC_FS */ |
| 1441 | 1441 | ||
| 1442 | static int __initdata addr[2]; | 1442 | static int addr[2]; |
| 1443 | module_param_array(addr, int, NULL, 0444); | 1443 | module_param_array(addr, int, NULL, 0444); |
| 1444 | MODULE_PARM_DESC(addr, "The DECnet address of this machine: area,node"); | 1444 | MODULE_PARM_DESC(addr, "The DECnet address of this machine: area,node"); |
| 1445 | 1445 | ||
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 9cae16b4e0b7..f70df073c588 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c | |||
| @@ -624,7 +624,7 @@ static int econet_create(struct net *net, struct socket *sock, int protocol) | |||
| 624 | sock->state = SS_UNCONNECTED; | 624 | sock->state = SS_UNCONNECTED; |
| 625 | 625 | ||
| 626 | err = -ENOBUFS; | 626 | err = -ENOBUFS; |
| 627 | sk = sk_alloc(net, PF_ECONET, GFP_KERNEL, &econet_proto, 1); | 627 | sk = sk_alloc(net, PF_ECONET, GFP_KERNEL, &econet_proto); |
| 628 | if (sk == NULL) | 628 | if (sk == NULL) |
| 629 | goto out; | 629 | goto out; |
| 630 | 630 | ||
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index ed8a3d49487d..6b2e454ae313 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c | |||
| @@ -298,6 +298,14 @@ static int eth_change_mtu(struct net_device *dev, int new_mtu) | |||
| 298 | return 0; | 298 | return 0; |
| 299 | } | 299 | } |
| 300 | 300 | ||
| 301 | static int eth_validate_addr(struct net_device *dev) | ||
| 302 | { | ||
| 303 | if (!is_valid_ether_addr(dev->dev_addr)) | ||
| 304 | return -EINVAL; | ||
| 305 | |||
| 306 | return 0; | ||
| 307 | } | ||
| 308 | |||
| 301 | const struct header_ops eth_header_ops ____cacheline_aligned = { | 309 | const struct header_ops eth_header_ops ____cacheline_aligned = { |
| 302 | .create = eth_header, | 310 | .create = eth_header, |
| 303 | .parse = eth_header_parse, | 311 | .parse = eth_header_parse, |
| @@ -317,6 +325,7 @@ void ether_setup(struct net_device *dev) | |||
| 317 | 325 | ||
| 318 | dev->change_mtu = eth_change_mtu; | 326 | dev->change_mtu = eth_change_mtu; |
| 319 | dev->set_mac_address = eth_mac_addr; | 327 | dev->set_mac_address = eth_mac_addr; |
| 328 | dev->validate_addr = eth_validate_addr; | ||
| 320 | 329 | ||
| 321 | dev->type = ARPHRD_ETHER; | 330 | dev->type = ARPHRD_ETHER; |
| 322 | dev->hard_header_len = ETH_HLEN; | 331 | dev->hard_header_len = ETH_HLEN; |
diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c index 0936a3e0210b..c6d760d9fbbe 100644 --- a/net/ieee80211/ieee80211_crypt_ccmp.c +++ b/net/ieee80211/ieee80211_crypt_ccmp.c | |||
| @@ -25,7 +25,6 @@ | |||
| 25 | #include <net/ieee80211.h> | 25 | #include <net/ieee80211.h> |
| 26 | 26 | ||
| 27 | #include <linux/crypto.h> | 27 | #include <linux/crypto.h> |
| 28 | #include <asm/scatterlist.h> | ||
| 29 | 28 | ||
| 30 | MODULE_AUTHOR("Jouni Malinen"); | 29 | MODULE_AUTHOR("Jouni Malinen"); |
| 31 | MODULE_DESCRIPTION("Host AP crypt: CCMP"); | 30 | MODULE_DESCRIPTION("Host AP crypt: CCMP"); |
diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c index 72e6ab66834f..58b22619ab15 100644 --- a/net/ieee80211/ieee80211_crypt_tkip.c +++ b/net/ieee80211/ieee80211_crypt_tkip.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
| 15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
| 16 | #include <linux/random.h> | 16 | #include <linux/random.h> |
| 17 | #include <linux/scatterlist.h> | ||
| 17 | #include <linux/skbuff.h> | 18 | #include <linux/skbuff.h> |
| 18 | #include <linux/netdevice.h> | 19 | #include <linux/netdevice.h> |
| 19 | #include <linux/mm.h> | 20 | #include <linux/mm.h> |
| @@ -24,7 +25,6 @@ | |||
| 24 | #include <net/ieee80211.h> | 25 | #include <net/ieee80211.h> |
| 25 | 26 | ||
| 26 | #include <linux/crypto.h> | 27 | #include <linux/crypto.h> |
| 27 | #include <asm/scatterlist.h> | ||
| 28 | #include <linux/crc32.h> | 28 | #include <linux/crc32.h> |
| 29 | 29 | ||
| 30 | MODULE_AUTHOR("Jouni Malinen"); | 30 | MODULE_AUTHOR("Jouni Malinen"); |
| @@ -390,9 +390,7 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
| 390 | icv[3] = crc >> 24; | 390 | icv[3] = crc >> 24; |
| 391 | 391 | ||
| 392 | crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16); | 392 | crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16); |
| 393 | sg.page = virt_to_page(pos); | 393 | sg_init_one(&sg, pos, len + 4); |
| 394 | sg.offset = offset_in_page(pos); | ||
| 395 | sg.length = len + 4; | ||
| 396 | return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); | 394 | return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); |
| 397 | } | 395 | } |
| 398 | 396 | ||
| @@ -485,9 +483,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
| 485 | plen = skb->len - hdr_len - 12; | 483 | plen = skb->len - hdr_len - 12; |
| 486 | 484 | ||
| 487 | crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); | 485 | crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); |
| 488 | sg.page = virt_to_page(pos); | 486 | sg_init_one(&sg, pos, plen + 4); |
| 489 | sg.offset = offset_in_page(pos); | ||
| 490 | sg.length = plen + 4; | ||
| 491 | if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) { | 487 | if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) { |
| 492 | if (net_ratelimit()) { | 488 | if (net_ratelimit()) { |
| 493 | printk(KERN_DEBUG ": TKIP: failed to decrypt " | 489 | printk(KERN_DEBUG ": TKIP: failed to decrypt " |
| @@ -539,13 +535,9 @@ static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr, | |||
| 539 | printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n"); | 535 | printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n"); |
| 540 | return -1; | 536 | return -1; |
| 541 | } | 537 | } |
| 542 | sg[0].page = virt_to_page(hdr); | 538 | sg_init_table(sg, 2); |
| 543 | sg[0].offset = offset_in_page(hdr); | 539 | sg_set_buf(&sg[0], hdr, 16); |
| 544 | sg[0].length = 16; | 540 | sg_set_buf(&sg[1], data, data_len); |
| 545 | |||
| 546 | sg[1].page = virt_to_page(data); | ||
| 547 | sg[1].offset = offset_in_page(data); | ||
| 548 | sg[1].length = data_len; | ||
| 549 | 541 | ||
| 550 | if (crypto_hash_setkey(tfm_michael, key, 8)) | 542 | if (crypto_hash_setkey(tfm_michael, key, 8)) |
| 551 | return -1; | 543 | return -1; |
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c index 8d182459344e..3fa30c40779f 100644 --- a/net/ieee80211/ieee80211_crypt_wep.c +++ b/net/ieee80211/ieee80211_crypt_wep.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
| 15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
| 16 | #include <linux/random.h> | 16 | #include <linux/random.h> |
| 17 | #include <linux/scatterlist.h> | ||
| 17 | #include <linux/skbuff.h> | 18 | #include <linux/skbuff.h> |
| 18 | #include <linux/mm.h> | 19 | #include <linux/mm.h> |
| 19 | #include <asm/string.h> | 20 | #include <asm/string.h> |
| @@ -21,7 +22,6 @@ | |||
| 21 | #include <net/ieee80211.h> | 22 | #include <net/ieee80211.h> |
| 22 | 23 | ||
| 23 | #include <linux/crypto.h> | 24 | #include <linux/crypto.h> |
| 24 | #include <asm/scatterlist.h> | ||
| 25 | #include <linux/crc32.h> | 25 | #include <linux/crc32.h> |
| 26 | 26 | ||
| 27 | MODULE_AUTHOR("Jouni Malinen"); | 27 | MODULE_AUTHOR("Jouni Malinen"); |
| @@ -170,9 +170,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
| 170 | icv[3] = crc >> 24; | 170 | icv[3] = crc >> 24; |
| 171 | 171 | ||
| 172 | crypto_blkcipher_setkey(wep->tx_tfm, key, klen); | 172 | crypto_blkcipher_setkey(wep->tx_tfm, key, klen); |
| 173 | sg.page = virt_to_page(pos); | 173 | sg_init_one(&sg, pos, len + 4); |
| 174 | sg.offset = offset_in_page(pos); | ||
| 175 | sg.length = len + 4; | ||
| 176 | return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); | 174 | return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); |
| 177 | } | 175 | } |
| 178 | 176 | ||
| @@ -212,9 +210,7 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
| 212 | plen = skb->len - hdr_len - 8; | 210 | plen = skb->len - hdr_len - 8; |
| 213 | 211 | ||
| 214 | crypto_blkcipher_setkey(wep->rx_tfm, key, klen); | 212 | crypto_blkcipher_setkey(wep->rx_tfm, key, klen); |
| 215 | sg.page = virt_to_page(pos); | 213 | sg_init_one(&sg, pos, plen + 4); |
| 216 | sg.offset = offset_in_page(pos); | ||
| 217 | sg.length = plen + 4; | ||
| 218 | if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) | 214 | if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) |
| 219 | return -7; | 215 | return -7; |
| 220 | 216 | ||
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 621b128897d7..d2f22e74b267 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
| @@ -323,7 +323,7 @@ lookup_protocol: | |||
| 323 | BUG_TRAP(answer_prot->slab != NULL); | 323 | BUG_TRAP(answer_prot->slab != NULL); |
| 324 | 324 | ||
| 325 | err = -ENOBUFS; | 325 | err = -ENOBUFS; |
| 326 | sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, 1); | 326 | sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot); |
| 327 | if (sk == NULL) | 327 | if (sk == NULL) |
| 328 | goto out; | 328 | goto out; |
| 329 | 329 | ||
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index 4e8e3b079f5b..5fc346d8b566 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c | |||
| @@ -8,7 +8,6 @@ | |||
| 8 | #include <linux/spinlock.h> | 8 | #include <linux/spinlock.h> |
| 9 | #include <net/icmp.h> | 9 | #include <net/icmp.h> |
| 10 | #include <net/protocol.h> | 10 | #include <net/protocol.h> |
| 11 | #include <asm/scatterlist.h> | ||
| 12 | 11 | ||
| 13 | 12 | ||
| 14 | /* Clear mutable options and find final destination to substitute | 13 | /* Clear mutable options and find final destination to substitute |
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 805a78e6ed55..f18e88bc86ec 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c | |||
| @@ -504,22 +504,16 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def) | |||
| 504 | INIT_RCU_HEAD(&doi_def->rcu); | 504 | INIT_RCU_HEAD(&doi_def->rcu); |
| 505 | INIT_LIST_HEAD(&doi_def->dom_list); | 505 | INIT_LIST_HEAD(&doi_def->dom_list); |
| 506 | 506 | ||
| 507 | rcu_read_lock(); | ||
| 508 | if (cipso_v4_doi_search(doi_def->doi) != NULL) | ||
| 509 | goto doi_add_failure_rlock; | ||
| 510 | spin_lock(&cipso_v4_doi_list_lock); | 507 | spin_lock(&cipso_v4_doi_list_lock); |
| 511 | if (cipso_v4_doi_search(doi_def->doi) != NULL) | 508 | if (cipso_v4_doi_search(doi_def->doi) != NULL) |
| 512 | goto doi_add_failure_slock; | 509 | goto doi_add_failure; |
| 513 | list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); | 510 | list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); |
| 514 | spin_unlock(&cipso_v4_doi_list_lock); | 511 | spin_unlock(&cipso_v4_doi_list_lock); |
| 515 | rcu_read_unlock(); | ||
| 516 | 512 | ||
| 517 | return 0; | 513 | return 0; |
| 518 | 514 | ||
| 519 | doi_add_failure_slock: | 515 | doi_add_failure: |
| 520 | spin_unlock(&cipso_v4_doi_list_lock); | 516 | spin_unlock(&cipso_v4_doi_list_lock); |
| 521 | doi_add_failure_rlock: | ||
| 522 | rcu_read_unlock(); | ||
| 523 | return -EEXIST; | 517 | return -EEXIST; |
| 524 | } | 518 | } |
| 525 | 519 | ||
| @@ -543,29 +537,23 @@ int cipso_v4_doi_remove(u32 doi, | |||
| 543 | struct cipso_v4_doi *doi_def; | 537 | struct cipso_v4_doi *doi_def; |
| 544 | struct cipso_v4_domhsh_entry *dom_iter; | 538 | struct cipso_v4_domhsh_entry *dom_iter; |
| 545 | 539 | ||
| 546 | rcu_read_lock(); | 540 | spin_lock(&cipso_v4_doi_list_lock); |
| 547 | if (cipso_v4_doi_search(doi) != NULL) { | 541 | doi_def = cipso_v4_doi_search(doi); |
| 548 | spin_lock(&cipso_v4_doi_list_lock); | 542 | if (doi_def != NULL) { |
| 549 | doi_def = cipso_v4_doi_search(doi); | ||
| 550 | if (doi_def == NULL) { | ||
| 551 | spin_unlock(&cipso_v4_doi_list_lock); | ||
| 552 | rcu_read_unlock(); | ||
| 553 | return -ENOENT; | ||
| 554 | } | ||
| 555 | doi_def->valid = 0; | 543 | doi_def->valid = 0; |
| 556 | list_del_rcu(&doi_def->list); | 544 | list_del_rcu(&doi_def->list); |
| 557 | spin_unlock(&cipso_v4_doi_list_lock); | 545 | spin_unlock(&cipso_v4_doi_list_lock); |
| 546 | rcu_read_lock(); | ||
| 558 | list_for_each_entry_rcu(dom_iter, &doi_def->dom_list, list) | 547 | list_for_each_entry_rcu(dom_iter, &doi_def->dom_list, list) |
| 559 | if (dom_iter->valid) | 548 | if (dom_iter->valid) |
| 560 | netlbl_domhsh_remove(dom_iter->domain, | 549 | netlbl_domhsh_remove(dom_iter->domain, |
| 561 | audit_info); | 550 | audit_info); |
| 562 | cipso_v4_cache_invalidate(); | ||
| 563 | rcu_read_unlock(); | 551 | rcu_read_unlock(); |
| 564 | 552 | cipso_v4_cache_invalidate(); | |
| 565 | call_rcu(&doi_def->rcu, callback); | 553 | call_rcu(&doi_def->rcu, callback); |
| 566 | return 0; | 554 | return 0; |
| 567 | } | 555 | } |
| 568 | rcu_read_unlock(); | 556 | spin_unlock(&cipso_v4_doi_list_lock); |
| 569 | 557 | ||
| 570 | return -ENOENT; | 558 | return -ENOENT; |
| 571 | } | 559 | } |
| @@ -653,22 +641,19 @@ int cipso_v4_doi_domhsh_add(struct cipso_v4_doi *doi_def, const char *domain) | |||
| 653 | new_dom->valid = 1; | 641 | new_dom->valid = 1; |
| 654 | INIT_RCU_HEAD(&new_dom->rcu); | 642 | INIT_RCU_HEAD(&new_dom->rcu); |
| 655 | 643 | ||
| 656 | rcu_read_lock(); | ||
| 657 | spin_lock(&cipso_v4_doi_list_lock); | 644 | spin_lock(&cipso_v4_doi_list_lock); |
| 658 | list_for_each_entry_rcu(iter, &doi_def->dom_list, list) | 645 | list_for_each_entry(iter, &doi_def->dom_list, list) |
| 659 | if (iter->valid && | 646 | if (iter->valid && |
| 660 | ((domain != NULL && iter->domain != NULL && | 647 | ((domain != NULL && iter->domain != NULL && |
| 661 | strcmp(iter->domain, domain) == 0) || | 648 | strcmp(iter->domain, domain) == 0) || |
| 662 | (domain == NULL && iter->domain == NULL))) { | 649 | (domain == NULL && iter->domain == NULL))) { |
| 663 | spin_unlock(&cipso_v4_doi_list_lock); | 650 | spin_unlock(&cipso_v4_doi_list_lock); |
| 664 | rcu_read_unlock(); | ||
| 665 | kfree(new_dom->domain); | 651 | kfree(new_dom->domain); |
| 666 | kfree(new_dom); | 652 | kfree(new_dom); |
| 667 | return -EEXIST; | 653 | return -EEXIST; |
| 668 | } | 654 | } |
| 669 | list_add_tail_rcu(&new_dom->list, &doi_def->dom_list); | 655 | list_add_tail_rcu(&new_dom->list, &doi_def->dom_list); |
| 670 | spin_unlock(&cipso_v4_doi_list_lock); | 656 | spin_unlock(&cipso_v4_doi_list_lock); |
| 671 | rcu_read_unlock(); | ||
| 672 | 657 | ||
| 673 | return 0; | 658 | return 0; |
| 674 | } | 659 | } |
| @@ -689,9 +674,8 @@ int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def, | |||
| 689 | { | 674 | { |
| 690 | struct cipso_v4_domhsh_entry *iter; | 675 | struct cipso_v4_domhsh_entry *iter; |
| 691 | 676 | ||
| 692 | rcu_read_lock(); | ||
| 693 | spin_lock(&cipso_v4_doi_list_lock); | 677 | spin_lock(&cipso_v4_doi_list_lock); |
| 694 | list_for_each_entry_rcu(iter, &doi_def->dom_list, list) | 678 | list_for_each_entry(iter, &doi_def->dom_list, list) |
| 695 | if (iter->valid && | 679 | if (iter->valid && |
| 696 | ((domain != NULL && iter->domain != NULL && | 680 | ((domain != NULL && iter->domain != NULL && |
| 697 | strcmp(iter->domain, domain) == 0) || | 681 | strcmp(iter->domain, domain) == 0) || |
| @@ -699,13 +683,10 @@ int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def, | |||
| 699 | iter->valid = 0; | 683 | iter->valid = 0; |
| 700 | list_del_rcu(&iter->list); | 684 | list_del_rcu(&iter->list); |
| 701 | spin_unlock(&cipso_v4_doi_list_lock); | 685 | spin_unlock(&cipso_v4_doi_list_lock); |
| 702 | rcu_read_unlock(); | ||
| 703 | call_rcu(&iter->rcu, cipso_v4_doi_domhsh_free); | 686 | call_rcu(&iter->rcu, cipso_v4_doi_domhsh_free); |
| 704 | |||
| 705 | return 0; | 687 | return 0; |
| 706 | } | 688 | } |
| 707 | spin_unlock(&cipso_v4_doi_list_lock); | 689 | spin_unlock(&cipso_v4_doi_list_lock); |
| 708 | rcu_read_unlock(); | ||
| 709 | 690 | ||
| 710 | return -ENOENT; | 691 | return -ENOENT; |
| 711 | } | 692 | } |
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 6b1a31a74cf2..c31bccb9b526 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | #include <net/ip.h> | 3 | #include <net/ip.h> |
| 4 | #include <net/xfrm.h> | 4 | #include <net/xfrm.h> |
| 5 | #include <net/esp.h> | 5 | #include <net/esp.h> |
| 6 | #include <asm/scatterlist.h> | 6 | #include <linux/scatterlist.h> |
| 7 | #include <linux/crypto.h> | 7 | #include <linux/crypto.h> |
| 8 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
| 9 | #include <linux/pfkeyv2.h> | 9 | #include <linux/pfkeyv2.h> |
| @@ -110,7 +110,11 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) | |||
| 110 | if (!sg) | 110 | if (!sg) |
| 111 | goto unlock; | 111 | goto unlock; |
| 112 | } | 112 | } |
| 113 | skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen); | 113 | sg_init_table(sg, nfrags); |
| 114 | skb_to_sgvec(skb, sg, | ||
| 115 | esph->enc_data + | ||
| 116 | esp->conf.ivlen - | ||
| 117 | skb->data, clen); | ||
| 114 | err = crypto_blkcipher_encrypt(&desc, sg, sg, clen); | 118 | err = crypto_blkcipher_encrypt(&desc, sg, sg, clen); |
| 115 | if (unlikely(sg != &esp->sgbuf[0])) | 119 | if (unlikely(sg != &esp->sgbuf[0])) |
| 116 | kfree(sg); | 120 | kfree(sg); |
| @@ -201,7 +205,10 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) | |||
| 201 | if (!sg) | 205 | if (!sg) |
| 202 | goto out; | 206 | goto out; |
| 203 | } | 207 | } |
| 204 | skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen); | 208 | sg_init_table(sg, nfrags); |
| 209 | skb_to_sgvec(skb, sg, | ||
| 210 | sizeof(*esph) + esp->conf.ivlen, | ||
| 211 | elen); | ||
| 205 | err = crypto_blkcipher_decrypt(&desc, sg, sg, elen); | 212 | err = crypto_blkcipher_decrypt(&desc, sg, sg, elen); |
| 206 | if (unlikely(sg != &esp->sgbuf[0])) | 213 | if (unlikely(sg != &esp->sgbuf[0])) |
| 207 | kfree(sg); | 214 | kfree(sg); |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 78b514ba1414..732d8f088b13 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
| @@ -59,6 +59,13 @@ struct fib_table *ip_fib_main_table; | |||
| 59 | #define FIB_TABLE_HASHSZ 1 | 59 | #define FIB_TABLE_HASHSZ 1 |
| 60 | static struct hlist_head fib_table_hash[FIB_TABLE_HASHSZ]; | 60 | static struct hlist_head fib_table_hash[FIB_TABLE_HASHSZ]; |
| 61 | 61 | ||
| 62 | static void __init fib4_rules_init(void) | ||
| 63 | { | ||
| 64 | ip_fib_local_table = fib_hash_init(RT_TABLE_LOCAL); | ||
| 65 | hlist_add_head_rcu(&ip_fib_local_table->tb_hlist, &fib_table_hash[0]); | ||
| 66 | ip_fib_main_table = fib_hash_init(RT_TABLE_MAIN); | ||
| 67 | hlist_add_head_rcu(&ip_fib_main_table->tb_hlist, &fib_table_hash[0]); | ||
| 68 | } | ||
| 62 | #else | 69 | #else |
| 63 | 70 | ||
| 64 | #define FIB_TABLE_HASHSZ 256 | 71 | #define FIB_TABLE_HASHSZ 256 |
| @@ -128,13 +135,14 @@ struct net_device * ip_dev_find(__be32 addr) | |||
| 128 | struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } }; | 135 | struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } }; |
| 129 | struct fib_result res; | 136 | struct fib_result res; |
| 130 | struct net_device *dev = NULL; | 137 | struct net_device *dev = NULL; |
| 138 | struct fib_table *local_table; | ||
| 131 | 139 | ||
| 132 | #ifdef CONFIG_IP_MULTIPLE_TABLES | 140 | #ifdef CONFIG_IP_MULTIPLE_TABLES |
| 133 | res.r = NULL; | 141 | res.r = NULL; |
| 134 | #endif | 142 | #endif |
| 135 | 143 | ||
| 136 | if (!ip_fib_local_table || | 144 | local_table = fib_get_table(RT_TABLE_LOCAL); |
| 137 | ip_fib_local_table->tb_lookup(ip_fib_local_table, &fl, &res)) | 145 | if (!local_table || local_table->tb_lookup(local_table, &fl, &res)) |
| 138 | return NULL; | 146 | return NULL; |
| 139 | if (res.type != RTN_LOCAL) | 147 | if (res.type != RTN_LOCAL) |
| 140 | goto out; | 148 | goto out; |
| @@ -152,6 +160,7 @@ unsigned inet_addr_type(__be32 addr) | |||
| 152 | struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } }; | 160 | struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } }; |
| 153 | struct fib_result res; | 161 | struct fib_result res; |
| 154 | unsigned ret = RTN_BROADCAST; | 162 | unsigned ret = RTN_BROADCAST; |
| 163 | struct fib_table *local_table; | ||
| 155 | 164 | ||
| 156 | if (ZERONET(addr) || BADCLASS(addr)) | 165 | if (ZERONET(addr) || BADCLASS(addr)) |
| 157 | return RTN_BROADCAST; | 166 | return RTN_BROADCAST; |
| @@ -162,10 +171,10 @@ unsigned inet_addr_type(__be32 addr) | |||
| 162 | res.r = NULL; | 171 | res.r = NULL; |
| 163 | #endif | 172 | #endif |
| 164 | 173 | ||
| 165 | if (ip_fib_local_table) { | 174 | local_table = fib_get_table(RT_TABLE_LOCAL); |
| 175 | if (local_table) { | ||
| 166 | ret = RTN_UNICAST; | 176 | ret = RTN_UNICAST; |
| 167 | if (!ip_fib_local_table->tb_lookup(ip_fib_local_table, | 177 | if (!local_table->tb_lookup(local_table, &fl, &res)) { |
| 168 | &fl, &res)) { | ||
| 169 | ret = res.type; | 178 | ret = res.type; |
| 170 | fib_res_put(&res); | 179 | fib_res_put(&res); |
| 171 | } | 180 | } |
| @@ -903,14 +912,8 @@ void __init ip_fib_init(void) | |||
| 903 | 912 | ||
| 904 | for (i = 0; i < FIB_TABLE_HASHSZ; i++) | 913 | for (i = 0; i < FIB_TABLE_HASHSZ; i++) |
| 905 | INIT_HLIST_HEAD(&fib_table_hash[i]); | 914 | INIT_HLIST_HEAD(&fib_table_hash[i]); |
| 906 | #ifndef CONFIG_IP_MULTIPLE_TABLES | 915 | |
| 907 | ip_fib_local_table = fib_hash_init(RT_TABLE_LOCAL); | ||
| 908 | hlist_add_head_rcu(&ip_fib_local_table->tb_hlist, &fib_table_hash[0]); | ||
| 909 | ip_fib_main_table = fib_hash_init(RT_TABLE_MAIN); | ||
| 910 | hlist_add_head_rcu(&ip_fib_main_table->tb_hlist, &fib_table_hash[0]); | ||
| 911 | #else | ||
| 912 | fib4_rules_init(); | 916 | fib4_rules_init(); |
| 913 | #endif | ||
| 914 | 917 | ||
| 915 | register_netdevice_notifier(&fib_netdev_notifier); | 918 | register_netdevice_notifier(&fib_netdev_notifier); |
| 916 | register_inetaddr_notifier(&fib_inetaddr_notifier); | 919 | register_inetaddr_notifier(&fib_inetaddr_notifier); |
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 272c69e106e9..233de0634298 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
| @@ -1104,5 +1104,4 @@ void __init icmp_init(struct net_proto_family *ops) | |||
| 1104 | EXPORT_SYMBOL(icmp_err_convert); | 1104 | EXPORT_SYMBOL(icmp_err_convert); |
| 1105 | EXPORT_SYMBOL(icmp_send); | 1105 | EXPORT_SYMBOL(icmp_send); |
| 1106 | EXPORT_SYMBOL(icmp_statistics); | 1106 | EXPORT_SYMBOL(icmp_statistics); |
| 1107 | EXPORT_SYMBOL(icmpmsg_statistics); | ||
| 1108 | EXPORT_SYMBOL(xrlim_allow); | 1107 | EXPORT_SYMBOL(xrlim_allow); |
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 7eb83ebed2ec..b0170732b5e9 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
| @@ -747,13 +747,14 @@ skip_listen_ht: | |||
| 747 | 747 | ||
| 748 | for (i = s_i; i < hashinfo->ehash_size; i++) { | 748 | for (i = s_i; i < hashinfo->ehash_size; i++) { |
| 749 | struct inet_ehash_bucket *head = &hashinfo->ehash[i]; | 749 | struct inet_ehash_bucket *head = &hashinfo->ehash[i]; |
| 750 | rwlock_t *lock = inet_ehash_lockp(hashinfo, i); | ||
| 750 | struct sock *sk; | 751 | struct sock *sk; |
| 751 | struct hlist_node *node; | 752 | struct hlist_node *node; |
| 752 | 753 | ||
| 753 | if (i > s_i) | 754 | if (i > s_i) |
| 754 | s_num = 0; | 755 | s_num = 0; |
| 755 | 756 | ||
| 756 | read_lock_bh(&head->lock); | 757 | read_lock_bh(lock); |
| 757 | num = 0; | 758 | num = 0; |
| 758 | sk_for_each(sk, node, &head->chain) { | 759 | sk_for_each(sk, node, &head->chain) { |
| 759 | struct inet_sock *inet = inet_sk(sk); | 760 | struct inet_sock *inet = inet_sk(sk); |
| @@ -769,7 +770,7 @@ skip_listen_ht: | |||
| 769 | r->id.idiag_dport) | 770 | r->id.idiag_dport) |
| 770 | goto next_normal; | 771 | goto next_normal; |
| 771 | if (inet_csk_diag_dump(sk, skb, cb) < 0) { | 772 | if (inet_csk_diag_dump(sk, skb, cb) < 0) { |
| 772 | read_unlock_bh(&head->lock); | 773 | read_unlock_bh(lock); |
| 773 | goto done; | 774 | goto done; |
| 774 | } | 775 | } |
| 775 | next_normal: | 776 | next_normal: |
| @@ -791,14 +792,14 @@ next_normal: | |||
| 791 | r->id.idiag_dport) | 792 | r->id.idiag_dport) |
| 792 | goto next_dying; | 793 | goto next_dying; |
| 793 | if (inet_twsk_diag_dump(tw, skb, cb) < 0) { | 794 | if (inet_twsk_diag_dump(tw, skb, cb) < 0) { |
| 794 | read_unlock_bh(&head->lock); | 795 | read_unlock_bh(lock); |
| 795 | goto done; | 796 | goto done; |
| 796 | } | 797 | } |
| 797 | next_dying: | 798 | next_dying: |
| 798 | ++num; | 799 | ++num; |
| 799 | } | 800 | } |
| 800 | } | 801 | } |
| 801 | read_unlock_bh(&head->lock); | 802 | read_unlock_bh(lock); |
| 802 | } | 803 | } |
| 803 | 804 | ||
| 804 | done: | 805 | done: |
| @@ -815,6 +816,12 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 815 | nlmsg_len(nlh) < hdrlen) | 816 | nlmsg_len(nlh) < hdrlen) |
| 816 | return -EINVAL; | 817 | return -EINVAL; |
| 817 | 818 | ||
| 819 | #ifdef CONFIG_KMOD | ||
| 820 | if (inet_diag_table[nlh->nlmsg_type] == NULL) | ||
| 821 | request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, | ||
| 822 | NETLINK_INET_DIAG, nlh->nlmsg_type); | ||
| 823 | #endif | ||
| 824 | |||
| 818 | if (inet_diag_table[nlh->nlmsg_type] == NULL) | 825 | if (inet_diag_table[nlh->nlmsg_type] == NULL) |
| 819 | return -ENOENT; | 826 | return -ENOENT; |
| 820 | 827 | ||
| @@ -914,3 +921,4 @@ static void __exit inet_diag_exit(void) | |||
| 914 | module_init(inet_diag_init); | 921 | module_init(inet_diag_init); |
| 915 | module_exit(inet_diag_exit); | 922 | module_exit(inet_diag_exit); |
| 916 | MODULE_LICENSE("GPL"); | 923 | MODULE_LICENSE("GPL"); |
| 924 | MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_INET_DIAG); | ||
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 16eecc7046a3..67704da04fc4 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c | |||
| @@ -204,12 +204,13 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row, | |||
| 204 | const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport); | 204 | const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport); |
| 205 | unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport); | 205 | unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport); |
| 206 | struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); | 206 | struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); |
| 207 | rwlock_t *lock = inet_ehash_lockp(hinfo, hash); | ||
| 207 | struct sock *sk2; | 208 | struct sock *sk2; |
| 208 | const struct hlist_node *node; | 209 | const struct hlist_node *node; |
| 209 | struct inet_timewait_sock *tw; | 210 | struct inet_timewait_sock *tw; |
| 210 | 211 | ||
| 211 | prefetch(head->chain.first); | 212 | prefetch(head->chain.first); |
| 212 | write_lock(&head->lock); | 213 | write_lock(lock); |
| 213 | 214 | ||
| 214 | /* Check TIME-WAIT sockets first. */ | 215 | /* Check TIME-WAIT sockets first. */ |
| 215 | sk_for_each(sk2, node, &head->twchain) { | 216 | sk_for_each(sk2, node, &head->twchain) { |
| @@ -239,7 +240,7 @@ unique: | |||
| 239 | BUG_TRAP(sk_unhashed(sk)); | 240 | BUG_TRAP(sk_unhashed(sk)); |
| 240 | __sk_add_node(sk, &head->chain); | 241 | __sk_add_node(sk, &head->chain); |
| 241 | sock_prot_inc_use(sk->sk_prot); | 242 | sock_prot_inc_use(sk->sk_prot); |
| 242 | write_unlock(&head->lock); | 243 | write_unlock(lock); |
| 243 | 244 | ||
| 244 | if (twp) { | 245 | if (twp) { |
| 245 | *twp = tw; | 246 | *twp = tw; |
| @@ -255,7 +256,7 @@ unique: | |||
| 255 | return 0; | 256 | return 0; |
| 256 | 257 | ||
| 257 | not_unique: | 258 | not_unique: |
| 258 | write_unlock(&head->lock); | 259 | write_unlock(lock); |
| 259 | return -EADDRNOTAVAIL; | 260 | return -EADDRNOTAVAIL; |
| 260 | } | 261 | } |
| 261 | 262 | ||
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 4e189e28f306..a60b99e0ebdc 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c | |||
| @@ -20,16 +20,16 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw, | |||
| 20 | struct inet_bind_hashbucket *bhead; | 20 | struct inet_bind_hashbucket *bhead; |
| 21 | struct inet_bind_bucket *tb; | 21 | struct inet_bind_bucket *tb; |
| 22 | /* Unlink from established hashes. */ | 22 | /* Unlink from established hashes. */ |
| 23 | struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, tw->tw_hash); | 23 | rwlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash); |
| 24 | 24 | ||
| 25 | write_lock(&ehead->lock); | 25 | write_lock(lock); |
| 26 | if (hlist_unhashed(&tw->tw_node)) { | 26 | if (hlist_unhashed(&tw->tw_node)) { |
| 27 | write_unlock(&ehead->lock); | 27 | write_unlock(lock); |
| 28 | return; | 28 | return; |
| 29 | } | 29 | } |
| 30 | __hlist_del(&tw->tw_node); | 30 | __hlist_del(&tw->tw_node); |
| 31 | sk_node_init(&tw->tw_node); | 31 | sk_node_init(&tw->tw_node); |
| 32 | write_unlock(&ehead->lock); | 32 | write_unlock(lock); |
| 33 | 33 | ||
| 34 | /* Disassociate with bind bucket. */ | 34 | /* Disassociate with bind bucket. */ |
| 35 | bhead = &hashinfo->bhash[inet_bhashfn(tw->tw_num, hashinfo->bhash_size)]; | 35 | bhead = &hashinfo->bhash[inet_bhashfn(tw->tw_num, hashinfo->bhash_size)]; |
| @@ -59,6 +59,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, | |||
| 59 | const struct inet_sock *inet = inet_sk(sk); | 59 | const struct inet_sock *inet = inet_sk(sk); |
| 60 | const struct inet_connection_sock *icsk = inet_csk(sk); | 60 | const struct inet_connection_sock *icsk = inet_csk(sk); |
| 61 | struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash); | 61 | struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash); |
| 62 | rwlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash); | ||
| 62 | struct inet_bind_hashbucket *bhead; | 63 | struct inet_bind_hashbucket *bhead; |
| 63 | /* Step 1: Put TW into bind hash. Original socket stays there too. | 64 | /* Step 1: Put TW into bind hash. Original socket stays there too. |
| 64 | Note, that any socket with inet->num != 0 MUST be bound in | 65 | Note, that any socket with inet->num != 0 MUST be bound in |
| @@ -71,7 +72,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, | |||
| 71 | inet_twsk_add_bind_node(tw, &tw->tw_tb->owners); | 72 | inet_twsk_add_bind_node(tw, &tw->tw_tb->owners); |
| 72 | spin_unlock(&bhead->lock); | 73 | spin_unlock(&bhead->lock); |
| 73 | 74 | ||
| 74 | write_lock(&ehead->lock); | 75 | write_lock(lock); |
| 75 | 76 | ||
| 76 | /* Step 2: Remove SK from established hash. */ | 77 | /* Step 2: Remove SK from established hash. */ |
| 77 | if (__sk_del_node_init(sk)) | 78 | if (__sk_del_node_init(sk)) |
| @@ -81,7 +82,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, | |||
| 81 | inet_twsk_add_node(tw, &ehead->twchain); | 82 | inet_twsk_add_node(tw, &ehead->twchain); |
| 82 | atomic_inc(&tw->tw_refcnt); | 83 | atomic_inc(&tw->tw_refcnt); |
| 83 | 84 | ||
| 84 | write_unlock(&ehead->lock); | 85 | write_unlock(lock); |
| 85 | } | 86 | } |
| 86 | 87 | ||
| 87 | EXPORT_SYMBOL_GPL(__inet_twsk_hashdance); | 88 | EXPORT_SYMBOL_GPL(__inet_twsk_hashdance); |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index f151900efaf9..02b02a8d681c 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
| @@ -674,7 +674,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 674 | struct rtable *rt; /* Route to the other host */ | 674 | struct rtable *rt; /* Route to the other host */ |
| 675 | struct net_device *tdev; /* Device to other host */ | 675 | struct net_device *tdev; /* Device to other host */ |
| 676 | struct iphdr *iph; /* Our new IP header */ | 676 | struct iphdr *iph; /* Our new IP header */ |
| 677 | int max_headroom; /* The extra header space needed */ | 677 | unsigned int max_headroom; /* The extra header space needed */ |
| 678 | int gre_hlen; | 678 | int gre_hlen; |
| 679 | __be32 dst; | 679 | __be32 dst; |
| 680 | int mtu; | 680 | int mtu; |
| @@ -1033,7 +1033,6 @@ static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu) | |||
| 1033 | return 0; | 1033 | return 0; |
| 1034 | } | 1034 | } |
| 1035 | 1035 | ||
| 1036 | #ifdef CONFIG_NET_IPGRE_BROADCAST | ||
| 1037 | /* Nice toy. Unfortunately, useless in real life :-) | 1036 | /* Nice toy. Unfortunately, useless in real life :-) |
| 1038 | It allows to construct virtual multiprotocol broadcast "LAN" | 1037 | It allows to construct virtual multiprotocol broadcast "LAN" |
| 1039 | over the Internet, provided multicast routing is tuned. | 1038 | over the Internet, provided multicast routing is tuned. |
| @@ -1092,10 +1091,19 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, | |||
| 1092 | return -t->hlen; | 1091 | return -t->hlen; |
| 1093 | } | 1092 | } |
| 1094 | 1093 | ||
| 1094 | static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr) | ||
| 1095 | { | ||
| 1096 | struct iphdr *iph = (struct iphdr*) skb_mac_header(skb); | ||
| 1097 | memcpy(haddr, &iph->saddr, 4); | ||
| 1098 | return 4; | ||
| 1099 | } | ||
| 1100 | |||
| 1095 | static const struct header_ops ipgre_header_ops = { | 1101 | static const struct header_ops ipgre_header_ops = { |
| 1096 | .create = ipgre_header, | 1102 | .create = ipgre_header, |
| 1103 | .parse = ipgre_header_parse, | ||
| 1097 | }; | 1104 | }; |
| 1098 | 1105 | ||
| 1106 | #ifdef CONFIG_NET_IPGRE_BROADCAST | ||
| 1099 | static int ipgre_open(struct net_device *dev) | 1107 | static int ipgre_open(struct net_device *dev) |
| 1100 | { | 1108 | { |
| 1101 | struct ip_tunnel *t = netdev_priv(dev); | 1109 | struct ip_tunnel *t = netdev_priv(dev); |
| @@ -1197,6 +1205,8 @@ static int ipgre_tunnel_init(struct net_device *dev) | |||
| 1197 | dev->stop = ipgre_close; | 1205 | dev->stop = ipgre_close; |
| 1198 | } | 1206 | } |
| 1199 | #endif | 1207 | #endif |
| 1208 | } else { | ||
| 1209 | dev->header_ops = &ipgre_header_ops; | ||
| 1200 | } | 1210 | } |
| 1201 | 1211 | ||
| 1202 | if (!tdev && tunnel->parms.link) | 1212 | if (!tdev && tunnel->parms.link) |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index f508835ba713..fd99fbd685ea 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
| @@ -161,7 +161,7 @@ static inline int ip_finish_output2(struct sk_buff *skb) | |||
| 161 | struct dst_entry *dst = skb->dst; | 161 | struct dst_entry *dst = skb->dst; |
| 162 | struct rtable *rt = (struct rtable *)dst; | 162 | struct rtable *rt = (struct rtable *)dst; |
| 163 | struct net_device *dev = dst->dev; | 163 | struct net_device *dev = dst->dev; |
| 164 | int hh_len = LL_RESERVED_SPACE(dev); | 164 | unsigned int hh_len = LL_RESERVED_SPACE(dev); |
| 165 | 165 | ||
| 166 | if (rt->rt_type == RTN_MULTICAST) | 166 | if (rt->rt_type == RTN_MULTICAST) |
| 167 | IP_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS); | 167 | IP_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS); |
| @@ -1183,6 +1183,17 @@ error: | |||
| 1183 | return err; | 1183 | return err; |
| 1184 | } | 1184 | } |
| 1185 | 1185 | ||
| 1186 | static void ip_cork_release(struct inet_sock *inet) | ||
| 1187 | { | ||
| 1188 | inet->cork.flags &= ~IPCORK_OPT; | ||
| 1189 | kfree(inet->cork.opt); | ||
| 1190 | inet->cork.opt = NULL; | ||
| 1191 | if (inet->cork.rt) { | ||
| 1192 | ip_rt_put(inet->cork.rt); | ||
| 1193 | inet->cork.rt = NULL; | ||
| 1194 | } | ||
| 1195 | } | ||
| 1196 | |||
| 1186 | /* | 1197 | /* |
| 1187 | * Combined all pending IP fragments on the socket as one IP datagram | 1198 | * Combined all pending IP fragments on the socket as one IP datagram |
| 1188 | * and push them out. | 1199 | * and push them out. |
| @@ -1276,13 +1287,7 @@ int ip_push_pending_frames(struct sock *sk) | |||
| 1276 | } | 1287 | } |
| 1277 | 1288 | ||
| 1278 | out: | 1289 | out: |
| 1279 | inet->cork.flags &= ~IPCORK_OPT; | 1290 | ip_cork_release(inet); |
| 1280 | kfree(inet->cork.opt); | ||
| 1281 | inet->cork.opt = NULL; | ||
| 1282 | if (inet->cork.rt) { | ||
| 1283 | ip_rt_put(inet->cork.rt); | ||
| 1284 | inet->cork.rt = NULL; | ||
| 1285 | } | ||
| 1286 | return err; | 1291 | return err; |
| 1287 | 1292 | ||
| 1288 | error: | 1293 | error: |
| @@ -1295,19 +1300,12 @@ error: | |||
| 1295 | */ | 1300 | */ |
| 1296 | void ip_flush_pending_frames(struct sock *sk) | 1301 | void ip_flush_pending_frames(struct sock *sk) |
| 1297 | { | 1302 | { |
| 1298 | struct inet_sock *inet = inet_sk(sk); | ||
| 1299 | struct sk_buff *skb; | 1303 | struct sk_buff *skb; |
| 1300 | 1304 | ||
| 1301 | while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) | 1305 | while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) |
| 1302 | kfree_skb(skb); | 1306 | kfree_skb(skb); |
| 1303 | 1307 | ||
| 1304 | inet->cork.flags &= ~IPCORK_OPT; | 1308 | ip_cork_release(inet_sk(sk)); |
| 1305 | kfree(inet->cork.opt); | ||
| 1306 | inet->cork.opt = NULL; | ||
| 1307 | if (inet->cork.rt) { | ||
| 1308 | ip_rt_put(inet->cork.rt); | ||
| 1309 | inet->cork.rt = NULL; | ||
| 1310 | } | ||
| 1311 | } | 1309 | } |
| 1312 | 1310 | ||
| 1313 | 1311 | ||
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index f51f20e487c8..82817e554363 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
| @@ -437,10 +437,8 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
| 437 | 437 | ||
| 438 | /* If optlen==0, it is equivalent to val == 0 */ | 438 | /* If optlen==0, it is equivalent to val == 0 */ |
| 439 | 439 | ||
| 440 | #ifdef CONFIG_IP_MROUTE | 440 | if (ip_mroute_opt(optname)) |
| 441 | if (optname >= MRT_BASE && optname <= (MRT_BASE + 10)) | ||
| 442 | return ip_mroute_setsockopt(sk,optname,optval,optlen); | 441 | return ip_mroute_setsockopt(sk,optname,optval,optlen); |
| 443 | #endif | ||
| 444 | 442 | ||
| 445 | err = 0; | 443 | err = 0; |
| 446 | lock_sock(sk); | 444 | lock_sock(sk); |
| @@ -909,11 +907,9 @@ int ip_setsockopt(struct sock *sk, int level, | |||
| 909 | #ifdef CONFIG_NETFILTER | 907 | #ifdef CONFIG_NETFILTER |
| 910 | /* we need to exclude all possible ENOPROTOOPTs except default case */ | 908 | /* we need to exclude all possible ENOPROTOOPTs except default case */ |
| 911 | if (err == -ENOPROTOOPT && optname != IP_HDRINCL && | 909 | if (err == -ENOPROTOOPT && optname != IP_HDRINCL && |
| 912 | optname != IP_IPSEC_POLICY && optname != IP_XFRM_POLICY | 910 | optname != IP_IPSEC_POLICY && |
| 913 | #ifdef CONFIG_IP_MROUTE | 911 | optname != IP_XFRM_POLICY && |
| 914 | && (optname < MRT_BASE || optname > (MRT_BASE + 10)) | 912 | !ip_mroute_opt(optname)) { |
| 915 | #endif | ||
| 916 | ) { | ||
| 917 | lock_sock(sk); | 913 | lock_sock(sk); |
| 918 | err = nf_setsockopt(sk, PF_INET, optname, optval, optlen); | 914 | err = nf_setsockopt(sk, PF_INET, optname, optval, optlen); |
| 919 | release_sock(sk); | 915 | release_sock(sk); |
| @@ -935,11 +931,9 @@ int compat_ip_setsockopt(struct sock *sk, int level, int optname, | |||
| 935 | #ifdef CONFIG_NETFILTER | 931 | #ifdef CONFIG_NETFILTER |
| 936 | /* we need to exclude all possible ENOPROTOOPTs except default case */ | 932 | /* we need to exclude all possible ENOPROTOOPTs except default case */ |
| 937 | if (err == -ENOPROTOOPT && optname != IP_HDRINCL && | 933 | if (err == -ENOPROTOOPT && optname != IP_HDRINCL && |
| 938 | optname != IP_IPSEC_POLICY && optname != IP_XFRM_POLICY | 934 | optname != IP_IPSEC_POLICY && |
| 939 | #ifdef CONFIG_IP_MROUTE | 935 | optname != IP_XFRM_POLICY && |
| 940 | && (optname < MRT_BASE || optname > (MRT_BASE + 10)) | 936 | !ip_mroute_opt(optname)) { |
| 941 | #endif | ||
| 942 | ) { | ||
| 943 | lock_sock(sk); | 937 | lock_sock(sk); |
| 944 | err = compat_nf_setsockopt(sk, PF_INET, optname, | 938 | err = compat_nf_setsockopt(sk, PF_INET, optname, |
| 945 | optval, optlen); | 939 | optval, optlen); |
| @@ -967,11 +961,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, | |||
| 967 | if (level != SOL_IP) | 961 | if (level != SOL_IP) |
| 968 | return -EOPNOTSUPP; | 962 | return -EOPNOTSUPP; |
| 969 | 963 | ||
| 970 | #ifdef CONFIG_IP_MROUTE | 964 | if (ip_mroute_opt(optname)) |
| 971 | if (optname >= MRT_BASE && optname <= MRT_BASE+10) { | ||
| 972 | return ip_mroute_getsockopt(sk,optname,optval,optlen); | 965 | return ip_mroute_getsockopt(sk,optname,optval,optlen); |
| 973 | } | ||
| 974 | #endif | ||
| 975 | 966 | ||
| 976 | if (get_user(len,optlen)) | 967 | if (get_user(len,optlen)) |
| 977 | return -EFAULT; | 968 | return -EFAULT; |
| @@ -1171,11 +1162,8 @@ int ip_getsockopt(struct sock *sk, int level, | |||
| 1171 | err = do_ip_getsockopt(sk, level, optname, optval, optlen); | 1162 | err = do_ip_getsockopt(sk, level, optname, optval, optlen); |
| 1172 | #ifdef CONFIG_NETFILTER | 1163 | #ifdef CONFIG_NETFILTER |
| 1173 | /* we need to exclude all possible ENOPROTOOPTs except default case */ | 1164 | /* we need to exclude all possible ENOPROTOOPTs except default case */ |
| 1174 | if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS | 1165 | if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS && |
| 1175 | #ifdef CONFIG_IP_MROUTE | 1166 | !ip_mroute_opt(optname)) { |
| 1176 | && (optname < MRT_BASE || optname > MRT_BASE+10) | ||
| 1177 | #endif | ||
| 1178 | ) { | ||
| 1179 | int len; | 1167 | int len; |
| 1180 | 1168 | ||
| 1181 | if (get_user(len,optlen)) | 1169 | if (get_user(len,optlen)) |
| @@ -1200,11 +1188,8 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname, | |||
| 1200 | int err = do_ip_getsockopt(sk, level, optname, optval, optlen); | 1188 | int err = do_ip_getsockopt(sk, level, optname, optval, optlen); |
| 1201 | #ifdef CONFIG_NETFILTER | 1189 | #ifdef CONFIG_NETFILTER |
| 1202 | /* we need to exclude all possible ENOPROTOOPTs except default case */ | 1190 | /* we need to exclude all possible ENOPROTOOPTs except default case */ |
| 1203 | if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS | 1191 | if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS && |
| 1204 | #ifdef CONFIG_IP_MROUTE | 1192 | !ip_mroute_opt(optname)) { |
| 1205 | && (optname < MRT_BASE || optname > MRT_BASE+10) | ||
| 1206 | #endif | ||
| 1207 | ) { | ||
| 1208 | int len; | 1193 | int len; |
| 1209 | 1194 | ||
| 1210 | if (get_user(len, optlen)) | 1195 | if (get_user(len, optlen)) |
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c index 0bfeb02a5f87..2c44a94c2135 100644 --- a/net/ipv4/ipcomp.c +++ b/net/ipv4/ipcomp.c | |||
| @@ -14,9 +14,9 @@ | |||
| 14 | * - Adaptive compression. | 14 | * - Adaptive compression. |
| 15 | */ | 15 | */ |
| 16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
| 17 | #include <asm/scatterlist.h> | ||
| 18 | #include <asm/semaphore.h> | 17 | #include <asm/semaphore.h> |
| 19 | #include <linux/crypto.h> | 18 | #include <linux/crypto.h> |
| 19 | #include <linux/err.h> | ||
| 20 | #include <linux/pfkeyv2.h> | 20 | #include <linux/pfkeyv2.h> |
| 21 | #include <linux/percpu.h> | 21 | #include <linux/percpu.h> |
| 22 | #include <linux/smp.h> | 22 | #include <linux/smp.h> |
| @@ -345,7 +345,7 @@ static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name) | |||
| 345 | for_each_possible_cpu(cpu) { | 345 | for_each_possible_cpu(cpu) { |
| 346 | struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0, | 346 | struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0, |
| 347 | CRYPTO_ALG_ASYNC); | 347 | CRYPTO_ALG_ASYNC); |
| 348 | if (!tfm) | 348 | if (IS_ERR(tfm)) |
| 349 | goto error; | 349 | goto error; |
| 350 | *per_cpu_ptr(tfms, cpu) = tfm; | 350 | *per_cpu_ptr(tfms, cpu) = tfm; |
| 351 | } | 351 | } |
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 5cd5bbe1379a..8c2b2b0741da 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
| @@ -515,7 +515,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 515 | struct net_device *tdev; /* Device to other host */ | 515 | struct net_device *tdev; /* Device to other host */ |
| 516 | struct iphdr *old_iph = ip_hdr(skb); | 516 | struct iphdr *old_iph = ip_hdr(skb); |
| 517 | struct iphdr *iph; /* Our new IP header */ | 517 | struct iphdr *iph; /* Our new IP header */ |
| 518 | int max_headroom; /* The extra header space needed */ | 518 | unsigned int max_headroom; /* The extra header space needed */ |
| 519 | __be32 dst = tiph->daddr; | 519 | __be32 dst = tiph->daddr; |
| 520 | int mtu; | 520 | int mtu; |
| 521 | 521 | ||
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c index 4b702f708d30..b7eeae622d9b 100644 --- a/net/ipv4/ipvs/ip_vs_conn.c +++ b/net/ipv4/ipvs/ip_vs_conn.c | |||
| @@ -426,6 +426,25 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest) | |||
| 426 | 426 | ||
| 427 | 427 | ||
| 428 | /* | 428 | /* |
| 429 | * Check if there is a destination for the connection, if so | ||
| 430 | * bind the connection to the destination. | ||
| 431 | */ | ||
| 432 | struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp) | ||
| 433 | { | ||
| 434 | struct ip_vs_dest *dest; | ||
| 435 | |||
| 436 | if ((cp) && (!cp->dest)) { | ||
| 437 | dest = ip_vs_find_dest(cp->daddr, cp->dport, | ||
| 438 | cp->vaddr, cp->vport, cp->protocol); | ||
| 439 | ip_vs_bind_dest(cp, dest); | ||
| 440 | return dest; | ||
| 441 | } else | ||
| 442 | return NULL; | ||
| 443 | } | ||
| 444 | EXPORT_SYMBOL(ip_vs_try_bind_dest); | ||
| 445 | |||
| 446 | |||
| 447 | /* | ||
| 429 | * Unbind a connection entry with its VS destination | 448 | * Unbind a connection entry with its VS destination |
| 430 | * Called by the ip_vs_conn_expire function. | 449 | * Called by the ip_vs_conn_expire function. |
| 431 | */ | 450 | */ |
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c index c6ed7654e839..20c884a57721 100644 --- a/net/ipv4/ipvs/ip_vs_core.c +++ b/net/ipv4/ipvs/ip_vs_core.c | |||
| @@ -979,15 +979,23 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, | |||
| 979 | ret = NF_ACCEPT; | 979 | ret = NF_ACCEPT; |
| 980 | } | 980 | } |
| 981 | 981 | ||
| 982 | /* increase its packet counter and check if it is needed | 982 | /* Increase its packet counter and check if it is needed |
| 983 | to be synchronized */ | 983 | * to be synchronized |
| 984 | * | ||
| 985 | * Sync connection if it is about to close to | ||
| 986 | * encourage the standby servers to update the connections timeout | ||
| 987 | */ | ||
| 984 | atomic_inc(&cp->in_pkts); | 988 | atomic_inc(&cp->in_pkts); |
| 985 | if ((ip_vs_sync_state & IP_VS_STATE_MASTER) && | 989 | if ((ip_vs_sync_state & IP_VS_STATE_MASTER) && |
| 986 | (cp->protocol != IPPROTO_TCP || | 990 | (((cp->protocol != IPPROTO_TCP || |
| 987 | cp->state == IP_VS_TCP_S_ESTABLISHED) && | 991 | cp->state == IP_VS_TCP_S_ESTABLISHED) && |
| 988 | (atomic_read(&cp->in_pkts) % sysctl_ip_vs_sync_threshold[1] | 992 | (atomic_read(&cp->in_pkts) % sysctl_ip_vs_sync_threshold[1] |
| 989 | == sysctl_ip_vs_sync_threshold[0])) | 993 | == sysctl_ip_vs_sync_threshold[0])) || |
| 994 | ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) && | ||
| 995 | ((cp->state == IP_VS_TCP_S_FIN_WAIT) || | ||
| 996 | (cp->state == IP_VS_TCP_S_CLOSE))))) | ||
| 990 | ip_vs_sync_conn(cp); | 997 | ip_vs_sync_conn(cp); |
| 998 | cp->old_state = cp->state; | ||
| 991 | 999 | ||
| 992 | ip_vs_conn_put(cp); | 1000 | ip_vs_conn_put(cp); |
| 993 | return ret; | 1001 | return ret; |
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c index 7345fc252a23..3c4d22a468ec 100644 --- a/net/ipv4/ipvs/ip_vs_ctl.c +++ b/net/ipv4/ipvs/ip_vs_ctl.c | |||
| @@ -579,6 +579,32 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport) | |||
| 579 | return NULL; | 579 | return NULL; |
| 580 | } | 580 | } |
| 581 | 581 | ||
| 582 | /* | ||
| 583 | * Find destination by {daddr,dport,vaddr,protocol} | ||
| 584 | * Created to be used in ip_vs_process_message() in | ||
| 585 | * the backup synchronization daemon. It finds the | ||
| 586 | * destination to be bound to the received connection | ||
| 587 | * on the backup. | ||
| 588 | * | ||
| 589 | * ip_vs_lookup_real_service() looked promising, but | ||
| 590 | * seems not working as expected. | ||
| 591 | */ | ||
| 592 | struct ip_vs_dest *ip_vs_find_dest(__be32 daddr, __be16 dport, | ||
| 593 | __be32 vaddr, __be16 vport, __u16 protocol) | ||
| 594 | { | ||
| 595 | struct ip_vs_dest *dest; | ||
| 596 | struct ip_vs_service *svc; | ||
| 597 | |||
| 598 | svc = ip_vs_service_get(0, protocol, vaddr, vport); | ||
| 599 | if (!svc) | ||
| 600 | return NULL; | ||
| 601 | dest = ip_vs_lookup_dest(svc, daddr, dport); | ||
| 602 | if (dest) | ||
| 603 | atomic_inc(&dest->refcnt); | ||
| 604 | ip_vs_service_put(svc); | ||
| 605 | return dest; | ||
| 606 | } | ||
| 607 | EXPORT_SYMBOL(ip_vs_find_dest); | ||
| 582 | 608 | ||
| 583 | /* | 609 | /* |
| 584 | * Lookup dest by {svc,addr,port} in the destination trash. | 610 | * Lookup dest by {svc,addr,port} in the destination trash. |
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c index 6a1fec416eaf..427b593c1069 100644 --- a/net/ipv4/ipvs/ip_vs_lblcr.c +++ b/net/ipv4/ipvs/ip_vs_lblcr.c | |||
| @@ -48,8 +48,6 @@ | |||
| 48 | /* for sysctl */ | 48 | /* for sysctl */ |
| 49 | #include <linux/fs.h> | 49 | #include <linux/fs.h> |
| 50 | #include <linux/sysctl.h> | 50 | #include <linux/sysctl.h> |
| 51 | /* for proc_net_create/proc_net_remove */ | ||
| 52 | #include <linux/proc_fs.h> | ||
| 53 | #include <net/net_namespace.h> | 51 | #include <net/net_namespace.h> |
| 54 | 52 | ||
| 55 | #include <net/ip_vs.h> | 53 | #include <net/ip_vs.h> |
| @@ -547,71 +545,6 @@ static void ip_vs_lblcr_check_expire(unsigned long data) | |||
| 547 | mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL); | 545 | mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL); |
| 548 | } | 546 | } |
| 549 | 547 | ||
| 550 | |||
| 551 | #ifdef CONFIG_IP_VS_LBLCR_DEBUG | ||
| 552 | static struct ip_vs_lblcr_table *lblcr_table_list; | ||
| 553 | |||
| 554 | /* | ||
| 555 | * /proc/net/ip_vs_lblcr to display the mappings of | ||
| 556 | * destination IP address <==> its serverSet | ||
| 557 | */ | ||
| 558 | static int | ||
| 559 | ip_vs_lblcr_getinfo(char *buffer, char **start, off_t offset, int length) | ||
| 560 | { | ||
| 561 | off_t pos=0, begin; | ||
| 562 | int len=0, size; | ||
| 563 | struct ip_vs_lblcr_table *tbl; | ||
| 564 | unsigned long now = jiffies; | ||
| 565 | int i; | ||
| 566 | struct ip_vs_lblcr_entry *en; | ||
| 567 | |||
| 568 | tbl = lblcr_table_list; | ||
| 569 | |||
| 570 | size = sprintf(buffer, "LastTime Dest IP address Server set\n"); | ||
| 571 | pos += size; | ||
| 572 | len += size; | ||
| 573 | |||
| 574 | for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) { | ||
| 575 | read_lock_bh(&tbl->lock); | ||
| 576 | list_for_each_entry(en, &tbl->bucket[i], list) { | ||
| 577 | char tbuf[16]; | ||
| 578 | struct ip_vs_dest_list *d; | ||
| 579 | |||
| 580 | sprintf(tbuf, "%u.%u.%u.%u", NIPQUAD(en->addr)); | ||
| 581 | size = sprintf(buffer+len, "%8lu %-16s ", | ||
| 582 | now-en->lastuse, tbuf); | ||
| 583 | |||
| 584 | read_lock(&en->set.lock); | ||
| 585 | for (d=en->set.list; d!=NULL; d=d->next) { | ||
| 586 | size += sprintf(buffer+len+size, | ||
| 587 | "%u.%u.%u.%u ", | ||
| 588 | NIPQUAD(d->dest->addr)); | ||
| 589 | } | ||
| 590 | read_unlock(&en->set.lock); | ||
| 591 | size += sprintf(buffer+len+size, "\n"); | ||
| 592 | len += size; | ||
| 593 | pos += size; | ||
| 594 | if (pos <= offset) | ||
| 595 | len=0; | ||
| 596 | if (pos >= offset+length) { | ||
| 597 | read_unlock_bh(&tbl->lock); | ||
| 598 | goto done; | ||
| 599 | } | ||
| 600 | } | ||
| 601 | read_unlock_bh(&tbl->lock); | ||
| 602 | } | ||
| 603 | |||
| 604 | done: | ||
| 605 | begin = len - (pos - offset); | ||
| 606 | *start = buffer + begin; | ||
| 607 | len -= begin; | ||
| 608 | if(len>length) | ||
| 609 | len = length; | ||
| 610 | return len; | ||
| 611 | } | ||
| 612 | #endif | ||
| 613 | |||
| 614 | |||
| 615 | static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc) | 548 | static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc) |
| 616 | { | 549 | { |
| 617 | int i; | 550 | int i; |
| @@ -650,9 +583,6 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc) | |||
| 650 | tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL; | 583 | tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL; |
| 651 | add_timer(&tbl->periodic_timer); | 584 | add_timer(&tbl->periodic_timer); |
| 652 | 585 | ||
| 653 | #ifdef CONFIG_IP_VS_LBLCR_DEBUG | ||
| 654 | lblcr_table_list = tbl; | ||
| 655 | #endif | ||
| 656 | return 0; | 586 | return 0; |
| 657 | } | 587 | } |
| 658 | 588 | ||
| @@ -843,18 +773,12 @@ static int __init ip_vs_lblcr_init(void) | |||
| 843 | { | 773 | { |
| 844 | INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list); | 774 | INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list); |
| 845 | sysctl_header = register_sysctl_table(lblcr_root_table); | 775 | sysctl_header = register_sysctl_table(lblcr_root_table); |
| 846 | #ifdef CONFIG_IP_VS_LBLCR_DEBUG | ||
| 847 | proc_net_create(&init_net, "ip_vs_lblcr", 0, ip_vs_lblcr_getinfo); | ||
| 848 | #endif | ||
| 849 | return register_ip_vs_scheduler(&ip_vs_lblcr_scheduler); | 776 | return register_ip_vs_scheduler(&ip_vs_lblcr_scheduler); |
| 850 | } | 777 | } |
| 851 | 778 | ||
| 852 | 779 | ||
| 853 | static void __exit ip_vs_lblcr_cleanup(void) | 780 | static void __exit ip_vs_lblcr_cleanup(void) |
| 854 | { | 781 | { |
| 855 | #ifdef CONFIG_IP_VS_LBLCR_DEBUG | ||
| 856 | proc_net_remove(&init_net, "ip_vs_lblcr"); | ||
| 857 | #endif | ||
| 858 | unregister_sysctl_table(sysctl_header); | 782 | unregister_sysctl_table(sysctl_header); |
| 859 | unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler); | 783 | unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler); |
| 860 | } | 784 | } |
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c index c99f2a33fb9e..bd930efc18da 100644 --- a/net/ipv4/ipvs/ip_vs_sync.c +++ b/net/ipv4/ipvs/ip_vs_sync.c | |||
| @@ -72,7 +72,6 @@ struct ip_vs_sync_thread_data { | |||
| 72 | int state; | 72 | int state; |
| 73 | }; | 73 | }; |
| 74 | 74 | ||
| 75 | #define IP_VS_SYNC_CONN_TIMEOUT (3*60*HZ) | ||
| 76 | #define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn)) | 75 | #define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn)) |
| 77 | #define FULL_CONN_SIZE \ | 76 | #define FULL_CONN_SIZE \ |
| 78 | (sizeof(struct ip_vs_sync_conn) + sizeof(struct ip_vs_sync_conn_options)) | 77 | (sizeof(struct ip_vs_sync_conn) + sizeof(struct ip_vs_sync_conn_options)) |
| @@ -284,6 +283,8 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen) | |||
| 284 | struct ip_vs_sync_conn *s; | 283 | struct ip_vs_sync_conn *s; |
| 285 | struct ip_vs_sync_conn_options *opt; | 284 | struct ip_vs_sync_conn_options *opt; |
| 286 | struct ip_vs_conn *cp; | 285 | struct ip_vs_conn *cp; |
| 286 | struct ip_vs_protocol *pp; | ||
| 287 | struct ip_vs_dest *dest; | ||
| 287 | char *p; | 288 | char *p; |
| 288 | int i; | 289 | int i; |
| 289 | 290 | ||
| @@ -317,20 +318,34 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen) | |||
| 317 | s->caddr, s->cport, | 318 | s->caddr, s->cport, |
| 318 | s->vaddr, s->vport); | 319 | s->vaddr, s->vport); |
| 319 | if (!cp) { | 320 | if (!cp) { |
| 321 | /* | ||
| 322 | * Find the appropriate destination for the connection. | ||
| 323 | * If it is not found the connection will remain unbound | ||
| 324 | * but still handled. | ||
| 325 | */ | ||
| 326 | dest = ip_vs_find_dest(s->daddr, s->dport, | ||
| 327 | s->vaddr, s->vport, | ||
| 328 | s->protocol); | ||
| 320 | cp = ip_vs_conn_new(s->protocol, | 329 | cp = ip_vs_conn_new(s->protocol, |
| 321 | s->caddr, s->cport, | 330 | s->caddr, s->cport, |
| 322 | s->vaddr, s->vport, | 331 | s->vaddr, s->vport, |
| 323 | s->daddr, s->dport, | 332 | s->daddr, s->dport, |
| 324 | flags, NULL); | 333 | flags, dest); |
| 334 | if (dest) | ||
| 335 | atomic_dec(&dest->refcnt); | ||
| 325 | if (!cp) { | 336 | if (!cp) { |
| 326 | IP_VS_ERR("ip_vs_conn_new failed\n"); | 337 | IP_VS_ERR("ip_vs_conn_new failed\n"); |
| 327 | return; | 338 | return; |
| 328 | } | 339 | } |
| 329 | cp->state = ntohs(s->state); | 340 | cp->state = ntohs(s->state); |
| 330 | } else if (!cp->dest) { | 341 | } else if (!cp->dest) { |
| 331 | /* it is an entry created by the synchronization */ | 342 | dest = ip_vs_try_bind_dest(cp); |
| 332 | cp->state = ntohs(s->state); | 343 | if (!dest) { |
| 333 | cp->flags = flags | IP_VS_CONN_F_HASHED; | 344 | /* it is an unbound entry created by |
| 345 | * synchronization */ | ||
| 346 | cp->flags = flags | IP_VS_CONN_F_HASHED; | ||
| 347 | } else | ||
| 348 | atomic_dec(&dest->refcnt); | ||
| 334 | } /* Note that we don't touch its state and flags | 349 | } /* Note that we don't touch its state and flags |
| 335 | if it is a normal entry. */ | 350 | if it is a normal entry. */ |
| 336 | 351 | ||
| @@ -342,7 +357,9 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen) | |||
| 342 | p += SIMPLE_CONN_SIZE; | 357 | p += SIMPLE_CONN_SIZE; |
| 343 | 358 | ||
| 344 | atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]); | 359 | atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]); |
| 345 | cp->timeout = IP_VS_SYNC_CONN_TIMEOUT; | 360 | cp->state = ntohs(s->state); |
| 361 | pp = ip_vs_proto_get(s->protocol); | ||
| 362 | cp->timeout = pp->timeout_table[cp->state]; | ||
| 346 | ip_vs_conn_put(cp); | 363 | ip_vs_conn_put(cp); |
| 347 | 364 | ||
| 348 | if (p > buffer+buflen) { | 365 | if (p > buffer+buflen) { |
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c index d0a92dec1050..7c074e386c17 100644 --- a/net/ipv4/ipvs/ip_vs_xmit.c +++ b/net/ipv4/ipvs/ip_vs_xmit.c | |||
| @@ -325,7 +325,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
| 325 | __be16 df = old_iph->frag_off; | 325 | __be16 df = old_iph->frag_off; |
| 326 | sk_buff_data_t old_transport_header = skb->transport_header; | 326 | sk_buff_data_t old_transport_header = skb->transport_header; |
| 327 | struct iphdr *iph; /* Our new IP header */ | 327 | struct iphdr *iph; /* Our new IP header */ |
| 328 | int max_headroom; /* The extra header space needed */ | 328 | unsigned int max_headroom; /* The extra header space needed */ |
| 329 | int mtu; | 329 | int mtu; |
| 330 | 330 | ||
| 331 | EnterFunction(10); | 331 | EnterFunction(10); |
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index fa97947c6ae1..9aca9c55687c 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig | |||
| @@ -128,7 +128,7 @@ config IP_NF_MATCH_ADDRTYPE | |||
| 128 | eg. UNICAST, LOCAL, BROADCAST, ... | 128 | eg. UNICAST, LOCAL, BROADCAST, ... |
| 129 | 129 | ||
| 130 | If you want to compile it as a module, say M here and read | 130 | If you want to compile it as a module, say M here and read |
| 131 | <file:Documentation/modules.txt>. If unsure, say `N'. | 131 | <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. |
| 132 | 132 | ||
| 133 | # `filter', generic and specific targets | 133 | # `filter', generic and specific targets |
| 134 | config IP_NF_FILTER | 134 | config IP_NF_FILTER |
| @@ -371,7 +371,7 @@ config IP_NF_RAW | |||
| 371 | and OUTPUT chains. | 371 | and OUTPUT chains. |
| 372 | 372 | ||
| 373 | If you want to compile it as a module, say M here and read | 373 | If you want to compile it as a module, say M here and read |
| 374 | <file:Documentation/modules.txt>. If unsure, say `N'. | 374 | <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. |
| 375 | 375 | ||
| 376 | # ARP tables | 376 | # ARP tables |
| 377 | config IP_NF_ARPTABLES | 377 | config IP_NF_ARPTABLES |
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile index 409d273f6f82..7456833d6ade 100644 --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile | |||
| @@ -41,27 +41,27 @@ obj-$(CONFIG_NF_NAT) += iptable_nat.o | |||
| 41 | obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o | 41 | obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o |
| 42 | 42 | ||
| 43 | # matches | 43 | # matches |
| 44 | obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o | ||
| 45 | obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o | ||
| 46 | obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o | ||
| 44 | obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o | 47 | obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o |
| 45 | obj-$(CONFIG_IP_NF_MATCH_OWNER) += ipt_owner.o | 48 | obj-$(CONFIG_IP_NF_MATCH_OWNER) += ipt_owner.o |
| 46 | obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o | ||
| 47 | obj-$(CONFIG_IP_NF_MATCH_RECENT) += ipt_recent.o | 49 | obj-$(CONFIG_IP_NF_MATCH_RECENT) += ipt_recent.o |
| 48 | obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o | 50 | obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o |
| 49 | obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o | ||
| 50 | obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o | 51 | obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o |
| 51 | obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o | ||
| 52 | 52 | ||
| 53 | # targets | 53 | # targets |
| 54 | obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o | 54 | obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o |
| 55 | obj-$(CONFIG_IP_NF_TARGET_TOS) += ipt_TOS.o | ||
| 56 | obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o | 55 | obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o |
| 56 | obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o | ||
| 57 | obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o | 57 | obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o |
| 58 | obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o | ||
| 59 | obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o | 58 | obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o |
| 59 | obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o | ||
| 60 | obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o | ||
| 60 | obj-$(CONFIG_IP_NF_TARGET_SAME) += ipt_SAME.o | 61 | obj-$(CONFIG_IP_NF_TARGET_SAME) += ipt_SAME.o |
| 61 | obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o | 62 | obj-$(CONFIG_IP_NF_TARGET_TOS) += ipt_TOS.o |
| 62 | obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o | ||
| 63 | obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o | ||
| 64 | obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o | 63 | obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o |
| 64 | obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o | ||
| 65 | 65 | ||
| 66 | # generic ARP tables | 66 | # generic ARP tables |
| 67 | obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o | 67 | obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o |
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index 10a2ce09fd8e..14d64a383db1 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/spinlock.h> | 22 | #include <linux/spinlock.h> |
| 23 | #include <linux/sysctl.h> | 23 | #include <linux/sysctl.h> |
| 24 | #include <linux/proc_fs.h> | 24 | #include <linux/proc_fs.h> |
| 25 | #include <linux/seq_file.h> | ||
| 25 | #include <linux/security.h> | 26 | #include <linux/security.h> |
| 26 | #include <linux/mutex.h> | 27 | #include <linux/mutex.h> |
| 27 | #include <net/net_namespace.h> | 28 | #include <net/net_namespace.h> |
| @@ -607,15 +608,11 @@ static ctl_table ipq_root_table[] = { | |||
| 607 | { .ctl_name = 0 } | 608 | { .ctl_name = 0 } |
| 608 | }; | 609 | }; |
| 609 | 610 | ||
| 610 | #ifdef CONFIG_PROC_FS | 611 | static int ip_queue_show(struct seq_file *m, void *v) |
| 611 | static int | ||
| 612 | ipq_get_info(char *buffer, char **start, off_t offset, int length) | ||
| 613 | { | 612 | { |
| 614 | int len; | ||
| 615 | |||
| 616 | read_lock_bh(&queue_lock); | 613 | read_lock_bh(&queue_lock); |
| 617 | 614 | ||
| 618 | len = sprintf(buffer, | 615 | seq_printf(m, |
| 619 | "Peer PID : %d\n" | 616 | "Peer PID : %d\n" |
| 620 | "Copy mode : %hu\n" | 617 | "Copy mode : %hu\n" |
| 621 | "Copy range : %u\n" | 618 | "Copy range : %u\n" |
| @@ -632,16 +629,21 @@ ipq_get_info(char *buffer, char **start, off_t offset, int length) | |||
| 632 | queue_user_dropped); | 629 | queue_user_dropped); |
| 633 | 630 | ||
| 634 | read_unlock_bh(&queue_lock); | 631 | read_unlock_bh(&queue_lock); |
| 632 | return 0; | ||
| 633 | } | ||
| 635 | 634 | ||
| 636 | *start = buffer + offset; | 635 | static int ip_queue_open(struct inode *inode, struct file *file) |
| 637 | len -= offset; | 636 | { |
| 638 | if (len > length) | 637 | return single_open(file, ip_queue_show, NULL); |
| 639 | len = length; | ||
| 640 | else if (len < 0) | ||
| 641 | len = 0; | ||
| 642 | return len; | ||
| 643 | } | 638 | } |
| 644 | #endif /* CONFIG_PROC_FS */ | 639 | |
| 640 | static const struct file_operations ip_queue_proc_fops = { | ||
| 641 | .open = ip_queue_open, | ||
| 642 | .read = seq_read, | ||
| 643 | .llseek = seq_lseek, | ||
| 644 | .release = single_release, | ||
| 645 | .owner = THIS_MODULE, | ||
| 646 | }; | ||
| 645 | 647 | ||
| 646 | static struct nf_queue_handler nfqh = { | 648 | static struct nf_queue_handler nfqh = { |
| 647 | .name = "ip_queue", | 649 | .name = "ip_queue", |
| @@ -661,10 +663,11 @@ static int __init ip_queue_init(void) | |||
| 661 | goto cleanup_netlink_notifier; | 663 | goto cleanup_netlink_notifier; |
| 662 | } | 664 | } |
| 663 | 665 | ||
| 664 | proc = proc_net_create(&init_net, IPQ_PROC_FS_NAME, 0, ipq_get_info); | 666 | proc = create_proc_entry(IPQ_PROC_FS_NAME, 0, init_net.proc_net); |
| 665 | if (proc) | 667 | if (proc) { |
| 666 | proc->owner = THIS_MODULE; | 668 | proc->owner = THIS_MODULE; |
| 667 | else { | 669 | proc->proc_fops = &ip_queue_proc_fops; |
| 670 | } else { | ||
| 668 | printk(KERN_ERR "ip_queue: failed to create proc entry\n"); | 671 | printk(KERN_ERR "ip_queue: failed to create proc entry\n"); |
| 669 | goto cleanup_ipqnl; | 672 | goto cleanup_ipqnl; |
| 670 | } | 673 | } |
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c index 35a5aa69cd92..c31b87668250 100644 --- a/net/ipv4/netfilter/nf_nat_amanda.c +++ b/net/ipv4/netfilter/nf_nat_amanda.c | |||
| @@ -69,7 +69,7 @@ static void __exit nf_nat_amanda_fini(void) | |||
| 69 | 69 | ||
| 70 | static int __init nf_nat_amanda_init(void) | 70 | static int __init nf_nat_amanda_init(void) |
| 71 | { | 71 | { |
| 72 | BUG_ON(rcu_dereference(nf_nat_amanda_hook)); | 72 | BUG_ON(nf_nat_amanda_hook != NULL); |
| 73 | rcu_assign_pointer(nf_nat_amanda_hook, help); | 73 | rcu_assign_pointer(nf_nat_amanda_hook, help); |
| 74 | return 0; | 74 | return 0; |
| 75 | } | 75 | } |
diff --git a/net/ipv4/netfilter/nf_nat_ftp.c b/net/ipv4/netfilter/nf_nat_ftp.c index e1a16d3ea4cb..a1d5d58a58bf 100644 --- a/net/ipv4/netfilter/nf_nat_ftp.c +++ b/net/ipv4/netfilter/nf_nat_ftp.c | |||
| @@ -147,7 +147,7 @@ static void __exit nf_nat_ftp_fini(void) | |||
| 147 | 147 | ||
| 148 | static int __init nf_nat_ftp_init(void) | 148 | static int __init nf_nat_ftp_init(void) |
| 149 | { | 149 | { |
| 150 | BUG_ON(rcu_dereference(nf_nat_ftp_hook)); | 150 | BUG_ON(nf_nat_ftp_hook != NULL); |
| 151 | rcu_assign_pointer(nf_nat_ftp_hook, nf_nat_ftp); | 151 | rcu_assign_pointer(nf_nat_ftp_hook, nf_nat_ftp); |
| 152 | return 0; | 152 | return 0; |
| 153 | } | 153 | } |
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c index a868c8c41328..93e18ef114f2 100644 --- a/net/ipv4/netfilter/nf_nat_h323.c +++ b/net/ipv4/netfilter/nf_nat_h323.c | |||
| @@ -544,15 +544,15 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct, | |||
| 544 | /****************************************************************************/ | 544 | /****************************************************************************/ |
| 545 | static int __init init(void) | 545 | static int __init init(void) |
| 546 | { | 546 | { |
| 547 | BUG_ON(rcu_dereference(set_h245_addr_hook) != NULL); | 547 | BUG_ON(set_h245_addr_hook != NULL); |
| 548 | BUG_ON(rcu_dereference(set_h225_addr_hook) != NULL); | 548 | BUG_ON(set_h225_addr_hook != NULL); |
| 549 | BUG_ON(rcu_dereference(set_sig_addr_hook) != NULL); | 549 | BUG_ON(set_sig_addr_hook != NULL); |
| 550 | BUG_ON(rcu_dereference(set_ras_addr_hook) != NULL); | 550 | BUG_ON(set_ras_addr_hook != NULL); |
| 551 | BUG_ON(rcu_dereference(nat_rtp_rtcp_hook) != NULL); | 551 | BUG_ON(nat_rtp_rtcp_hook != NULL); |
| 552 | BUG_ON(rcu_dereference(nat_t120_hook) != NULL); | 552 | BUG_ON(nat_t120_hook != NULL); |
| 553 | BUG_ON(rcu_dereference(nat_h245_hook) != NULL); | 553 | BUG_ON(nat_h245_hook != NULL); |
| 554 | BUG_ON(rcu_dereference(nat_callforwarding_hook) != NULL); | 554 | BUG_ON(nat_callforwarding_hook != NULL); |
| 555 | BUG_ON(rcu_dereference(nat_q931_hook) != NULL); | 555 | BUG_ON(nat_q931_hook != NULL); |
| 556 | 556 | ||
| 557 | rcu_assign_pointer(set_h245_addr_hook, set_h245_addr); | 557 | rcu_assign_pointer(set_h245_addr_hook, set_h245_addr); |
| 558 | rcu_assign_pointer(set_h225_addr_hook, set_h225_addr); | 558 | rcu_assign_pointer(set_h225_addr_hook, set_h225_addr); |
diff --git a/net/ipv4/netfilter/nf_nat_irc.c b/net/ipv4/netfilter/nf_nat_irc.c index 766e2c16c6b9..fe6f9cef6c85 100644 --- a/net/ipv4/netfilter/nf_nat_irc.c +++ b/net/ipv4/netfilter/nf_nat_irc.c | |||
| @@ -74,7 +74,7 @@ static void __exit nf_nat_irc_fini(void) | |||
| 74 | 74 | ||
| 75 | static int __init nf_nat_irc_init(void) | 75 | static int __init nf_nat_irc_init(void) |
| 76 | { | 76 | { |
| 77 | BUG_ON(rcu_dereference(nf_nat_irc_hook)); | 77 | BUG_ON(nf_nat_irc_hook != NULL); |
| 78 | rcu_assign_pointer(nf_nat_irc_hook, help); | 78 | rcu_assign_pointer(nf_nat_irc_hook, help); |
| 79 | return 0; | 79 | return 0; |
| 80 | } | 80 | } |
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c index e1385a099079..6817e7995f35 100644 --- a/net/ipv4/netfilter/nf_nat_pptp.c +++ b/net/ipv4/netfilter/nf_nat_pptp.c | |||
| @@ -281,16 +281,16 @@ static int __init nf_nat_helper_pptp_init(void) | |||
| 281 | { | 281 | { |
| 282 | nf_nat_need_gre(); | 282 | nf_nat_need_gre(); |
| 283 | 283 | ||
| 284 | BUG_ON(rcu_dereference(nf_nat_pptp_hook_outbound)); | 284 | BUG_ON(nf_nat_pptp_hook_outbound != NULL); |
| 285 | rcu_assign_pointer(nf_nat_pptp_hook_outbound, pptp_outbound_pkt); | 285 | rcu_assign_pointer(nf_nat_pptp_hook_outbound, pptp_outbound_pkt); |
| 286 | 286 | ||
| 287 | BUG_ON(rcu_dereference(nf_nat_pptp_hook_inbound)); | 287 | BUG_ON(nf_nat_pptp_hook_inbound != NULL); |
| 288 | rcu_assign_pointer(nf_nat_pptp_hook_inbound, pptp_inbound_pkt); | 288 | rcu_assign_pointer(nf_nat_pptp_hook_inbound, pptp_inbound_pkt); |
| 289 | 289 | ||
| 290 | BUG_ON(rcu_dereference(nf_nat_pptp_hook_exp_gre)); | 290 | BUG_ON(nf_nat_pptp_hook_exp_gre != NULL); |
| 291 | rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, pptp_exp_gre); | 291 | rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, pptp_exp_gre); |
| 292 | 292 | ||
| 293 | BUG_ON(rcu_dereference(nf_nat_pptp_hook_expectfn)); | 293 | BUG_ON(nf_nat_pptp_hook_expectfn != NULL); |
| 294 | rcu_assign_pointer(nf_nat_pptp_hook_expectfn, pptp_nat_expected); | 294 | rcu_assign_pointer(nf_nat_pptp_hook_expectfn, pptp_nat_expected); |
| 295 | return 0; | 295 | return 0; |
| 296 | } | 296 | } |
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c index ce9edbcc01e3..3ca98971a1e9 100644 --- a/net/ipv4/netfilter/nf_nat_sip.c +++ b/net/ipv4/netfilter/nf_nat_sip.c | |||
| @@ -293,8 +293,8 @@ static void __exit nf_nat_sip_fini(void) | |||
| 293 | 293 | ||
| 294 | static int __init nf_nat_sip_init(void) | 294 | static int __init nf_nat_sip_init(void) |
| 295 | { | 295 | { |
| 296 | BUG_ON(rcu_dereference(nf_nat_sip_hook)); | 296 | BUG_ON(nf_nat_sip_hook != NULL); |
| 297 | BUG_ON(rcu_dereference(nf_nat_sdp_hook)); | 297 | BUG_ON(nf_nat_sdp_hook != NULL); |
| 298 | rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip); | 298 | rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip); |
| 299 | rcu_assign_pointer(nf_nat_sdp_hook, ip_nat_sdp); | 299 | rcu_assign_pointer(nf_nat_sdp_hook, ip_nat_sdp); |
| 300 | return 0; | 300 | return 0; |
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c index 0ecec701cb44..1360a94766dd 100644 --- a/net/ipv4/netfilter/nf_nat_tftp.c +++ b/net/ipv4/netfilter/nf_nat_tftp.c | |||
| @@ -43,7 +43,7 @@ static void __exit nf_nat_tftp_fini(void) | |||
| 43 | 43 | ||
| 44 | static int __init nf_nat_tftp_init(void) | 44 | static int __init nf_nat_tftp_init(void) |
| 45 | { | 45 | { |
| 46 | BUG_ON(rcu_dereference(nf_nat_tftp_hook)); | 46 | BUG_ON(nf_nat_tftp_hook != NULL); |
| 47 | rcu_assign_pointer(nf_nat_tftp_hook, help); | 47 | rcu_assign_pointer(nf_nat_tftp_hook, help); |
| 48 | return 0; | 48 | return 0; |
| 49 | } | 49 | } |
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index fd16cb8f8abe..ce34b281803f 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c | |||
| @@ -46,17 +46,6 @@ | |||
| 46 | #include <net/sock.h> | 46 | #include <net/sock.h> |
| 47 | #include <net/raw.h> | 47 | #include <net/raw.h> |
| 48 | 48 | ||
| 49 | static int fold_prot_inuse(struct proto *proto) | ||
| 50 | { | ||
| 51 | int res = 0; | ||
| 52 | int cpu; | ||
| 53 | |||
| 54 | for_each_possible_cpu(cpu) | ||
| 55 | res += proto->stats[cpu].inuse; | ||
| 56 | |||
| 57 | return res; | ||
| 58 | } | ||
| 59 | |||
| 60 | /* | 49 | /* |
| 61 | * Report socket allocation statistics [mea@utu.fi] | 50 | * Report socket allocation statistics [mea@utu.fi] |
| 62 | */ | 51 | */ |
| @@ -64,12 +53,12 @@ static int sockstat_seq_show(struct seq_file *seq, void *v) | |||
| 64 | { | 53 | { |
| 65 | socket_seq_show(seq); | 54 | socket_seq_show(seq); |
| 66 | seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n", | 55 | seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n", |
| 67 | fold_prot_inuse(&tcp_prot), atomic_read(&tcp_orphan_count), | 56 | sock_prot_inuse(&tcp_prot), atomic_read(&tcp_orphan_count), |
| 68 | tcp_death_row.tw_count, atomic_read(&tcp_sockets_allocated), | 57 | tcp_death_row.tw_count, atomic_read(&tcp_sockets_allocated), |
| 69 | atomic_read(&tcp_memory_allocated)); | 58 | atomic_read(&tcp_memory_allocated)); |
| 70 | seq_printf(seq, "UDP: inuse %d\n", fold_prot_inuse(&udp_prot)); | 59 | seq_printf(seq, "UDP: inuse %d\n", sock_prot_inuse(&udp_prot)); |
| 71 | seq_printf(seq, "UDPLITE: inuse %d\n", fold_prot_inuse(&udplite_prot)); | 60 | seq_printf(seq, "UDPLITE: inuse %d\n", sock_prot_inuse(&udplite_prot)); |
| 72 | seq_printf(seq, "RAW: inuse %d\n", fold_prot_inuse(&raw_prot)); | 61 | seq_printf(seq, "RAW: inuse %d\n", sock_prot_inuse(&raw_prot)); |
| 73 | seq_printf(seq, "FRAG: inuse %d memory %d\n", | 62 | seq_printf(seq, "FRAG: inuse %d memory %d\n", |
| 74 | ip_frag_nqueues(), ip_frag_mem()); | 63 | ip_frag_nqueues(), ip_frag_mem()); |
| 75 | return 0; | 64 | return 0; |
| @@ -121,14 +110,6 @@ static const struct snmp_mib snmp4_ipextstats_list[] = { | |||
| 121 | SNMP_MIB_SENTINEL | 110 | SNMP_MIB_SENTINEL |
| 122 | }; | 111 | }; |
| 123 | 112 | ||
| 124 | static const struct snmp_mib snmp4_icmp_list[] = { | ||
| 125 | SNMP_MIB_ITEM("InMsgs", ICMP_MIB_INMSGS), | ||
| 126 | SNMP_MIB_ITEM("InErrors", ICMP_MIB_INERRORS), | ||
| 127 | SNMP_MIB_ITEM("OutMsgs", ICMP_MIB_OUTMSGS), | ||
| 128 | SNMP_MIB_ITEM("OutErrors", ICMP_MIB_OUTERRORS), | ||
| 129 | SNMP_MIB_SENTINEL | ||
| 130 | }; | ||
| 131 | |||
| 132 | static struct { | 113 | static struct { |
| 133 | char *name; | 114 | char *name; |
| 134 | int index; | 115 | int index; |
| @@ -312,7 +293,7 @@ static void icmp_put(struct seq_file *seq) | |||
| 312 | for (i=0; icmpmibmap[i].name != NULL; i++) | 293 | for (i=0; icmpmibmap[i].name != NULL; i++) |
| 313 | seq_printf(seq, " %lu", | 294 | seq_printf(seq, " %lu", |
| 314 | snmp_fold_field((void **) icmpmsg_statistics, | 295 | snmp_fold_field((void **) icmpmsg_statistics, |
| 315 | icmpmibmap[i].index)); | 296 | icmpmibmap[i].index | 0x100)); |
| 316 | } | 297 | } |
| 317 | 298 | ||
| 318 | /* | 299 | /* |
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 3916faca3afe..66b42f547bf9 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
| @@ -760,6 +760,8 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg) | |||
| 760 | } | 760 | } |
| 761 | } | 761 | } |
| 762 | 762 | ||
| 763 | DEFINE_PROTO_INUSE(raw) | ||
| 764 | |||
| 763 | struct proto raw_prot = { | 765 | struct proto raw_prot = { |
| 764 | .name = "RAW", | 766 | .name = "RAW", |
| 765 | .owner = THIS_MODULE, | 767 | .owner = THIS_MODULE, |
| @@ -781,6 +783,7 @@ struct proto raw_prot = { | |||
| 781 | .compat_setsockopt = compat_raw_setsockopt, | 783 | .compat_setsockopt = compat_raw_setsockopt, |
| 782 | .compat_getsockopt = compat_raw_getsockopt, | 784 | .compat_getsockopt = compat_raw_getsockopt, |
| 783 | #endif | 785 | #endif |
| 786 | REF_PROTO_INUSE(raw) | ||
| 784 | }; | 787 | }; |
| 785 | 788 | ||
| 786 | #ifdef CONFIG_PROC_FS | 789 | #ifdef CONFIG_PROC_FS |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 2e6ad6dbba6c..8e65182f7af1 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
| @@ -2453,14 +2453,14 @@ void __init tcp_init(void) | |||
| 2453 | 0, | 2453 | 0, |
| 2454 | &tcp_hashinfo.ehash_size, | 2454 | &tcp_hashinfo.ehash_size, |
| 2455 | NULL, | 2455 | NULL, |
| 2456 | 0); | 2456 | thash_entries ? 0 : 512 * 1024); |
| 2457 | tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size; | 2457 | tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size; |
| 2458 | for (i = 0; i < tcp_hashinfo.ehash_size; i++) { | 2458 | for (i = 0; i < tcp_hashinfo.ehash_size; i++) { |
| 2459 | rwlock_init(&tcp_hashinfo.ehash[i].lock); | ||
| 2460 | INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain); | 2459 | INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain); |
| 2461 | INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain); | 2460 | INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain); |
| 2462 | } | 2461 | } |
| 2463 | 2462 | if (inet_ehash_locks_alloc(&tcp_hashinfo)) | |
| 2463 | panic("TCP: failed to alloc ehash_locks"); | ||
| 2464 | tcp_hashinfo.bhash = | 2464 | tcp_hashinfo.bhash = |
| 2465 | alloc_large_system_hash("TCP bind", | 2465 | alloc_large_system_hash("TCP bind", |
| 2466 | sizeof(struct inet_bind_hashbucket), | 2466 | sizeof(struct inet_bind_hashbucket), |
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c index 3904d2158a92..2fbcc7d1b1a0 100644 --- a/net/ipv4/tcp_diag.c +++ b/net/ipv4/tcp_diag.c | |||
| @@ -56,3 +56,4 @@ static void __exit tcp_diag_exit(void) | |||
| 56 | module_init(tcp_diag_init); | 56 | module_init(tcp_diag_init); |
| 57 | module_exit(tcp_diag_exit); | 57 | module_exit(tcp_diag_exit); |
| 58 | MODULE_LICENSE("GPL"); | 58 | MODULE_LICENSE("GPL"); |
| 59 | MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_INET_DIAG, TCPDIAG_GETSOCK); | ||
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 9288220b73a8..ca9590f4f520 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
| @@ -103,7 +103,7 @@ int sysctl_tcp_abc __read_mostly; | |||
| 103 | #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ | 103 | #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ |
| 104 | #define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */ | 104 | #define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */ |
| 105 | #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ | 105 | #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ |
| 106 | #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained DSACK info */ | 106 | #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ |
| 107 | #define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */ | 107 | #define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */ |
| 108 | 108 | ||
| 109 | #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) | 109 | #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) |
| @@ -866,7 +866,7 @@ static void tcp_disable_fack(struct tcp_sock *tp) | |||
| 866 | tp->rx_opt.sack_ok &= ~2; | 866 | tp->rx_opt.sack_ok &= ~2; |
| 867 | } | 867 | } |
| 868 | 868 | ||
| 869 | /* Take a notice that peer is sending DSACKs */ | 869 | /* Take a notice that peer is sending D-SACKs */ |
| 870 | static void tcp_dsack_seen(struct tcp_sock *tp) | 870 | static void tcp_dsack_seen(struct tcp_sock *tp) |
| 871 | { | 871 | { |
| 872 | tp->rx_opt.sack_ok |= 4; | 872 | tp->rx_opt.sack_ok |= 4; |
| @@ -1058,7 +1058,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric, | |||
| 1058 | * | 1058 | * |
| 1059 | * With D-SACK the lower bound is extended to cover sequence space below | 1059 | * With D-SACK the lower bound is extended to cover sequence space below |
| 1060 | * SND.UNA down to undo_marker, which is the last point of interest. Yet | 1060 | * SND.UNA down to undo_marker, which is the last point of interest. Yet |
| 1061 | * again, DSACK block must not to go across snd_una (for the same reason as | 1061 | * again, D-SACK block must not to go across snd_una (for the same reason as |
| 1062 | * for the normal SACK blocks, explained above). But there all simplicity | 1062 | * for the normal SACK blocks, explained above). But there all simplicity |
| 1063 | * ends, TCP might receive valid D-SACKs below that. As long as they reside | 1063 | * ends, TCP might receive valid D-SACKs below that. As long as they reside |
| 1064 | * fully below undo_marker they do not affect behavior in anyway and can | 1064 | * fully below undo_marker they do not affect behavior in anyway and can |
| @@ -1080,7 +1080,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack, | |||
| 1080 | if (!before(start_seq, tp->snd_nxt)) | 1080 | if (!before(start_seq, tp->snd_nxt)) |
| 1081 | return 0; | 1081 | return 0; |
| 1082 | 1082 | ||
| 1083 | /* In outstanding window? ...This is valid exit for DSACKs too. | 1083 | /* In outstanding window? ...This is valid exit for D-SACKs too. |
| 1084 | * start_seq == snd_una is non-sensical (see comments above) | 1084 | * start_seq == snd_una is non-sensical (see comments above) |
| 1085 | */ | 1085 | */ |
| 1086 | if (after(start_seq, tp->snd_una)) | 1086 | if (after(start_seq, tp->snd_una)) |
| @@ -1204,8 +1204,8 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb, | |||
| 1204 | * which may fail and creates some hassle (caller must handle error case | 1204 | * which may fail and creates some hassle (caller must handle error case |
| 1205 | * returns). | 1205 | * returns). |
| 1206 | */ | 1206 | */ |
| 1207 | int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, | 1207 | static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, |
| 1208 | u32 start_seq, u32 end_seq) | 1208 | u32 start_seq, u32 end_seq) |
| 1209 | { | 1209 | { |
| 1210 | int in_sack, err; | 1210 | int in_sack, err; |
| 1211 | unsigned int pkt_len; | 1211 | unsigned int pkt_len; |
| @@ -1248,6 +1248,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ | |||
| 1248 | int cached_fack_count; | 1248 | int cached_fack_count; |
| 1249 | int i; | 1249 | int i; |
| 1250 | int first_sack_index; | 1250 | int first_sack_index; |
| 1251 | int force_one_sack; | ||
| 1251 | 1252 | ||
| 1252 | if (!tp->sacked_out) { | 1253 | if (!tp->sacked_out) { |
| 1253 | if (WARN_ON(tp->fackets_out)) | 1254 | if (WARN_ON(tp->fackets_out)) |
| @@ -1272,18 +1273,18 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ | |||
| 1272 | * if the only SACK change is the increase of the end_seq of | 1273 | * if the only SACK change is the increase of the end_seq of |
| 1273 | * the first block then only apply that SACK block | 1274 | * the first block then only apply that SACK block |
| 1274 | * and use retrans queue hinting otherwise slowpath */ | 1275 | * and use retrans queue hinting otherwise slowpath */ |
| 1275 | flag = 1; | 1276 | force_one_sack = 1; |
| 1276 | for (i = 0; i < num_sacks; i++) { | 1277 | for (i = 0; i < num_sacks; i++) { |
| 1277 | __be32 start_seq = sp[i].start_seq; | 1278 | __be32 start_seq = sp[i].start_seq; |
| 1278 | __be32 end_seq = sp[i].end_seq; | 1279 | __be32 end_seq = sp[i].end_seq; |
| 1279 | 1280 | ||
| 1280 | if (i == 0) { | 1281 | if (i == 0) { |
| 1281 | if (tp->recv_sack_cache[i].start_seq != start_seq) | 1282 | if (tp->recv_sack_cache[i].start_seq != start_seq) |
| 1282 | flag = 0; | 1283 | force_one_sack = 0; |
| 1283 | } else { | 1284 | } else { |
| 1284 | if ((tp->recv_sack_cache[i].start_seq != start_seq) || | 1285 | if ((tp->recv_sack_cache[i].start_seq != start_seq) || |
| 1285 | (tp->recv_sack_cache[i].end_seq != end_seq)) | 1286 | (tp->recv_sack_cache[i].end_seq != end_seq)) |
| 1286 | flag = 0; | 1287 | force_one_sack = 0; |
| 1287 | } | 1288 | } |
| 1288 | tp->recv_sack_cache[i].start_seq = start_seq; | 1289 | tp->recv_sack_cache[i].start_seq = start_seq; |
| 1289 | tp->recv_sack_cache[i].end_seq = end_seq; | 1290 | tp->recv_sack_cache[i].end_seq = end_seq; |
| @@ -1295,7 +1296,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ | |||
| 1295 | } | 1296 | } |
| 1296 | 1297 | ||
| 1297 | first_sack_index = 0; | 1298 | first_sack_index = 0; |
| 1298 | if (flag) | 1299 | if (force_one_sack) |
| 1299 | num_sacks = 1; | 1300 | num_sacks = 1; |
| 1300 | else { | 1301 | else { |
| 1301 | int j; | 1302 | int j; |
| @@ -1321,9 +1322,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ | |||
| 1321 | } | 1322 | } |
| 1322 | } | 1323 | } |
| 1323 | 1324 | ||
| 1324 | /* clear flag as used for different purpose in following code */ | ||
| 1325 | flag = 0; | ||
| 1326 | |||
| 1327 | /* Use SACK fastpath hint if valid */ | 1325 | /* Use SACK fastpath hint if valid */ |
| 1328 | cached_skb = tp->fastpath_skb_hint; | 1326 | cached_skb = tp->fastpath_skb_hint; |
| 1329 | cached_fack_count = tp->fastpath_cnt_hint; | 1327 | cached_fack_count = tp->fastpath_cnt_hint; |
| @@ -1332,12 +1330,15 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ | |||
| 1332 | cached_fack_count = 0; | 1330 | cached_fack_count = 0; |
| 1333 | } | 1331 | } |
| 1334 | 1332 | ||
| 1335 | for (i=0; i<num_sacks; i++, sp++) { | 1333 | for (i = 0; i < num_sacks; i++) { |
| 1336 | struct sk_buff *skb; | 1334 | struct sk_buff *skb; |
| 1337 | __u32 start_seq = ntohl(sp->start_seq); | 1335 | __u32 start_seq = ntohl(sp->start_seq); |
| 1338 | __u32 end_seq = ntohl(sp->end_seq); | 1336 | __u32 end_seq = ntohl(sp->end_seq); |
| 1339 | int fack_count; | 1337 | int fack_count; |
| 1340 | int dup_sack = (found_dup_sack && (i == first_sack_index)); | 1338 | int dup_sack = (found_dup_sack && (i == first_sack_index)); |
| 1339 | int next_dup = (found_dup_sack && (i+1 == first_sack_index)); | ||
| 1340 | |||
| 1341 | sp++; | ||
| 1341 | 1342 | ||
| 1342 | if (!tcp_is_sackblock_valid(tp, dup_sack, start_seq, end_seq)) { | 1343 | if (!tcp_is_sackblock_valid(tp, dup_sack, start_seq, end_seq)) { |
| 1343 | if (dup_sack) { | 1344 | if (dup_sack) { |
| @@ -1363,7 +1364,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ | |||
| 1363 | flag |= FLAG_DATA_LOST; | 1364 | flag |= FLAG_DATA_LOST; |
| 1364 | 1365 | ||
| 1365 | tcp_for_write_queue_from(skb, sk) { | 1366 | tcp_for_write_queue_from(skb, sk) { |
| 1366 | int in_sack; | 1367 | int in_sack = 0; |
| 1367 | u8 sacked; | 1368 | u8 sacked; |
| 1368 | 1369 | ||
| 1369 | if (skb == tcp_send_head(sk)) | 1370 | if (skb == tcp_send_head(sk)) |
| @@ -1382,7 +1383,23 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ | |||
| 1382 | if (!before(TCP_SKB_CB(skb)->seq, end_seq)) | 1383 | if (!before(TCP_SKB_CB(skb)->seq, end_seq)) |
| 1383 | break; | 1384 | break; |
| 1384 | 1385 | ||
| 1385 | in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq); | 1386 | dup_sack = (found_dup_sack && (i == first_sack_index)); |
| 1387 | |||
| 1388 | /* Due to sorting DSACK may reside within this SACK block! */ | ||
| 1389 | if (next_dup) { | ||
| 1390 | u32 dup_start = ntohl(sp->start_seq); | ||
| 1391 | u32 dup_end = ntohl(sp->end_seq); | ||
| 1392 | |||
| 1393 | if (before(TCP_SKB_CB(skb)->seq, dup_end)) { | ||
| 1394 | in_sack = tcp_match_skb_to_sack(sk, skb, dup_start, dup_end); | ||
| 1395 | if (in_sack > 0) | ||
| 1396 | dup_sack = 1; | ||
| 1397 | } | ||
| 1398 | } | ||
| 1399 | |||
| 1400 | /* DSACK info lost if out-of-mem, try SACK still */ | ||
| 1401 | if (in_sack <= 0) | ||
| 1402 | in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq); | ||
| 1386 | if (in_sack < 0) | 1403 | if (in_sack < 0) |
| 1387 | break; | 1404 | break; |
| 1388 | 1405 | ||
| @@ -1615,7 +1632,7 @@ void tcp_enter_frto(struct sock *sk) | |||
| 1615 | !icsk->icsk_retransmits)) { | 1632 | !icsk->icsk_retransmits)) { |
| 1616 | tp->prior_ssthresh = tcp_current_ssthresh(sk); | 1633 | tp->prior_ssthresh = tcp_current_ssthresh(sk); |
| 1617 | /* Our state is too optimistic in ssthresh() call because cwnd | 1634 | /* Our state is too optimistic in ssthresh() call because cwnd |
| 1618 | * is not reduced until tcp_enter_frto_loss() when previous FRTO | 1635 | * is not reduced until tcp_enter_frto_loss() when previous F-RTO |
| 1619 | * recovery has not yet completed. Pattern would be this: RTO, | 1636 | * recovery has not yet completed. Pattern would be this: RTO, |
| 1620 | * Cumulative ACK, RTO (2xRTO for the same segment does not end | 1637 | * Cumulative ACK, RTO (2xRTO for the same segment does not end |
| 1621 | * up here twice). | 1638 | * up here twice). |
| @@ -1801,7 +1818,7 @@ void tcp_enter_loss(struct sock *sk, int how) | |||
| 1801 | tcp_set_ca_state(sk, TCP_CA_Loss); | 1818 | tcp_set_ca_state(sk, TCP_CA_Loss); |
| 1802 | tp->high_seq = tp->snd_nxt; | 1819 | tp->high_seq = tp->snd_nxt; |
| 1803 | TCP_ECN_queue_cwr(tp); | 1820 | TCP_ECN_queue_cwr(tp); |
| 1804 | /* Abort FRTO algorithm if one is in progress */ | 1821 | /* Abort F-RTO algorithm if one is in progress */ |
| 1805 | tp->frto_counter = 0; | 1822 | tp->frto_counter = 0; |
| 1806 | } | 1823 | } |
| 1807 | 1824 | ||
| @@ -1946,7 +1963,7 @@ static int tcp_time_to_recover(struct sock *sk) | |||
| 1946 | struct tcp_sock *tp = tcp_sk(sk); | 1963 | struct tcp_sock *tp = tcp_sk(sk); |
| 1947 | __u32 packets_out; | 1964 | __u32 packets_out; |
| 1948 | 1965 | ||
| 1949 | /* Do not perform any recovery during FRTO algorithm */ | 1966 | /* Do not perform any recovery during F-RTO algorithm */ |
| 1950 | if (tp->frto_counter) | 1967 | if (tp->frto_counter) |
| 1951 | return 0; | 1968 | return 0; |
| 1952 | 1969 | ||
| @@ -2061,7 +2078,7 @@ static void tcp_update_scoreboard(struct sock *sk) | |||
| 2061 | if (!tcp_skb_timedout(sk, skb)) | 2078 | if (!tcp_skb_timedout(sk, skb)) |
| 2062 | break; | 2079 | break; |
| 2063 | 2080 | ||
| 2064 | if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) { | 2081 | if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) { |
| 2065 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; | 2082 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; |
| 2066 | tp->lost_out += tcp_skb_pcount(skb); | 2083 | tp->lost_out += tcp_skb_pcount(skb); |
| 2067 | tcp_verify_retransmit_hint(tp, skb); | 2084 | tcp_verify_retransmit_hint(tp, skb); |
| @@ -2962,7 +2979,7 @@ static int tcp_process_frto(struct sock *sk, int flag) | |||
| 2962 | } | 2979 | } |
| 2963 | 2980 | ||
| 2964 | if (tp->frto_counter == 1) { | 2981 | if (tp->frto_counter == 1) { |
| 2965 | /* Sending of the next skb must be allowed or no FRTO */ | 2982 | /* Sending of the next skb must be allowed or no F-RTO */ |
| 2966 | if (!tcp_send_head(sk) || | 2983 | if (!tcp_send_head(sk) || |
| 2967 | after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, | 2984 | after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, |
| 2968 | tp->snd_una + tp->snd_wnd)) { | 2985 | tp->snd_una + tp->snd_wnd)) { |
| @@ -3909,7 +3926,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, | |||
| 3909 | 3926 | ||
| 3910 | while (before(start, end)) { | 3927 | while (before(start, end)) { |
| 3911 | struct sk_buff *nskb; | 3928 | struct sk_buff *nskb; |
| 3912 | int header = skb_headroom(skb); | 3929 | unsigned int header = skb_headroom(skb); |
| 3913 | int copy = SKB_MAX_ORDER(header, 0); | 3930 | int copy = SKB_MAX_ORDER(header, 0); |
| 3914 | 3931 | ||
| 3915 | /* Too big header? This can happen with IPv6. */ | 3932 | /* Too big header? This can happen with IPv6. */ |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 38cf73a56731..e566f3c67677 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -858,16 +858,16 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, | |||
| 858 | u8 *newkey, u8 newkeylen) | 858 | u8 *newkey, u8 newkeylen) |
| 859 | { | 859 | { |
| 860 | /* Add Key to the list */ | 860 | /* Add Key to the list */ |
| 861 | struct tcp4_md5sig_key *key; | 861 | struct tcp_md5sig_key *key; |
| 862 | struct tcp_sock *tp = tcp_sk(sk); | 862 | struct tcp_sock *tp = tcp_sk(sk); |
| 863 | struct tcp4_md5sig_key *keys; | 863 | struct tcp4_md5sig_key *keys; |
| 864 | 864 | ||
| 865 | key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr); | 865 | key = tcp_v4_md5_do_lookup(sk, addr); |
| 866 | if (key) { | 866 | if (key) { |
| 867 | /* Pre-existing entry - just update that one. */ | 867 | /* Pre-existing entry - just update that one. */ |
| 868 | kfree(key->base.key); | 868 | kfree(key->key); |
| 869 | key->base.key = newkey; | 869 | key->key = newkey; |
| 870 | key->base.keylen = newkeylen; | 870 | key->keylen = newkeylen; |
| 871 | } else { | 871 | } else { |
| 872 | struct tcp_md5sig_info *md5sig; | 872 | struct tcp_md5sig_info *md5sig; |
| 873 | 873 | ||
| @@ -1055,6 +1055,9 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | |||
| 1055 | bp->pad = 0; | 1055 | bp->pad = 0; |
| 1056 | bp->protocol = protocol; | 1056 | bp->protocol = protocol; |
| 1057 | bp->len = htons(tcplen); | 1057 | bp->len = htons(tcplen); |
| 1058 | |||
| 1059 | sg_init_table(sg, 4); | ||
| 1060 | |||
| 1058 | sg_set_buf(&sg[block++], bp, sizeof(*bp)); | 1061 | sg_set_buf(&sg[block++], bp, sizeof(*bp)); |
| 1059 | nbytes += sizeof(*bp); | 1062 | nbytes += sizeof(*bp); |
| 1060 | 1063 | ||
| @@ -1080,6 +1083,8 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | |||
| 1080 | sg_set_buf(&sg[block++], key->key, key->keylen); | 1083 | sg_set_buf(&sg[block++], key->key, key->keylen); |
| 1081 | nbytes += key->keylen; | 1084 | nbytes += key->keylen; |
| 1082 | 1085 | ||
| 1086 | sg_mark_end(&sg[block - 1]); | ||
| 1087 | |||
| 1083 | /* Now store the Hash into the packet */ | 1088 | /* Now store the Hash into the packet */ |
| 1084 | err = crypto_hash_init(desc); | 1089 | err = crypto_hash_init(desc); |
| 1085 | if (err) | 1090 | if (err) |
| @@ -2044,8 +2049,9 @@ static void *established_get_first(struct seq_file *seq) | |||
| 2044 | struct sock *sk; | 2049 | struct sock *sk; |
| 2045 | struct hlist_node *node; | 2050 | struct hlist_node *node; |
| 2046 | struct inet_timewait_sock *tw; | 2051 | struct inet_timewait_sock *tw; |
| 2052 | rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket); | ||
| 2047 | 2053 | ||
| 2048 | read_lock_bh(&tcp_hashinfo.ehash[st->bucket].lock); | 2054 | read_lock_bh(lock); |
| 2049 | sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { | 2055 | sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { |
| 2050 | if (sk->sk_family != st->family) { | 2056 | if (sk->sk_family != st->family) { |
| 2051 | continue; | 2057 | continue; |
| @@ -2062,7 +2068,7 @@ static void *established_get_first(struct seq_file *seq) | |||
| 2062 | rc = tw; | 2068 | rc = tw; |
| 2063 | goto out; | 2069 | goto out; |
| 2064 | } | 2070 | } |
| 2065 | read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock); | 2071 | read_unlock_bh(lock); |
| 2066 | st->state = TCP_SEQ_STATE_ESTABLISHED; | 2072 | st->state = TCP_SEQ_STATE_ESTABLISHED; |
| 2067 | } | 2073 | } |
| 2068 | out: | 2074 | out: |
| @@ -2089,11 +2095,11 @@ get_tw: | |||
| 2089 | cur = tw; | 2095 | cur = tw; |
| 2090 | goto out; | 2096 | goto out; |
| 2091 | } | 2097 | } |
| 2092 | read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock); | 2098 | read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); |
| 2093 | st->state = TCP_SEQ_STATE_ESTABLISHED; | 2099 | st->state = TCP_SEQ_STATE_ESTABLISHED; |
| 2094 | 2100 | ||
| 2095 | if (++st->bucket < tcp_hashinfo.ehash_size) { | 2101 | if (++st->bucket < tcp_hashinfo.ehash_size) { |
| 2096 | read_lock_bh(&tcp_hashinfo.ehash[st->bucket].lock); | 2102 | read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); |
| 2097 | sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain); | 2103 | sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain); |
| 2098 | } else { | 2104 | } else { |
| 2099 | cur = NULL; | 2105 | cur = NULL; |
| @@ -2201,7 +2207,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v) | |||
| 2201 | case TCP_SEQ_STATE_TIME_WAIT: | 2207 | case TCP_SEQ_STATE_TIME_WAIT: |
| 2202 | case TCP_SEQ_STATE_ESTABLISHED: | 2208 | case TCP_SEQ_STATE_ESTABLISHED: |
| 2203 | if (v) | 2209 | if (v) |
| 2204 | read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock); | 2210 | read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); |
| 2205 | break; | 2211 | break; |
| 2206 | } | 2212 | } |
| 2207 | } | 2213 | } |
| @@ -2412,6 +2418,8 @@ void tcp4_proc_exit(void) | |||
| 2412 | } | 2418 | } |
| 2413 | #endif /* CONFIG_PROC_FS */ | 2419 | #endif /* CONFIG_PROC_FS */ |
| 2414 | 2420 | ||
| 2421 | DEFINE_PROTO_INUSE(tcp) | ||
| 2422 | |||
| 2415 | struct proto tcp_prot = { | 2423 | struct proto tcp_prot = { |
| 2416 | .name = "TCP", | 2424 | .name = "TCP", |
| 2417 | .owner = THIS_MODULE, | 2425 | .owner = THIS_MODULE, |
| @@ -2446,6 +2454,7 @@ struct proto tcp_prot = { | |||
| 2446 | .compat_setsockopt = compat_tcp_setsockopt, | 2454 | .compat_setsockopt = compat_tcp_setsockopt, |
| 2447 | .compat_getsockopt = compat_tcp_getsockopt, | 2455 | .compat_getsockopt = compat_tcp_getsockopt, |
| 2448 | #endif | 2456 | #endif |
| 2457 | REF_PROTO_INUSE(tcp) | ||
| 2449 | }; | 2458 | }; |
| 2450 | 2459 | ||
| 2451 | void __init tcp_v4_init(struct net_proto_family *ops) | 2460 | void __init tcp_v4_init(struct net_proto_family *ops) |
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c index b49dedcda52d..007304e99842 100644 --- a/net/ipv4/tcp_vegas.c +++ b/net/ipv4/tcp_vegas.c | |||
| @@ -266,26 +266,25 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, | |||
| 266 | */ | 266 | */ |
| 267 | diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd; | 267 | diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd; |
| 268 | 268 | ||
| 269 | if (tp->snd_cwnd <= tp->snd_ssthresh) { | 269 | if (diff > gamma && tp->snd_ssthresh > 2 ) { |
| 270 | /* Slow start. */ | 270 | /* Going too fast. Time to slow down |
| 271 | if (diff > gamma) { | 271 | * and switch to congestion avoidance. |
| 272 | /* Going too fast. Time to slow down | 272 | */ |
| 273 | * and switch to congestion avoidance. | 273 | tp->snd_ssthresh = 2; |
| 274 | */ | 274 | |
| 275 | tp->snd_ssthresh = 2; | 275 | /* Set cwnd to match the actual rate |
| 276 | 276 | * exactly: | |
| 277 | /* Set cwnd to match the actual rate | 277 | * cwnd = (actual rate) * baseRTT |
| 278 | * exactly: | 278 | * Then we add 1 because the integer |
| 279 | * cwnd = (actual rate) * baseRTT | 279 | * truncation robs us of full link |
| 280 | * Then we add 1 because the integer | 280 | * utilization. |
| 281 | * truncation robs us of full link | 281 | */ |
| 282 | * utilization. | 282 | tp->snd_cwnd = min(tp->snd_cwnd, |
| 283 | */ | 283 | (target_cwnd >> |
| 284 | tp->snd_cwnd = min(tp->snd_cwnd, | 284 | V_PARAM_SHIFT)+1); |
| 285 | (target_cwnd >> | ||
| 286 | V_PARAM_SHIFT)+1); | ||
| 287 | 285 | ||
| 288 | } | 286 | } else if (tp->snd_cwnd <= tp->snd_ssthresh) { |
| 287 | /* Slow start. */ | ||
| 289 | tcp_slow_start(tp); | 288 | tcp_slow_start(tp); |
| 290 | } else { | 289 | } else { |
| 291 | /* Congestion avoidance. */ | 290 | /* Congestion avoidance. */ |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 35d2b0e9e10b..03c400ca14c5 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
| @@ -1152,7 +1152,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], | |||
| 1152 | return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable); | 1152 | return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable); |
| 1153 | 1153 | ||
| 1154 | sk = __udp4_lib_lookup(saddr, uh->source, daddr, uh->dest, | 1154 | sk = __udp4_lib_lookup(saddr, uh->source, daddr, uh->dest, |
| 1155 | skb->dev->ifindex, udptable ); | 1155 | inet_iif(skb), udptable); |
| 1156 | 1156 | ||
| 1157 | if (sk != NULL) { | 1157 | if (sk != NULL) { |
| 1158 | int ret = udp_queue_rcv_skb(sk, skb); | 1158 | int ret = udp_queue_rcv_skb(sk, skb); |
| @@ -1430,6 +1430,8 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
| 1430 | 1430 | ||
| 1431 | } | 1431 | } |
| 1432 | 1432 | ||
| 1433 | DEFINE_PROTO_INUSE(udp) | ||
| 1434 | |||
| 1433 | struct proto udp_prot = { | 1435 | struct proto udp_prot = { |
| 1434 | .name = "UDP", | 1436 | .name = "UDP", |
| 1435 | .owner = THIS_MODULE, | 1437 | .owner = THIS_MODULE, |
| @@ -1452,6 +1454,7 @@ struct proto udp_prot = { | |||
| 1452 | .compat_setsockopt = compat_udp_setsockopt, | 1454 | .compat_setsockopt = compat_udp_setsockopt, |
| 1453 | .compat_getsockopt = compat_udp_getsockopt, | 1455 | .compat_getsockopt = compat_udp_getsockopt, |
| 1454 | #endif | 1456 | #endif |
| 1457 | REF_PROTO_INUSE(udp) | ||
| 1455 | }; | 1458 | }; |
| 1456 | 1459 | ||
| 1457 | /* ------------------------------------------------------------------------ */ | 1460 | /* ------------------------------------------------------------------------ */ |
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index 94977205abb4..f5baeb3e8b85 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c | |||
| @@ -44,6 +44,8 @@ static struct net_protocol udplite_protocol = { | |||
| 44 | .no_policy = 1, | 44 | .no_policy = 1, |
| 45 | }; | 45 | }; |
| 46 | 46 | ||
| 47 | DEFINE_PROTO_INUSE(udplite) | ||
| 48 | |||
| 47 | struct proto udplite_prot = { | 49 | struct proto udplite_prot = { |
| 48 | .name = "UDP-Lite", | 50 | .name = "UDP-Lite", |
| 49 | .owner = THIS_MODULE, | 51 | .owner = THIS_MODULE, |
| @@ -67,6 +69,7 @@ struct proto udplite_prot = { | |||
| 67 | .compat_setsockopt = compat_udp_setsockopt, | 69 | .compat_setsockopt = compat_udp_setsockopt, |
| 68 | .compat_getsockopt = compat_udp_getsockopt, | 70 | .compat_getsockopt = compat_udp_getsockopt, |
| 69 | #endif | 71 | #endif |
| 72 | REF_PROTO_INUSE(udplite) | ||
| 70 | }; | 73 | }; |
| 71 | 74 | ||
| 72 | static struct inet_protosw udplite4_protosw = { | 75 | static struct inet_protosw udplite4_protosw = { |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 348bd8d06112..1bd8d818f8e9 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
| @@ -4288,8 +4288,4 @@ void __exit addrconf_cleanup(void) | |||
| 4288 | del_timer(&addr_chk_timer); | 4288 | del_timer(&addr_chk_timer); |
| 4289 | 4289 | ||
| 4290 | rtnl_unlock(); | 4290 | rtnl_unlock(); |
| 4291 | |||
| 4292 | #ifdef CONFIG_PROC_FS | ||
| 4293 | proc_net_remove(&init_net, "if_inet6"); | ||
| 4294 | #endif | ||
| 4295 | } | 4291 | } |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 1b1caf3aa1c1..ecbd38894fdd 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
| @@ -162,7 +162,7 @@ lookup_protocol: | |||
| 162 | BUG_TRAP(answer_prot->slab != NULL); | 162 | BUG_TRAP(answer_prot->slab != NULL); |
| 163 | 163 | ||
| 164 | err = -ENOBUFS; | 164 | err = -ENOBUFS; |
| 165 | sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot, 1); | 165 | sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot); |
| 166 | if (sk == NULL) | 166 | if (sk == NULL) |
| 167 | goto out; | 167 | goto out; |
| 168 | 168 | ||
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 67cd06613a25..4eaf55072b1b 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c | |||
| @@ -35,7 +35,6 @@ | |||
| 35 | #include <net/ipv6.h> | 35 | #include <net/ipv6.h> |
| 36 | #include <net/protocol.h> | 36 | #include <net/protocol.h> |
| 37 | #include <net/xfrm.h> | 37 | #include <net/xfrm.h> |
| 38 | #include <asm/scatterlist.h> | ||
| 39 | 38 | ||
| 40 | static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr) | 39 | static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr) |
| 41 | { | 40 | { |
| @@ -483,6 +482,7 @@ static int ah6_init_state(struct xfrm_state *x) | |||
| 483 | break; | 482 | break; |
| 484 | case XFRM_MODE_TUNNEL: | 483 | case XFRM_MODE_TUNNEL: |
| 485 | x->props.header_len += sizeof(struct ipv6hdr); | 484 | x->props.header_len += sizeof(struct ipv6hdr); |
| 485 | break; | ||
| 486 | default: | 486 | default: |
| 487 | goto error; | 487 | goto error; |
| 488 | } | 488 | } |
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index b0715432e454..7db66f10e00d 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c | |||
| @@ -29,7 +29,7 @@ | |||
| 29 | #include <net/ip.h> | 29 | #include <net/ip.h> |
| 30 | #include <net/xfrm.h> | 30 | #include <net/xfrm.h> |
| 31 | #include <net/esp.h> | 31 | #include <net/esp.h> |
| 32 | #include <asm/scatterlist.h> | 32 | #include <linux/scatterlist.h> |
| 33 | #include <linux/crypto.h> | 33 | #include <linux/crypto.h> |
| 34 | #include <linux/kernel.h> | 34 | #include <linux/kernel.h> |
| 35 | #include <linux/pfkeyv2.h> | 35 | #include <linux/pfkeyv2.h> |
| @@ -109,7 +109,11 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) | |||
| 109 | if (!sg) | 109 | if (!sg) |
| 110 | goto unlock; | 110 | goto unlock; |
| 111 | } | 111 | } |
| 112 | skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen); | 112 | sg_init_table(sg, nfrags); |
| 113 | skb_to_sgvec(skb, sg, | ||
| 114 | esph->enc_data + | ||
| 115 | esp->conf.ivlen - | ||
| 116 | skb->data, clen); | ||
| 113 | err = crypto_blkcipher_encrypt(&desc, sg, sg, clen); | 117 | err = crypto_blkcipher_encrypt(&desc, sg, sg, clen); |
| 114 | if (unlikely(sg != &esp->sgbuf[0])) | 118 | if (unlikely(sg != &esp->sgbuf[0])) |
| 115 | kfree(sg); | 119 | kfree(sg); |
| @@ -205,7 +209,10 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) | |||
| 205 | goto out; | 209 | goto out; |
| 206 | } | 210 | } |
| 207 | } | 211 | } |
| 208 | skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen); | 212 | sg_init_table(sg, nfrags); |
| 213 | skb_to_sgvec(skb, sg, | ||
| 214 | sizeof(*esph) + esp->conf.ivlen, | ||
| 215 | elen); | ||
| 209 | ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen); | 216 | ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen); |
| 210 | if (unlikely(sg != &esp->sgbuf[0])) | 217 | if (unlikely(sg != &esp->sgbuf[0])) |
| 211 | kfree(sg); | 218 | kfree(sg); |
| @@ -360,6 +367,7 @@ static int esp6_init_state(struct xfrm_state *x) | |||
| 360 | break; | 367 | break; |
| 361 | case XFRM_MODE_TUNNEL: | 368 | case XFRM_MODE_TUNNEL: |
| 362 | x->props.header_len += sizeof(struct ipv6hdr); | 369 | x->props.header_len += sizeof(struct ipv6hdr); |
| 370 | break; | ||
| 363 | default: | 371 | default: |
| 364 | goto error; | 372 | goto error; |
| 365 | } | 373 | } |
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index d6f1026f1943..adc73adadfae 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c | |||
| @@ -37,9 +37,8 @@ void __inet6_hash(struct inet_hashinfo *hashinfo, | |||
| 37 | } else { | 37 | } else { |
| 38 | unsigned int hash; | 38 | unsigned int hash; |
| 39 | sk->sk_hash = hash = inet6_sk_ehashfn(sk); | 39 | sk->sk_hash = hash = inet6_sk_ehashfn(sk); |
| 40 | hash &= (hashinfo->ehash_size - 1); | 40 | list = &inet_ehash_bucket(hashinfo, hash)->chain; |
| 41 | list = &hashinfo->ehash[hash].chain; | 41 | lock = inet_ehash_lockp(hashinfo, hash); |
| 42 | lock = &hashinfo->ehash[hash].lock; | ||
| 43 | write_lock(lock); | 42 | write_lock(lock); |
| 44 | } | 43 | } |
| 45 | 44 | ||
| @@ -70,9 +69,10 @@ struct sock *__inet6_lookup_established(struct inet_hashinfo *hashinfo, | |||
| 70 | */ | 69 | */ |
| 71 | unsigned int hash = inet6_ehashfn(daddr, hnum, saddr, sport); | 70 | unsigned int hash = inet6_ehashfn(daddr, hnum, saddr, sport); |
| 72 | struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash); | 71 | struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash); |
| 72 | rwlock_t *lock = inet_ehash_lockp(hashinfo, hash); | ||
| 73 | 73 | ||
| 74 | prefetch(head->chain.first); | 74 | prefetch(head->chain.first); |
| 75 | read_lock(&head->lock); | 75 | read_lock(lock); |
| 76 | sk_for_each(sk, node, &head->chain) { | 76 | sk_for_each(sk, node, &head->chain) { |
| 77 | /* For IPV6 do the cheaper port and family tests first. */ | 77 | /* For IPV6 do the cheaper port and family tests first. */ |
| 78 | if (INET6_MATCH(sk, hash, saddr, daddr, ports, dif)) | 78 | if (INET6_MATCH(sk, hash, saddr, daddr, ports, dif)) |
| @@ -92,12 +92,12 @@ struct sock *__inet6_lookup_established(struct inet_hashinfo *hashinfo, | |||
| 92 | goto hit; | 92 | goto hit; |
| 93 | } | 93 | } |
| 94 | } | 94 | } |
| 95 | read_unlock(&head->lock); | 95 | read_unlock(lock); |
| 96 | return NULL; | 96 | return NULL; |
| 97 | 97 | ||
| 98 | hit: | 98 | hit: |
| 99 | sock_hold(sk); | 99 | sock_hold(sk); |
| 100 | read_unlock(&head->lock); | 100 | read_unlock(lock); |
| 101 | return sk; | 101 | return sk; |
| 102 | } | 102 | } |
| 103 | EXPORT_SYMBOL(__inet6_lookup_established); | 103 | EXPORT_SYMBOL(__inet6_lookup_established); |
| @@ -175,12 +175,13 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, | |||
| 175 | const unsigned int hash = inet6_ehashfn(daddr, lport, saddr, | 175 | const unsigned int hash = inet6_ehashfn(daddr, lport, saddr, |
| 176 | inet->dport); | 176 | inet->dport); |
| 177 | struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); | 177 | struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); |
| 178 | rwlock_t *lock = inet_ehash_lockp(hinfo, hash); | ||
| 178 | struct sock *sk2; | 179 | struct sock *sk2; |
| 179 | const struct hlist_node *node; | 180 | const struct hlist_node *node; |
| 180 | struct inet_timewait_sock *tw; | 181 | struct inet_timewait_sock *tw; |
| 181 | 182 | ||
| 182 | prefetch(head->chain.first); | 183 | prefetch(head->chain.first); |
| 183 | write_lock(&head->lock); | 184 | write_lock(lock); |
| 184 | 185 | ||
| 185 | /* Check TIME-WAIT sockets first. */ | 186 | /* Check TIME-WAIT sockets first. */ |
| 186 | sk_for_each(sk2, node, &head->twchain) { | 187 | sk_for_each(sk2, node, &head->twchain) { |
| @@ -216,7 +217,7 @@ unique: | |||
| 216 | __sk_add_node(sk, &head->chain); | 217 | __sk_add_node(sk, &head->chain); |
| 217 | sk->sk_hash = hash; | 218 | sk->sk_hash = hash; |
| 218 | sock_prot_inc_use(sk->sk_prot); | 219 | sock_prot_inc_use(sk->sk_prot); |
| 219 | write_unlock(&head->lock); | 220 | write_unlock(lock); |
| 220 | 221 | ||
| 221 | if (twp != NULL) { | 222 | if (twp != NULL) { |
| 222 | *twp = tw; | 223 | *twp = tw; |
| @@ -231,7 +232,7 @@ unique: | |||
| 231 | return 0; | 232 | return 0; |
| 232 | 233 | ||
| 233 | not_unique: | 234 | not_unique: |
| 234 | write_unlock(&head->lock); | 235 | write_unlock(lock); |
| 235 | return -EADDRNOTAVAIL; | 236 | return -EADDRNOTAVAIL; |
| 236 | } | 237 | } |
| 237 | 238 | ||
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 13565dfb1b45..86e1835ce4e4 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
| @@ -171,7 +171,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, | |||
| 171 | u32 mtu; | 171 | u32 mtu; |
| 172 | 172 | ||
| 173 | if (opt) { | 173 | if (opt) { |
| 174 | int head_room; | 174 | unsigned int head_room; |
| 175 | 175 | ||
| 176 | /* First: exthdrs may take lots of space (~8K for now) | 176 | /* First: exthdrs may take lots of space (~8K for now) |
| 177 | MAX_HEADER is not enough. | 177 | MAX_HEADER is not enough. |
| @@ -1339,6 +1339,19 @@ error: | |||
| 1339 | return err; | 1339 | return err; |
| 1340 | } | 1340 | } |
| 1341 | 1341 | ||
| 1342 | static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np) | ||
| 1343 | { | ||
| 1344 | inet->cork.flags &= ~IPCORK_OPT; | ||
| 1345 | kfree(np->cork.opt); | ||
| 1346 | np->cork.opt = NULL; | ||
| 1347 | if (np->cork.rt) { | ||
| 1348 | dst_release(&np->cork.rt->u.dst); | ||
| 1349 | np->cork.rt = NULL; | ||
| 1350 | inet->cork.flags &= ~IPCORK_ALLFRAG; | ||
| 1351 | } | ||
| 1352 | memset(&inet->cork.fl, 0, sizeof(inet->cork.fl)); | ||
| 1353 | } | ||
| 1354 | |||
| 1342 | int ip6_push_pending_frames(struct sock *sk) | 1355 | int ip6_push_pending_frames(struct sock *sk) |
| 1343 | { | 1356 | { |
| 1344 | struct sk_buff *skb, *tmp_skb; | 1357 | struct sk_buff *skb, *tmp_skb; |
| @@ -1415,15 +1428,7 @@ int ip6_push_pending_frames(struct sock *sk) | |||
| 1415 | } | 1428 | } |
| 1416 | 1429 | ||
| 1417 | out: | 1430 | out: |
| 1418 | inet->cork.flags &= ~IPCORK_OPT; | 1431 | ip6_cork_release(inet, np); |
| 1419 | kfree(np->cork.opt); | ||
| 1420 | np->cork.opt = NULL; | ||
| 1421 | if (np->cork.rt) { | ||
| 1422 | dst_release(&np->cork.rt->u.dst); | ||
| 1423 | np->cork.rt = NULL; | ||
| 1424 | inet->cork.flags &= ~IPCORK_ALLFRAG; | ||
| 1425 | } | ||
| 1426 | memset(&inet->cork.fl, 0, sizeof(inet->cork.fl)); | ||
| 1427 | return err; | 1432 | return err; |
| 1428 | error: | 1433 | error: |
| 1429 | goto out; | 1434 | goto out; |
| @@ -1431,8 +1436,6 @@ error: | |||
| 1431 | 1436 | ||
| 1432 | void ip6_flush_pending_frames(struct sock *sk) | 1437 | void ip6_flush_pending_frames(struct sock *sk) |
| 1433 | { | 1438 | { |
| 1434 | struct inet_sock *inet = inet_sk(sk); | ||
| 1435 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
| 1436 | struct sk_buff *skb; | 1439 | struct sk_buff *skb; |
| 1437 | 1440 | ||
| 1438 | while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) { | 1441 | while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) { |
| @@ -1442,14 +1445,5 @@ void ip6_flush_pending_frames(struct sock *sk) | |||
| 1442 | kfree_skb(skb); | 1445 | kfree_skb(skb); |
| 1443 | } | 1446 | } |
| 1444 | 1447 | ||
| 1445 | inet->cork.flags &= ~IPCORK_OPT; | 1448 | ip6_cork_release(inet_sk(sk), inet6_sk(sk)); |
| 1446 | |||
| 1447 | kfree(np->cork.opt); | ||
| 1448 | np->cork.opt = NULL; | ||
| 1449 | if (np->cork.rt) { | ||
| 1450 | dst_release(&np->cork.rt->u.dst); | ||
| 1451 | np->cork.rt = NULL; | ||
| 1452 | inet->cork.flags &= ~IPCORK_ALLFRAG; | ||
| 1453 | } | ||
| 1454 | memset(&inet->cork.fl, 0, sizeof(inet->cork.fl)); | ||
| 1455 | } | 1449 | } |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 2320cc27ff9e..5383b33db8ca 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
| @@ -838,7 +838,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, | |||
| 838 | struct dst_entry *dst; | 838 | struct dst_entry *dst; |
| 839 | struct net_device *tdev; | 839 | struct net_device *tdev; |
| 840 | int mtu; | 840 | int mtu; |
| 841 | int max_headroom = sizeof(struct ipv6hdr); | 841 | unsigned int max_headroom = sizeof(struct ipv6hdr); |
| 842 | u8 proto; | 842 | u8 proto; |
| 843 | int err = -1; | 843 | int err = -1; |
| 844 | int pkt_len; | 844 | int pkt_len; |
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index 80ef2a1d39fd..0cd4056f9127 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c | |||
| @@ -34,9 +34,9 @@ | |||
| 34 | #include <net/ip.h> | 34 | #include <net/ip.h> |
| 35 | #include <net/xfrm.h> | 35 | #include <net/xfrm.h> |
| 36 | #include <net/ipcomp.h> | 36 | #include <net/ipcomp.h> |
| 37 | #include <asm/scatterlist.h> | ||
| 38 | #include <asm/semaphore.h> | 37 | #include <asm/semaphore.h> |
| 39 | #include <linux/crypto.h> | 38 | #include <linux/crypto.h> |
| 39 | #include <linux/err.h> | ||
| 40 | #include <linux/pfkeyv2.h> | 40 | #include <linux/pfkeyv2.h> |
| 41 | #include <linux/random.h> | 41 | #include <linux/random.h> |
| 42 | #include <linux/percpu.h> | 42 | #include <linux/percpu.h> |
| @@ -359,7 +359,7 @@ static struct crypto_comp **ipcomp6_alloc_tfms(const char *alg_name) | |||
| 359 | for_each_possible_cpu(cpu) { | 359 | for_each_possible_cpu(cpu) { |
| 360 | struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0, | 360 | struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0, |
| 361 | CRYPTO_ALG_ASYNC); | 361 | CRYPTO_ALG_ASYNC); |
| 362 | if (!tfm) | 362 | if (IS_ERR(tfm)) |
| 363 | goto error; | 363 | goto error; |
| 364 | *per_cpu_ptr(tfms, cpu) = tfm; | 364 | *per_cpu_ptr(tfms, cpu) = tfm; |
| 365 | } | 365 | } |
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 20cfc90d5597..36f7dbfb6dbb 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
| @@ -1670,7 +1670,7 @@ int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, struct file * f | |||
| 1670 | filp, buffer, lenp, ppos); | 1670 | filp, buffer, lenp, ppos); |
| 1671 | 1671 | ||
| 1672 | else if ((strcmp(ctl->procname, "retrans_time_ms") == 0) || | 1672 | else if ((strcmp(ctl->procname, "retrans_time_ms") == 0) || |
| 1673 | (strcmp(ctl->procname, "base_reacable_time_ms") == 0)) | 1673 | (strcmp(ctl->procname, "base_reachable_time_ms") == 0)) |
| 1674 | ret = proc_dointvec_ms_jiffies(ctl, write, | 1674 | ret = proc_dointvec_ms_jiffies(ctl, write, |
| 1675 | filp, buffer, lenp, ppos); | 1675 | filp, buffer, lenp, ppos); |
| 1676 | else | 1676 | else |
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile index 4513eab77397..e789ec44d23b 100644 --- a/net/ipv6/netfilter/Makefile +++ b/net/ipv6/netfilter/Makefile | |||
| @@ -4,25 +4,29 @@ | |||
| 4 | 4 | ||
| 5 | # Link order matters here. | 5 | # Link order matters here. |
| 6 | obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o | 6 | obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o |
| 7 | obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o | ||
| 8 | obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o | ||
| 9 | obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o | ||
| 10 | obj-$(CONFIG_IP6_NF_MATCH_FRAG) += ip6t_frag.o | ||
| 11 | obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o | ||
| 12 | obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o | ||
| 13 | obj-$(CONFIG_IP6_NF_MATCH_OWNER) += ip6t_owner.o | ||
| 14 | obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o | 7 | obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o |
| 15 | obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o | 8 | obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o |
| 16 | obj-$(CONFIG_IP6_NF_TARGET_HL) += ip6t_HL.o | ||
| 17 | obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o | 9 | obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o |
| 18 | obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o | ||
| 19 | obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o | 10 | obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o |
| 20 | obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o | ||
| 21 | obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o | ||
| 22 | obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o | ||
| 23 | 11 | ||
| 24 | # objects for l3 independent conntrack | 12 | # objects for l3 independent conntrack |
| 25 | nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o nf_conntrack_reasm.o | 13 | nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o nf_conntrack_reasm.o |
| 26 | 14 | ||
| 27 | # l3 independent conntrack | 15 | # l3 independent conntrack |
| 28 | obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o | 16 | obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o |
| 17 | |||
| 18 | # matches | ||
| 19 | obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o | ||
| 20 | obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o | ||
| 21 | obj-$(CONFIG_IP6_NF_MATCH_FRAG) += ip6t_frag.o | ||
| 22 | obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o | ||
| 23 | obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o | ||
| 24 | obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o | ||
| 25 | obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o | ||
| 26 | obj-$(CONFIG_IP6_NF_MATCH_OWNER) += ip6t_owner.o | ||
| 27 | obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o | ||
| 28 | |||
| 29 | # targets | ||
| 30 | obj-$(CONFIG_IP6_NF_TARGET_HL) += ip6t_HL.o | ||
| 31 | obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o | ||
| 32 | obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o | ||
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index 6413a30d9f68..e273605eef85 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
| 24 | #include <linux/sysctl.h> | 24 | #include <linux/sysctl.h> |
| 25 | #include <linux/proc_fs.h> | 25 | #include <linux/proc_fs.h> |
| 26 | #include <linux/seq_file.h> | ||
| 26 | #include <linux/mutex.h> | 27 | #include <linux/mutex.h> |
| 27 | #include <net/net_namespace.h> | 28 | #include <net/net_namespace.h> |
| 28 | #include <net/sock.h> | 29 | #include <net/sock.h> |
| @@ -596,15 +597,11 @@ static ctl_table ipq_root_table[] = { | |||
| 596 | { .ctl_name = 0 } | 597 | { .ctl_name = 0 } |
| 597 | }; | 598 | }; |
| 598 | 599 | ||
| 599 | #ifdef CONFIG_PROC_FS | 600 | static int ip6_queue_show(struct seq_file *m, void *v) |
| 600 | static int | ||
| 601 | ipq_get_info(char *buffer, char **start, off_t offset, int length) | ||
| 602 | { | 601 | { |
| 603 | int len; | ||
| 604 | |||
| 605 | read_lock_bh(&queue_lock); | 602 | read_lock_bh(&queue_lock); |
| 606 | 603 | ||
| 607 | len = sprintf(buffer, | 604 | seq_printf(m, |
| 608 | "Peer PID : %d\n" | 605 | "Peer PID : %d\n" |
| 609 | "Copy mode : %hu\n" | 606 | "Copy mode : %hu\n" |
| 610 | "Copy range : %u\n" | 607 | "Copy range : %u\n" |
| @@ -621,16 +618,21 @@ ipq_get_info(char *buffer, char **start, off_t offset, int length) | |||
| 621 | queue_user_dropped); | 618 | queue_user_dropped); |
| 622 | 619 | ||
| 623 | read_unlock_bh(&queue_lock); | 620 | read_unlock_bh(&queue_lock); |
| 621 | return 0; | ||
| 622 | } | ||
| 624 | 623 | ||
| 625 | *start = buffer + offset; | 624 | static int ip6_queue_open(struct inode *inode, struct file *file) |
| 626 | len -= offset; | 625 | { |
| 627 | if (len > length) | 626 | return single_open(file, ip6_queue_show, NULL); |
| 628 | len = length; | ||
| 629 | else if (len < 0) | ||
| 630 | len = 0; | ||
| 631 | return len; | ||
| 632 | } | 627 | } |
| 633 | #endif /* CONFIG_PROC_FS */ | 628 | |
| 629 | static const struct file_operations ip6_queue_proc_fops = { | ||
| 630 | .open = ip6_queue_open, | ||
| 631 | .read = seq_read, | ||
| 632 | .llseek = seq_lseek, | ||
| 633 | .release = single_release, | ||
| 634 | .owner = THIS_MODULE, | ||
| 635 | }; | ||
| 634 | 636 | ||
| 635 | static struct nf_queue_handler nfqh = { | 637 | static struct nf_queue_handler nfqh = { |
| 636 | .name = "ip6_queue", | 638 | .name = "ip6_queue", |
| @@ -650,10 +652,11 @@ static int __init ip6_queue_init(void) | |||
| 650 | goto cleanup_netlink_notifier; | 652 | goto cleanup_netlink_notifier; |
| 651 | } | 653 | } |
| 652 | 654 | ||
| 653 | proc = proc_net_create(&init_net, IPQ_PROC_FS_NAME, 0, ipq_get_info); | 655 | proc = create_proc_entry(IPQ_PROC_FS_NAME, 0, init_net.proc_net); |
| 654 | if (proc) | 656 | if (proc) { |
| 655 | proc->owner = THIS_MODULE; | 657 | proc->owner = THIS_MODULE; |
| 656 | else { | 658 | proc->proc_fops = &ip6_queue_proc_fops; |
| 659 | } else { | ||
| 657 | printk(KERN_ERR "ip6_queue: failed to create proc entry\n"); | 660 | printk(KERN_ERR "ip6_queue: failed to create proc entry\n"); |
| 658 | goto cleanup_ipqnl; | 661 | goto cleanup_ipqnl; |
| 659 | } | 662 | } |
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index be526ad92543..8631ed7fe8a9 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c | |||
| @@ -32,27 +32,16 @@ | |||
| 32 | 32 | ||
| 33 | static struct proc_dir_entry *proc_net_devsnmp6; | 33 | static struct proc_dir_entry *proc_net_devsnmp6; |
| 34 | 34 | ||
| 35 | static int fold_prot_inuse(struct proto *proto) | ||
| 36 | { | ||
| 37 | int res = 0; | ||
| 38 | int cpu; | ||
| 39 | |||
| 40 | for_each_possible_cpu(cpu) | ||
| 41 | res += proto->stats[cpu].inuse; | ||
| 42 | |||
| 43 | return res; | ||
| 44 | } | ||
| 45 | |||
| 46 | static int sockstat6_seq_show(struct seq_file *seq, void *v) | 35 | static int sockstat6_seq_show(struct seq_file *seq, void *v) |
| 47 | { | 36 | { |
| 48 | seq_printf(seq, "TCP6: inuse %d\n", | 37 | seq_printf(seq, "TCP6: inuse %d\n", |
| 49 | fold_prot_inuse(&tcpv6_prot)); | 38 | sock_prot_inuse(&tcpv6_prot)); |
| 50 | seq_printf(seq, "UDP6: inuse %d\n", | 39 | seq_printf(seq, "UDP6: inuse %d\n", |
| 51 | fold_prot_inuse(&udpv6_prot)); | 40 | sock_prot_inuse(&udpv6_prot)); |
| 52 | seq_printf(seq, "UDPLITE6: inuse %d\n", | 41 | seq_printf(seq, "UDPLITE6: inuse %d\n", |
| 53 | fold_prot_inuse(&udplitev6_prot)); | 42 | sock_prot_inuse(&udplitev6_prot)); |
| 54 | seq_printf(seq, "RAW6: inuse %d\n", | 43 | seq_printf(seq, "RAW6: inuse %d\n", |
| 55 | fold_prot_inuse(&rawv6_prot)); | 44 | sock_prot_inuse(&rawv6_prot)); |
| 56 | seq_printf(seq, "FRAG6: inuse %d memory %d\n", | 45 | seq_printf(seq, "FRAG6: inuse %d memory %d\n", |
| 57 | ip6_frag_nqueues(), ip6_frag_mem()); | 46 | ip6_frag_nqueues(), ip6_frag_mem()); |
| 58 | return 0; | 47 | return 0; |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index ca24ef19cd8f..807260d03586 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
| @@ -1144,6 +1144,8 @@ static int rawv6_init_sk(struct sock *sk) | |||
| 1144 | return(0); | 1144 | return(0); |
| 1145 | } | 1145 | } |
| 1146 | 1146 | ||
| 1147 | DEFINE_PROTO_INUSE(rawv6) | ||
| 1148 | |||
| 1147 | struct proto rawv6_prot = { | 1149 | struct proto rawv6_prot = { |
| 1148 | .name = "RAWv6", | 1150 | .name = "RAWv6", |
| 1149 | .owner = THIS_MODULE, | 1151 | .owner = THIS_MODULE, |
| @@ -1166,6 +1168,7 @@ struct proto rawv6_prot = { | |||
| 1166 | .compat_setsockopt = compat_rawv6_setsockopt, | 1168 | .compat_setsockopt = compat_rawv6_setsockopt, |
| 1167 | .compat_getsockopt = compat_rawv6_getsockopt, | 1169 | .compat_getsockopt = compat_rawv6_getsockopt, |
| 1168 | #endif | 1170 | #endif |
| 1171 | REF_PROTO_INUSE(rawv6) | ||
| 1169 | }; | 1172 | }; |
| 1170 | 1173 | ||
| 1171 | #ifdef CONFIG_PROC_FS | 1174 | #ifdef CONFIG_PROC_FS |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 95f8e4a62f68..973a97abc446 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -38,12 +38,8 @@ | |||
| 38 | #include <linux/in6.h> | 38 | #include <linux/in6.h> |
| 39 | #include <linux/init.h> | 39 | #include <linux/init.h> |
| 40 | #include <linux/if_arp.h> | 40 | #include <linux/if_arp.h> |
| 41 | |||
| 42 | #ifdef CONFIG_PROC_FS | ||
| 43 | #include <linux/proc_fs.h> | 41 | #include <linux/proc_fs.h> |
| 44 | #include <linux/seq_file.h> | 42 | #include <linux/seq_file.h> |
| 45 | #endif | ||
| 46 | |||
| 47 | #include <net/net_namespace.h> | 43 | #include <net/net_namespace.h> |
| 48 | #include <net/snmp.h> | 44 | #include <net/snmp.h> |
| 49 | #include <net/ipv6.h> | 45 | #include <net/ipv6.h> |
| @@ -2288,71 +2284,50 @@ struct rt6_proc_arg | |||
| 2288 | 2284 | ||
| 2289 | static int rt6_info_route(struct rt6_info *rt, void *p_arg) | 2285 | static int rt6_info_route(struct rt6_info *rt, void *p_arg) |
| 2290 | { | 2286 | { |
| 2291 | struct rt6_proc_arg *arg = (struct rt6_proc_arg *) p_arg; | 2287 | struct seq_file *m = p_arg; |
| 2292 | 2288 | ||
| 2293 | if (arg->skip < arg->offset / RT6_INFO_LEN) { | 2289 | seq_printf(m, NIP6_SEQFMT " %02x ", NIP6(rt->rt6i_dst.addr), |
| 2294 | arg->skip++; | 2290 | rt->rt6i_dst.plen); |
| 2295 | return 0; | ||
| 2296 | } | ||
| 2297 | |||
| 2298 | if (arg->len >= arg->length) | ||
| 2299 | return 0; | ||
| 2300 | |||
| 2301 | arg->len += sprintf(arg->buffer + arg->len, | ||
| 2302 | NIP6_SEQFMT " %02x ", | ||
| 2303 | NIP6(rt->rt6i_dst.addr), | ||
| 2304 | rt->rt6i_dst.plen); | ||
| 2305 | 2291 | ||
| 2306 | #ifdef CONFIG_IPV6_SUBTREES | 2292 | #ifdef CONFIG_IPV6_SUBTREES |
| 2307 | arg->len += sprintf(arg->buffer + arg->len, | 2293 | seq_printf(m, NIP6_SEQFMT " %02x ", NIP6(rt->rt6i_src.addr), |
| 2308 | NIP6_SEQFMT " %02x ", | 2294 | rt->rt6i_src.plen); |
| 2309 | NIP6(rt->rt6i_src.addr), | ||
| 2310 | rt->rt6i_src.plen); | ||
| 2311 | #else | 2295 | #else |
| 2312 | arg->len += sprintf(arg->buffer + arg->len, | 2296 | seq_puts(m, "00000000000000000000000000000000 00 "); |
| 2313 | "00000000000000000000000000000000 00 "); | ||
| 2314 | #endif | 2297 | #endif |
| 2315 | 2298 | ||
| 2316 | if (rt->rt6i_nexthop) { | 2299 | if (rt->rt6i_nexthop) { |
| 2317 | arg->len += sprintf(arg->buffer + arg->len, | 2300 | seq_printf(m, NIP6_SEQFMT, |
| 2318 | NIP6_SEQFMT, | 2301 | NIP6(*((struct in6_addr *)rt->rt6i_nexthop->primary_key))); |
| 2319 | NIP6(*((struct in6_addr *)rt->rt6i_nexthop->primary_key))); | ||
| 2320 | } else { | 2302 | } else { |
| 2321 | arg->len += sprintf(arg->buffer + arg->len, | 2303 | seq_puts(m, "00000000000000000000000000000000"); |
| 2322 | "00000000000000000000000000000000"); | ||
| 2323 | } | 2304 | } |
| 2324 | arg->len += sprintf(arg->buffer + arg->len, | 2305 | seq_printf(m, " %08x %08x %08x %08x %8s\n", |
| 2325 | " %08x %08x %08x %08x %8s\n", | 2306 | rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt), |
| 2326 | rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt), | 2307 | rt->u.dst.__use, rt->rt6i_flags, |
| 2327 | rt->u.dst.__use, rt->rt6i_flags, | 2308 | rt->rt6i_dev ? rt->rt6i_dev->name : ""); |
| 2328 | rt->rt6i_dev ? rt->rt6i_dev->name : ""); | ||
| 2329 | return 0; | 2309 | return 0; |
| 2330 | } | 2310 | } |
| 2331 | 2311 | ||
| 2332 | static int rt6_proc_info(char *buffer, char **start, off_t offset, int length) | 2312 | static int ipv6_route_show(struct seq_file *m, void *v) |
| 2333 | { | 2313 | { |
| 2334 | struct rt6_proc_arg arg = { | 2314 | fib6_clean_all(rt6_info_route, 0, m); |
| 2335 | .buffer = buffer, | 2315 | return 0; |
| 2336 | .offset = offset, | 2316 | } |
| 2337 | .length = length, | ||
| 2338 | }; | ||
| 2339 | |||
| 2340 | fib6_clean_all(rt6_info_route, 0, &arg); | ||
| 2341 | |||
| 2342 | *start = buffer; | ||
| 2343 | if (offset) | ||
| 2344 | *start += offset % RT6_INFO_LEN; | ||
| 2345 | |||
| 2346 | arg.len -= offset % RT6_INFO_LEN; | ||
| 2347 | |||
| 2348 | if (arg.len > length) | ||
| 2349 | arg.len = length; | ||
| 2350 | if (arg.len < 0) | ||
| 2351 | arg.len = 0; | ||
| 2352 | 2317 | ||
| 2353 | return arg.len; | 2318 | static int ipv6_route_open(struct inode *inode, struct file *file) |
| 2319 | { | ||
| 2320 | return single_open(file, ipv6_route_show, NULL); | ||
| 2354 | } | 2321 | } |
| 2355 | 2322 | ||
| 2323 | static const struct file_operations ipv6_route_proc_fops = { | ||
| 2324 | .owner = THIS_MODULE, | ||
| 2325 | .open = ipv6_route_open, | ||
| 2326 | .read = seq_read, | ||
| 2327 | .llseek = seq_lseek, | ||
| 2328 | .release = single_release, | ||
| 2329 | }; | ||
| 2330 | |||
| 2356 | static int rt6_stats_seq_show(struct seq_file *seq, void *v) | 2331 | static int rt6_stats_seq_show(struct seq_file *seq, void *v) |
| 2357 | { | 2332 | { |
| 2358 | seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n", | 2333 | seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n", |
| @@ -2489,22 +2464,14 @@ ctl_table ipv6_route_table[] = { | |||
| 2489 | 2464 | ||
| 2490 | void __init ip6_route_init(void) | 2465 | void __init ip6_route_init(void) |
| 2491 | { | 2466 | { |
| 2492 | #ifdef CONFIG_PROC_FS | ||
| 2493 | struct proc_dir_entry *p; | ||
| 2494 | #endif | ||
| 2495 | ip6_dst_ops.kmem_cachep = | 2467 | ip6_dst_ops.kmem_cachep = |
| 2496 | kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0, | 2468 | kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0, |
| 2497 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); | 2469 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); |
| 2498 | ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops.kmem_cachep; | 2470 | ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops.kmem_cachep; |
| 2499 | 2471 | ||
| 2500 | fib6_init(); | 2472 | fib6_init(); |
| 2501 | #ifdef CONFIG_PROC_FS | 2473 | proc_net_fops_create(&init_net, "ipv6_route", 0, &ipv6_route_proc_fops); |
| 2502 | p = proc_net_create(&init_net, "ipv6_route", 0, rt6_proc_info); | ||
| 2503 | if (p) | ||
| 2504 | p->owner = THIS_MODULE; | ||
| 2505 | |||
| 2506 | proc_net_fops_create(&init_net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops); | 2474 | proc_net_fops_create(&init_net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops); |
| 2507 | #endif | ||
| 2508 | #ifdef CONFIG_XFRM | 2475 | #ifdef CONFIG_XFRM |
| 2509 | xfrm6_init(); | 2476 | xfrm6_init(); |
| 2510 | #endif | 2477 | #endif |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 466657a9a8bd..71433d29d884 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
| @@ -430,7 +430,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 430 | struct rtable *rt; /* Route to the other host */ | 430 | struct rtable *rt; /* Route to the other host */ |
| 431 | struct net_device *tdev; /* Device to other host */ | 431 | struct net_device *tdev; /* Device to other host */ |
| 432 | struct iphdr *iph; /* Our new IP header */ | 432 | struct iphdr *iph; /* Our new IP header */ |
| 433 | int max_headroom; /* The extra header space needed */ | 433 | unsigned int max_headroom; /* The extra header space needed */ |
| 434 | __be32 dst = tiph->daddr; | 434 | __be32 dst = tiph->daddr; |
| 435 | int mtu; | 435 | int mtu; |
| 436 | struct in6_addr *addr6; | 436 | struct in6_addr *addr6; |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 737b755342bd..3aad861975a0 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
| @@ -59,6 +59,7 @@ | |||
| 59 | #include <net/snmp.h> | 59 | #include <net/snmp.h> |
| 60 | #include <net/dsfield.h> | 60 | #include <net/dsfield.h> |
| 61 | #include <net/timewait_sock.h> | 61 | #include <net/timewait_sock.h> |
| 62 | #include <net/netdma.h> | ||
| 62 | 63 | ||
| 63 | #include <asm/uaccess.h> | 64 | #include <asm/uaccess.h> |
| 64 | 65 | ||
| @@ -560,16 +561,16 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer, | |||
| 560 | char *newkey, u8 newkeylen) | 561 | char *newkey, u8 newkeylen) |
| 561 | { | 562 | { |
| 562 | /* Add key to the list */ | 563 | /* Add key to the list */ |
| 563 | struct tcp6_md5sig_key *key; | 564 | struct tcp_md5sig_key *key; |
| 564 | struct tcp_sock *tp = tcp_sk(sk); | 565 | struct tcp_sock *tp = tcp_sk(sk); |
| 565 | struct tcp6_md5sig_key *keys; | 566 | struct tcp6_md5sig_key *keys; |
| 566 | 567 | ||
| 567 | key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer); | 568 | key = tcp_v6_md5_do_lookup(sk, peer); |
| 568 | if (key) { | 569 | if (key) { |
| 569 | /* modify existing entry - just update that one */ | 570 | /* modify existing entry - just update that one */ |
| 570 | kfree(key->base.key); | 571 | kfree(key->key); |
| 571 | key->base.key = newkey; | 572 | key->key = newkey; |
| 572 | key->base.keylen = newkeylen; | 573 | key->keylen = newkeylen; |
| 573 | } else { | 574 | } else { |
| 574 | /* reallocate new list if current one is full. */ | 575 | /* reallocate new list if current one is full. */ |
| 575 | if (!tp->md5sig_info) { | 576 | if (!tp->md5sig_info) { |
| @@ -757,6 +758,8 @@ static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | |||
| 757 | bp->len = htonl(tcplen); | 758 | bp->len = htonl(tcplen); |
| 758 | bp->protocol = htonl(protocol); | 759 | bp->protocol = htonl(protocol); |
| 759 | 760 | ||
| 761 | sg_init_table(sg, 4); | ||
| 762 | |||
| 760 | sg_set_buf(&sg[block++], bp, sizeof(*bp)); | 763 | sg_set_buf(&sg[block++], bp, sizeof(*bp)); |
| 761 | nbytes += sizeof(*bp); | 764 | nbytes += sizeof(*bp); |
| 762 | 765 | ||
| @@ -778,6 +781,8 @@ static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | |||
| 778 | sg_set_buf(&sg[block++], key->key, key->keylen); | 781 | sg_set_buf(&sg[block++], key->key, key->keylen); |
| 779 | nbytes += key->keylen; | 782 | nbytes += key->keylen; |
| 780 | 783 | ||
| 784 | sg_mark_end(&sg[block - 1]); | ||
| 785 | |||
| 781 | /* Now store the hash into the packet */ | 786 | /* Now store the hash into the packet */ |
| 782 | err = crypto_hash_init(desc); | 787 | err = crypto_hash_init(desc); |
| 783 | if (err) { | 788 | if (err) { |
| @@ -1728,6 +1733,8 @@ process: | |||
| 1728 | if (!sock_owned_by_user(sk)) { | 1733 | if (!sock_owned_by_user(sk)) { |
| 1729 | #ifdef CONFIG_NET_DMA | 1734 | #ifdef CONFIG_NET_DMA |
| 1730 | struct tcp_sock *tp = tcp_sk(sk); | 1735 | struct tcp_sock *tp = tcp_sk(sk); |
| 1736 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) | ||
| 1737 | tp->ucopy.dma_chan = get_softnet_dma(); | ||
| 1731 | if (tp->ucopy.dma_chan) | 1738 | if (tp->ucopy.dma_chan) |
| 1732 | ret = tcp_v6_do_rcv(sk, skb); | 1739 | ret = tcp_v6_do_rcv(sk, skb); |
| 1733 | else | 1740 | else |
| @@ -2100,6 +2107,8 @@ void tcp6_proc_exit(void) | |||
| 2100 | } | 2107 | } |
| 2101 | #endif | 2108 | #endif |
| 2102 | 2109 | ||
| 2110 | DEFINE_PROTO_INUSE(tcpv6) | ||
| 2111 | |||
| 2103 | struct proto tcpv6_prot = { | 2112 | struct proto tcpv6_prot = { |
| 2104 | .name = "TCPv6", | 2113 | .name = "TCPv6", |
| 2105 | .owner = THIS_MODULE, | 2114 | .owner = THIS_MODULE, |
| @@ -2134,6 +2143,7 @@ struct proto tcpv6_prot = { | |||
| 2134 | .compat_setsockopt = compat_tcp_setsockopt, | 2143 | .compat_setsockopt = compat_tcp_setsockopt, |
| 2135 | .compat_getsockopt = compat_tcp_getsockopt, | 2144 | .compat_getsockopt = compat_tcp_getsockopt, |
| 2136 | #endif | 2145 | #endif |
| 2146 | REF_PROTO_INUSE(tcpv6) | ||
| 2137 | }; | 2147 | }; |
| 2138 | 2148 | ||
| 2139 | static struct inet6_protocol tcpv6_protocol = { | 2149 | static struct inet6_protocol tcpv6_protocol = { |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index caebad6ee510..ee1cc3f8599f 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
| @@ -205,12 +205,11 @@ out: | |||
| 205 | return err; | 205 | return err; |
| 206 | 206 | ||
| 207 | csum_copy_err: | 207 | csum_copy_err: |
| 208 | UDP6_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite); | ||
| 208 | skb_kill_datagram(sk, skb, flags); | 209 | skb_kill_datagram(sk, skb, flags); |
| 209 | 210 | ||
| 210 | if (flags & MSG_DONTWAIT) { | 211 | if (flags & MSG_DONTWAIT) |
| 211 | UDP6_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite); | ||
| 212 | return -EAGAIN; | 212 | return -EAGAIN; |
| 213 | } | ||
| 214 | goto try_again; | 213 | goto try_again; |
| 215 | } | 214 | } |
| 216 | 215 | ||
| @@ -971,6 +970,8 @@ void udp6_proc_exit(void) { | |||
| 971 | 970 | ||
| 972 | /* ------------------------------------------------------------------------ */ | 971 | /* ------------------------------------------------------------------------ */ |
| 973 | 972 | ||
| 973 | DEFINE_PROTO_INUSE(udpv6) | ||
| 974 | |||
| 974 | struct proto udpv6_prot = { | 975 | struct proto udpv6_prot = { |
| 975 | .name = "UDPv6", | 976 | .name = "UDPv6", |
| 976 | .owner = THIS_MODULE, | 977 | .owner = THIS_MODULE, |
| @@ -992,6 +993,7 @@ struct proto udpv6_prot = { | |||
| 992 | .compat_setsockopt = compat_udpv6_setsockopt, | 993 | .compat_setsockopt = compat_udpv6_setsockopt, |
| 993 | .compat_getsockopt = compat_udpv6_getsockopt, | 994 | .compat_getsockopt = compat_udpv6_getsockopt, |
| 994 | #endif | 995 | #endif |
| 996 | REF_PROTO_INUSE(udpv6) | ||
| 995 | }; | 997 | }; |
| 996 | 998 | ||
| 997 | static struct inet_protosw udpv6_protosw = { | 999 | static struct inet_protosw udpv6_protosw = { |
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index 766566f7de47..5a0379f71415 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c | |||
| @@ -40,6 +40,8 @@ static int udplite_v6_get_port(struct sock *sk, unsigned short snum) | |||
| 40 | return udplite_get_port(sk, snum, ipv6_rcv_saddr_equal); | 40 | return udplite_get_port(sk, snum, ipv6_rcv_saddr_equal); |
| 41 | } | 41 | } |
| 42 | 42 | ||
| 43 | DEFINE_PROTO_INUSE(udplitev6) | ||
| 44 | |||
| 43 | struct proto udplitev6_prot = { | 45 | struct proto udplitev6_prot = { |
| 44 | .name = "UDPLITEv6", | 46 | .name = "UDPLITEv6", |
| 45 | .owner = THIS_MODULE, | 47 | .owner = THIS_MODULE, |
| @@ -62,6 +64,7 @@ struct proto udplitev6_prot = { | |||
| 62 | .compat_setsockopt = compat_udpv6_setsockopt, | 64 | .compat_setsockopt = compat_udpv6_setsockopt, |
| 63 | .compat_getsockopt = compat_udpv6_getsockopt, | 65 | .compat_getsockopt = compat_udpv6_getsockopt, |
| 64 | #endif | 66 | #endif |
| 67 | REF_PROTO_INUSE(udplitev6) | ||
| 65 | }; | 68 | }; |
| 66 | 69 | ||
| 67 | static struct inet_protosw udplite6_protosw = { | 70 | static struct inet_protosw udplite6_protosw = { |
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 29b063d43120..a195a66e0cc7 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c | |||
| @@ -1381,7 +1381,7 @@ static int ipx_create(struct net *net, struct socket *sock, int protocol) | |||
| 1381 | goto out; | 1381 | goto out; |
| 1382 | 1382 | ||
| 1383 | rc = -ENOMEM; | 1383 | rc = -ENOMEM; |
| 1384 | sk = sk_alloc(net, PF_IPX, GFP_KERNEL, &ipx_proto, 1); | 1384 | sk = sk_alloc(net, PF_IPX, GFP_KERNEL, &ipx_proto); |
| 1385 | if (!sk) | 1385 | if (!sk) |
| 1386 | goto out; | 1386 | goto out; |
| 1387 | #ifdef IPX_REFCNT_DEBUG | 1387 | #ifdef IPX_REFCNT_DEBUG |
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 0328ae2654f4..48ce59a6e026 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c | |||
| @@ -1078,7 +1078,7 @@ static int irda_create(struct net *net, struct socket *sock, int protocol) | |||
| 1078 | } | 1078 | } |
| 1079 | 1079 | ||
| 1080 | /* Allocate networking socket */ | 1080 | /* Allocate networking socket */ |
| 1081 | sk = sk_alloc(net, PF_IRDA, GFP_ATOMIC, &irda_proto, 1); | 1081 | sk = sk_alloc(net, PF_IRDA, GFP_ATOMIC, &irda_proto); |
| 1082 | if (sk == NULL) | 1082 | if (sk == NULL) |
| 1083 | return -ENOMEM; | 1083 | return -ENOMEM; |
| 1084 | 1084 | ||
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index 3d241e415a2a..1120b150e211 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c | |||
| @@ -77,7 +77,7 @@ static int ircomm_tty_read_proc(char *buf, char **start, off_t offset, int len, | |||
| 77 | #endif /* CONFIG_PROC_FS */ | 77 | #endif /* CONFIG_PROC_FS */ |
| 78 | static struct tty_driver *driver; | 78 | static struct tty_driver *driver; |
| 79 | 79 | ||
| 80 | hashbin_t *ircomm_tty = NULL; | 80 | static hashbin_t *ircomm_tty = NULL; |
| 81 | 81 | ||
| 82 | static const struct tty_operations ops = { | 82 | static const struct tty_operations ops = { |
| 83 | .open = ircomm_tty_open, | 83 | .open = ircomm_tty_open, |
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c index 2f9f8dce5a69..e0eab5927c4f 100644 --- a/net/irda/irnet/irnet_ppp.c +++ b/net/irda/irnet/irnet_ppp.c | |||
| @@ -731,15 +731,25 @@ dev_irnet_ioctl(struct inode * inode, | |||
| 731 | /* Get termios */ | 731 | /* Get termios */ |
| 732 | case TCGETS: | 732 | case TCGETS: |
| 733 | DEBUG(FS_INFO, "Get termios.\n"); | 733 | DEBUG(FS_INFO, "Get termios.\n"); |
| 734 | #ifndef TCGETS2 | ||
| 734 | if(kernel_termios_to_user_termios((struct termios __user *)argp, &ap->termios)) | 735 | if(kernel_termios_to_user_termios((struct termios __user *)argp, &ap->termios)) |
| 735 | break; | 736 | break; |
| 737 | #else | ||
| 738 | if(kernel_termios_to_user_termios_1((struct termios __user *)argp, &ap->termios)) | ||
| 739 | break; | ||
| 740 | #endif | ||
| 736 | err = 0; | 741 | err = 0; |
| 737 | break; | 742 | break; |
| 738 | /* Set termios */ | 743 | /* Set termios */ |
| 739 | case TCSETSF: | 744 | case TCSETSF: |
| 740 | DEBUG(FS_INFO, "Set termios.\n"); | 745 | DEBUG(FS_INFO, "Set termios.\n"); |
| 746 | #ifndef TCGETS2 | ||
| 741 | if(user_termios_to_kernel_termios(&ap->termios, (struct termios __user *)argp)) | 747 | if(user_termios_to_kernel_termios(&ap->termios, (struct termios __user *)argp)) |
| 742 | break; | 748 | break; |
| 749 | #else | ||
| 750 | if(user_termios_to_kernel_termios_1(&ap->termios, (struct termios __user *)argp)) | ||
| 751 | break; | ||
| 752 | #endif | ||
| 743 | err = 0; | 753 | err = 0; |
| 744 | break; | 754 | break; |
| 745 | 755 | ||
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 43e01c8d382b..aef664580355 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
| @@ -216,7 +216,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio) | |||
| 216 | { | 216 | { |
| 217 | struct sock *sk; | 217 | struct sock *sk; |
| 218 | 218 | ||
| 219 | sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, 1); | 219 | sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto); |
| 220 | if (!sk) | 220 | if (!sk) |
| 221 | return NULL; | 221 | return NULL; |
| 222 | 222 | ||
diff --git a/net/key/af_key.c b/net/key/af_key.c index 7969f8a716df..10c89d47f685 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
| @@ -152,7 +152,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol) | |||
| 152 | return -EPROTONOSUPPORT; | 152 | return -EPROTONOSUPPORT; |
| 153 | 153 | ||
| 154 | err = -ENOMEM; | 154 | err = -ENOMEM; |
| 155 | sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto, 1); | 155 | sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto); |
| 156 | if (sk == NULL) | 156 | if (sk == NULL) |
| 157 | goto out; | 157 | goto out; |
| 158 | 158 | ||
| @@ -395,9 +395,9 @@ static inline int pfkey_sec_ctx_len(struct sadb_x_sec_ctx *sec_ctx) | |||
| 395 | static inline int verify_sec_ctx_len(void *p) | 395 | static inline int verify_sec_ctx_len(void *p) |
| 396 | { | 396 | { |
| 397 | struct sadb_x_sec_ctx *sec_ctx = (struct sadb_x_sec_ctx *)p; | 397 | struct sadb_x_sec_ctx *sec_ctx = (struct sadb_x_sec_ctx *)p; |
| 398 | int len; | 398 | int len = sec_ctx->sadb_x_ctx_len; |
| 399 | 399 | ||
| 400 | if (sec_ctx->sadb_x_ctx_len > PAGE_SIZE) | 400 | if (len > PAGE_SIZE) |
| 401 | return -EINVAL; | 401 | return -EINVAL; |
| 402 | 402 | ||
| 403 | len = pfkey_sec_ctx_len(sec_ctx); | 403 | len = pfkey_sec_ctx_len(sec_ctx); |
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index 8ebc2769dfda..5c0b484237c8 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c | |||
| @@ -869,7 +869,7 @@ static void llc_sk_init(struct sock* sk) | |||
| 869 | */ | 869 | */ |
| 870 | struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot) | 870 | struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot) |
| 871 | { | 871 | { |
| 872 | struct sock *sk = sk_alloc(net, family, priority, prot, 1); | 872 | struct sock *sk = sk_alloc(net, family, priority, prot); |
| 873 | 873 | ||
| 874 | if (!sk) | 874 | if (!sk) |
| 875 | goto out; | 875 | goto out; |
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c index bf7ba128b963..e62fe55944b8 100644 --- a/net/mac80211/aes_ccm.c +++ b/net/mac80211/aes_ccm.c | |||
| @@ -11,7 +11,6 @@ | |||
| 11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
| 12 | #include <linux/crypto.h> | 12 | #include <linux/crypto.h> |
| 13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
| 14 | #include <asm/scatterlist.h> | ||
| 15 | 14 | ||
| 16 | #include <net/mac80211.h> | 15 | #include <net/mac80211.h> |
| 17 | #include "ieee80211_key.h" | 16 | #include "ieee80211_key.h" |
diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c index f7ffeec3913f..fda0e06453e8 100644 --- a/net/mac80211/ieee80211_sta.c +++ b/net/mac80211/ieee80211_sta.c | |||
| @@ -1184,7 +1184,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct net_device *dev, | |||
| 1184 | printk(KERN_DEBUG "%s: RX %sssocResp from %s (capab=0x%x " | 1184 | printk(KERN_DEBUG "%s: RX %sssocResp from %s (capab=0x%x " |
| 1185 | "status=%d aid=%d)\n", | 1185 | "status=%d aid=%d)\n", |
| 1186 | dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa), | 1186 | dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa), |
| 1187 | capab_info, status_code, aid & ~(BIT(15) | BIT(14))); | 1187 | capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14)))); |
| 1188 | 1188 | ||
| 1189 | if (status_code != WLAN_STATUS_SUCCESS) { | 1189 | if (status_code != WLAN_STATUS_SUCCESS) { |
| 1190 | printk(KERN_DEBUG "%s: AP denied association (code=%d)\n", | 1190 | printk(KERN_DEBUG "%s: AP denied association (code=%d)\n", |
| @@ -2096,7 +2096,8 @@ static int ieee80211_sta_match_ssid(struct ieee80211_if_sta *ifsta, | |||
| 2096 | { | 2096 | { |
| 2097 | int tmp, hidden_ssid; | 2097 | int tmp, hidden_ssid; |
| 2098 | 2098 | ||
| 2099 | if (!memcmp(ifsta->ssid, ssid, ssid_len)) | 2099 | if (ssid_len == ifsta->ssid_len && |
| 2100 | !memcmp(ifsta->ssid, ssid, ssid_len)) | ||
| 2100 | return 1; | 2101 | return 1; |
| 2101 | 2102 | ||
| 2102 | if (ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL) | 2103 | if (ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL) |
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c index 6675261e958f..a84a23310ff4 100644 --- a/net/mac80211/wep.c +++ b/net/mac80211/wep.c | |||
| @@ -16,7 +16,7 @@ | |||
| 16 | #include <linux/crypto.h> | 16 | #include <linux/crypto.h> |
| 17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
| 18 | #include <linux/mm.h> | 18 | #include <linux/mm.h> |
| 19 | #include <asm/scatterlist.h> | 19 | #include <linux/scatterlist.h> |
| 20 | 20 | ||
| 21 | #include <net/mac80211.h> | 21 | #include <net/mac80211.h> |
| 22 | #include "ieee80211_i.h" | 22 | #include "ieee80211_i.h" |
| @@ -138,9 +138,7 @@ void ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, | |||
| 138 | *icv = cpu_to_le32(~crc32_le(~0, data, data_len)); | 138 | *icv = cpu_to_le32(~crc32_le(~0, data, data_len)); |
| 139 | 139 | ||
| 140 | crypto_blkcipher_setkey(tfm, rc4key, klen); | 140 | crypto_blkcipher_setkey(tfm, rc4key, klen); |
| 141 | sg.page = virt_to_page(data); | 141 | sg_init_one(&sg, data, data_len + WEP_ICV_LEN); |
| 142 | sg.offset = offset_in_page(data); | ||
| 143 | sg.length = data_len + WEP_ICV_LEN; | ||
| 144 | crypto_blkcipher_encrypt(&desc, &sg, &sg, sg.length); | 142 | crypto_blkcipher_encrypt(&desc, &sg, &sg, sg.length); |
| 145 | } | 143 | } |
| 146 | 144 | ||
| @@ -204,9 +202,7 @@ int ieee80211_wep_decrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, | |||
| 204 | __le32 crc; | 202 | __le32 crc; |
| 205 | 203 | ||
| 206 | crypto_blkcipher_setkey(tfm, rc4key, klen); | 204 | crypto_blkcipher_setkey(tfm, rc4key, klen); |
| 207 | sg.page = virt_to_page(data); | 205 | sg_init_one(&sg, data, data_len + WEP_ICV_LEN); |
| 208 | sg.offset = offset_in_page(data); | ||
| 209 | sg.length = data_len + WEP_ICV_LEN; | ||
| 210 | crypto_blkcipher_decrypt(&desc, &sg, &sg, sg.length); | 206 | crypto_blkcipher_decrypt(&desc, &sg, &sg, sg.length); |
| 211 | 207 | ||
| 212 | crc = cpu_to_le32(~crc32_le(~0, data, data_len)); | 208 | crc = cpu_to_le32(~crc32_le(~0, data, data_len)); |
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index d7a600a5720a..21a9fcc03796 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
| @@ -363,7 +363,7 @@ config NETFILTER_XT_TARGET_TRACE | |||
| 363 | the tables, chains, rules. | 363 | the tables, chains, rules. |
| 364 | 364 | ||
| 365 | If you want to compile it as a module, say M here and read | 365 | If you want to compile it as a module, say M here and read |
| 366 | <file:Documentation/modules.txt>. If unsure, say `N'. | 366 | <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. |
| 367 | 367 | ||
| 368 | config NETFILTER_XT_TARGET_SECMARK | 368 | config NETFILTER_XT_TARGET_SECMARK |
| 369 | tristate '"SECMARK" target support' | 369 | tristate '"SECMARK" target support' |
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index 93c58f973831..ad0e36ebea3d 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile | |||
| @@ -40,15 +40,15 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o | |||
| 40 | # targets | 40 | # targets |
| 41 | obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o | 41 | obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o |
| 42 | obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o | 42 | obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o |
| 43 | obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o | ||
| 43 | obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o | 44 | obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o |
| 44 | obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o | 45 | obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o |
| 45 | obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o | ||
| 46 | obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o | 46 | obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o |
| 47 | obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o | ||
| 47 | obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o | 48 | obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o |
| 48 | obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o | ||
| 49 | obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o | 49 | obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o |
| 50 | obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o | 50 | obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o |
| 51 | obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o | 51 | obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o |
| 52 | 52 | ||
| 53 | # matches | 53 | # matches |
| 54 | obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o | 54 | obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o |
| @@ -59,22 +59,22 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o | |||
| 59 | obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o | 59 | obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o |
| 60 | obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o | 60 | obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o |
| 61 | obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o | 61 | obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o |
| 62 | obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o | ||
| 62 | obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o | 63 | obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o |
| 63 | obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o | 64 | obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o |
| 64 | obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o | 65 | obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o |
| 65 | obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o | 66 | obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o |
| 66 | obj-$(CONFIG_NETFILTER_XT_MATCH_MARK) += xt_mark.o | 67 | obj-$(CONFIG_NETFILTER_XT_MATCH_MARK) += xt_mark.o |
| 67 | obj-$(CONFIG_NETFILTER_XT_MATCH_MULTIPORT) += xt_multiport.o | 68 | obj-$(CONFIG_NETFILTER_XT_MATCH_MULTIPORT) += xt_multiport.o |
| 68 | obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o | 69 | obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o |
| 69 | obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o | 70 | obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o |
| 71 | obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o | ||
| 70 | obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o | 72 | obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o |
| 71 | obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o | 73 | obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o |
| 72 | obj-$(CONFIG_NETFILTER_XT_MATCH_SCTP) += xt_sctp.o | 74 | obj-$(CONFIG_NETFILTER_XT_MATCH_SCTP) += xt_sctp.o |
| 73 | obj-$(CONFIG_NETFILTER_XT_MATCH_STATE) += xt_state.o | 75 | obj-$(CONFIG_NETFILTER_XT_MATCH_STATE) += xt_state.o |
| 74 | obj-$(CONFIG_NETFILTER_XT_MATCH_STATISTIC) += xt_statistic.o | 76 | obj-$(CONFIG_NETFILTER_XT_MATCH_STATISTIC) += xt_statistic.o |
| 75 | obj-$(CONFIG_NETFILTER_XT_MATCH_STRING) += xt_string.o | 77 | obj-$(CONFIG_NETFILTER_XT_MATCH_STRING) += xt_string.o |
| 76 | obj-$(CONFIG_NETFILTER_XT_MATCH_TIME) += xt_time.o | ||
| 77 | obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o | 78 | obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o |
| 78 | obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o | 79 | obj-$(CONFIG_NETFILTER_XT_MATCH_TIME) += xt_time.o |
| 79 | obj-$(CONFIG_NETFILTER_XT_MATCH_U32) += xt_u32.o | 80 | obj-$(CONFIG_NETFILTER_XT_MATCH_U32) += xt_u32.o |
| 80 | obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o | ||
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 4d6171bc0829..000c2fb462d0 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
| @@ -999,7 +999,7 @@ struct hlist_head *nf_ct_alloc_hashtable(int *sizep, int *vmalloced) | |||
| 999 | *vmalloced = 0; | 999 | *vmalloced = 0; |
| 1000 | 1000 | ||
| 1001 | size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head)); | 1001 | size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head)); |
| 1002 | hash = (void*)__get_free_pages(GFP_KERNEL, | 1002 | hash = (void*)__get_free_pages(GFP_KERNEL|__GFP_NOWARN, |
| 1003 | get_order(sizeof(struct hlist_head) | 1003 | get_order(sizeof(struct hlist_head) |
| 1004 | * size)); | 1004 | * size)); |
| 1005 | if (!hash) { | 1005 | if (!hash) { |
diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c index aa2831587b82..2dfac3253569 100644 --- a/net/netfilter/nf_sockopt.c +++ b/net/netfilter/nf_sockopt.c | |||
| @@ -23,14 +23,13 @@ static inline int overlap(int min1, int max1, int min2, int max2) | |||
| 23 | /* Functions to register sockopt ranges (exclusive). */ | 23 | /* Functions to register sockopt ranges (exclusive). */ |
| 24 | int nf_register_sockopt(struct nf_sockopt_ops *reg) | 24 | int nf_register_sockopt(struct nf_sockopt_ops *reg) |
| 25 | { | 25 | { |
| 26 | struct list_head *i; | 26 | struct nf_sockopt_ops *ops; |
| 27 | int ret = 0; | 27 | int ret = 0; |
| 28 | 28 | ||
| 29 | if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0) | 29 | if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0) |
| 30 | return -EINTR; | 30 | return -EINTR; |
| 31 | 31 | ||
| 32 | list_for_each(i, &nf_sockopts) { | 32 | list_for_each_entry(ops, &nf_sockopts, list) { |
| 33 | struct nf_sockopt_ops *ops = (struct nf_sockopt_ops *)i; | ||
| 34 | if (ops->pf == reg->pf | 33 | if (ops->pf == reg->pf |
| 35 | && (overlap(ops->set_optmin, ops->set_optmax, | 34 | && (overlap(ops->set_optmin, ops->set_optmax, |
| 36 | reg->set_optmin, reg->set_optmax) | 35 | reg->set_optmin, reg->set_optmax) |
| @@ -65,7 +64,6 @@ EXPORT_SYMBOL(nf_unregister_sockopt); | |||
| 65 | static int nf_sockopt(struct sock *sk, int pf, int val, | 64 | static int nf_sockopt(struct sock *sk, int pf, int val, |
| 66 | char __user *opt, int *len, int get) | 65 | char __user *opt, int *len, int get) |
| 67 | { | 66 | { |
| 68 | struct list_head *i; | ||
| 69 | struct nf_sockopt_ops *ops; | 67 | struct nf_sockopt_ops *ops; |
| 70 | int ret; | 68 | int ret; |
| 71 | 69 | ||
| @@ -75,8 +73,7 @@ static int nf_sockopt(struct sock *sk, int pf, int val, | |||
| 75 | if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0) | 73 | if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0) |
| 76 | return -EINTR; | 74 | return -EINTR; |
| 77 | 75 | ||
| 78 | list_for_each(i, &nf_sockopts) { | 76 | list_for_each_entry(ops, &nf_sockopts, list) { |
| 79 | ops = (struct nf_sockopt_ops *)i; | ||
| 80 | if (ops->pf == pf) { | 77 | if (ops->pf == pf) { |
| 81 | if (!try_module_get(ops->owner)) | 78 | if (!try_module_get(ops->owner)) |
| 82 | goto out_nosup; | 79 | goto out_nosup; |
| @@ -124,7 +121,6 @@ EXPORT_SYMBOL(nf_getsockopt); | |||
| 124 | static int compat_nf_sockopt(struct sock *sk, int pf, int val, | 121 | static int compat_nf_sockopt(struct sock *sk, int pf, int val, |
| 125 | char __user *opt, int *len, int get) | 122 | char __user *opt, int *len, int get) |
| 126 | { | 123 | { |
| 127 | struct list_head *i; | ||
| 128 | struct nf_sockopt_ops *ops; | 124 | struct nf_sockopt_ops *ops; |
| 129 | int ret; | 125 | int ret; |
| 130 | 126 | ||
| @@ -135,8 +131,7 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val, | |||
| 135 | if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0) | 131 | if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0) |
| 136 | return -EINTR; | 132 | return -EINTR; |
| 137 | 133 | ||
| 138 | list_for_each(i, &nf_sockopts) { | 134 | list_for_each_entry(ops, &nf_sockopts, list) { |
| 139 | ops = (struct nf_sockopt_ops *)i; | ||
| 140 | if (ops->pf == pf) { | 135 | if (ops->pf == pf) { |
| 141 | if (!try_module_get(ops->owner)) | 136 | if (!try_module_get(ops->owner)) |
| 142 | goto out_nosup; | 137 | goto out_nosup; |
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c index 06cff1d13690..d7becf08a93a 100644 --- a/net/netfilter/xt_connlimit.c +++ b/net/netfilter/xt_connlimit.c | |||
| @@ -4,7 +4,8 @@ | |||
| 4 | * (c) 2000 Gerd Knorr <kraxel@bytesex.org> | 4 | * (c) 2000 Gerd Knorr <kraxel@bytesex.org> |
| 5 | * Nov 2002: Martin Bene <martin.bene@icomedias.com>: | 5 | * Nov 2002: Martin Bene <martin.bene@icomedias.com>: |
| 6 | * only ignore TIME_WAIT or gone connections | 6 | * only ignore TIME_WAIT or gone connections |
| 7 | * Copyright © Jan Engelhardt <jengelh@gmx.de>, 2007 | 7 | * (C) CC Computer Consultants GmbH, 2007 |
| 8 | * Contact: <jengelh@computergmbh.de> | ||
| 8 | * | 9 | * |
| 9 | * based on ... | 10 | * based on ... |
| 10 | * | 11 | * |
| @@ -306,7 +307,7 @@ static void __exit xt_connlimit_exit(void) | |||
| 306 | 307 | ||
| 307 | module_init(xt_connlimit_init); | 308 | module_init(xt_connlimit_init); |
| 308 | module_exit(xt_connlimit_exit); | 309 | module_exit(xt_connlimit_exit); |
| 309 | MODULE_AUTHOR("Jan Engelhardt <jengelh@gmx.de>"); | 310 | MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>"); |
| 310 | MODULE_DESCRIPTION("netfilter xt_connlimit match module"); | 311 | MODULE_DESCRIPTION("netfilter xt_connlimit match module"); |
| 311 | MODULE_LICENSE("GPL"); | 312 | MODULE_LICENSE("GPL"); |
| 312 | MODULE_ALIAS("ipt_connlimit"); | 313 | MODULE_ALIAS("ipt_connlimit"); |
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c index ef48bbd93573..ff44f86c24ce 100644 --- a/net/netfilter/xt_time.c +++ b/net/netfilter/xt_time.c | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * xt_time | 2 | * xt_time |
| 3 | * Copyright © Jan Engelhardt <jengelh@computergmbh.de>, 2007 | 3 | * Copyright © CC Computer Consultants GmbH, 2007 |
| 4 | * Contact: <jengelh@computergmbh.de> | ||
| 4 | * | 5 | * |
| 5 | * based on ipt_time by Fabrice MARIE <fabrice@netfilter.org> | 6 | * based on ipt_time by Fabrice MARIE <fabrice@netfilter.org> |
| 6 | * This is a module which is used for time matching | 7 | * This is a module which is used for time matching |
diff --git a/net/netfilter/xt_u32.c b/net/netfilter/xt_u32.c index bec427915b30..af75b8c3f20b 100644 --- a/net/netfilter/xt_u32.c +++ b/net/netfilter/xt_u32.c | |||
| @@ -2,7 +2,8 @@ | |||
| 2 | * xt_u32 - kernel module to match u32 packet content | 2 | * xt_u32 - kernel module to match u32 packet content |
| 3 | * | 3 | * |
| 4 | * Original author: Don Cohen <don@isis.cs3-inc.com> | 4 | * Original author: Don Cohen <don@isis.cs3-inc.com> |
| 5 | * © Jan Engelhardt <jengelh@gmx.de>, 2007 | 5 | * (C) CC Computer Consultants GmbH, 2007 |
| 6 | * Contact: <jengelh@computergmbh.de> | ||
| 6 | */ | 7 | */ |
| 7 | 8 | ||
| 8 | #include <linux/module.h> | 9 | #include <linux/module.h> |
| @@ -129,7 +130,7 @@ static void __exit xt_u32_exit(void) | |||
| 129 | 130 | ||
| 130 | module_init(xt_u32_init); | 131 | module_init(xt_u32_init); |
| 131 | module_exit(xt_u32_exit); | 132 | module_exit(xt_u32_exit); |
| 132 | MODULE_AUTHOR("Jan Engelhardt <jengelh@gmx.de>"); | 133 | MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>"); |
| 133 | MODULE_DESCRIPTION("netfilter u32 match module"); | 134 | MODULE_DESCRIPTION("netfilter u32 match module"); |
| 134 | MODULE_LICENSE("GPL"); | 135 | MODULE_LICENSE("GPL"); |
| 135 | MODULE_ALIAS("ipt_u32"); | 136 | MODULE_ALIAS("ipt_u32"); |
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c index b6c844b7e1c1..b3675bd7db33 100644 --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c | |||
| @@ -178,11 +178,9 @@ int netlbl_domhsh_init(u32 size) | |||
| 178 | for (iter = 0; iter < hsh_tbl->size; iter++) | 178 | for (iter = 0; iter < hsh_tbl->size; iter++) |
| 179 | INIT_LIST_HEAD(&hsh_tbl->tbl[iter]); | 179 | INIT_LIST_HEAD(&hsh_tbl->tbl[iter]); |
| 180 | 180 | ||
| 181 | rcu_read_lock(); | ||
| 182 | spin_lock(&netlbl_domhsh_lock); | 181 | spin_lock(&netlbl_domhsh_lock); |
| 183 | rcu_assign_pointer(netlbl_domhsh, hsh_tbl); | 182 | rcu_assign_pointer(netlbl_domhsh, hsh_tbl); |
| 184 | spin_unlock(&netlbl_domhsh_lock); | 183 | spin_unlock(&netlbl_domhsh_lock); |
| 185 | rcu_read_unlock(); | ||
| 186 | 184 | ||
| 187 | return 0; | 185 | return 0; |
| 188 | } | 186 | } |
| @@ -222,7 +220,6 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, | |||
| 222 | entry->valid = 1; | 220 | entry->valid = 1; |
| 223 | INIT_RCU_HEAD(&entry->rcu); | 221 | INIT_RCU_HEAD(&entry->rcu); |
| 224 | 222 | ||
| 225 | ret_val = 0; | ||
| 226 | rcu_read_lock(); | 223 | rcu_read_lock(); |
| 227 | if (entry->domain != NULL) { | 224 | if (entry->domain != NULL) { |
| 228 | bkt = netlbl_domhsh_hash(entry->domain); | 225 | bkt = netlbl_domhsh_hash(entry->domain); |
| @@ -233,7 +230,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, | |||
| 233 | else | 230 | else |
| 234 | ret_val = -EEXIST; | 231 | ret_val = -EEXIST; |
| 235 | spin_unlock(&netlbl_domhsh_lock); | 232 | spin_unlock(&netlbl_domhsh_lock); |
| 236 | } else if (entry->domain == NULL) { | 233 | } else { |
| 237 | INIT_LIST_HEAD(&entry->list); | 234 | INIT_LIST_HEAD(&entry->list); |
| 238 | spin_lock(&netlbl_domhsh_def_lock); | 235 | spin_lock(&netlbl_domhsh_def_lock); |
| 239 | if (rcu_dereference(netlbl_domhsh_def) == NULL) | 236 | if (rcu_dereference(netlbl_domhsh_def) == NULL) |
| @@ -241,9 +238,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, | |||
| 241 | else | 238 | else |
| 242 | ret_val = -EEXIST; | 239 | ret_val = -EEXIST; |
| 243 | spin_unlock(&netlbl_domhsh_def_lock); | 240 | spin_unlock(&netlbl_domhsh_def_lock); |
| 244 | } else | 241 | } |
| 245 | ret_val = -EINVAL; | ||
| 246 | |||
| 247 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_ADD, audit_info); | 242 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_ADD, audit_info); |
| 248 | if (audit_buf != NULL) { | 243 | if (audit_buf != NULL) { |
| 249 | audit_log_format(audit_buf, | 244 | audit_log_format(audit_buf, |
| @@ -262,7 +257,6 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, | |||
| 262 | audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0); | 257 | audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0); |
| 263 | audit_log_end(audit_buf); | 258 | audit_log_end(audit_buf); |
| 264 | } | 259 | } |
| 265 | |||
| 266 | rcu_read_unlock(); | 260 | rcu_read_unlock(); |
| 267 | 261 | ||
| 268 | if (ret_val != 0) { | 262 | if (ret_val != 0) { |
| @@ -313,38 +307,30 @@ int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info) | |||
| 313 | struct audit_buffer *audit_buf; | 307 | struct audit_buffer *audit_buf; |
| 314 | 308 | ||
| 315 | rcu_read_lock(); | 309 | rcu_read_lock(); |
| 316 | if (domain != NULL) | 310 | entry = netlbl_domhsh_search(domain, (domain != NULL ? 0 : 1)); |
| 317 | entry = netlbl_domhsh_search(domain, 0); | ||
| 318 | else | ||
| 319 | entry = netlbl_domhsh_search(domain, 1); | ||
| 320 | if (entry == NULL) | 311 | if (entry == NULL) |
| 321 | goto remove_return; | 312 | goto remove_return; |
| 322 | switch (entry->type) { | 313 | switch (entry->type) { |
| 323 | case NETLBL_NLTYPE_UNLABELED: | ||
| 324 | break; | ||
| 325 | case NETLBL_NLTYPE_CIPSOV4: | 314 | case NETLBL_NLTYPE_CIPSOV4: |
| 326 | ret_val = cipso_v4_doi_domhsh_remove(entry->type_def.cipsov4, | 315 | cipso_v4_doi_domhsh_remove(entry->type_def.cipsov4, |
| 327 | entry->domain); | 316 | entry->domain); |
| 328 | if (ret_val != 0) | ||
| 329 | goto remove_return; | ||
| 330 | break; | 317 | break; |
| 331 | } | 318 | } |
| 332 | ret_val = 0; | ||
| 333 | if (entry != rcu_dereference(netlbl_domhsh_def)) { | 319 | if (entry != rcu_dereference(netlbl_domhsh_def)) { |
| 334 | spin_lock(&netlbl_domhsh_lock); | 320 | spin_lock(&netlbl_domhsh_lock); |
| 335 | if (entry->valid) { | 321 | if (entry->valid) { |
| 336 | entry->valid = 0; | 322 | entry->valid = 0; |
| 337 | list_del_rcu(&entry->list); | 323 | list_del_rcu(&entry->list); |
| 338 | } else | 324 | ret_val = 0; |
| 339 | ret_val = -ENOENT; | 325 | } |
| 340 | spin_unlock(&netlbl_domhsh_lock); | 326 | spin_unlock(&netlbl_domhsh_lock); |
| 341 | } else { | 327 | } else { |
| 342 | spin_lock(&netlbl_domhsh_def_lock); | 328 | spin_lock(&netlbl_domhsh_def_lock); |
| 343 | if (entry->valid) { | 329 | if (entry->valid) { |
| 344 | entry->valid = 0; | 330 | entry->valid = 0; |
| 345 | rcu_assign_pointer(netlbl_domhsh_def, NULL); | 331 | rcu_assign_pointer(netlbl_domhsh_def, NULL); |
| 346 | } else | 332 | ret_val = 0; |
| 347 | ret_val = -ENOENT; | 333 | } |
| 348 | spin_unlock(&netlbl_domhsh_def_lock); | 334 | spin_unlock(&netlbl_domhsh_def_lock); |
| 349 | } | 335 | } |
| 350 | 336 | ||
| @@ -357,11 +343,10 @@ int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info) | |||
| 357 | audit_log_end(audit_buf); | 343 | audit_log_end(audit_buf); |
| 358 | } | 344 | } |
| 359 | 345 | ||
| 360 | if (ret_val == 0) | ||
| 361 | call_rcu(&entry->rcu, netlbl_domhsh_free_entry); | ||
| 362 | |||
| 363 | remove_return: | 346 | remove_return: |
| 364 | rcu_read_unlock(); | 347 | rcu_read_unlock(); |
| 348 | if (ret_val == 0) | ||
| 349 | call_rcu(&entry->rcu, netlbl_domhsh_free_entry); | ||
| 365 | return ret_val; | 350 | return ret_val; |
| 366 | } | 351 | } |
| 367 | 352 | ||
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c index 5315dacc5222..56483377997a 100644 --- a/net/netlabel/netlabel_mgmt.c +++ b/net/netlabel/netlabel_mgmt.c | |||
| @@ -85,11 +85,9 @@ static const struct nla_policy netlbl_mgmt_genl_policy[NLBL_MGMT_A_MAX + 1] = { | |||
| 85 | */ | 85 | */ |
| 86 | void netlbl_mgmt_protocount_inc(void) | 86 | void netlbl_mgmt_protocount_inc(void) |
| 87 | { | 87 | { |
| 88 | rcu_read_lock(); | ||
| 89 | spin_lock(&netlabel_mgmt_protocount_lock); | 88 | spin_lock(&netlabel_mgmt_protocount_lock); |
| 90 | netlabel_mgmt_protocount++; | 89 | netlabel_mgmt_protocount++; |
| 91 | spin_unlock(&netlabel_mgmt_protocount_lock); | 90 | spin_unlock(&netlabel_mgmt_protocount_lock); |
| 92 | rcu_read_unlock(); | ||
| 93 | } | 91 | } |
| 94 | 92 | ||
| 95 | /** | 93 | /** |
| @@ -103,12 +101,10 @@ void netlbl_mgmt_protocount_inc(void) | |||
| 103 | */ | 101 | */ |
| 104 | void netlbl_mgmt_protocount_dec(void) | 102 | void netlbl_mgmt_protocount_dec(void) |
| 105 | { | 103 | { |
| 106 | rcu_read_lock(); | ||
| 107 | spin_lock(&netlabel_mgmt_protocount_lock); | 104 | spin_lock(&netlabel_mgmt_protocount_lock); |
| 108 | if (netlabel_mgmt_protocount > 0) | 105 | if (netlabel_mgmt_protocount > 0) |
| 109 | netlabel_mgmt_protocount--; | 106 | netlabel_mgmt_protocount--; |
| 110 | spin_unlock(&netlabel_mgmt_protocount_lock); | 107 | spin_unlock(&netlabel_mgmt_protocount_lock); |
| 111 | rcu_read_unlock(); | ||
| 112 | } | 108 | } |
| 113 | 109 | ||
| 114 | /** | 110 | /** |
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 5c303c68af1d..348292450deb 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c | |||
| @@ -84,12 +84,10 @@ static void netlbl_unlabel_acceptflg_set(u8 value, | |||
| 84 | struct audit_buffer *audit_buf; | 84 | struct audit_buffer *audit_buf; |
| 85 | u8 old_val; | 85 | u8 old_val; |
| 86 | 86 | ||
| 87 | rcu_read_lock(); | ||
| 88 | old_val = netlabel_unlabel_acceptflg; | ||
| 89 | spin_lock(&netlabel_unlabel_acceptflg_lock); | 87 | spin_lock(&netlabel_unlabel_acceptflg_lock); |
| 88 | old_val = netlabel_unlabel_acceptflg; | ||
| 90 | netlabel_unlabel_acceptflg = value; | 89 | netlabel_unlabel_acceptflg = value; |
| 91 | spin_unlock(&netlabel_unlabel_acceptflg_lock); | 90 | spin_unlock(&netlabel_unlabel_acceptflg_lock); |
| 92 | rcu_read_unlock(); | ||
| 93 | 91 | ||
| 94 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_ALLOW, | 92 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_ALLOW, |
| 95 | audit_info); | 93 | audit_info); |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 98e313e5e594..415c97236f63 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
| @@ -396,7 +396,7 @@ static int __netlink_create(struct net *net, struct socket *sock, | |||
| 396 | 396 | ||
| 397 | sock->ops = &netlink_ops; | 397 | sock->ops = &netlink_ops; |
| 398 | 398 | ||
| 399 | sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, 1); | 399 | sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto); |
| 400 | if (!sk) | 400 | if (!sk) |
| 401 | return -ENOMEM; | 401 | return -ENOMEM; |
| 402 | 402 | ||
| @@ -752,7 +752,7 @@ struct sock *netlink_getsockbyfilp(struct file *filp) | |||
| 752 | * 1: repeat lookup - reference dropped while waiting for socket memory. | 752 | * 1: repeat lookup - reference dropped while waiting for socket memory. |
| 753 | */ | 753 | */ |
| 754 | int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, | 754 | int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, |
| 755 | long timeo, struct sock *ssk) | 755 | long *timeo, struct sock *ssk) |
| 756 | { | 756 | { |
| 757 | struct netlink_sock *nlk; | 757 | struct netlink_sock *nlk; |
| 758 | 758 | ||
| @@ -761,7 +761,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, | |||
| 761 | if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || | 761 | if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || |
| 762 | test_bit(0, &nlk->state)) { | 762 | test_bit(0, &nlk->state)) { |
| 763 | DECLARE_WAITQUEUE(wait, current); | 763 | DECLARE_WAITQUEUE(wait, current); |
| 764 | if (!timeo) { | 764 | if (!*timeo) { |
| 765 | if (!ssk || netlink_is_kernel(ssk)) | 765 | if (!ssk || netlink_is_kernel(ssk)) |
| 766 | netlink_overrun(sk); | 766 | netlink_overrun(sk); |
| 767 | sock_put(sk); | 767 | sock_put(sk); |
| @@ -775,7 +775,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, | |||
| 775 | if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || | 775 | if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || |
| 776 | test_bit(0, &nlk->state)) && | 776 | test_bit(0, &nlk->state)) && |
| 777 | !sock_flag(sk, SOCK_DEAD)) | 777 | !sock_flag(sk, SOCK_DEAD)) |
| 778 | timeo = schedule_timeout(timeo); | 778 | *timeo = schedule_timeout(*timeo); |
| 779 | 779 | ||
| 780 | __set_current_state(TASK_RUNNING); | 780 | __set_current_state(TASK_RUNNING); |
| 781 | remove_wait_queue(&nlk->wait, &wait); | 781 | remove_wait_queue(&nlk->wait, &wait); |
| @@ -783,7 +783,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, | |||
| 783 | 783 | ||
| 784 | if (signal_pending(current)) { | 784 | if (signal_pending(current)) { |
| 785 | kfree_skb(skb); | 785 | kfree_skb(skb); |
| 786 | return sock_intr_errno(timeo); | 786 | return sock_intr_errno(*timeo); |
| 787 | } | 787 | } |
| 788 | return 1; | 788 | return 1; |
| 789 | } | 789 | } |
| @@ -877,7 +877,7 @@ retry: | |||
| 877 | if (netlink_is_kernel(sk)) | 877 | if (netlink_is_kernel(sk)) |
| 878 | return netlink_unicast_kernel(sk, skb); | 878 | return netlink_unicast_kernel(sk, skb); |
| 879 | 879 | ||
| 880 | err = netlink_attachskb(sk, skb, nonblock, timeo, ssk); | 880 | err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk); |
| 881 | if (err == 1) | 881 | if (err == 1) |
| 882 | goto retry; | 882 | goto retry; |
| 883 | if (err) | 883 | if (err) |
| @@ -1565,7 +1565,11 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | |||
| 1565 | 1565 | ||
| 1566 | netlink_dump(sk); | 1566 | netlink_dump(sk); |
| 1567 | sock_put(sk); | 1567 | sock_put(sk); |
| 1568 | return 0; | 1568 | |
| 1569 | /* We successfully started a dump, by returning -EINTR we | ||
| 1570 | * signal not to send ACK even if it was requested. | ||
| 1571 | */ | ||
| 1572 | return -EINTR; | ||
| 1569 | } | 1573 | } |
| 1570 | 1574 | ||
| 1571 | void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) | 1575 | void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) |
| @@ -1619,17 +1623,21 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, | |||
| 1619 | 1623 | ||
| 1620 | /* Only requests are handled by the kernel */ | 1624 | /* Only requests are handled by the kernel */ |
| 1621 | if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) | 1625 | if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) |
| 1622 | goto skip; | 1626 | goto ack; |
| 1623 | 1627 | ||
| 1624 | /* Skip control messages */ | 1628 | /* Skip control messages */ |
| 1625 | if (nlh->nlmsg_type < NLMSG_MIN_TYPE) | 1629 | if (nlh->nlmsg_type < NLMSG_MIN_TYPE) |
| 1626 | goto skip; | 1630 | goto ack; |
| 1627 | 1631 | ||
| 1628 | err = cb(skb, nlh); | 1632 | err = cb(skb, nlh); |
| 1629 | skip: | 1633 | if (err == -EINTR) |
| 1634 | goto skip; | ||
| 1635 | |||
| 1636 | ack: | ||
| 1630 | if (nlh->nlmsg_flags & NLM_F_ACK || err) | 1637 | if (nlh->nlmsg_flags & NLM_F_ACK || err) |
| 1631 | netlink_ack(skb, nlh, err); | 1638 | netlink_ack(skb, nlh, err); |
| 1632 | 1639 | ||
| 1640 | skip: | ||
| 1633 | msglen = NLMSG_ALIGN(nlh->nlmsg_len); | 1641 | msglen = NLMSG_ALIGN(nlh->nlmsg_len); |
| 1634 | if (msglen > skb->len) | 1642 | if (msglen > skb->len) |
| 1635 | msglen = skb->len; | 1643 | msglen = skb->len; |
| @@ -1880,7 +1888,7 @@ static void __net_exit netlink_net_exit(struct net *net) | |||
| 1880 | #endif | 1888 | #endif |
| 1881 | } | 1889 | } |
| 1882 | 1890 | ||
| 1883 | static struct pernet_operations __net_initdata netlink_net_ops = { | 1891 | static struct pernet_operations netlink_net_ops = { |
| 1884 | .init = netlink_net_init, | 1892 | .init = netlink_net_init, |
| 1885 | .exit = netlink_net_exit, | 1893 | .exit = netlink_net_exit, |
| 1886 | }; | 1894 | }; |
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 3a4d479ea64e..972250c974f1 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
| @@ -423,7 +423,8 @@ static int nr_create(struct net *net, struct socket *sock, int protocol) | |||
| 423 | if (sock->type != SOCK_SEQPACKET || protocol != 0) | 423 | if (sock->type != SOCK_SEQPACKET || protocol != 0) |
| 424 | return -ESOCKTNOSUPPORT; | 424 | return -ESOCKTNOSUPPORT; |
| 425 | 425 | ||
| 426 | if ((sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto, 1)) == NULL) | 426 | sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto); |
| 427 | if (sk == NULL) | ||
| 427 | return -ENOMEM; | 428 | return -ENOMEM; |
| 428 | 429 | ||
| 429 | nr = nr_sk(sk); | 430 | nr = nr_sk(sk); |
| @@ -465,7 +466,8 @@ static struct sock *nr_make_new(struct sock *osk) | |||
| 465 | if (osk->sk_type != SOCK_SEQPACKET) | 466 | if (osk->sk_type != SOCK_SEQPACKET) |
| 466 | return NULL; | 467 | return NULL; |
| 467 | 468 | ||
| 468 | if ((sk = sk_alloc(osk->sk_net, PF_NETROM, GFP_ATOMIC, osk->sk_prot, 1)) == NULL) | 469 | sk = sk_alloc(osk->sk_net, PF_NETROM, GFP_ATOMIC, osk->sk_prot); |
| 470 | if (sk == NULL) | ||
| 469 | return NULL; | 471 | return NULL; |
| 470 | 472 | ||
| 471 | nr = nr_sk(sk); | 473 | nr = nr_sk(sk); |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index d0936506b731..4cb2dfba0993 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -995,7 +995,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol) | |||
| 995 | sock->state = SS_UNCONNECTED; | 995 | sock->state = SS_UNCONNECTED; |
| 996 | 996 | ||
| 997 | err = -ENOBUFS; | 997 | err = -ENOBUFS; |
| 998 | sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, 1); | 998 | sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto); |
| 999 | if (sk == NULL) | 999 | if (sk == NULL) |
| 1000 | goto out; | 1000 | goto out; |
| 1001 | 1001 | ||
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 509defe53ee5..ed2d65cd8010 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
| @@ -513,7 +513,8 @@ static int rose_create(struct net *net, struct socket *sock, int protocol) | |||
| 513 | if (sock->type != SOCK_SEQPACKET || protocol != 0) | 513 | if (sock->type != SOCK_SEQPACKET || protocol != 0) |
| 514 | return -ESOCKTNOSUPPORT; | 514 | return -ESOCKTNOSUPPORT; |
| 515 | 515 | ||
| 516 | if ((sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto, 1)) == NULL) | 516 | sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto); |
| 517 | if (sk == NULL) | ||
| 517 | return -ENOMEM; | 518 | return -ENOMEM; |
| 518 | 519 | ||
| 519 | rose = rose_sk(sk); | 520 | rose = rose_sk(sk); |
| @@ -551,7 +552,8 @@ static struct sock *rose_make_new(struct sock *osk) | |||
| 551 | if (osk->sk_type != SOCK_SEQPACKET) | 552 | if (osk->sk_type != SOCK_SEQPACKET) |
| 552 | return NULL; | 553 | return NULL; |
| 553 | 554 | ||
| 554 | if ((sk = sk_alloc(osk->sk_net, PF_ROSE, GFP_ATOMIC, &rose_proto, 1)) == NULL) | 555 | sk = sk_alloc(osk->sk_net, PF_ROSE, GFP_ATOMIC, &rose_proto); |
| 556 | if (sk == NULL) | ||
| 555 | return NULL; | 557 | return NULL; |
| 556 | 558 | ||
| 557 | rose = rose_sk(sk); | 559 | rose = rose_sk(sk); |
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index c680017f5c8e..d6389450c4bf 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c | |||
| @@ -627,7 +627,7 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol) | |||
| 627 | sock->ops = &rxrpc_rpc_ops; | 627 | sock->ops = &rxrpc_rpc_ops; |
| 628 | sock->state = SS_UNCONNECTED; | 628 | sock->state = SS_UNCONNECTED; |
| 629 | 629 | ||
| 630 | sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto, 1); | 630 | sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto); |
| 631 | if (!sk) | 631 | if (!sk) |
| 632 | return -ENOMEM; | 632 | return -ENOMEM; |
| 633 | 633 | ||
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index ac3cabdca78c..e09a95aa68ff 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c | |||
| @@ -135,9 +135,8 @@ static void rxkad_prime_packet_security(struct rxrpc_connection *conn) | |||
| 135 | tmpbuf.x[2] = 0; | 135 | tmpbuf.x[2] = 0; |
| 136 | tmpbuf.x[3] = htonl(conn->security_ix); | 136 | tmpbuf.x[3] = htonl(conn->security_ix); |
| 137 | 137 | ||
| 138 | memset(sg, 0, sizeof(sg)); | 138 | sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf)); |
| 139 | sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf)); | 139 | sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf)); |
| 140 | sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf)); | ||
| 141 | crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); | 140 | crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); |
| 142 | 141 | ||
| 143 | memcpy(&conn->csum_iv, &tmpbuf.x[2], sizeof(conn->csum_iv)); | 142 | memcpy(&conn->csum_iv, &tmpbuf.x[2], sizeof(conn->csum_iv)); |
| @@ -180,9 +179,8 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call, | |||
| 180 | desc.info = iv.x; | 179 | desc.info = iv.x; |
| 181 | desc.flags = 0; | 180 | desc.flags = 0; |
| 182 | 181 | ||
| 183 | memset(sg, 0, sizeof(sg)); | 182 | sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf)); |
| 184 | sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf)); | 183 | sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf)); |
| 185 | sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf)); | ||
| 186 | crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); | 184 | crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); |
| 187 | 185 | ||
| 188 | memcpy(sechdr, &tmpbuf, sizeof(tmpbuf)); | 186 | memcpy(sechdr, &tmpbuf, sizeof(tmpbuf)); |
| @@ -227,9 +225,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, | |||
| 227 | desc.info = iv.x; | 225 | desc.info = iv.x; |
| 228 | desc.flags = 0; | 226 | desc.flags = 0; |
| 229 | 227 | ||
| 230 | memset(sg, 0, sizeof(sg[0]) * 2); | 228 | sg_init_one(&sg[0], sechdr, sizeof(rxkhdr)); |
| 231 | sg_set_buf(&sg[0], sechdr, sizeof(rxkhdr)); | 229 | sg_init_one(&sg[1], &rxkhdr, sizeof(rxkhdr)); |
| 232 | sg_set_buf(&sg[1], &rxkhdr, sizeof(rxkhdr)); | ||
| 233 | crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(rxkhdr)); | 230 | crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(rxkhdr)); |
| 234 | 231 | ||
| 235 | /* we want to encrypt the skbuff in-place */ | 232 | /* we want to encrypt the skbuff in-place */ |
| @@ -240,6 +237,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, | |||
| 240 | len = data_size + call->conn->size_align - 1; | 237 | len = data_size + call->conn->size_align - 1; |
| 241 | len &= ~(call->conn->size_align - 1); | 238 | len &= ~(call->conn->size_align - 1); |
| 242 | 239 | ||
| 240 | sg_init_table(sg, nsg); | ||
| 243 | skb_to_sgvec(skb, sg, 0, len); | 241 | skb_to_sgvec(skb, sg, 0, len); |
| 244 | crypto_blkcipher_encrypt_iv(&desc, sg, sg, len); | 242 | crypto_blkcipher_encrypt_iv(&desc, sg, sg, len); |
| 245 | 243 | ||
| @@ -290,9 +288,8 @@ static int rxkad_secure_packet(const struct rxrpc_call *call, | |||
| 290 | tmpbuf.x[0] = sp->hdr.callNumber; | 288 | tmpbuf.x[0] = sp->hdr.callNumber; |
| 291 | tmpbuf.x[1] = x; | 289 | tmpbuf.x[1] = x; |
| 292 | 290 | ||
| 293 | memset(&sg, 0, sizeof(sg)); | 291 | sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf)); |
| 294 | sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf)); | 292 | sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf)); |
| 295 | sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf)); | ||
| 296 | crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); | 293 | crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); |
| 297 | 294 | ||
| 298 | x = ntohl(tmpbuf.x[1]); | 295 | x = ntohl(tmpbuf.x[1]); |
| @@ -332,19 +329,22 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call, | |||
| 332 | struct rxrpc_skb_priv *sp; | 329 | struct rxrpc_skb_priv *sp; |
| 333 | struct blkcipher_desc desc; | 330 | struct blkcipher_desc desc; |
| 334 | struct rxrpc_crypt iv; | 331 | struct rxrpc_crypt iv; |
| 335 | struct scatterlist sg[2]; | 332 | struct scatterlist sg[16]; |
| 336 | struct sk_buff *trailer; | 333 | struct sk_buff *trailer; |
| 337 | u32 data_size, buf; | 334 | u32 data_size, buf; |
| 338 | u16 check; | 335 | u16 check; |
| 336 | int nsg; | ||
| 339 | 337 | ||
| 340 | _enter(""); | 338 | _enter(""); |
| 341 | 339 | ||
| 342 | sp = rxrpc_skb(skb); | 340 | sp = rxrpc_skb(skb); |
| 343 | 341 | ||
| 344 | /* we want to decrypt the skbuff in-place */ | 342 | /* we want to decrypt the skbuff in-place */ |
| 345 | if (skb_cow_data(skb, 0, &trailer) < 0) | 343 | nsg = skb_cow_data(skb, 0, &trailer); |
| 344 | if (nsg < 0 || nsg > 16) | ||
| 346 | goto nomem; | 345 | goto nomem; |
| 347 | 346 | ||
| 347 | sg_init_table(sg, nsg); | ||
| 348 | skb_to_sgvec(skb, sg, 0, 8); | 348 | skb_to_sgvec(skb, sg, 0, 8); |
| 349 | 349 | ||
| 350 | /* start the decryption afresh */ | 350 | /* start the decryption afresh */ |
| @@ -426,6 +426,7 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call, | |||
| 426 | goto nomem; | 426 | goto nomem; |
| 427 | } | 427 | } |
| 428 | 428 | ||
| 429 | sg_init_table(sg, nsg); | ||
| 429 | skb_to_sgvec(skb, sg, 0, skb->len); | 430 | skb_to_sgvec(skb, sg, 0, skb->len); |
| 430 | 431 | ||
| 431 | /* decrypt from the session key */ | 432 | /* decrypt from the session key */ |
| @@ -521,9 +522,8 @@ static int rxkad_verify_packet(const struct rxrpc_call *call, | |||
| 521 | tmpbuf.x[0] = call->call_id; | 522 | tmpbuf.x[0] = call->call_id; |
| 522 | tmpbuf.x[1] = x; | 523 | tmpbuf.x[1] = x; |
| 523 | 524 | ||
| 524 | memset(&sg, 0, sizeof(sg)); | 525 | sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf)); |
| 525 | sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf)); | 526 | sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf)); |
| 526 | sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf)); | ||
| 527 | crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); | 527 | crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); |
| 528 | 528 | ||
| 529 | x = ntohl(tmpbuf.x[1]); | 529 | x = ntohl(tmpbuf.x[1]); |
| @@ -690,16 +690,20 @@ static void rxkad_calc_response_checksum(struct rxkad_response *response) | |||
| 690 | static void rxkad_sg_set_buf2(struct scatterlist sg[2], | 690 | static void rxkad_sg_set_buf2(struct scatterlist sg[2], |
| 691 | void *buf, size_t buflen) | 691 | void *buf, size_t buflen) |
| 692 | { | 692 | { |
| 693 | int nsg = 1; | ||
| 693 | 694 | ||
| 694 | memset(sg, 0, sizeof(sg)); | 695 | sg_init_table(sg, 2); |
| 695 | 696 | ||
| 696 | sg_set_buf(&sg[0], buf, buflen); | 697 | sg_set_buf(&sg[0], buf, buflen); |
| 697 | if (sg[0].offset + buflen > PAGE_SIZE) { | 698 | if (sg[0].offset + buflen > PAGE_SIZE) { |
| 698 | /* the buffer was split over two pages */ | 699 | /* the buffer was split over two pages */ |
| 699 | sg[0].length = PAGE_SIZE - sg[0].offset; | 700 | sg[0].length = PAGE_SIZE - sg[0].offset; |
| 700 | sg_set_buf(&sg[1], buf + sg[0].length, buflen - sg[0].length); | 701 | sg_set_buf(&sg[1], buf + sg[0].length, buflen - sg[0].length); |
| 702 | nsg++; | ||
| 701 | } | 703 | } |
| 702 | 704 | ||
| 705 | sg_mark_end(&sg[nsg - 1]); | ||
| 706 | |||
| 703 | ASSERTCMP(sg[0].length + sg[1].length, ==, buflen); | 707 | ASSERTCMP(sg[0].length + sg[1].length, ==, buflen); |
| 704 | } | 708 | } |
| 705 | 709 | ||
| @@ -712,7 +716,7 @@ static void rxkad_encrypt_response(struct rxrpc_connection *conn, | |||
| 712 | { | 716 | { |
| 713 | struct blkcipher_desc desc; | 717 | struct blkcipher_desc desc; |
| 714 | struct rxrpc_crypt iv; | 718 | struct rxrpc_crypt iv; |
| 715 | struct scatterlist ssg[2], dsg[2]; | 719 | struct scatterlist sg[2]; |
| 716 | 720 | ||
| 717 | /* continue encrypting from where we left off */ | 721 | /* continue encrypting from where we left off */ |
| 718 | memcpy(&iv, s2->session_key, sizeof(iv)); | 722 | memcpy(&iv, s2->session_key, sizeof(iv)); |
| @@ -720,9 +724,8 @@ static void rxkad_encrypt_response(struct rxrpc_connection *conn, | |||
| 720 | desc.info = iv.x; | 724 | desc.info = iv.x; |
| 721 | desc.flags = 0; | 725 | desc.flags = 0; |
| 722 | 726 | ||
| 723 | rxkad_sg_set_buf2(ssg, &resp->encrypted, sizeof(resp->encrypted)); | 727 | rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted)); |
| 724 | memcpy(dsg, ssg, sizeof(dsg)); | 728 | crypto_blkcipher_encrypt_iv(&desc, sg, sg, sizeof(resp->encrypted)); |
| 725 | crypto_blkcipher_encrypt_iv(&desc, dsg, ssg, sizeof(resp->encrypted)); | ||
| 726 | } | 729 | } |
| 727 | 730 | ||
| 728 | /* | 731 | /* |
| @@ -817,7 +820,7 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn, | |||
| 817 | { | 820 | { |
| 818 | struct blkcipher_desc desc; | 821 | struct blkcipher_desc desc; |
| 819 | struct rxrpc_crypt iv, key; | 822 | struct rxrpc_crypt iv, key; |
| 820 | struct scatterlist ssg[1], dsg[1]; | 823 | struct scatterlist sg[1]; |
| 821 | struct in_addr addr; | 824 | struct in_addr addr; |
| 822 | unsigned life; | 825 | unsigned life; |
| 823 | time_t issue, now; | 826 | time_t issue, now; |
| @@ -850,9 +853,8 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn, | |||
| 850 | desc.info = iv.x; | 853 | desc.info = iv.x; |
| 851 | desc.flags = 0; | 854 | desc.flags = 0; |
| 852 | 855 | ||
| 853 | sg_init_one(&ssg[0], ticket, ticket_len); | 856 | sg_init_one(&sg[0], ticket, ticket_len); |
| 854 | memcpy(dsg, ssg, sizeof(dsg)); | 857 | crypto_blkcipher_decrypt_iv(&desc, sg, sg, ticket_len); |
| 855 | crypto_blkcipher_decrypt_iv(&desc, dsg, ssg, ticket_len); | ||
| 856 | 858 | ||
| 857 | p = ticket; | 859 | p = ticket; |
| 858 | end = p + ticket_len; | 860 | end = p + ticket_len; |
| @@ -961,7 +963,7 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn, | |||
| 961 | const struct rxrpc_crypt *session_key) | 963 | const struct rxrpc_crypt *session_key) |
| 962 | { | 964 | { |
| 963 | struct blkcipher_desc desc; | 965 | struct blkcipher_desc desc; |
| 964 | struct scatterlist ssg[2], dsg[2]; | 966 | struct scatterlist sg[2]; |
| 965 | struct rxrpc_crypt iv; | 967 | struct rxrpc_crypt iv; |
| 966 | 968 | ||
| 967 | _enter(",,%08x%08x", | 969 | _enter(",,%08x%08x", |
| @@ -979,9 +981,8 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn, | |||
| 979 | desc.info = iv.x; | 981 | desc.info = iv.x; |
| 980 | desc.flags = 0; | 982 | desc.flags = 0; |
| 981 | 983 | ||
| 982 | rxkad_sg_set_buf2(ssg, &resp->encrypted, sizeof(resp->encrypted)); | 984 | rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted)); |
| 983 | memcpy(dsg, ssg, sizeof(dsg)); | 985 | crypto_blkcipher_decrypt_iv(&desc, sg, sg, sizeof(resp->encrypted)); |
| 984 | crypto_blkcipher_decrypt_iv(&desc, dsg, ssg, sizeof(resp->encrypted)); | ||
| 985 | mutex_unlock(&rxkad_ci_mutex); | 986 | mutex_unlock(&rxkad_ci_mutex); |
| 986 | 987 | ||
| 987 | _leave(""); | 988 | _leave(""); |
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index fd7bca4d5c20..c3fde9180f9d 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c | |||
| @@ -166,7 +166,7 @@ bad_mirred: | |||
| 166 | return TC_ACT_SHOT; | 166 | return TC_ACT_SHOT; |
| 167 | } | 167 | } |
| 168 | 168 | ||
| 169 | skb2 = skb_clone(skb, GFP_ATOMIC); | 169 | skb2 = skb_act_clone(skb, GFP_ATOMIC); |
| 170 | if (skb2 == NULL) | 170 | if (skb2 == NULL) |
| 171 | goto bad_mirred; | 171 | goto bad_mirred; |
| 172 | if (m->tcfm_eaction != TCA_EGRESS_MIRROR && | 172 | if (m->tcfm_eaction != TCA_EGRESS_MIRROR && |
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 9e98c6e567dd..53171029439f 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
| @@ -91,7 +91,7 @@ static struct tc_u_common *u32_list; | |||
| 91 | 91 | ||
| 92 | static __inline__ unsigned u32_hash_fold(u32 key, struct tc_u32_sel *sel, u8 fshift) | 92 | static __inline__ unsigned u32_hash_fold(u32 key, struct tc_u32_sel *sel, u8 fshift) |
| 93 | { | 93 | { |
| 94 | unsigned h = (key & sel->hmask)>>fshift; | 94 | unsigned h = ntohl(key & sel->hmask)>>fshift; |
| 95 | 95 | ||
| 96 | return h; | 96 | return h; |
| 97 | } | 97 | } |
| @@ -615,7 +615,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, | |||
| 615 | n->handle = handle; | 615 | n->handle = handle; |
| 616 | { | 616 | { |
| 617 | u8 i = 0; | 617 | u8 i = 0; |
| 618 | u32 mask = s->hmask; | 618 | u32 mask = ntohl(s->hmask); |
| 619 | if (mask) { | 619 | if (mask) { |
| 620 | while (!(mask & 1)) { | 620 | while (!(mask & 1)) { |
| 621 | i++; | 621 | i++; |
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index abd82fc3ec60..de894096e442 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
| @@ -136,7 +136,7 @@ prio_dequeue(struct Qdisc* sch) | |||
| 136 | * pulling an skb. This way we avoid excessive requeues | 136 | * pulling an skb. This way we avoid excessive requeues |
| 137 | * for slower queues. | 137 | * for slower queues. |
| 138 | */ | 138 | */ |
| 139 | if (!netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) { | 139 | if (!__netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) { |
| 140 | qdisc = q->queues[prio]; | 140 | qdisc = q->queues[prio]; |
| 141 | skb = qdisc->dequeue(qdisc); | 141 | skb = qdisc->dequeue(qdisc); |
| 142 | if (skb) { | 142 | if (skb) { |
| @@ -165,7 +165,7 @@ static struct sk_buff *rr_dequeue(struct Qdisc* sch) | |||
| 165 | * for slower queues. If the queue is stopped, try the | 165 | * for slower queues. If the queue is stopped, try the |
| 166 | * next queue. | 166 | * next queue. |
| 167 | */ | 167 | */ |
| 168 | if (!netif_subqueue_stopped(sch->dev, | 168 | if (!__netif_subqueue_stopped(sch->dev, |
| 169 | (q->mq ? q->curband : 0))) { | 169 | (q->mq ? q->curband : 0))) { |
| 170 | qdisc = q->queues[q->curband]; | 170 | qdisc = q->queues[q->curband]; |
| 171 | skb = qdisc->dequeue(qdisc); | 171 | skb = qdisc->dequeue(qdisc); |
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index be57cf317a7f..c0ed06d4a504 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c | |||
| @@ -252,6 +252,9 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device * | |||
| 252 | static inline int teql_resolve(struct sk_buff *skb, | 252 | static inline int teql_resolve(struct sk_buff *skb, |
| 253 | struct sk_buff *skb_res, struct net_device *dev) | 253 | struct sk_buff *skb_res, struct net_device *dev) |
| 254 | { | 254 | { |
| 255 | if (dev->qdisc == &noop_qdisc) | ||
| 256 | return -ENODEV; | ||
| 257 | |||
| 255 | if (dev->header_ops == NULL || | 258 | if (dev->header_ops == NULL || |
| 256 | skb->dst == NULL || | 259 | skb->dst == NULL || |
| 257 | skb->dst->neighbour == NULL) | 260 | skb->dst->neighbour == NULL) |
| @@ -266,7 +269,7 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 266 | int busy; | 269 | int busy; |
| 267 | int nores; | 270 | int nores; |
| 268 | int len = skb->len; | 271 | int len = skb->len; |
| 269 | int subq = skb->queue_mapping; | 272 | int subq = skb_get_queue_mapping(skb); |
| 270 | struct sk_buff *skb_res = NULL; | 273 | struct sk_buff *skb_res = NULL; |
| 271 | 274 | ||
| 272 | start = master->slaves; | 275 | start = master->slaves; |
| @@ -284,7 +287,7 @@ restart: | |||
| 284 | if (slave->qdisc_sleeping != q) | 287 | if (slave->qdisc_sleeping != q) |
| 285 | continue; | 288 | continue; |
| 286 | if (netif_queue_stopped(slave) || | 289 | if (netif_queue_stopped(slave) || |
| 287 | netif_subqueue_stopped(slave, subq) || | 290 | __netif_subqueue_stopped(slave, subq) || |
| 288 | !netif_running(slave)) { | 291 | !netif_running(slave)) { |
| 289 | busy = 1; | 292 | busy = 1; |
| 290 | continue; | 293 | continue; |
| @@ -294,7 +297,7 @@ restart: | |||
| 294 | case 0: | 297 | case 0: |
| 295 | if (netif_tx_trylock(slave)) { | 298 | if (netif_tx_trylock(slave)) { |
| 296 | if (!netif_queue_stopped(slave) && | 299 | if (!netif_queue_stopped(slave) && |
| 297 | !netif_subqueue_stopped(slave, subq) && | 300 | !__netif_subqueue_stopped(slave, subq) && |
| 298 | slave->hard_start_xmit(skb, slave) == 0) { | 301 | slave->hard_start_xmit(skb, slave) == 0) { |
| 299 | netif_tx_unlock(slave); | 302 | netif_tx_unlock(slave); |
| 300 | master->slaves = NEXT_SLAVE(q); | 303 | master->slaves = NEXT_SLAVE(q); |
diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 781810724714..6d5fa6bb371b 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c | |||
| @@ -107,7 +107,7 @@ struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp) | |||
| 107 | } | 107 | } |
| 108 | 108 | ||
| 109 | /* Free the shared key stucture */ | 109 | /* Free the shared key stucture */ |
| 110 | void sctp_auth_shkey_free(struct sctp_shared_key *sh_key) | 110 | static void sctp_auth_shkey_free(struct sctp_shared_key *sh_key) |
| 111 | { | 111 | { |
| 112 | BUG_ON(!list_empty(&sh_key->key_list)); | 112 | BUG_ON(!list_empty(&sh_key->key_list)); |
| 113 | sctp_auth_key_put(sh_key->key); | 113 | sctp_auth_key_put(sh_key->key); |
| @@ -220,7 +220,7 @@ static struct sctp_auth_bytes *sctp_auth_make_key_vector( | |||
| 220 | 220 | ||
| 221 | 221 | ||
| 222 | /* Make a key vector based on our local parameters */ | 222 | /* Make a key vector based on our local parameters */ |
| 223 | struct sctp_auth_bytes *sctp_auth_make_local_vector( | 223 | static struct sctp_auth_bytes *sctp_auth_make_local_vector( |
| 224 | const struct sctp_association *asoc, | 224 | const struct sctp_association *asoc, |
| 225 | gfp_t gfp) | 225 | gfp_t gfp) |
| 226 | { | 226 | { |
| @@ -232,7 +232,7 @@ struct sctp_auth_bytes *sctp_auth_make_local_vector( | |||
| 232 | } | 232 | } |
| 233 | 233 | ||
| 234 | /* Make a key vector based on peer's parameters */ | 234 | /* Make a key vector based on peer's parameters */ |
| 235 | struct sctp_auth_bytes *sctp_auth_make_peer_vector( | 235 | static struct sctp_auth_bytes *sctp_auth_make_peer_vector( |
| 236 | const struct sctp_association *asoc, | 236 | const struct sctp_association *asoc, |
| 237 | gfp_t gfp) | 237 | gfp_t gfp) |
| 238 | { | 238 | { |
| @@ -556,7 +556,7 @@ struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc) | |||
| 556 | return &sctp_hmac_list[id]; | 556 | return &sctp_hmac_list[id]; |
| 557 | } | 557 | } |
| 558 | 558 | ||
| 559 | static int __sctp_auth_find_hmacid(__u16 *hmacs, int n_elts, __u16 hmac_id) | 559 | static int __sctp_auth_find_hmacid(__be16 *hmacs, int n_elts, __be16 hmac_id) |
| 560 | { | 560 | { |
| 561 | int found = 0; | 561 | int found = 0; |
| 562 | int i; | 562 | int i; |
| @@ -573,7 +573,7 @@ static int __sctp_auth_find_hmacid(__u16 *hmacs, int n_elts, __u16 hmac_id) | |||
| 573 | 573 | ||
| 574 | /* See if the HMAC_ID is one that we claim as supported */ | 574 | /* See if the HMAC_ID is one that we claim as supported */ |
| 575 | int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc, | 575 | int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc, |
| 576 | __u16 hmac_id) | 576 | __be16 hmac_id) |
| 577 | { | 577 | { |
| 578 | struct sctp_hmac_algo_param *hmacs; | 578 | struct sctp_hmac_algo_param *hmacs; |
| 579 | __u16 n_elt; | 579 | __u16 n_elt; |
| @@ -726,9 +726,7 @@ void sctp_auth_calculate_hmac(const struct sctp_association *asoc, | |||
| 726 | 726 | ||
| 727 | /* set up scatter list */ | 727 | /* set up scatter list */ |
| 728 | end = skb_tail_pointer(skb); | 728 | end = skb_tail_pointer(skb); |
| 729 | sg.page = virt_to_page(auth); | 729 | sg_init_one(&sg, auth, end - (unsigned char *)auth); |
| 730 | sg.offset = (unsigned long)(auth) % PAGE_SIZE; | ||
| 731 | sg.length = end - (unsigned char *)auth; | ||
| 732 | 730 | ||
| 733 | desc.tfm = asoc->ep->auth_hmacs[hmac_id]; | 731 | desc.tfm = asoc->ep->auth_hmacs[hmac_id]; |
| 734 | desc.flags = 0; | 732 | desc.flags = 0; |
diff --git a/net/sctp/crc32c.c b/net/sctp/crc32c.c index 59cf7b06d216..181edabdb8ca 100644 --- a/net/sctp/crc32c.c +++ b/net/sctp/crc32c.c | |||
| @@ -170,6 +170,7 @@ __u32 sctp_update_cksum(__u8 *buffer, __u16 length, __u32 crc32) | |||
| 170 | return crc32; | 170 | return crc32; |
| 171 | } | 171 | } |
| 172 | 172 | ||
| 173 | #if 0 | ||
| 173 | __u32 sctp_update_copy_cksum(__u8 *to, __u8 *from, __u16 length, __u32 crc32) | 174 | __u32 sctp_update_copy_cksum(__u8 *to, __u8 *from, __u16 length, __u32 crc32) |
| 174 | { | 175 | { |
| 175 | __u32 i; | 176 | __u32 i; |
| @@ -186,6 +187,7 @@ __u32 sctp_update_copy_cksum(__u8 *to, __u8 *from, __u16 length, __u32 crc32) | |||
| 186 | 187 | ||
| 187 | return crc32; | 188 | return crc32; |
| 188 | } | 189 | } |
| 190 | #endif /* 0 */ | ||
| 189 | 191 | ||
| 190 | __u32 sctp_end_cksum(__u32 crc32) | 192 | __u32 sctp_end_cksum(__u32 crc32) |
| 191 | { | 193 | { |
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index eb4deaf58914..7f31ff638bc6 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
| @@ -631,7 +631,7 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk, | |||
| 631 | struct ipv6_pinfo *newnp, *np = inet6_sk(sk); | 631 | struct ipv6_pinfo *newnp, *np = inet6_sk(sk); |
| 632 | struct sctp6_sock *newsctp6sk; | 632 | struct sctp6_sock *newsctp6sk; |
| 633 | 633 | ||
| 634 | newsk = sk_alloc(sk->sk_net, PF_INET6, GFP_KERNEL, sk->sk_prot, 1); | 634 | newsk = sk_alloc(sk->sk_net, PF_INET6, GFP_KERNEL, sk->sk_prot); |
| 635 | if (!newsk) | 635 | if (!newsk) |
| 636 | goto out; | 636 | goto out; |
| 637 | 637 | ||
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index f5cd96f5fe74..40c1a47d1b8d 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
| @@ -552,7 +552,8 @@ static struct sock *sctp_v4_create_accept_sk(struct sock *sk, | |||
| 552 | { | 552 | { |
| 553 | struct inet_sock *inet = inet_sk(sk); | 553 | struct inet_sock *inet = inet_sk(sk); |
| 554 | struct inet_sock *newinet; | 554 | struct inet_sock *newinet; |
| 555 | struct sock *newsk = sk_alloc(sk->sk_net, PF_INET, GFP_KERNEL, sk->sk_prot, 1); | 555 | struct sock *newsk = sk_alloc(sk->sk_net, PF_INET, GFP_KERNEL, |
| 556 | sk->sk_prot); | ||
| 556 | 557 | ||
| 557 | if (!newsk) | 558 | if (!newsk) |
| 558 | goto out; | 559 | goto out; |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index f983a369d4e2..c377e4e8f653 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
| @@ -56,7 +56,7 @@ | |||
| 56 | #include <linux/ipv6.h> | 56 | #include <linux/ipv6.h> |
| 57 | #include <linux/net.h> | 57 | #include <linux/net.h> |
| 58 | #include <linux/inet.h> | 58 | #include <linux/inet.h> |
| 59 | #include <asm/scatterlist.h> | 59 | #include <linux/scatterlist.h> |
| 60 | #include <linux/crypto.h> | 60 | #include <linux/crypto.h> |
| 61 | #include <net/sock.h> | 61 | #include <net/sock.h> |
| 62 | 62 | ||
| @@ -1513,9 +1513,7 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, | |||
| 1513 | struct hash_desc desc; | 1513 | struct hash_desc desc; |
| 1514 | 1514 | ||
| 1515 | /* Sign the message. */ | 1515 | /* Sign the message. */ |
| 1516 | sg.page = virt_to_page(&cookie->c); | 1516 | sg_init_one(&sg, &cookie->c, bodysize); |
| 1517 | sg.offset = (unsigned long)(&cookie->c) % PAGE_SIZE; | ||
| 1518 | sg.length = bodysize; | ||
| 1519 | keylen = SCTP_SECRET_SIZE; | 1517 | keylen = SCTP_SECRET_SIZE; |
| 1520 | key = (char *)ep->secret_key[ep->current_key]; | 1518 | key = (char *)ep->secret_key[ep->current_key]; |
| 1521 | desc.tfm = sctp_sk(ep->base.sk)->hmac; | 1519 | desc.tfm = sctp_sk(ep->base.sk)->hmac; |
| @@ -1585,9 +1583,7 @@ struct sctp_association *sctp_unpack_cookie( | |||
| 1585 | 1583 | ||
| 1586 | /* Check the signature. */ | 1584 | /* Check the signature. */ |
| 1587 | keylen = SCTP_SECRET_SIZE; | 1585 | keylen = SCTP_SECRET_SIZE; |
| 1588 | sg.page = virt_to_page(bear_cookie); | 1586 | sg_init_one(&sg, bear_cookie, bodysize); |
| 1589 | sg.offset = (unsigned long)(bear_cookie) % PAGE_SIZE; | ||
| 1590 | sg.length = bodysize; | ||
| 1591 | key = (char *)ep->secret_key[ep->current_key]; | 1587 | key = (char *)ep->secret_key[ep->current_key]; |
| 1592 | desc.tfm = sctp_sk(ep->base.sk)->hmac; | 1588 | desc.tfm = sctp_sk(ep->base.sk)->hmac; |
| 1593 | desc.flags = 0; | 1589 | desc.flags = 0; |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index bd6f42a15a4b..a7ecf3159e53 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -6455,6 +6455,8 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, | |||
| 6455 | } | 6455 | } |
| 6456 | 6456 | ||
| 6457 | 6457 | ||
| 6458 | DEFINE_PROTO_INUSE(sctp) | ||
| 6459 | |||
| 6458 | /* This proto struct describes the ULP interface for SCTP. */ | 6460 | /* This proto struct describes the ULP interface for SCTP. */ |
| 6459 | struct proto sctp_prot = { | 6461 | struct proto sctp_prot = { |
| 6460 | .name = "SCTP", | 6462 | .name = "SCTP", |
| @@ -6483,9 +6485,12 @@ struct proto sctp_prot = { | |||
| 6483 | .memory_pressure = &sctp_memory_pressure, | 6485 | .memory_pressure = &sctp_memory_pressure, |
| 6484 | .enter_memory_pressure = sctp_enter_memory_pressure, | 6486 | .enter_memory_pressure = sctp_enter_memory_pressure, |
| 6485 | .memory_allocated = &sctp_memory_allocated, | 6487 | .memory_allocated = &sctp_memory_allocated, |
| 6488 | REF_PROTO_INUSE(sctp) | ||
| 6486 | }; | 6489 | }; |
| 6487 | 6490 | ||
| 6488 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 6491 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
| 6492 | DEFINE_PROTO_INUSE(sctpv6) | ||
| 6493 | |||
| 6489 | struct proto sctpv6_prot = { | 6494 | struct proto sctpv6_prot = { |
| 6490 | .name = "SCTPv6", | 6495 | .name = "SCTPv6", |
| 6491 | .owner = THIS_MODULE, | 6496 | .owner = THIS_MODULE, |
| @@ -6513,5 +6518,6 @@ struct proto sctpv6_prot = { | |||
| 6513 | .memory_pressure = &sctp_memory_pressure, | 6518 | .memory_pressure = &sctp_memory_pressure, |
| 6514 | .enter_memory_pressure = sctp_enter_memory_pressure, | 6519 | .enter_memory_pressure = sctp_enter_memory_pressure, |
| 6515 | .memory_allocated = &sctp_memory_allocated, | 6520 | .memory_allocated = &sctp_memory_allocated, |
| 6521 | REF_PROTO_INUSE(sctpv6) | ||
| 6516 | }; | 6522 | }; |
| 6517 | #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ | 6523 | #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ |
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index b9370956b187..4be92d0a2cab 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c | |||
| @@ -908,8 +908,8 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn) | |||
| 908 | return; | 908 | return; |
| 909 | } | 909 | } |
| 910 | 910 | ||
| 911 | /* Renege 'needed' bytes from the ordering queue. */ | 911 | static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, |
| 912 | static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed) | 912 | struct sk_buff_head *list, __u16 needed) |
| 913 | { | 913 | { |
| 914 | __u16 freed = 0; | 914 | __u16 freed = 0; |
| 915 | __u32 tsn; | 915 | __u32 tsn; |
| @@ -919,7 +919,7 @@ static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed) | |||
| 919 | 919 | ||
| 920 | tsnmap = &ulpq->asoc->peer.tsn_map; | 920 | tsnmap = &ulpq->asoc->peer.tsn_map; |
| 921 | 921 | ||
| 922 | while ((skb = __skb_dequeue_tail(&ulpq->lobby)) != NULL) { | 922 | while ((skb = __skb_dequeue_tail(list)) != NULL) { |
| 923 | freed += skb_headlen(skb); | 923 | freed += skb_headlen(skb); |
| 924 | event = sctp_skb2event(skb); | 924 | event = sctp_skb2event(skb); |
| 925 | tsn = event->tsn; | 925 | tsn = event->tsn; |
| @@ -933,30 +933,16 @@ static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed) | |||
| 933 | return freed; | 933 | return freed; |
| 934 | } | 934 | } |
| 935 | 935 | ||
| 936 | /* Renege 'needed' bytes from the ordering queue. */ | ||
| 937 | static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed) | ||
| 938 | { | ||
| 939 | return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed); | ||
| 940 | } | ||
| 941 | |||
| 936 | /* Renege 'needed' bytes from the reassembly queue. */ | 942 | /* Renege 'needed' bytes from the reassembly queue. */ |
| 937 | static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed) | 943 | static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed) |
| 938 | { | 944 | { |
| 939 | __u16 freed = 0; | 945 | return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed); |
| 940 | __u32 tsn; | ||
| 941 | struct sk_buff *skb; | ||
| 942 | struct sctp_ulpevent *event; | ||
| 943 | struct sctp_tsnmap *tsnmap; | ||
| 944 | |||
| 945 | tsnmap = &ulpq->asoc->peer.tsn_map; | ||
| 946 | |||
| 947 | /* Walk backwards through the list, reneges the newest tsns. */ | ||
| 948 | while ((skb = __skb_dequeue_tail(&ulpq->reasm)) != NULL) { | ||
| 949 | freed += skb_headlen(skb); | ||
| 950 | event = sctp_skb2event(skb); | ||
| 951 | tsn = event->tsn; | ||
| 952 | |||
| 953 | sctp_ulpevent_free(event); | ||
| 954 | sctp_tsnmap_renege(tsnmap, tsn); | ||
| 955 | if (freed >= needed) | ||
| 956 | return freed; | ||
| 957 | } | ||
| 958 | |||
| 959 | return freed; | ||
| 960 | } | 946 | } |
| 961 | 947 | ||
| 962 | /* Partial deliver the first message as there is pressure on rwnd. */ | 948 | /* Partial deliver the first message as there is pressure on rwnd. */ |
diff --git a/net/socket.c b/net/socket.c index 540013ea8620..5d879fd3d01d 100644 --- a/net/socket.c +++ b/net/socket.c | |||
| @@ -1250,11 +1250,14 @@ asmlinkage long sys_socketpair(int family, int type, int protocol, | |||
| 1250 | goto out_release_both; | 1250 | goto out_release_both; |
| 1251 | 1251 | ||
| 1252 | fd1 = sock_alloc_fd(&newfile1); | 1252 | fd1 = sock_alloc_fd(&newfile1); |
| 1253 | if (unlikely(fd1 < 0)) | 1253 | if (unlikely(fd1 < 0)) { |
| 1254 | err = fd1; | ||
| 1254 | goto out_release_both; | 1255 | goto out_release_both; |
| 1256 | } | ||
| 1255 | 1257 | ||
| 1256 | fd2 = sock_alloc_fd(&newfile2); | 1258 | fd2 = sock_alloc_fd(&newfile2); |
| 1257 | if (unlikely(fd2 < 0)) { | 1259 | if (unlikely(fd2 < 0)) { |
| 1260 | err = fd2; | ||
| 1258 | put_filp(newfile1); | 1261 | put_filp(newfile1); |
| 1259 | put_unused_fd(fd1); | 1262 | put_unused_fd(fd1); |
| 1260 | goto out_release_both; | 1263 | goto out_release_both; |
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index bfb6a29633dd..0dd792338fa9 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c | |||
| @@ -75,7 +75,7 @@ krb5_encrypt( | |||
| 75 | memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm)); | 75 | memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm)); |
| 76 | 76 | ||
| 77 | memcpy(out, in, length); | 77 | memcpy(out, in, length); |
| 78 | sg_set_buf(sg, out, length); | 78 | sg_init_one(sg, out, length); |
| 79 | 79 | ||
| 80 | ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length); | 80 | ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length); |
| 81 | out: | 81 | out: |
| @@ -110,7 +110,7 @@ krb5_decrypt( | |||
| 110 | memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm)); | 110 | memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm)); |
| 111 | 111 | ||
| 112 | memcpy(out, in, length); | 112 | memcpy(out, in, length); |
| 113 | sg_set_buf(sg, out, length); | 113 | sg_init_one(sg, out, length); |
| 114 | 114 | ||
| 115 | ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length); | 115 | ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length); |
| 116 | out: | 116 | out: |
| @@ -146,7 +146,7 @@ make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body, | |||
| 146 | err = crypto_hash_init(&desc); | 146 | err = crypto_hash_init(&desc); |
| 147 | if (err) | 147 | if (err) |
| 148 | goto out; | 148 | goto out; |
| 149 | sg_set_buf(sg, header, hdrlen); | 149 | sg_init_one(sg, header, hdrlen); |
| 150 | err = crypto_hash_update(&desc, sg, hdrlen); | 150 | err = crypto_hash_update(&desc, sg, hdrlen); |
| 151 | if (err) | 151 | if (err) |
| 152 | goto out; | 152 | goto out; |
| @@ -188,8 +188,6 @@ encryptor(struct scatterlist *sg, void *data) | |||
| 188 | /* Worst case is 4 fragments: head, end of page 1, start | 188 | /* Worst case is 4 fragments: head, end of page 1, start |
| 189 | * of page 2, tail. Anything more is a bug. */ | 189 | * of page 2, tail. Anything more is a bug. */ |
| 190 | BUG_ON(desc->fragno > 3); | 190 | BUG_ON(desc->fragno > 3); |
| 191 | desc->infrags[desc->fragno] = *sg; | ||
| 192 | desc->outfrags[desc->fragno] = *sg; | ||
| 193 | 191 | ||
| 194 | page_pos = desc->pos - outbuf->head[0].iov_len; | 192 | page_pos = desc->pos - outbuf->head[0].iov_len; |
| 195 | if (page_pos >= 0 && page_pos < outbuf->page_len) { | 193 | if (page_pos >= 0 && page_pos < outbuf->page_len) { |
| @@ -197,9 +195,12 @@ encryptor(struct scatterlist *sg, void *data) | |||
| 197 | int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT; | 195 | int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT; |
| 198 | in_page = desc->pages[i]; | 196 | in_page = desc->pages[i]; |
| 199 | } else { | 197 | } else { |
| 200 | in_page = sg->page; | 198 | in_page = sg_page(sg); |
| 201 | } | 199 | } |
| 202 | desc->infrags[desc->fragno].page = in_page; | 200 | sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length, |
| 201 | sg->offset); | ||
| 202 | sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length, | ||
| 203 | sg->offset); | ||
| 203 | desc->fragno++; | 204 | desc->fragno++; |
| 204 | desc->fraglen += sg->length; | 205 | desc->fraglen += sg->length; |
| 205 | desc->pos += sg->length; | 206 | desc->pos += sg->length; |
| @@ -210,16 +211,22 @@ encryptor(struct scatterlist *sg, void *data) | |||
| 210 | if (thislen == 0) | 211 | if (thislen == 0) |
| 211 | return 0; | 212 | return 0; |
| 212 | 213 | ||
| 214 | sg_mark_end(&desc->infrags[desc->fragno - 1]); | ||
| 215 | sg_mark_end(&desc->outfrags[desc->fragno - 1]); | ||
| 216 | |||
| 213 | ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags, | 217 | ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags, |
| 214 | desc->infrags, thislen); | 218 | desc->infrags, thislen); |
| 215 | if (ret) | 219 | if (ret) |
| 216 | return ret; | 220 | return ret; |
| 221 | |||
| 222 | sg_init_table(desc->infrags, 4); | ||
| 223 | sg_init_table(desc->outfrags, 4); | ||
| 224 | |||
| 217 | if (fraglen) { | 225 | if (fraglen) { |
| 218 | desc->outfrags[0].page = sg->page; | 226 | sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen, |
| 219 | desc->outfrags[0].offset = sg->offset + sg->length - fraglen; | 227 | sg->offset + sg->length - fraglen); |
| 220 | desc->outfrags[0].length = fraglen; | ||
| 221 | desc->infrags[0] = desc->outfrags[0]; | 228 | desc->infrags[0] = desc->outfrags[0]; |
| 222 | desc->infrags[0].page = in_page; | 229 | sg_assign_page(&desc->infrags[0], in_page); |
| 223 | desc->fragno = 1; | 230 | desc->fragno = 1; |
| 224 | desc->fraglen = fraglen; | 231 | desc->fraglen = fraglen; |
| 225 | } else { | 232 | } else { |
| @@ -248,6 +255,9 @@ gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf, | |||
| 248 | desc.fragno = 0; | 255 | desc.fragno = 0; |
| 249 | desc.fraglen = 0; | 256 | desc.fraglen = 0; |
| 250 | 257 | ||
| 258 | sg_init_table(desc.infrags, 4); | ||
| 259 | sg_init_table(desc.outfrags, 4); | ||
| 260 | |||
| 251 | ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc); | 261 | ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc); |
| 252 | return ret; | 262 | return ret; |
| 253 | } | 263 | } |
| @@ -272,7 +282,8 @@ decryptor(struct scatterlist *sg, void *data) | |||
| 272 | /* Worst case is 4 fragments: head, end of page 1, start | 282 | /* Worst case is 4 fragments: head, end of page 1, start |
| 273 | * of page 2, tail. Anything more is a bug. */ | 283 | * of page 2, tail. Anything more is a bug. */ |
| 274 | BUG_ON(desc->fragno > 3); | 284 | BUG_ON(desc->fragno > 3); |
| 275 | desc->frags[desc->fragno] = *sg; | 285 | sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length, |
| 286 | sg->offset); | ||
| 276 | desc->fragno++; | 287 | desc->fragno++; |
| 277 | desc->fraglen += sg->length; | 288 | desc->fraglen += sg->length; |
| 278 | 289 | ||
| @@ -282,14 +293,18 @@ decryptor(struct scatterlist *sg, void *data) | |||
| 282 | if (thislen == 0) | 293 | if (thislen == 0) |
| 283 | return 0; | 294 | return 0; |
| 284 | 295 | ||
| 296 | sg_mark_end(&desc->frags[desc->fragno - 1]); | ||
| 297 | |||
| 285 | ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags, | 298 | ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags, |
| 286 | desc->frags, thislen); | 299 | desc->frags, thislen); |
| 287 | if (ret) | 300 | if (ret) |
| 288 | return ret; | 301 | return ret; |
| 302 | |||
| 303 | sg_init_table(desc->frags, 4); | ||
| 304 | |||
| 289 | if (fraglen) { | 305 | if (fraglen) { |
| 290 | desc->frags[0].page = sg->page; | 306 | sg_set_page(&desc->frags[0], sg_page(sg), fraglen, |
| 291 | desc->frags[0].offset = sg->offset + sg->length - fraglen; | 307 | sg->offset + sg->length - fraglen); |
| 292 | desc->frags[0].length = fraglen; | ||
| 293 | desc->fragno = 1; | 308 | desc->fragno = 1; |
| 294 | desc->fraglen = fraglen; | 309 | desc->fraglen = fraglen; |
| 295 | } else { | 310 | } else { |
| @@ -314,6 +329,9 @@ gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf, | |||
| 314 | desc.desc.flags = 0; | 329 | desc.desc.flags = 0; |
| 315 | desc.fragno = 0; | 330 | desc.fragno = 0; |
| 316 | desc.fraglen = 0; | 331 | desc.fraglen = 0; |
| 332 | |||
| 333 | sg_init_table(desc.frags, 4); | ||
| 334 | |||
| 317 | return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc); | 335 | return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc); |
| 318 | } | 336 | } |
| 319 | 337 | ||
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index a0d9faa59cb5..1c6eda5077c1 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c | |||
| @@ -63,7 +63,6 @@ | |||
| 63 | #include <linux/jiffies.h> | 63 | #include <linux/jiffies.h> |
| 64 | #include <linux/sunrpc/gss_krb5.h> | 64 | #include <linux/sunrpc/gss_krb5.h> |
| 65 | #include <linux/random.h> | 65 | #include <linux/random.h> |
| 66 | #include <asm/scatterlist.h> | ||
| 67 | #include <linux/crypto.h> | 66 | #include <linux/crypto.h> |
| 68 | 67 | ||
| 69 | #ifdef RPC_DEBUG | 68 | #ifdef RPC_DEBUG |
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index 8bd074df27d3..3bdc527ee64a 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c | |||
| @@ -4,7 +4,6 @@ | |||
| 4 | #include <linux/sunrpc/gss_krb5.h> | 4 | #include <linux/sunrpc/gss_krb5.h> |
| 5 | #include <linux/random.h> | 5 | #include <linux/random.h> |
| 6 | #include <linux/pagemap.h> | 6 | #include <linux/pagemap.h> |
| 7 | #include <asm/scatterlist.h> | ||
| 8 | #include <linux/crypto.h> | 7 | #include <linux/crypto.h> |
| 9 | 8 | ||
| 10 | #ifdef RPC_DEBUG | 9 | #ifdef RPC_DEBUG |
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c index d158635de6c0..abf17ce2e3b1 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_seal.c +++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c | |||
| @@ -173,7 +173,7 @@ make_spkm3_checksum(s32 cksumtype, struct xdr_netobj *key, char *header, | |||
| 173 | if (err) | 173 | if (err) |
| 174 | goto out; | 174 | goto out; |
| 175 | 175 | ||
| 176 | sg_set_buf(sg, header, hdrlen); | 176 | sg_init_one(sg, header, hdrlen); |
| 177 | crypto_hash_update(&desc, sg, sg->length); | 177 | crypto_hash_update(&desc, sg, sg->length); |
| 178 | 178 | ||
| 179 | xdr_process_buf(body, body_offset, body->len - body_offset, | 179 | xdr_process_buf(body, body_offset, body->len - body_offset, |
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c index 864b541bbf51..2be714e9b382 100644 --- a/net/sunrpc/sysctl.c +++ b/net/sunrpc/sysctl.c | |||
| @@ -87,9 +87,8 @@ proc_dodebug(ctl_table *table, int write, struct file *file, | |||
| 87 | left--, s++; | 87 | left--, s++; |
| 88 | *(unsigned int *) table->data = value; | 88 | *(unsigned int *) table->data = value; |
| 89 | /* Display the RPC tasks on writing to rpc_debug */ | 89 | /* Display the RPC tasks on writing to rpc_debug */ |
| 90 | if (table->ctl_name == CTL_RPCDEBUG) { | 90 | if (strcmp(table->procname, "rpc_debug") == 0) |
| 91 | rpc_show_tasks(); | 91 | rpc_show_tasks(); |
| 92 | } | ||
| 93 | } else { | 92 | } else { |
| 94 | if (!access_ok(VERIFY_WRITE, buffer, left)) | 93 | if (!access_ok(VERIFY_WRITE, buffer, left)) |
| 95 | return -EFAULT; | 94 | return -EFAULT; |
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 6a59180e1667..fdc5e6d7562b 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c | |||
| @@ -1030,6 +1030,8 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, | |||
| 1030 | unsigned page_len, thislen, page_offset; | 1030 | unsigned page_len, thislen, page_offset; |
| 1031 | struct scatterlist sg[1]; | 1031 | struct scatterlist sg[1]; |
| 1032 | 1032 | ||
| 1033 | sg_init_table(sg, 1); | ||
| 1034 | |||
| 1033 | if (offset >= buf->head[0].iov_len) { | 1035 | if (offset >= buf->head[0].iov_len) { |
| 1034 | offset -= buf->head[0].iov_len; | 1036 | offset -= buf->head[0].iov_len; |
| 1035 | } else { | 1037 | } else { |
| @@ -1059,9 +1061,7 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, | |||
| 1059 | do { | 1061 | do { |
| 1060 | if (thislen > page_len) | 1062 | if (thislen > page_len) |
| 1061 | thislen = page_len; | 1063 | thislen = page_len; |
| 1062 | sg->page = buf->pages[i]; | 1064 | sg_set_page(sg, buf->pages[i], thislen, page_offset); |
| 1063 | sg->offset = page_offset; | ||
| 1064 | sg->length = thislen; | ||
| 1065 | ret = actor(sg, data); | 1065 | ret = actor(sg, data); |
| 1066 | if (ret) | 1066 | if (ret) |
| 1067 | goto out; | 1067 | goto out; |
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 12db63580427..9e11ce715958 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c | |||
| @@ -181,7 +181,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, | |||
| 181 | struct rpcrdma_read_chunk *cur_rchunk = NULL; | 181 | struct rpcrdma_read_chunk *cur_rchunk = NULL; |
| 182 | struct rpcrdma_write_array *warray = NULL; | 182 | struct rpcrdma_write_array *warray = NULL; |
| 183 | struct rpcrdma_write_chunk *cur_wchunk = NULL; | 183 | struct rpcrdma_write_chunk *cur_wchunk = NULL; |
| 184 | u32 *iptr = headerp->rm_body.rm_chunks; | 184 | __be32 *iptr = headerp->rm_body.rm_chunks; |
| 185 | 185 | ||
| 186 | if (type == rpcrdma_readch || type == rpcrdma_areadch) { | 186 | if (type == rpcrdma_readch || type == rpcrdma_areadch) { |
| 187 | /* a read chunk - server will RDMA Read our memory */ | 187 | /* a read chunk - server will RDMA Read our memory */ |
| @@ -217,25 +217,25 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, | |||
| 217 | cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey); | 217 | cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey); |
| 218 | cur_rchunk->rc_target.rs_length = htonl(seg->mr_len); | 218 | cur_rchunk->rc_target.rs_length = htonl(seg->mr_len); |
| 219 | xdr_encode_hyper( | 219 | xdr_encode_hyper( |
| 220 | (u32 *)&cur_rchunk->rc_target.rs_offset, | 220 | (__be32 *)&cur_rchunk->rc_target.rs_offset, |
| 221 | seg->mr_base); | 221 | seg->mr_base); |
| 222 | dprintk("RPC: %s: read chunk " | 222 | dprintk("RPC: %s: read chunk " |
| 223 | "elem %d@0x%llx:0x%x pos %d (%s)\n", __func__, | 223 | "elem %d@0x%llx:0x%x pos %d (%s)\n", __func__, |
| 224 | seg->mr_len, seg->mr_base, seg->mr_rkey, pos, | 224 | seg->mr_len, (unsigned long long)seg->mr_base, |
| 225 | n < nsegs ? "more" : "last"); | 225 | seg->mr_rkey, pos, n < nsegs ? "more" : "last"); |
| 226 | cur_rchunk++; | 226 | cur_rchunk++; |
| 227 | r_xprt->rx_stats.read_chunk_count++; | 227 | r_xprt->rx_stats.read_chunk_count++; |
| 228 | } else { /* write/reply */ | 228 | } else { /* write/reply */ |
| 229 | cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey); | 229 | cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey); |
| 230 | cur_wchunk->wc_target.rs_length = htonl(seg->mr_len); | 230 | cur_wchunk->wc_target.rs_length = htonl(seg->mr_len); |
| 231 | xdr_encode_hyper( | 231 | xdr_encode_hyper( |
| 232 | (u32 *)&cur_wchunk->wc_target.rs_offset, | 232 | (__be32 *)&cur_wchunk->wc_target.rs_offset, |
| 233 | seg->mr_base); | 233 | seg->mr_base); |
| 234 | dprintk("RPC: %s: %s chunk " | 234 | dprintk("RPC: %s: %s chunk " |
| 235 | "elem %d@0x%llx:0x%x (%s)\n", __func__, | 235 | "elem %d@0x%llx:0x%x (%s)\n", __func__, |
| 236 | (type == rpcrdma_replych) ? "reply" : "write", | 236 | (type == rpcrdma_replych) ? "reply" : "write", |
| 237 | seg->mr_len, seg->mr_base, seg->mr_rkey, | 237 | seg->mr_len, (unsigned long long)seg->mr_base, |
| 238 | n < nsegs ? "more" : "last"); | 238 | seg->mr_rkey, n < nsegs ? "more" : "last"); |
| 239 | cur_wchunk++; | 239 | cur_wchunk++; |
| 240 | if (type == rpcrdma_replych) | 240 | if (type == rpcrdma_replych) |
| 241 | r_xprt->rx_stats.reply_chunk_count++; | 241 | r_xprt->rx_stats.reply_chunk_count++; |
| @@ -257,14 +257,14 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, | |||
| 257 | * finish off header. If write, marshal discrim and nchunks. | 257 | * finish off header. If write, marshal discrim and nchunks. |
| 258 | */ | 258 | */ |
| 259 | if (cur_rchunk) { | 259 | if (cur_rchunk) { |
| 260 | iptr = (u32 *) cur_rchunk; | 260 | iptr = (__be32 *) cur_rchunk; |
| 261 | *iptr++ = xdr_zero; /* finish the read chunk list */ | 261 | *iptr++ = xdr_zero; /* finish the read chunk list */ |
| 262 | *iptr++ = xdr_zero; /* encode a NULL write chunk list */ | 262 | *iptr++ = xdr_zero; /* encode a NULL write chunk list */ |
| 263 | *iptr++ = xdr_zero; /* encode a NULL reply chunk */ | 263 | *iptr++ = xdr_zero; /* encode a NULL reply chunk */ |
| 264 | } else { | 264 | } else { |
| 265 | warray->wc_discrim = xdr_one; | 265 | warray->wc_discrim = xdr_one; |
| 266 | warray->wc_nchunks = htonl(nchunks); | 266 | warray->wc_nchunks = htonl(nchunks); |
| 267 | iptr = (u32 *) cur_wchunk; | 267 | iptr = (__be32 *) cur_wchunk; |
| 268 | if (type == rpcrdma_writech) { | 268 | if (type == rpcrdma_writech) { |
| 269 | *iptr++ = xdr_zero; /* finish the write chunk list */ | 269 | *iptr++ = xdr_zero; /* finish the write chunk list */ |
| 270 | *iptr++ = xdr_zero; /* encode a NULL reply chunk */ | 270 | *iptr++ = xdr_zero; /* encode a NULL reply chunk */ |
| @@ -559,7 +559,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst) | |||
| 559 | * RDMA'd by server. See map at rpcrdma_create_chunks()! :-) | 559 | * RDMA'd by server. See map at rpcrdma_create_chunks()! :-) |
| 560 | */ | 560 | */ |
| 561 | static int | 561 | static int |
| 562 | rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp) | 562 | rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, __be32 **iptrp) |
| 563 | { | 563 | { |
| 564 | unsigned int i, total_len; | 564 | unsigned int i, total_len; |
| 565 | struct rpcrdma_write_chunk *cur_wchunk; | 565 | struct rpcrdma_write_chunk *cur_wchunk; |
| @@ -573,11 +573,11 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp) | |||
| 573 | struct rpcrdma_segment *seg = &cur_wchunk->wc_target; | 573 | struct rpcrdma_segment *seg = &cur_wchunk->wc_target; |
| 574 | ifdebug(FACILITY) { | 574 | ifdebug(FACILITY) { |
| 575 | u64 off; | 575 | u64 off; |
| 576 | xdr_decode_hyper((u32 *)&seg->rs_offset, &off); | 576 | xdr_decode_hyper((__be32 *)&seg->rs_offset, &off); |
| 577 | dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n", | 577 | dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n", |
| 578 | __func__, | 578 | __func__, |
| 579 | ntohl(seg->rs_length), | 579 | ntohl(seg->rs_length), |
| 580 | off, | 580 | (unsigned long long)off, |
| 581 | ntohl(seg->rs_handle)); | 581 | ntohl(seg->rs_handle)); |
| 582 | } | 582 | } |
| 583 | total_len += ntohl(seg->rs_length); | 583 | total_len += ntohl(seg->rs_length); |
| @@ -585,7 +585,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp) | |||
| 585 | } | 585 | } |
| 586 | /* check and adjust for properly terminated write chunk */ | 586 | /* check and adjust for properly terminated write chunk */ |
| 587 | if (wrchunk) { | 587 | if (wrchunk) { |
| 588 | u32 *w = (u32 *) cur_wchunk; | 588 | __be32 *w = (__be32 *) cur_wchunk; |
| 589 | if (*w++ != xdr_zero) | 589 | if (*w++ != xdr_zero) |
| 590 | return -1; | 590 | return -1; |
| 591 | cur_wchunk = (struct rpcrdma_write_chunk *) w; | 591 | cur_wchunk = (struct rpcrdma_write_chunk *) w; |
| @@ -593,7 +593,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp) | |||
| 593 | if ((char *) cur_wchunk > rep->rr_base + rep->rr_len) | 593 | if ((char *) cur_wchunk > rep->rr_base + rep->rr_len) |
| 594 | return -1; | 594 | return -1; |
| 595 | 595 | ||
| 596 | *iptrp = (u32 *) cur_wchunk; | 596 | *iptrp = (__be32 *) cur_wchunk; |
| 597 | return total_len; | 597 | return total_len; |
| 598 | } | 598 | } |
| 599 | 599 | ||
| @@ -721,7 +721,7 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep) | |||
| 721 | struct rpc_rqst *rqst; | 721 | struct rpc_rqst *rqst; |
| 722 | struct rpc_xprt *xprt = rep->rr_xprt; | 722 | struct rpc_xprt *xprt = rep->rr_xprt; |
| 723 | struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); | 723 | struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); |
| 724 | u32 *iptr; | 724 | __be32 *iptr; |
| 725 | int i, rdmalen, status; | 725 | int i, rdmalen, status; |
| 726 | 726 | ||
| 727 | /* Check status. If bad, signal disconnect and return rep to pool */ | 727 | /* Check status. If bad, signal disconnect and return rep to pool */ |
| @@ -801,7 +801,7 @@ repost: | |||
| 801 | r_xprt->rx_stats.total_rdma_reply += rdmalen; | 801 | r_xprt->rx_stats.total_rdma_reply += rdmalen; |
| 802 | } else { | 802 | } else { |
| 803 | /* else ordinary inline */ | 803 | /* else ordinary inline */ |
| 804 | iptr = (u32 *)((unsigned char *)headerp + 28); | 804 | iptr = (__be32 *)((unsigned char *)headerp + 28); |
| 805 | rep->rr_len -= 28; /*sizeof *headerp;*/ | 805 | rep->rr_len -= 28; /*sizeof *headerp;*/ |
| 806 | status = rep->rr_len; | 806 | status = rep->rr_len; |
| 807 | } | 807 | } |
| @@ -816,7 +816,7 @@ repost: | |||
| 816 | headerp->rm_body.rm_chunks[2] != xdr_one || | 816 | headerp->rm_body.rm_chunks[2] != xdr_one || |
| 817 | req->rl_nchunks == 0) | 817 | req->rl_nchunks == 0) |
| 818 | goto badheader; | 818 | goto badheader; |
| 819 | iptr = (u32 *)((unsigned char *)headerp + 28); | 819 | iptr = (__be32 *)((unsigned char *)headerp + 28); |
| 820 | rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr); | 820 | rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr); |
| 821 | if (rdmalen < 0) | 821 | if (rdmalen < 0) |
| 822 | goto badheader; | 822 | goto badheader; |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index e36b4b5a5222..6b792265dc06 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
| @@ -201,7 +201,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol) | |||
| 201 | return -EPROTOTYPE; | 201 | return -EPROTOTYPE; |
| 202 | } | 202 | } |
| 203 | 203 | ||
| 204 | sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, 1); | 204 | sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto); |
| 205 | if (!sk) { | 205 | if (!sk) { |
| 206 | tipc_deleteport(ref); | 206 | tipc_deleteport(ref); |
| 207 | return -ENOMEM; | 207 | return -ENOMEM; |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 9163ec526c2a..515e7a692f9b 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
| @@ -602,7 +602,7 @@ static struct sock * unix_create1(struct net *net, struct socket *sock) | |||
| 602 | if (atomic_read(&unix_nr_socks) >= 2*get_max_files()) | 602 | if (atomic_read(&unix_nr_socks) >= 2*get_max_files()) |
| 603 | goto out; | 603 | goto out; |
| 604 | 604 | ||
| 605 | sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, 1); | 605 | sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto); |
| 606 | if (!sk) | 606 | if (!sk) |
| 607 | goto out; | 607 | goto out; |
| 608 | 608 | ||
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index fc416f9606a9..92cfe8e3e0b8 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c | |||
| @@ -472,7 +472,7 @@ static struct proto x25_proto = { | |||
| 472 | static struct sock *x25_alloc_socket(struct net *net) | 472 | static struct sock *x25_alloc_socket(struct net *net) |
| 473 | { | 473 | { |
| 474 | struct x25_sock *x25; | 474 | struct x25_sock *x25; |
| 475 | struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto, 1); | 475 | struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto); |
| 476 | 476 | ||
| 477 | if (!sk) | 477 | if (!sk) |
| 478 | goto out; | 478 | goto out; |
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c index 5ced62c19c63..1686f64c4352 100644 --- a/net/xfrm/xfrm_algo.c +++ b/net/xfrm/xfrm_algo.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
| 14 | #include <linux/pfkeyv2.h> | 14 | #include <linux/pfkeyv2.h> |
| 15 | #include <linux/crypto.h> | 15 | #include <linux/crypto.h> |
| 16 | #include <linux/scatterlist.h> | ||
| 16 | #include <net/xfrm.h> | 17 | #include <net/xfrm.h> |
| 17 | #if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE) | 18 | #if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE) |
| 18 | #include <net/ah.h> | 19 | #include <net/ah.h> |
| @@ -20,7 +21,6 @@ | |||
| 20 | #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE) | 21 | #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE) |
| 21 | #include <net/esp.h> | 22 | #include <net/esp.h> |
| 22 | #endif | 23 | #endif |
| 23 | #include <asm/scatterlist.h> | ||
| 24 | 24 | ||
| 25 | /* | 25 | /* |
| 26 | * Algorithms supported by IPsec. These entries contain properties which | 26 | * Algorithms supported by IPsec. These entries contain properties which |
| @@ -552,9 +552,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc, | |||
| 552 | if (copy > len) | 552 | if (copy > len) |
| 553 | copy = len; | 553 | copy = len; |
| 554 | 554 | ||
| 555 | sg.page = virt_to_page(skb->data + offset); | 555 | sg_init_one(&sg, skb->data + offset, copy); |
| 556 | sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE; | ||
| 557 | sg.length = copy; | ||
| 558 | 556 | ||
| 559 | err = icv_update(desc, &sg, copy); | 557 | err = icv_update(desc, &sg, copy); |
| 560 | if (unlikely(err)) | 558 | if (unlikely(err)) |
| @@ -577,9 +575,9 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc, | |||
| 577 | if (copy > len) | 575 | if (copy > len) |
| 578 | copy = len; | 576 | copy = len; |
| 579 | 577 | ||
| 580 | sg.page = frag->page; | 578 | sg_init_table(&sg, 1); |
| 581 | sg.offset = frag->page_offset + offset-start; | 579 | sg_set_page(&sg, frag->page, copy, |
| 582 | sg.length = copy; | 580 | frag->page_offset + offset-start); |
| 583 | 581 | ||
| 584 | err = icv_update(desc, &sg, copy); | 582 | err = icv_update(desc, &sg, copy); |
| 585 | if (unlikely(err)) | 583 | if (unlikely(err)) |
