Diffstat (limited to 'net')
31 files changed, 905 insertions, 399 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 3948949a609a..458031bfff55 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -364,6 +364,14 @@ static void vlan_transfer_operstate(const struct net_device *dev, struct net_dev
 	}
 }
 
+/*
+ * vlan network devices have devices nesting below it, and are a special
+ * "super class" of normal network devices; split their locks off into a
+ * separate class since they always nest.
+ */
+static struct lock_class_key vlan_netdev_xmit_lock_key;
+
+
 /* Attach a VLAN device to a mac address (ie Ethernet Card).
  * Returns the device that was created, or NULL if there was
  * an error of some kind.
@@ -460,6 +468,7 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
 
 	new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name,
 			       vlan_setup);
+
 	if (new_dev == NULL)
 		goto out_unlock;
 
@@ -518,6 +527,8 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
 	if (register_netdevice(new_dev))
 		goto out_free_newdev;
 
+	lockdep_set_class(&new_dev->_xmit_lock, &vlan_netdev_xmit_lock_key);
+
 	new_dev->iflink = real_dev->ifindex;
 	vlan_transfer_operstate(real_dev, new_dev);
 	linkwatch_fire_event(new_dev); /* _MUST_ call rfc2863_policy() */
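Note: the vlan.c hunks above use the usual lockdep class-splitting recipe: the nested (VLAN) device's TX lock gets its own lock_class_key, so taking it while the underlying real device's _xmit_lock is held is not reported as recursion on a single lock class. A minimal sketch of that pattern, with generic placeholder names rather than anything taken from this patch:

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	/* one key per class of lock that legitimately nests inside another */
	static struct lock_class_key nested_xmit_lock_key;

	static void mark_nested_xmit_lock(spinlock_t *xmit_lock)
	{
		spin_lock_init(xmit_lock);
		/* put this lock into its own lockdep class */
		lockdep_set_class(xmit_lock, &nested_xmit_lock_key);
	}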
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 87a454f5c89c..121bf6f49148 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -23,6 +23,7 @@
 #include <linux/if.h> /* for IFF_UP */
 #include <linux/inetdevice.h>
 #include <linux/bitops.h>
+#include <linux/poison.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/rcupdate.h>
@@ -266,7 +267,7 @@ static void clip_neigh_destroy(struct neighbour *neigh)
 	DPRINTK("clip_neigh_destroy (neigh %p)\n", neigh);
 	if (NEIGH2ENTRY(neigh)->vccs)
 		printk(KERN_CRIT "clip_neigh_destroy: vccs != NULL !!!\n");
-	NEIGH2ENTRY(neigh)->vccs = (void *) 0xdeadbeef;
+	NEIGH2ENTRY(neigh)->vccs = (void *) NEIGHBOR_DEAD;
 }
 
 static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb)
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index 9be5c15e63d3..136c3aefa9de 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -103,11 +103,13 @@ int ax25_rebuild_header(struct sk_buff *skb)
 {
 	struct sk_buff *ourskb;
 	unsigned char *bp = skb->data;
-	struct net_device *dev;
+	ax25_route *route;
+	struct net_device *dev = NULL;
 	ax25_address *src, *dst;
+	ax25_digi *digipeat = NULL;
 	ax25_dev *ax25_dev;
-	ax25_route _route, *route = &_route;
 	ax25_cb *ax25;
+	char ip_mode = ' ';
 
 	dst = (ax25_address *)(bp + 1);
 	src = (ax25_address *)(bp + 8);
@@ -115,8 +117,12 @@ int ax25_rebuild_header(struct sk_buff *skb)
 	if (arp_find(bp + 1, skb))
 		return 1;
 
-	route = ax25_rt_find_route(route, dst, NULL);
-	dev = route->dev;
+	route = ax25_get_route(dst, NULL);
+	if (route) {
+		digipeat = route->digipeat;
+		dev = route->dev;
+		ip_mode = route->ip_mode;
+	};
 
 	if (dev == NULL)
 		dev = skb->dev;
@@ -126,7 +132,7 @@ int ax25_rebuild_header(struct sk_buff *skb)
 	}
 
 	if (bp[16] == AX25_P_IP) {
-		if (route->ip_mode == 'V' || (route->ip_mode == ' ' && ax25_dev->values[AX25_VALUES_IPDEFMODE])) {
+		if (ip_mode == 'V' || (ip_mode == ' ' && ax25_dev->values[AX25_VALUES_IPDEFMODE])) {
 			/*
 			 * We copy the buffer and release the original thereby
 			 * keeping it straight
@@ -172,7 +178,7 @@ int ax25_rebuild_header(struct sk_buff *skb)
 				ourskb,
 				ax25_dev->values[AX25_VALUES_PACLEN],
 				&src_c,
-				&dst_c, route->digipeat, dev);
+				&dst_c, digipeat, dev);
 			if (ax25) {
 				ax25_cb_put(ax25);
 			}
@@ -190,7 +196,7 @@ int ax25_rebuild_header(struct sk_buff *skb)
 
 	skb_pull(skb, AX25_KISS_HEADER_LEN);
 
-	if (route->digipeat != NULL) {
+	if (digipeat != NULL) {
 		if ((ourskb = ax25_rt_build_path(skb, src, dst, route->digipeat)) == NULL) {
 			kfree_skb(skb);
 			goto put;
@@ -202,7 +208,8 @@ int ax25_rebuild_header(struct sk_buff *skb)
 	ax25_queue_xmit(skb, dev);
 
 put:
-	ax25_put_route(route);
+	if (route)
+		ax25_put_route(route);
 
 	return 1;
 }
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index 5ac98250797b..51b7bdaf27eb 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -41,8 +41,6 @@
 static ax25_route *ax25_route_list;
 static DEFINE_RWLOCK(ax25_route_lock);
 
-static ax25_route *ax25_get_route(ax25_address *, struct net_device *);
-
 void ax25_rt_device_down(struct net_device *dev)
 {
 	ax25_route *s, *t, *ax25_rt;
@@ -115,7 +113,7 @@ static int ax25_rt_add(struct ax25_routes_struct *route)
 		return -ENOMEM;
 	}
 
-	atomic_set(&ax25_rt->ref, 0);
+	atomic_set(&ax25_rt->refcount, 1);
 	ax25_rt->callsign = route->dest_addr;
 	ax25_rt->dev = ax25_dev->dev;
 	ax25_rt->digipeat = NULL;
@@ -140,23 +138,10 @@ static int ax25_rt_add(struct ax25_routes_struct *route)
 	return 0;
 }
 
-static void ax25_rt_destroy(ax25_route *ax25_rt)
+void __ax25_put_route(ax25_route *ax25_rt)
 {
-	if (atomic_read(&ax25_rt->ref) == 0) {
-		kfree(ax25_rt->digipeat);
-		kfree(ax25_rt);
-		return;
-	}
-
-	/*
-	 * Uh... Route is still in use; we can't yet destroy it. Retry later.
-	 */
-	init_timer(&ax25_rt->timer);
-	ax25_rt->timer.data = (unsigned long) ax25_rt;
-	ax25_rt->timer.function = (void *) ax25_rt_destroy;
-	ax25_rt->timer.expires = jiffies + 5 * HZ;
-
-	add_timer(&ax25_rt->timer);
+	kfree(ax25_rt->digipeat);
+	kfree(ax25_rt);
 }
 
 static int ax25_rt_del(struct ax25_routes_struct *route)
@@ -177,12 +162,12 @@ static int ax25_rt_del(struct ax25_routes_struct *route)
 		    ax25cmp(&route->dest_addr, &s->callsign) == 0) {
 			if (ax25_route_list == s) {
 				ax25_route_list = s->next;
-				ax25_rt_destroy(s);
+				ax25_put_route(s);
 			} else {
 				for (t = ax25_route_list; t != NULL; t = t->next) {
 					if (t->next == s) {
 						t->next = s->next;
-						ax25_rt_destroy(s);
+						ax25_put_route(s);
 						break;
 					}
 				}
@@ -362,7 +347,7 @@ struct file_operations ax25_route_fops = {
  *
  *	Only routes with a reference count of zero can be destroyed.
  */
-static ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
+ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
 {
 	ax25_route *ax25_spe_rt = NULL;
 	ax25_route *ax25_def_rt = NULL;
@@ -392,7 +377,7 @@ static ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
 		ax25_rt = ax25_spe_rt;
 
 	if (ax25_rt != NULL)
-		atomic_inc(&ax25_rt->ref);
+		ax25_hold_route(ax25_rt);
 
 	read_unlock(&ax25_route_lock);
 
@@ -467,24 +452,6 @@ put:
 	return 0;
 }
 
-ax25_route *ax25_rt_find_route(ax25_route * route, ax25_address *addr,
-	struct net_device *dev)
-{
-	ax25_route *ax25_rt;
-
-	if ((ax25_rt = ax25_get_route(addr, dev)))
-		return ax25_rt;
-
-	route->next = NULL;
-	atomic_set(&route->ref, 1);
-	route->callsign = *addr;
-	route->dev = dev;
-	route->digipeat = NULL;
-	route->ip_mode = ' ';
-
-	return route;
-}
-
 struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src,
 	ax25_address *dest, ax25_digi *digi)
 {
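Note: the route refcounting introduced above (refcount initialised to 1, ax25_put_route() replacing ax25_rt_destroy(), __ax25_put_route() doing the actual freeing) presumably pairs with small inline helpers in include/net/ax25.h. Those helpers are not part of this diff, so the bodies below are an assumed sketch of the standard hold/put pattern, matching how this file uses them:

	/* assumed helpers; only __ax25_put_route() appears in the diff itself */
	static inline void ax25_hold_route(ax25_route *ax25_rt)
	{
		atomic_inc(&ax25_rt->refcount);
	}

	static inline void ax25_put_route(ax25_route *ax25_rt)
	{
		if (atomic_dec_and_test(&ax25_rt->refcount))
			__ax25_put_route(ax25_rt);
	}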
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 51f867062e1d..788ea7a2b744 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -48,7 +48,7 @@
 #define BT_DBG(D...)
 #endif
 
-#define VERSION "2.8"
+#define VERSION "2.10"
 
 /* Bluetooth sockets */
 #define BT_MAX_PROTO 8
@@ -307,13 +307,21 @@ static struct net_proto_family bt_sock_family_ops = {
 
 static int __init bt_init(void)
 {
+	int err;
+
 	BT_INFO("Core ver %s", VERSION);
 
-	sock_register(&bt_sock_family_ops);
+	err = bt_sysfs_init();
+	if (err < 0)
+		return err;
 
-	BT_INFO("HCI device and connection manager initialized");
+	err = sock_register(&bt_sock_family_ops);
+	if (err < 0) {
+		bt_sysfs_cleanup();
+		return err;
+	}
 
-	bt_sysfs_init();
+	BT_INFO("HCI device and connection manager initialized");
 
 	hci_sock_init();
 
@@ -324,9 +332,9 @@ static void __exit bt_exit(void)
 {
 	hci_sock_cleanup();
 
-	bt_sysfs_cleanup();
-
 	sock_unregister(PF_BLUETOOTH);
+
+	bt_sysfs_cleanup();
 }
 
 subsys_initcall(bt_init);
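Note: bt_init()/bt_exit() are reworked above so that each setup step is error-checked and unwound on failure, and so that bt_sysfs_cleanup() only runs after the socket family has been unregistered, i.e. teardown mirrors setup in reverse. A generic sketch of that discipline (setup_a/setup_b and their teardowns are placeholders, not Bluetooth APIs):

	static int __init example_init(void)
	{
		int err;

		err = setup_a();	/* e.g. sysfs class */
		if (err < 0)
			return err;

		err = setup_b();	/* e.g. socket family */
		if (err < 0) {
			teardown_a();	/* unwind what already succeeded */
			return err;
		}

		return 0;
	}

	static void __exit example_exit(void)
	{
		teardown_b();		/* reverse order of example_init() */
		teardown_a();
	}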
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 5c0c2b1ef34a..420ed4d7e57e 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -115,8 +115,8 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle)
 
 static void hci_conn_timeout(unsigned long arg)
 {
-	struct hci_conn *conn = (void *)arg;
+	struct hci_conn *conn = (void *) arg;
 	struct hci_dev *hdev = conn->hdev;
 
 	BT_DBG("conn %p state %d", conn, conn->state);
 
@@ -132,11 +132,13 @@ static void hci_conn_timeout(unsigned long arg)
 	return;
 }
 
-static void hci_conn_init_timer(struct hci_conn *conn)
+static void hci_conn_idle(unsigned long arg)
 {
-	init_timer(&conn->timer);
-	conn->timer.function = hci_conn_timeout;
-	conn->timer.data = (unsigned long)conn;
+	struct hci_conn *conn = (void *) arg;
+
+	BT_DBG("conn %p mode %d", conn, conn->mode);
+
+	hci_conn_enter_sniff_mode(conn);
 }
 
 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
@@ -145,17 +147,27 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
 
 	BT_DBG("%s dst %s", hdev->name, batostr(dst));
 
-	if (!(conn = kmalloc(sizeof(struct hci_conn), GFP_ATOMIC)))
+	conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
+	if (!conn)
 		return NULL;
-	memset(conn, 0, sizeof(struct hci_conn));
 
 	bacpy(&conn->dst, dst);
-	conn->type = type;
 	conn->hdev = hdev;
+	conn->type = type;
+	conn->mode = HCI_CM_ACTIVE;
 	conn->state = BT_OPEN;
 
+	conn->power_save = 1;
+
 	skb_queue_head_init(&conn->data_q);
-	hci_conn_init_timer(conn);
+
+	init_timer(&conn->disc_timer);
+	conn->disc_timer.function = hci_conn_timeout;
+	conn->disc_timer.data = (unsigned long) conn;
+
+	init_timer(&conn->idle_timer);
+	conn->idle_timer.function = hci_conn_idle;
+	conn->idle_timer.data = (unsigned long) conn;
 
 	atomic_set(&conn->refcnt, 0);
 
@@ -178,7 +190,9 @@ int hci_conn_del(struct hci_conn *conn)
 
 	BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);
 
-	hci_conn_del_timer(conn);
+	del_timer(&conn->idle_timer);
+
+	del_timer(&conn->disc_timer);
 
 	if (conn->type == SCO_LINK) {
 		struct hci_conn *acl = conn->link;
@@ -364,6 +378,70 @@ int hci_conn_switch_role(struct hci_conn *conn, uint8_t role)
 }
 EXPORT_SYMBOL(hci_conn_switch_role);
 
+/* Enter active mode */
+void hci_conn_enter_active_mode(struct hci_conn *conn)
+{
+	struct hci_dev *hdev = conn->hdev;
+
+	BT_DBG("conn %p mode %d", conn, conn->mode);
+
+	if (test_bit(HCI_RAW, &hdev->flags))
+		return;
+
+	if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
+		goto timer;
+
+	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
+		struct hci_cp_exit_sniff_mode cp;
+		cp.handle = __cpu_to_le16(conn->handle);
+		hci_send_cmd(hdev, OGF_LINK_POLICY,
+				OCF_EXIT_SNIFF_MODE, sizeof(cp), &cp);
+	}
+
+timer:
+	if (hdev->idle_timeout > 0)
+		mod_timer(&conn->idle_timer,
+			jiffies + msecs_to_jiffies(hdev->idle_timeout));
+}
+
+/* Enter sniff mode */
+void hci_conn_enter_sniff_mode(struct hci_conn *conn)
+{
+	struct hci_dev *hdev = conn->hdev;
+
+	BT_DBG("conn %p mode %d", conn, conn->mode);
+
+	if (test_bit(HCI_RAW, &hdev->flags))
+		return;
+
+	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
+		return;
+
+	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
+		return;
+
+	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
+		struct hci_cp_sniff_subrate cp;
+		cp.handle = __cpu_to_le16(conn->handle);
+		cp.max_latency = __constant_cpu_to_le16(0);
+		cp.min_remote_timeout = __constant_cpu_to_le16(0);
+		cp.min_local_timeout = __constant_cpu_to_le16(0);
+		hci_send_cmd(hdev, OGF_LINK_POLICY,
+				OCF_SNIFF_SUBRATE, sizeof(cp), &cp);
+	}
+
+	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
+		struct hci_cp_sniff_mode cp;
+		cp.handle = __cpu_to_le16(conn->handle);
+		cp.max_interval = __cpu_to_le16(hdev->sniff_max_interval);
+		cp.min_interval = __cpu_to_le16(hdev->sniff_min_interval);
+		cp.attempt = __constant_cpu_to_le16(4);
+		cp.timeout = __constant_cpu_to_le16(1);
+		hci_send_cmd(hdev, OGF_LINK_POLICY,
+				OCF_SNIFF_MODE, sizeof(cp), &cp);
+	}
+}
+
 /* Drop all connection on the device */
 void hci_conn_hash_flush(struct hci_dev *hdev)
 {
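Note: each connection now carries two timers: disc_timer (the old disconnect timeout) and idle_timer, which pushes an ACL link into sniff mode after hdev->idle_timeout ms of inactivity and is re-armed from hci_conn_enter_active_mode() on traffic. A trimmed sketch of that 2.6-era timer usage, mirroring the hunks above (the example_* names are illustrative, not kernel APIs):

	static void example_idle(unsigned long arg)
	{
		struct hci_conn *conn = (void *) arg;

		hci_conn_enter_sniff_mode(conn);	/* runs only after a quiet period */
	}

	static void example_setup_idle(struct hci_conn *conn)
	{
		init_timer(&conn->idle_timer);
		conn->idle_timer.function = example_idle;
		conn->idle_timer.data = (unsigned long) conn;
	}

	static void example_touch_idle(struct hci_conn *conn, __u16 idle_timeout_ms)
	{
		/* every TX/RX pushes the deadline back, so it only fires when idle */
		mod_timer(&conn->idle_timer, jiffies + msecs_to_jiffies(idle_timeout_ms));
	}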
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index f67240beb0dd..54e8e5ea2154 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -411,7 +411,7 @@ int hci_inquiry(void __user *arg)
 	}
 	hci_dev_unlock_bh(hdev);
 
-	timeo = ir.length * 2 * HZ;
+	timeo = ir.length * msecs_to_jiffies(2000);
 	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
 		goto done;
 
@@ -479,7 +479,8 @@ int hci_dev_open(__u16 dev)
 		set_bit(HCI_INIT, &hdev->flags);
 
 		//__hci_request(hdev, hci_reset_req, 0, HZ);
-		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
+		ret = __hci_request(hdev, hci_init_req, 0,
+					msecs_to_jiffies(HCI_INIT_TIMEOUT));
 
 		clear_bit(HCI_INIT, &hdev->flags);
 	}
@@ -546,7 +547,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 	atomic_set(&hdev->cmd_cnt, 1);
 	if (!test_bit(HCI_RAW, &hdev->flags)) {
 		set_bit(HCI_INIT, &hdev->flags);
-		__hci_request(hdev, hci_reset_req, 0, HZ/4);
+		__hci_request(hdev, hci_reset_req, 0,
+					msecs_to_jiffies(250));
 		clear_bit(HCI_INIT, &hdev->flags);
 	}
 
@@ -619,7 +621,8 @@ int hci_dev_reset(__u16 dev)
 	hdev->acl_cnt = 0; hdev->sco_cnt = 0;
 
 	if (!test_bit(HCI_RAW, &hdev->flags))
-		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
+		ret = __hci_request(hdev, hci_reset_req, 0,
+					msecs_to_jiffies(HCI_INIT_TIMEOUT));
 
 done:
 	tasklet_enable(&hdev->tx_task);
@@ -657,7 +660,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
 
 	switch (cmd) {
 	case HCISETAUTH:
-		err = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
+		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
+					msecs_to_jiffies(HCI_INIT_TIMEOUT));
 		break;
 
 	case HCISETENCRYPT:
@@ -668,18 +672,19 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
 
 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
 			/* Auth must be enabled first */
-			err = hci_request(hdev, hci_auth_req,
-					dr.dev_opt, HCI_INIT_TIMEOUT);
+			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
+					msecs_to_jiffies(HCI_INIT_TIMEOUT));
 			if (err)
 				break;
 		}
 
-		err = hci_request(hdev, hci_encrypt_req,
-				dr.dev_opt, HCI_INIT_TIMEOUT);
+		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
+				msecs_to_jiffies(HCI_INIT_TIMEOUT));
 		break;
 
 	case HCISETSCAN:
-		err = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
+		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
+					msecs_to_jiffies(HCI_INIT_TIMEOUT));
 		break;
 
 	case HCISETPTYPE:
@@ -812,8 +817,8 @@ void hci_free_dev(struct hci_dev *hdev)
 {
 	skb_queue_purge(&hdev->driver_init);
 
-	/* will free via class release */
-	class_device_put(&hdev->class_dev);
+	/* will free via device release */
+	put_device(&hdev->dev);
 }
 EXPORT_SYMBOL(hci_free_dev);
 
@@ -848,6 +853,10 @@ int hci_register_dev(struct hci_dev *hdev)
 	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
 	hdev->link_mode = (HCI_LM_ACCEPT);
 
+	hdev->idle_timeout = 0;
+	hdev->sniff_max_interval = 800;
+	hdev->sniff_min_interval = 80;
+
 	tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
 	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
 	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
@@ -1220,6 +1229,9 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
 	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
 			BT_DBG("skb %p len %d", skb, skb->len);
+
+			hci_conn_enter_active_mode(conn);
+
 			hci_send_frame(skb);
 			hdev->acl_last_tx = jiffies;
 
@@ -1298,6 +1310,8 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
 	if (conn) {
 		register struct hci_proto *hp;
 
+		hci_conn_enter_active_mode(conn);
+
 		/* Send to upper protocol */
 		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
 			hp->recv_acldata(conn, skb, flags);
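Note: the timeout changes above replace raw HZ arithmetic with msecs_to_jiffies(), so the wall-clock value no longer depends on CONFIG_HZ; this assumes HCI_INIT_TIMEOUT is (re)defined in milliseconds elsewhere in the same series. An illustrative comparison (EXAMPLE_TIMEOUT_MS is a stand-in, not a real HCI define):

	#define EXAMPLE_TIMEOUT_MS	10000

	/* old style: correct only for the HZ value the author had in mind */
	unsigned long t_old = 10 * HZ;

	/* new style: always ~10 s of wall time regardless of CONFIG_HZ */
	unsigned long t_new = msecs_to_jiffies(EXAMPLE_TIMEOUT_MS);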
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 618bacee1b1c..3896dabab11d 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -83,6 +83,8 @@ static void hci_cc_link_policy(struct hci_dev *hdev, __u16 ocf, struct sk_buff *
 {
 	struct hci_conn *conn;
 	struct hci_rp_role_discovery *rd;
+	struct hci_rp_write_link_policy *lp;
+	void *sent;
 
 	BT_DBG("%s ocf 0x%x", hdev->name, ocf);
 
@@ -106,6 +108,27 @@ static void hci_cc_link_policy(struct hci_dev *hdev, __u16 ocf, struct sk_buff *
 		hci_dev_unlock(hdev);
 		break;
 
+	case OCF_WRITE_LINK_POLICY:
+		sent = hci_sent_cmd_data(hdev, OGF_LINK_POLICY, OCF_WRITE_LINK_POLICY);
+		if (!sent)
+			break;
+
+		lp = (struct hci_rp_write_link_policy *) skb->data;
+
+		if (lp->status)
+			break;
+
+		hci_dev_lock(hdev);
+
+		conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(lp->handle));
+		if (conn) {
+			__le16 policy = get_unaligned((__le16 *) (sent + 2));
+			conn->link_policy = __le16_to_cpu(policy);
+		}
+
+		hci_dev_unlock(hdev);
+		break;
+
 	default:
 		BT_DBG("%s: Command complete: ogf LINK_POLICY ocf %x",
 			hdev->name, ocf);
@@ -274,7 +297,7 @@ static void hci_cc_host_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb
 /* Command Complete OGF INFO_PARAM */
 static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb)
 {
-	struct hci_rp_read_loc_features *lf;
+	struct hci_rp_read_local_features *lf;
 	struct hci_rp_read_buffer_size *bs;
 	struct hci_rp_read_bd_addr *ba;
 
@@ -282,7 +305,7 @@ static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *s
 
 	switch (ocf) {
 	case OCF_READ_LOCAL_FEATURES:
-		lf = (struct hci_rp_read_loc_features *) skb->data;
+		lf = (struct hci_rp_read_local_features *) skb->data;
 
 		if (lf->status) {
 			BT_DBG("%s READ_LOCAL_FEATURES failed %d", hdev->name, lf->status);
@@ -319,9 +342,17 @@ static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *s
 		}
 
 		hdev->acl_mtu = __le16_to_cpu(bs->acl_mtu);
-		hdev->sco_mtu = bs->sco_mtu ? bs->sco_mtu : 64;
-		hdev->acl_pkts = hdev->acl_cnt = __le16_to_cpu(bs->acl_max_pkt);
-		hdev->sco_pkts = hdev->sco_cnt = __le16_to_cpu(bs->sco_max_pkt);
+		hdev->sco_mtu = bs->sco_mtu;
+		hdev->acl_pkts = __le16_to_cpu(bs->acl_max_pkt);
+		hdev->sco_pkts = __le16_to_cpu(bs->sco_max_pkt);
+
+		if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
+			hdev->sco_mtu = 64;
+			hdev->sco_pkts = 8;
+		}
+
+		hdev->acl_cnt = hdev->acl_pkts;
+		hdev->sco_cnt = hdev->sco_pkts;
 
 		BT_DBG("%s mtu: acl %d, sco %d max_pkt: acl %d, sco %d", hdev->name,
 			hdev->acl_mtu, hdev->sco_mtu, hdev->acl_pkts, hdev->sco_pkts);
@@ -439,8 +470,46 @@ static void hci_cs_link_policy(struct hci_dev *hdev, __u16 ocf, __u8 status)
 	BT_DBG("%s ocf 0x%x", hdev->name, ocf);
 
 	switch (ocf) {
+	case OCF_SNIFF_MODE:
+		if (status) {
+			struct hci_conn *conn;
+			struct hci_cp_sniff_mode *cp = hci_sent_cmd_data(hdev, OGF_LINK_POLICY, OCF_SNIFF_MODE);
+
+			if (!cp)
+				break;
+
+			hci_dev_lock(hdev);
+
+			conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
+			if (conn) {
+				clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
+			}
+
+			hci_dev_unlock(hdev);
+		}
+		break;
+
+	case OCF_EXIT_SNIFF_MODE:
+		if (status) {
+			struct hci_conn *conn;
+			struct hci_cp_exit_sniff_mode *cp = hci_sent_cmd_data(hdev, OGF_LINK_POLICY, OCF_EXIT_SNIFF_MODE);
+
+			if (!cp)
+				break;
+
+			hci_dev_lock(hdev);
+
+			conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
+			if (conn) {
+				clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
+			}
+
+			hci_dev_unlock(hdev);
+		}
+		break;
+
 	default:
-		BT_DBG("%s Command status: ogf HOST_POLICY ocf %x", hdev->name, ocf);
+		BT_DBG("%s Command status: ogf LINK_POLICY ocf %x", hdev->name, ocf);
 		break;
 	}
 }
@@ -622,14 +691,16 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
 		else
 			cp.role = 0x01; /* Remain slave */
 
-		hci_send_cmd(hdev, OGF_LINK_CTL, OCF_ACCEPT_CONN_REQ, sizeof(cp), &cp);
+		hci_send_cmd(hdev, OGF_LINK_CTL,
+				OCF_ACCEPT_CONN_REQ, sizeof(cp), &cp);
 	} else {
 		/* Connection rejected */
 		struct hci_cp_reject_conn_req cp;
 
 		bacpy(&cp.bdaddr, &ev->bdaddr);
 		cp.reason = 0x0f;
-		hci_send_cmd(hdev, OGF_LINK_CTL, OCF_REJECT_CONN_REQ, sizeof(cp), &cp);
+		hci_send_cmd(hdev, OGF_LINK_CTL,
+				OCF_REJECT_CONN_REQ, sizeof(cp), &cp);
 	}
 }
 
@@ -637,7 +708,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_conn_complete *ev = (struct hci_ev_conn_complete *) skb->data;
-	struct hci_conn *conn = NULL;
+	struct hci_conn *conn;
 
 	BT_DBG("%s", hdev->name);
 
@@ -659,12 +730,21 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 		if (test_bit(HCI_ENCRYPT, &hdev->flags))
 			conn->link_mode |= HCI_LM_ENCRYPT;
 
+		/* Get remote features */
+		if (conn->type == ACL_LINK) {
+			struct hci_cp_read_remote_features cp;
+			cp.handle = ev->handle;
+			hci_send_cmd(hdev, OGF_LINK_CTL,
+				OCF_READ_REMOTE_FEATURES, sizeof(cp), &cp);
+		}
+
 		/* Set link policy */
 		if (conn->type == ACL_LINK && hdev->link_policy) {
 			struct hci_cp_write_link_policy cp;
 			cp.handle = ev->handle;
 			cp.policy = __cpu_to_le16(hdev->link_policy);
-			hci_send_cmd(hdev, OGF_LINK_POLICY, OCF_WRITE_LINK_POLICY, sizeof(cp), &cp);
+			hci_send_cmd(hdev, OGF_LINK_POLICY,
+				OCF_WRITE_LINK_POLICY, sizeof(cp), &cp);
 		}
 
@@ -675,7 +755,8 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 				__cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK):
 				__cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK);
 
-			hci_send_cmd(hdev, OGF_LINK_CTL, OCF_CHANGE_CONN_PTYPE, sizeof(cp), &cp);
+			hci_send_cmd(hdev, OGF_LINK_CTL,
+				OCF_CHANGE_CONN_PTYPE, sizeof(cp), &cp);
 		}
 	} else
 		conn->state = BT_CLOSED;
@@ -703,8 +784,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_disconn_complete *ev = (struct hci_ev_disconn_complete *) skb->data;
-	struct hci_conn *conn = NULL;
-	__u16 handle = __le16_to_cpu(ev->handle);
+	struct hci_conn *conn;
 
 	BT_DBG("%s status %d", hdev->name, ev->status);
 
@@ -713,7 +793,7 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
 
 	hci_dev_lock(hdev);
 
-	conn = hci_conn_hash_lookup_handle(hdev, handle);
+	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
 	if (conn) {
 		conn->state = BT_CLOSED;
 		hci_proto_disconn_ind(conn, ev->reason);
@@ -770,7 +850,7 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_role_change *ev = (struct hci_ev_role_change *) skb->data;
-	struct hci_conn *conn = NULL;
+	struct hci_conn *conn;
 
 	BT_DBG("%s status %d", hdev->name, ev->status);
 
@@ -793,18 +873,43 @@ static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb
 	hci_dev_unlock(hdev);
 }
 
+/* Mode Change */
+static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_ev_mode_change *ev = (struct hci_ev_mode_change *) skb->data;
+	struct hci_conn *conn;
+
+	BT_DBG("%s status %d", hdev->name, ev->status);
+
+	hci_dev_lock(hdev);
+
+	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
+	if (conn) {
+		conn->mode = ev->mode;
+		conn->interval = __le16_to_cpu(ev->interval);
+
+		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
+			if (conn->mode == HCI_CM_ACTIVE)
+				conn->power_save = 1;
+			else
+				conn->power_save = 0;
+		}
+	}
+
+	hci_dev_unlock(hdev);
+}
+
 /* Authentication Complete */
 static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_auth_complete *ev = (struct hci_ev_auth_complete *) skb->data;
-	struct hci_conn *conn = NULL;
-	__u16 handle = __le16_to_cpu(ev->handle);
+	struct hci_conn *conn;
 
 	BT_DBG("%s status %d", hdev->name, ev->status);
 
 	hci_dev_lock(hdev);
 
-	conn = hci_conn_hash_lookup_handle(hdev, handle);
+	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
 	if (conn) {
 		if (!ev->status)
 			conn->link_mode |= HCI_LM_AUTH;
@@ -819,8 +924,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 			cp.handle = __cpu_to_le16(conn->handle);
 			cp.encrypt = 1;
 			hci_send_cmd(conn->hdev, OGF_LINK_CTL,
-					OCF_SET_CONN_ENCRYPT,
-					sizeof(cp), &cp);
+					OCF_SET_CONN_ENCRYPT, sizeof(cp), &cp);
 		} else {
 			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
 			hci_encrypt_cfm(conn, ev->status, 0x00);
@@ -835,14 +939,13 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_encrypt_change *ev = (struct hci_ev_encrypt_change *) skb->data;
-	struct hci_conn *conn = NULL;
-	__u16 handle = __le16_to_cpu(ev->handle);
+	struct hci_conn *conn;
 
 	BT_DBG("%s status %d", hdev->name, ev->status);
 
 	hci_dev_lock(hdev);
 
-	conn = hci_conn_hash_lookup_handle(hdev, handle);
+	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
 	if (conn) {
 		if (!ev->status) {
 			if (ev->encrypt)
@@ -863,14 +966,13 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
 static inline void hci_change_conn_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_change_conn_link_key_complete *ev = (struct hci_ev_change_conn_link_key_complete *) skb->data;
-	struct hci_conn *conn = NULL;
-	__u16 handle = __le16_to_cpu(ev->handle);
+	struct hci_conn *conn;
 
 	BT_DBG("%s status %d", hdev->name, ev->status);
 
 	hci_dev_lock(hdev);
 
-	conn = hci_conn_hash_lookup_handle(hdev, handle);
+	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
 	if (conn) {
 		if (!ev->status)
 			conn->link_mode |= HCI_LM_SECURE;
@@ -898,18 +1000,35 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
 {
 }
 
+/* Remote Features */
+static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_ev_remote_features *ev = (struct hci_ev_remote_features *) skb->data;
+	struct hci_conn *conn;
+
+	BT_DBG("%s status %d", hdev->name, ev->status);
+
+	hci_dev_lock(hdev);
+
+	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
+	if (conn && !ev->status) {
+		memcpy(conn->features, ev->features, sizeof(conn->features));
+	}
+
+	hci_dev_unlock(hdev);
+}
+
 /* Clock Offset */
 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_clock_offset *ev = (struct hci_ev_clock_offset *) skb->data;
-	struct hci_conn *conn = NULL;
-	__u16 handle = __le16_to_cpu(ev->handle);
+	struct hci_conn *conn;
 
 	BT_DBG("%s status %d", hdev->name, ev->status);
 
 	hci_dev_lock(hdev);
 
-	conn = hci_conn_hash_lookup_handle(hdev, handle);
+	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
 	if (conn && !ev->status) {
 		struct inquiry_entry *ie;
 
@@ -940,6 +1059,23 @@ static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *
 	hci_dev_unlock(hdev);
 }
 
+/* Sniff Subrate */
+static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_ev_sniff_subrate *ev = (struct hci_ev_sniff_subrate *) skb->data;
+	struct hci_conn *conn;
+
+	BT_DBG("%s status %d", hdev->name, ev->status);
+
+	hci_dev_lock(hdev);
+
+	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
+	if (conn) {
+	}
+
+	hci_dev_unlock(hdev);
+}
+
 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_event_hdr *hdr = (struct hci_event_hdr *) skb->data;
@@ -988,6 +1124,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
 		hci_role_change_evt(hdev, skb);
 		break;
 
+	case HCI_EV_MODE_CHANGE:
+		hci_mode_change_evt(hdev, skb);
+		break;
+
 	case HCI_EV_AUTH_COMPLETE:
 		hci_auth_complete_evt(hdev, skb);
 		break;
@@ -1012,6 +1152,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
 		hci_link_key_notify_evt(hdev, skb);
 		break;
 
+	case HCI_EV_REMOTE_FEATURES:
+		hci_remote_features_evt(hdev, skb);
+		break;
+
 	case HCI_EV_CLOCK_OFFSET:
 		hci_clock_offset_evt(hdev, skb);
 		break;
@@ -1020,6 +1164,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
 		hci_pscan_rep_mode_evt(hdev, skb);
 		break;
 
+	case HCI_EV_SNIFF_SUBRATE:
+		hci_sniff_subrate_evt(hdev, skb);
+		break;
+
 	case HCI_EV_CMD_STATUS:
 		cs = (struct hci_ev_cmd_status *) skb->data;
 		skb_pull(skb, sizeof(cs));
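Note: in the OCF_WRITE_LINK_POLICY completion above, the new link policy is read back out of the command the host sent: sent + 2 skips what is presumably the 2-byte handle at the start of struct hci_cp_write_link_policy, and get_unaligned() is used because that offset into a raw command buffer need not be naturally aligned. A small sketch of the access pattern:

	#include <asm/unaligned.h>

	/* read a little-endian u16 from an arbitrary byte offset of a raw buffer */
	static __u16 example_read_le16(void *buf, int offset)
	{
		__le16 raw = get_unaligned((__le16 *) (buf + offset));

		return __le16_to_cpu(raw);
	}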
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 19b234c86f33..3987d167f04e 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -3,6 +3,8 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 
+#include <linux/platform_device.h>
+
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
@@ -11,35 +13,35 @@
 #define BT_DBG(D...)
 #endif
 
-static ssize_t show_name(struct class_device *cdev, char *buf)
+static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct hci_dev *hdev = class_get_devdata(cdev);
+	struct hci_dev *hdev = dev_get_drvdata(dev);
 	return sprintf(buf, "%s\n", hdev->name);
 }
 
-static ssize_t show_type(struct class_device *cdev, char *buf)
+static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct hci_dev *hdev = class_get_devdata(cdev);
+	struct hci_dev *hdev = dev_get_drvdata(dev);
 	return sprintf(buf, "%d\n", hdev->type);
 }
 
-static ssize_t show_address(struct class_device *cdev, char *buf)
+static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct hci_dev *hdev = class_get_devdata(cdev);
+	struct hci_dev *hdev = dev_get_drvdata(dev);
 	bdaddr_t bdaddr;
 	baswap(&bdaddr, &hdev->bdaddr);
 	return sprintf(buf, "%s\n", batostr(&bdaddr));
 }
 
-static ssize_t show_flags(struct class_device *cdev, char *buf)
+static ssize_t show_flags(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct hci_dev *hdev = class_get_devdata(cdev);
+	struct hci_dev *hdev = dev_get_drvdata(dev);
 	return sprintf(buf, "0x%lx\n", hdev->flags);
 }
 
-static ssize_t show_inquiry_cache(struct class_device *cdev, char *buf)
+static ssize_t show_inquiry_cache(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct hci_dev *hdev = class_get_devdata(cdev);
+	struct hci_dev *hdev = dev_get_drvdata(dev);
 	struct inquiry_cache *cache = &hdev->inq_cache;
 	struct inquiry_entry *e;
 	int n = 0;
@@ -61,94 +63,193 @@ static ssize_t show_inquiry_cache(struct class_device *cdev, char *buf)
 	return n;
 }
 
-static CLASS_DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-static CLASS_DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
-static CLASS_DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
-static CLASS_DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL);
-static CLASS_DEVICE_ATTR(inquiry_cache, S_IRUGO, show_inquiry_cache, NULL);
+static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct hci_dev *hdev = dev_get_drvdata(dev);
+	return sprintf(buf, "%d\n", hdev->idle_timeout);
+}
 
-static struct class_device_attribute *bt_attrs[] = {
-	&class_device_attr_name,
-	&class_device_attr_type,
-	&class_device_attr_address,
-	&class_device_attr_flags,
-	&class_device_attr_inquiry_cache,
-	NULL
-};
+static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct hci_dev *hdev = dev_get_drvdata(dev);
+	char *ptr;
+	__u32 val;
+
+	val = simple_strtoul(buf, &ptr, 10);
+	if (ptr == buf)
+		return -EINVAL;
 
-#ifdef CONFIG_HOTPLUG
-static int bt_uevent(struct class_device *cdev, char **envp, int num_envp, char *buf, int size)
+	if (val != 0 && (val < 500 || val > 3600000))
+		return -EINVAL;
+
+	hdev->idle_timeout = val;
+
+	return count;
+}
+
+static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct hci_dev *hdev = class_get_devdata(cdev);
-	int n, i = 0;
+	struct hci_dev *hdev = dev_get_drvdata(dev);
+	return sprintf(buf, "%d\n", hdev->sniff_max_interval);
+}
 
-	envp[i++] = buf;
-	n = snprintf(buf, size, "INTERFACE=%s", hdev->name) + 1;
-	buf += n;
-	size -= n;
+static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct hci_dev *hdev = dev_get_drvdata(dev);
+	char *ptr;
+	__u16 val;
 
-	if ((size <= 0) || (i >= num_envp))
-		return -ENOMEM;
+	val = simple_strtoul(buf, &ptr, 10);
+	if (ptr == buf)
+		return -EINVAL;
 
-	envp[i] = NULL;
-	return 0;
+	if (val < 0x0002 || val > 0xFFFE || val % 2)
+		return -EINVAL;
+
+	if (val < hdev->sniff_min_interval)
+		return -EINVAL;
+
+	hdev->sniff_max_interval = val;
+
+	return count;
+}
+
+static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct hci_dev *hdev = dev_get_drvdata(dev);
+	return sprintf(buf, "%d\n", hdev->sniff_min_interval);
 }
-#endif
 
-static void bt_release(struct class_device *cdev)
+static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
-	struct hci_dev *hdev = class_get_devdata(cdev);
+	struct hci_dev *hdev = dev_get_drvdata(dev);
+	char *ptr;
+	__u16 val;
 
-	kfree(hdev);
+	val = simple_strtoul(buf, &ptr, 10);
+	if (ptr == buf)
+		return -EINVAL;
+
+	if (val < 0x0002 || val > 0xFFFE || val % 2)
+		return -EINVAL;
+
+	if (val > hdev->sniff_max_interval)
+		return -EINVAL;
+
+	hdev->sniff_min_interval = val;
+
+	return count;
 }
 
-struct class bt_class = {
-	.name = "bluetooth",
-	.release = bt_release,
-#ifdef CONFIG_HOTPLUG
-	.uevent = bt_uevent,
-#endif
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
+static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
+static DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL);
+static DEVICE_ATTR(inquiry_cache, S_IRUGO, show_inquiry_cache, NULL);
+
+static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
+				show_idle_timeout, store_idle_timeout);
+static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR,
+				show_sniff_max_interval, store_sniff_max_interval);
+static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
+				show_sniff_min_interval, store_sniff_min_interval);
+
+static struct device_attribute *bt_attrs[] = {
+	&dev_attr_name,
+	&dev_attr_type,
+	&dev_attr_address,
+	&dev_attr_flags,
+	&dev_attr_inquiry_cache,
+	&dev_attr_idle_timeout,
+	&dev_attr_sniff_max_interval,
+	&dev_attr_sniff_min_interval,
+	NULL
 };
 
+struct class *bt_class = NULL;
 EXPORT_SYMBOL_GPL(bt_class);
 
+static struct bus_type bt_bus = {
+	.name	= "bluetooth",
+};
+
+static struct platform_device *bt_platform;
+
+static void bt_release(struct device *dev)
+{
+	struct hci_dev *hdev = dev_get_drvdata(dev);
+	kfree(hdev);
+}
+
 int hci_register_sysfs(struct hci_dev *hdev)
 {
-	struct class_device *cdev = &hdev->class_dev;
+	struct device *dev = &hdev->dev;
 	unsigned int i;
 	int err;
 
 	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
 
-	cdev->class = &bt_class;
-	class_set_devdata(cdev, hdev);
+	dev->class = bt_class;
+
+	if (hdev->parent)
+		dev->parent = hdev->parent;
+	else
+		dev->parent = &bt_platform->dev;
+
+	strlcpy(dev->bus_id, hdev->name, BUS_ID_SIZE);
+
+	dev->release = bt_release;
 
-	strlcpy(cdev->class_id, hdev->name, BUS_ID_SIZE);
-	err = class_device_register(cdev);
+	dev_set_drvdata(dev, hdev);
+
+	err = device_register(dev);
 	if (err < 0)
 		return err;
 
 	for (i = 0; bt_attrs[i]; i++)
-		class_device_create_file(cdev, bt_attrs[i]);
+		device_create_file(dev, bt_attrs[i]);
 
 	return 0;
 }
 
 void hci_unregister_sysfs(struct hci_dev *hdev)
 {
-	struct class_device * cdev = &hdev->class_dev;
+	struct device *dev = &hdev->dev;
 
 	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
 
-	class_device_del(cdev);
+	device_del(dev);
 }
 
 int __init bt_sysfs_init(void)
 {
-	return class_register(&bt_class);
+	int err;
+
| 228 | bt_platform = platform_device_register_simple("bluetooth", -1, NULL, 0); | ||
| 229 | if (IS_ERR(bt_platform)) | ||
| 230 | return PTR_ERR(bt_platform); | ||
| 231 | |||
| 232 | err = bus_register(&bt_bus); | ||
| 233 | if (err < 0) { | ||
| 234 | platform_device_unregister(bt_platform); | ||
| 235 | return err; | ||
| 236 | } | ||
| 237 | |||
| 238 | bt_class = class_create(THIS_MODULE, "bluetooth"); | ||
| 239 | if (IS_ERR(bt_class)) { | ||
| 240 | bus_unregister(&bt_bus); | ||
| 241 | platform_device_unregister(bt_platform); | ||
| 242 | return PTR_ERR(bt_class); | ||
| 243 | } | ||
| 244 | |||
| 245 | return 0; | ||
| 149 | } | 246 | } |
| 150 | 247 | ||
| 151 | void __exit bt_sysfs_cleanup(void) | 248 | void __exit bt_sysfs_cleanup(void) |
| 152 | { | 249 | { |
| 153 | class_unregister(&bt_class); | 250 | class_destroy(bt_class); |
| 251 | |||
| 252 | bus_unregister(&bt_bus); | ||
| 253 | |||
| 254 | platform_device_unregister(bt_platform); | ||
| 154 | } | 255 | } |
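
Note: the new store_idle_timeout() and store_sniff_*_interval() handlers above all follow the same parse-then-range-check pattern. A minimal userspace sketch of the sniff_max_interval rules, assuming plain strtoul in place of the kernel's simple_strtoul; the function name and test values here are illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int check_sniff_max(const char *buf, unsigned int cur_min,
                           unsigned int *out)
{
    char *end;
    unsigned long val = strtoul(buf, &end, 10);

    if (end == buf)                      /* nothing parsed: reject */
        return -EINVAL;
    if (val < 0x0002 || val > 0xFFFE || val % 2)
        return -EINVAL;                  /* out of range or odd */
    if (val < cur_min)                   /* max may not drop below min */
        return -EINVAL;

    *out = (unsigned int)val;
    return 0;
}

int main(void)
{
    unsigned int max;
    const char *tests[] = { "800", "3", "70000", "abc", NULL };

    for (int i = 0; tests[i]; i++)
        printf("%-6s -> %s\n", tests[i],
               check_sniff_max(tests[i], 0x0050, &max) ? "rejected" : "accepted");
    return 0;
}
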
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 770101177da1..eaaad658d11d 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c | |||
| @@ -63,11 +63,6 @@ static struct bt_sock_list l2cap_sk_list = { | |||
| 63 | .lock = RW_LOCK_UNLOCKED | 63 | .lock = RW_LOCK_UNLOCKED |
| 64 | }; | 64 | }; |
| 65 | 65 | ||
| 66 | static int l2cap_conn_del(struct hci_conn *conn, int err); | ||
| 67 | |||
| 68 | static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent); | ||
| 69 | static void l2cap_chan_del(struct sock *sk, int err); | ||
| 70 | |||
| 71 | static void __l2cap_sock_close(struct sock *sk, int reason); | 66 | static void __l2cap_sock_close(struct sock *sk, int reason); |
| 72 | static void l2cap_sock_close(struct sock *sk); | 67 | static void l2cap_sock_close(struct sock *sk); |
| 73 | static void l2cap_sock_kill(struct sock *sk); | 68 | static void l2cap_sock_kill(struct sock *sk); |
| @@ -109,24 +104,177 @@ static void l2cap_sock_init_timer(struct sock *sk) | |||
| 109 | sk->sk_timer.data = (unsigned long)sk; | 104 | sk->sk_timer.data = (unsigned long)sk; |
| 110 | } | 105 | } |
| 111 | 106 | ||
| 107 | /* ---- L2CAP channels ---- */ | ||
| 108 | static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid) | ||
| 109 | { | ||
| 110 | struct sock *s; | ||
| 111 | for (s = l->head; s; s = l2cap_pi(s)->next_c) { | ||
| 112 | if (l2cap_pi(s)->dcid == cid) | ||
| 113 | break; | ||
| 114 | } | ||
| 115 | return s; | ||
| 116 | } | ||
| 117 | |||
| 118 | static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid) | ||
| 119 | { | ||
| 120 | struct sock *s; | ||
| 121 | for (s = l->head; s; s = l2cap_pi(s)->next_c) { | ||
| 122 | if (l2cap_pi(s)->scid == cid) | ||
| 123 | break; | ||
| 124 | } | ||
| 125 | return s; | ||
| 126 | } | ||
| 127 | |||
| 128 | /* Find channel with given SCID. | ||
| 129 | * Returns locked socket */ | ||
| 130 | static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid) | ||
| 131 | { | ||
| 132 | struct sock *s; | ||
| 133 | read_lock(&l->lock); | ||
| 134 | s = __l2cap_get_chan_by_scid(l, cid); | ||
| 135 | if (s) bh_lock_sock(s); | ||
| 136 | read_unlock(&l->lock); | ||
| 137 | return s; | ||
| 138 | } | ||
| 139 | |||
| 140 | static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident) | ||
| 141 | { | ||
| 142 | struct sock *s; | ||
| 143 | for (s = l->head; s; s = l2cap_pi(s)->next_c) { | ||
| 144 | if (l2cap_pi(s)->ident == ident) | ||
| 145 | break; | ||
| 146 | } | ||
| 147 | return s; | ||
| 148 | } | ||
| 149 | |||
| 150 | static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident) | ||
| 151 | { | ||
| 152 | struct sock *s; | ||
| 153 | read_lock(&l->lock); | ||
| 154 | s = __l2cap_get_chan_by_ident(l, ident); | ||
| 155 | if (s) bh_lock_sock(s); | ||
| 156 | read_unlock(&l->lock); | ||
| 157 | return s; | ||
| 158 | } | ||
| 159 | |||
| 160 | static u16 l2cap_alloc_cid(struct l2cap_chan_list *l) | ||
| 161 | { | ||
| 162 | u16 cid = 0x0040; | ||
| 163 | |||
| 164 | for (; cid < 0xffff; cid++) { | ||
| 165 | if(!__l2cap_get_chan_by_scid(l, cid)) | ||
| 166 | return cid; | ||
| 167 | } | ||
| 168 | |||
| 169 | return 0; | ||
| 170 | } | ||
| 171 | |||
| 172 | static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk) | ||
| 173 | { | ||
| 174 | sock_hold(sk); | ||
| 175 | |||
| 176 | if (l->head) | ||
| 177 | l2cap_pi(l->head)->prev_c = sk; | ||
| 178 | |||
| 179 | l2cap_pi(sk)->next_c = l->head; | ||
| 180 | l2cap_pi(sk)->prev_c = NULL; | ||
| 181 | l->head = sk; | ||
| 182 | } | ||
| 183 | |||
| 184 | static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk) | ||
| 185 | { | ||
| 186 | struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c; | ||
| 187 | |||
| 188 | write_lock(&l->lock); | ||
| 189 | if (sk == l->head) | ||
| 190 | l->head = next; | ||
| 191 | |||
| 192 | if (next) | ||
| 193 | l2cap_pi(next)->prev_c = prev; | ||
| 194 | if (prev) | ||
| 195 | l2cap_pi(prev)->next_c = next; | ||
| 196 | write_unlock(&l->lock); | ||
| 197 | |||
| 198 | __sock_put(sk); | ||
| 199 | } | ||
| 200 | |||
| 201 | static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent) | ||
| 202 | { | ||
| 203 | struct l2cap_chan_list *l = &conn->chan_list; | ||
| 204 | |||
| 205 | BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid); | ||
| 206 | |||
| 207 | l2cap_pi(sk)->conn = conn; | ||
| 208 | |||
| 209 | if (sk->sk_type == SOCK_SEQPACKET) { | ||
| 210 | /* Alloc CID for connection-oriented socket */ | ||
| 211 | l2cap_pi(sk)->scid = l2cap_alloc_cid(l); | ||
| 212 | } else if (sk->sk_type == SOCK_DGRAM) { | ||
| 213 | /* Connectionless socket */ | ||
| 214 | l2cap_pi(sk)->scid = 0x0002; | ||
| 215 | l2cap_pi(sk)->dcid = 0x0002; | ||
| 216 | l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU; | ||
| 217 | } else { | ||
| 218 | /* Raw socket can send/recv signalling messages only */ | ||
| 219 | l2cap_pi(sk)->scid = 0x0001; | ||
| 220 | l2cap_pi(sk)->dcid = 0x0001; | ||
| 221 | l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU; | ||
| 222 | } | ||
| 223 | |||
| 224 | __l2cap_chan_link(l, sk); | ||
| 225 | |||
| 226 | if (parent) | ||
| 227 | bt_accept_enqueue(parent, sk); | ||
| 228 | } | ||
| 229 | |||
| 230 | /* Delete channel. | ||
| 231 | * Must be called on the locked socket. */ | ||
| 232 | static void l2cap_chan_del(struct sock *sk, int err) | ||
| 233 | { | ||
| 234 | struct l2cap_conn *conn = l2cap_pi(sk)->conn; | ||
| 235 | struct sock *parent = bt_sk(sk)->parent; | ||
| 236 | |||
| 237 | l2cap_sock_clear_timer(sk); | ||
| 238 | |||
| 239 | BT_DBG("sk %p, conn %p, err %d", sk, conn, err); | ||
| 240 | |||
| 241 | if (conn) { | ||
| 242 | /* Unlink from channel list */ | ||
| 243 | l2cap_chan_unlink(&conn->chan_list, sk); | ||
| 244 | l2cap_pi(sk)->conn = NULL; | ||
| 245 | hci_conn_put(conn->hcon); | ||
| 246 | } | ||
| 247 | |||
| 248 | sk->sk_state = BT_CLOSED; | ||
| 249 | sock_set_flag(sk, SOCK_ZAPPED); | ||
| 250 | |||
| 251 | if (err) | ||
| 252 | sk->sk_err = err; | ||
| 253 | |||
| 254 | if (parent) { | ||
| 255 | bt_accept_unlink(sk); | ||
| 256 | parent->sk_data_ready(parent, 0); | ||
| 257 | } else | ||
| 258 | sk->sk_state_change(sk); | ||
| 259 | } | ||
| 260 | |||
| 112 | /* ---- L2CAP connections ---- */ | 261 | /* ---- L2CAP connections ---- */ |
| 113 | static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) | 262 | static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) |
| 114 | { | 263 | { |
| 115 | struct l2cap_conn *conn; | 264 | struct l2cap_conn *conn = hcon->l2cap_data; |
| 116 | |||
| 117 | if ((conn = hcon->l2cap_data)) | ||
| 118 | return conn; | ||
| 119 | 265 | ||
| 120 | if (status) | 266 | if (conn || status) |
| 121 | return conn; | 267 | return conn; |
| 122 | 268 | ||
| 123 | if (!(conn = kmalloc(sizeof(struct l2cap_conn), GFP_ATOMIC))) | 269 | conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC); |
| 270 | if (!conn) | ||
| 124 | return NULL; | 271 | return NULL; |
| 125 | memset(conn, 0, sizeof(struct l2cap_conn)); | ||
| 126 | 272 | ||
| 127 | hcon->l2cap_data = conn; | 273 | hcon->l2cap_data = conn; |
| 128 | conn->hcon = hcon; | 274 | conn->hcon = hcon; |
| 129 | 275 | ||
| 276 | BT_DBG("hcon %p conn %p", hcon, conn); | ||
| 277 | |||
| 130 | conn->mtu = hcon->hdev->acl_mtu; | 278 | conn->mtu = hcon->hdev->acl_mtu; |
| 131 | conn->src = &hcon->hdev->bdaddr; | 279 | conn->src = &hcon->hdev->bdaddr; |
| 132 | conn->dst = &hcon->dst; | 280 | conn->dst = &hcon->dst; |
| @@ -134,17 +282,16 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) | |||
| 134 | spin_lock_init(&conn->lock); | 282 | spin_lock_init(&conn->lock); |
| 135 | rwlock_init(&conn->chan_list.lock); | 283 | rwlock_init(&conn->chan_list.lock); |
| 136 | 284 | ||
| 137 | BT_DBG("hcon %p conn %p", hcon, conn); | ||
| 138 | return conn; | 285 | return conn; |
| 139 | } | 286 | } |
| 140 | 287 | ||
| 141 | static int l2cap_conn_del(struct hci_conn *hcon, int err) | 288 | static void l2cap_conn_del(struct hci_conn *hcon, int err) |
| 142 | { | 289 | { |
| 143 | struct l2cap_conn *conn; | 290 | struct l2cap_conn *conn = hcon->l2cap_data; |
| 144 | struct sock *sk; | 291 | struct sock *sk; |
| 145 | 292 | ||
| 146 | if (!(conn = hcon->l2cap_data)) | 293 | if (!conn) |
| 147 | return 0; | 294 | return; |
| 148 | 295 | ||
| 149 | BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); | 296 | BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); |
| 150 | 297 | ||
| @@ -161,7 +308,6 @@ static int l2cap_conn_del(struct hci_conn *hcon, int err) | |||
| 161 | 308 | ||
| 162 | hcon->l2cap_data = NULL; | 309 | hcon->l2cap_data = NULL; |
| 163 | kfree(conn); | 310 | kfree(conn); |
| 164 | return 0; | ||
| 165 | } | 311 | } |
| 166 | 312 | ||
| 167 | static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent) | 313 | static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent) |
| @@ -925,160 +1071,6 @@ static int l2cap_sock_release(struct socket *sock) | |||
| 925 | return err; | 1071 | return err; |
| 926 | } | 1072 | } |
| 927 | 1073 | ||
| 928 | /* ---- L2CAP channels ---- */ | ||
| 929 | static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid) | ||
| 930 | { | ||
| 931 | struct sock *s; | ||
| 932 | for (s = l->head; s; s = l2cap_pi(s)->next_c) { | ||
| 933 | if (l2cap_pi(s)->dcid == cid) | ||
| 934 | break; | ||
| 935 | } | ||
| 936 | return s; | ||
| 937 | } | ||
| 938 | |||
| 939 | static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid) | ||
| 940 | { | ||
| 941 | struct sock *s; | ||
| 942 | for (s = l->head; s; s = l2cap_pi(s)->next_c) { | ||
| 943 | if (l2cap_pi(s)->scid == cid) | ||
| 944 | break; | ||
| 945 | } | ||
| 946 | return s; | ||
| 947 | } | ||
| 948 | |||
| 949 | /* Find channel with given SCID. | ||
| 950 | * Returns locked socket */ | ||
| 951 | static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid) | ||
| 952 | { | ||
| 953 | struct sock *s; | ||
| 954 | read_lock(&l->lock); | ||
| 955 | s = __l2cap_get_chan_by_scid(l, cid); | ||
| 956 | if (s) bh_lock_sock(s); | ||
| 957 | read_unlock(&l->lock); | ||
| 958 | return s; | ||
| 959 | } | ||
| 960 | |||
| 961 | static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident) | ||
| 962 | { | ||
| 963 | struct sock *s; | ||
| 964 | for (s = l->head; s; s = l2cap_pi(s)->next_c) { | ||
| 965 | if (l2cap_pi(s)->ident == ident) | ||
| 966 | break; | ||
| 967 | } | ||
| 968 | return s; | ||
| 969 | } | ||
| 970 | |||
| 971 | static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident) | ||
| 972 | { | ||
| 973 | struct sock *s; | ||
| 974 | read_lock(&l->lock); | ||
| 975 | s = __l2cap_get_chan_by_ident(l, ident); | ||
| 976 | if (s) bh_lock_sock(s); | ||
| 977 | read_unlock(&l->lock); | ||
| 978 | return s; | ||
| 979 | } | ||
| 980 | |||
| 981 | static u16 l2cap_alloc_cid(struct l2cap_chan_list *l) | ||
| 982 | { | ||
| 983 | u16 cid = 0x0040; | ||
| 984 | |||
| 985 | for (; cid < 0xffff; cid++) { | ||
| 986 | if(!__l2cap_get_chan_by_scid(l, cid)) | ||
| 987 | return cid; | ||
| 988 | } | ||
| 989 | |||
| 990 | return 0; | ||
| 991 | } | ||
| 992 | |||
| 993 | static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk) | ||
| 994 | { | ||
| 995 | sock_hold(sk); | ||
| 996 | |||
| 997 | if (l->head) | ||
| 998 | l2cap_pi(l->head)->prev_c = sk; | ||
| 999 | |||
| 1000 | l2cap_pi(sk)->next_c = l->head; | ||
| 1001 | l2cap_pi(sk)->prev_c = NULL; | ||
| 1002 | l->head = sk; | ||
| 1003 | } | ||
| 1004 | |||
| 1005 | static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk) | ||
| 1006 | { | ||
| 1007 | struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c; | ||
| 1008 | |||
| 1009 | write_lock(&l->lock); | ||
| 1010 | if (sk == l->head) | ||
| 1011 | l->head = next; | ||
| 1012 | |||
| 1013 | if (next) | ||
| 1014 | l2cap_pi(next)->prev_c = prev; | ||
| 1015 | if (prev) | ||
| 1016 | l2cap_pi(prev)->next_c = next; | ||
| 1017 | write_unlock(&l->lock); | ||
| 1018 | |||
| 1019 | __sock_put(sk); | ||
| 1020 | } | ||
| 1021 | |||
| 1022 | static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent) | ||
| 1023 | { | ||
| 1024 | struct l2cap_chan_list *l = &conn->chan_list; | ||
| 1025 | |||
| 1026 | BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid); | ||
| 1027 | |||
| 1028 | l2cap_pi(sk)->conn = conn; | ||
| 1029 | |||
| 1030 | if (sk->sk_type == SOCK_SEQPACKET) { | ||
| 1031 | /* Alloc CID for connection-oriented socket */ | ||
| 1032 | l2cap_pi(sk)->scid = l2cap_alloc_cid(l); | ||
| 1033 | } else if (sk->sk_type == SOCK_DGRAM) { | ||
| 1034 | /* Connectionless socket */ | ||
| 1035 | l2cap_pi(sk)->scid = 0x0002; | ||
| 1036 | l2cap_pi(sk)->dcid = 0x0002; | ||
| 1037 | l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU; | ||
| 1038 | } else { | ||
| 1039 | /* Raw socket can send/recv signalling messages only */ | ||
| 1040 | l2cap_pi(sk)->scid = 0x0001; | ||
| 1041 | l2cap_pi(sk)->dcid = 0x0001; | ||
| 1042 | l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU; | ||
| 1043 | } | ||
| 1044 | |||
| 1045 | __l2cap_chan_link(l, sk); | ||
| 1046 | |||
| 1047 | if (parent) | ||
| 1048 | bt_accept_enqueue(parent, sk); | ||
| 1049 | } | ||
| 1050 | |||
| 1051 | /* Delete channel. | ||
| 1052 | * Must be called on the locked socket. */ | ||
| 1053 | static void l2cap_chan_del(struct sock *sk, int err) | ||
| 1054 | { | ||
| 1055 | struct l2cap_conn *conn = l2cap_pi(sk)->conn; | ||
| 1056 | struct sock *parent = bt_sk(sk)->parent; | ||
| 1057 | |||
| 1058 | l2cap_sock_clear_timer(sk); | ||
| 1059 | |||
| 1060 | BT_DBG("sk %p, conn %p, err %d", sk, conn, err); | ||
| 1061 | |||
| 1062 | if (conn) { | ||
| 1063 | /* Unlink from channel list */ | ||
| 1064 | l2cap_chan_unlink(&conn->chan_list, sk); | ||
| 1065 | l2cap_pi(sk)->conn = NULL; | ||
| 1066 | hci_conn_put(conn->hcon); | ||
| 1067 | } | ||
| 1068 | |||
| 1069 | sk->sk_state = BT_CLOSED; | ||
| 1070 | sock_set_flag(sk, SOCK_ZAPPED); | ||
| 1071 | |||
| 1072 | if (err) | ||
| 1073 | sk->sk_err = err; | ||
| 1074 | |||
| 1075 | if (parent) { | ||
| 1076 | bt_accept_unlink(sk); | ||
| 1077 | parent->sk_data_ready(parent, 0); | ||
| 1078 | } else | ||
| 1079 | sk->sk_state_change(sk); | ||
| 1080 | } | ||
| 1081 | |||
| 1082 | static void l2cap_conn_ready(struct l2cap_conn *conn) | 1074 | static void l2cap_conn_ready(struct l2cap_conn *conn) |
| 1083 | { | 1075 | { |
| 1084 | struct l2cap_chan_list *l = &conn->chan_list; | 1076 | struct l2cap_chan_list *l = &conn->chan_list; |
| @@ -1834,7 +1826,9 @@ drop: | |||
| 1834 | kfree_skb(skb); | 1826 | kfree_skb(skb); |
| 1835 | 1827 | ||
| 1836 | done: | 1828 | done: |
| 1837 | if (sk) bh_unlock_sock(sk); | 1829 | if (sk) |
| 1830 | bh_unlock_sock(sk); | ||
| 1831 | |||
| 1838 | return 0; | 1832 | return 0; |
| 1839 | } | 1833 | } |
| 1840 | 1834 | ||
| @@ -1925,18 +1919,18 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) | |||
| 1925 | 1919 | ||
| 1926 | static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status) | 1920 | static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status) |
| 1927 | { | 1921 | { |
| 1922 | struct l2cap_conn *conn; | ||
| 1923 | |||
| 1928 | BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); | 1924 | BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); |
| 1929 | 1925 | ||
| 1930 | if (hcon->type != ACL_LINK) | 1926 | if (hcon->type != ACL_LINK) |
| 1931 | return 0; | 1927 | return 0; |
| 1932 | 1928 | ||
| 1933 | if (!status) { | 1929 | if (!status) { |
| 1934 | struct l2cap_conn *conn; | ||
| 1935 | |||
| 1936 | conn = l2cap_conn_add(hcon, status); | 1930 | conn = l2cap_conn_add(hcon, status); |
| 1937 | if (conn) | 1931 | if (conn) |
| 1938 | l2cap_conn_ready(conn); | 1932 | l2cap_conn_ready(conn); |
| 1939 | } else | 1933 | } else |
| 1940 | l2cap_conn_del(hcon, bt_err(status)); | 1934 | l2cap_conn_del(hcon, bt_err(status)); |
| 1941 | 1935 | ||
| 1942 | return 0; | 1936 | return 0; |
| @@ -1950,19 +1944,21 @@ static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason) | |||
| 1950 | return 0; | 1944 | return 0; |
| 1951 | 1945 | ||
| 1952 | l2cap_conn_del(hcon, bt_err(reason)); | 1946 | l2cap_conn_del(hcon, bt_err(reason)); |
| 1947 | |||
| 1953 | return 0; | 1948 | return 0; |
| 1954 | } | 1949 | } |
| 1955 | 1950 | ||
| 1956 | static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status) | 1951 | static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status) |
| 1957 | { | 1952 | { |
| 1958 | struct l2cap_chan_list *l; | 1953 | struct l2cap_chan_list *l; |
| 1959 | struct l2cap_conn *conn; | 1954 | struct l2cap_conn *conn = hcon->l2cap_data; |
| 1960 | struct l2cap_conn_rsp rsp; | 1955 | struct l2cap_conn_rsp rsp; |
| 1961 | struct sock *sk; | 1956 | struct sock *sk; |
| 1962 | int result; | 1957 | int result; |
| 1963 | 1958 | ||
| 1964 | if (!(conn = hcon->l2cap_data)) | 1959 | if (!conn) |
| 1965 | return 0; | 1960 | return 0; |
| 1961 | |||
| 1966 | l = &conn->chan_list; | 1962 | l = &conn->chan_list; |
| 1967 | 1963 | ||
| 1968 | BT_DBG("conn %p", conn); | 1964 | BT_DBG("conn %p", conn); |
| @@ -2005,13 +2001,14 @@ static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status) | |||
| 2005 | static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status) | 2001 | static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status) |
| 2006 | { | 2002 | { |
| 2007 | struct l2cap_chan_list *l; | 2003 | struct l2cap_chan_list *l; |
| 2008 | struct l2cap_conn *conn; | 2004 | struct l2cap_conn *conn = hcon->l2cap_data; |
| 2009 | struct l2cap_conn_rsp rsp; | 2005 | struct l2cap_conn_rsp rsp; |
| 2010 | struct sock *sk; | 2006 | struct sock *sk; |
| 2011 | int result; | 2007 | int result; |
| 2012 | 2008 | ||
| 2013 | if (!(conn = hcon->l2cap_data)) | 2009 | if (!conn) |
| 2014 | return 0; | 2010 | return 0; |
| 2011 | |||
| 2015 | l = &conn->chan_list; | 2012 | l = &conn->chan_list; |
| 2016 | 2013 | ||
| 2017 | BT_DBG("conn %p", conn); | 2014 | BT_DBG("conn %p", conn); |
| @@ -2219,7 +2216,7 @@ static int __init l2cap_init(void) | |||
| 2219 | goto error; | 2216 | goto error; |
| 2220 | } | 2217 | } |
| 2221 | 2218 | ||
| 2222 | class_create_file(&bt_class, &class_attr_l2cap); | 2219 | class_create_file(bt_class, &class_attr_l2cap); |
| 2223 | 2220 | ||
| 2224 | BT_INFO("L2CAP ver %s", VERSION); | 2221 | BT_INFO("L2CAP ver %s", VERSION); |
| 2225 | BT_INFO("L2CAP socket layer initialized"); | 2222 | BT_INFO("L2CAP socket layer initialized"); |
| @@ -2233,7 +2230,7 @@ error: | |||
| 2233 | 2230 | ||
| 2234 | static void __exit l2cap_exit(void) | 2231 | static void __exit l2cap_exit(void) |
| 2235 | { | 2232 | { |
| 2236 | class_remove_file(&bt_class, &class_attr_l2cap); | 2233 | class_remove_file(bt_class, &class_attr_l2cap); |
| 2237 | 2234 | ||
| 2238 | if (bt_sock_unregister(BTPROTO_L2CAP) < 0) | 2235 | if (bt_sock_unregister(BTPROTO_L2CAP) < 0) |
| 2239 | BT_ERR("L2CAP socket unregistration failed"); | 2236 | BT_ERR("L2CAP socket unregistration failed"); |
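
Note: the channel helpers relocated near the top of l2cap.c keep every channel of a connection on an intrusive doubly linked list headed at conn->chan_list. A standalone sketch of the same link/unlink pointer handling, with a simplified struct in place of struct sock and with the locking and refcounting left out:

#include <stdio.h>

struct chan {
    unsigned short scid;
    struct chan *next_c, *prev_c;   /* intrusive list links */
};

struct chan_list { struct chan *head; };

/* Push a channel at the head of the list (cf. __l2cap_chan_link). */
static void chan_link(struct chan_list *l, struct chan *c)
{
    if (l->head)
        l->head->prev_c = c;
    c->next_c = l->head;
    c->prev_c = NULL;
    l->head = c;
}

/* Remove a channel from anywhere in the list (cf. l2cap_chan_unlink). */
static void chan_unlink(struct chan_list *l, struct chan *c)
{
    struct chan *next = c->next_c, *prev = c->prev_c;

    if (c == l->head)
        l->head = next;
    if (next)
        next->prev_c = prev;
    if (prev)
        prev->next_c = next;
}

int main(void)
{
    struct chan a = { .scid = 0x0040 }, b = { .scid = 0x0041 };
    struct chan_list l = { 0 };

    chan_link(&l, &a);
    chan_link(&l, &b);
    chan_unlink(&l, &a);
    printf("remaining head scid = 0x%04x\n", (unsigned)l.head->scid);
    return 0;
}
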
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index bd46e8927f29..155a2b93760e 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c | |||
| @@ -52,8 +52,9 @@ | |||
| 52 | #define BT_DBG(D...) | 52 | #define BT_DBG(D...) |
| 53 | #endif | 53 | #endif |
| 54 | 54 | ||
| 55 | #define VERSION "1.7" | 55 | #define VERSION "1.8" |
| 56 | 56 | ||
| 57 | static int disable_cfc = 0; | ||
| 57 | static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU; | 58 | static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU; |
| 58 | 59 | ||
| 59 | static struct task_struct *rfcomm_thread; | 60 | static struct task_struct *rfcomm_thread; |
| @@ -533,7 +534,7 @@ static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state) | |||
| 533 | s->sock = sock; | 534 | s->sock = sock; |
| 534 | 535 | ||
| 535 | s->mtu = RFCOMM_DEFAULT_MTU; | 536 | s->mtu = RFCOMM_DEFAULT_MTU; |
| 536 | s->cfc = RFCOMM_CFC_UNKNOWN; | 537 | s->cfc = disable_cfc ? RFCOMM_CFC_DISABLED : RFCOMM_CFC_UNKNOWN; |
| 537 | 538 | ||
| 538 | /* Do not increment module usage count for listening sessions. | 539 | /* Do not increment module usage count for listening sessions. |
| 539 | * Otherwise we won't be able to unload the module. */ | 540 | * Otherwise we won't be able to unload the module. */ |
| @@ -1149,6 +1150,8 @@ static inline int rfcomm_check_link_mode(struct rfcomm_dlc *d) | |||
| 1149 | 1150 | ||
| 1150 | static void rfcomm_dlc_accept(struct rfcomm_dlc *d) | 1151 | static void rfcomm_dlc_accept(struct rfcomm_dlc *d) |
| 1151 | { | 1152 | { |
| 1153 | struct sock *sk = d->session->sock->sk; | ||
| 1154 | |||
| 1152 | BT_DBG("dlc %p", d); | 1155 | BT_DBG("dlc %p", d); |
| 1153 | 1156 | ||
| 1154 | rfcomm_send_ua(d->session, d->dlci); | 1157 | rfcomm_send_ua(d->session, d->dlci); |
| @@ -1158,6 +1161,9 @@ static void rfcomm_dlc_accept(struct rfcomm_dlc *d) | |||
| 1158 | d->state_change(d, 0); | 1161 | d->state_change(d, 0); |
| 1159 | rfcomm_dlc_unlock(d); | 1162 | rfcomm_dlc_unlock(d); |
| 1160 | 1163 | ||
| 1164 | if (d->link_mode & RFCOMM_LM_MASTER) | ||
| 1165 | hci_conn_switch_role(l2cap_pi(sk)->conn->hcon, 0x00); | ||
| 1166 | |||
| 1161 | rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig); | 1167 | rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig); |
| 1162 | } | 1168 | } |
| 1163 | 1169 | ||
| @@ -1222,14 +1228,18 @@ static int rfcomm_apply_pn(struct rfcomm_dlc *d, int cr, struct rfcomm_pn *pn) | |||
| 1222 | BT_DBG("dlc %p state %ld dlci %d mtu %d fc 0x%x credits %d", | 1228 | BT_DBG("dlc %p state %ld dlci %d mtu %d fc 0x%x credits %d", |
| 1223 | d, d->state, d->dlci, pn->mtu, pn->flow_ctrl, pn->credits); | 1229 | d, d->state, d->dlci, pn->mtu, pn->flow_ctrl, pn->credits); |
| 1224 | 1230 | ||
| 1225 | if (pn->flow_ctrl == 0xf0 || pn->flow_ctrl == 0xe0) { | 1231 | if ((pn->flow_ctrl == 0xf0 && s->cfc != RFCOMM_CFC_DISABLED) || |
| 1226 | d->cfc = s->cfc = RFCOMM_CFC_ENABLED; | 1232 | pn->flow_ctrl == 0xe0) { |
| 1233 | d->cfc = RFCOMM_CFC_ENABLED; | ||
| 1227 | d->tx_credits = pn->credits; | 1234 | d->tx_credits = pn->credits; |
| 1228 | } else { | 1235 | } else { |
| 1229 | d->cfc = s->cfc = RFCOMM_CFC_DISABLED; | 1236 | d->cfc = RFCOMM_CFC_DISABLED; |
| 1230 | set_bit(RFCOMM_TX_THROTTLED, &d->flags); | 1237 | set_bit(RFCOMM_TX_THROTTLED, &d->flags); |
| 1231 | } | 1238 | } |
| 1232 | 1239 | ||
| 1240 | if (s->cfc == RFCOMM_CFC_UNKNOWN) | ||
| 1241 | s->cfc = d->cfc; | ||
| 1242 | |||
| 1233 | d->priority = pn->priority; | 1243 | d->priority = pn->priority; |
| 1234 | 1244 | ||
| 1235 | d->mtu = s->mtu = btohs(pn->mtu); | 1245 | d->mtu = s->mtu = btohs(pn->mtu); |
| @@ -2035,7 +2045,7 @@ static int __init rfcomm_init(void) | |||
| 2035 | 2045 | ||
| 2036 | kernel_thread(rfcomm_run, NULL, CLONE_KERNEL); | 2046 | kernel_thread(rfcomm_run, NULL, CLONE_KERNEL); |
| 2037 | 2047 | ||
| 2038 | class_create_file(&bt_class, &class_attr_rfcomm_dlc); | 2048 | class_create_file(bt_class, &class_attr_rfcomm_dlc); |
| 2039 | 2049 | ||
| 2040 | rfcomm_init_sockets(); | 2050 | rfcomm_init_sockets(); |
| 2041 | 2051 | ||
| @@ -2050,7 +2060,7 @@ static int __init rfcomm_init(void) | |||
| 2050 | 2060 | ||
| 2051 | static void __exit rfcomm_exit(void) | 2061 | static void __exit rfcomm_exit(void) |
| 2052 | { | 2062 | { |
| 2053 | class_remove_file(&bt_class, &class_attr_rfcomm_dlc); | 2063 | class_remove_file(bt_class, &class_attr_rfcomm_dlc); |
| 2054 | 2064 | ||
| 2055 | hci_unregister_cb(&rfcomm_cb); | 2065 | hci_unregister_cb(&rfcomm_cb); |
| 2056 | 2066 | ||
| @@ -2073,6 +2083,9 @@ static void __exit rfcomm_exit(void) | |||
| 2073 | module_init(rfcomm_init); | 2083 | module_init(rfcomm_init); |
| 2074 | module_exit(rfcomm_exit); | 2084 | module_exit(rfcomm_exit); |
| 2075 | 2085 | ||
| 2086 | module_param(disable_cfc, bool, 0644); | ||
| 2087 | MODULE_PARM_DESC(disable_cfc, "Disable credit based flow control"); | ||
| 2088 | |||
| 2076 | module_param(l2cap_mtu, uint, 0644); | 2089 | module_param(l2cap_mtu, uint, 0644); |
| 2077 | MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection"); | 2090 | MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection"); |
| 2078 | 2091 | ||
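
Note: with the new disable_cfc parameter, credit based flow control is only negotiated when the session has not already been forced off, and the session-wide setting is pinned the first time a PN frame decides it. A hedged userspace sketch of that decision; the constants below are stand-ins, not the real RFCOMM_CFC_* values:

#include <stdio.h>

enum { CFC_UNKNOWN = -1, CFC_DISABLED = 0, CFC_ENABLED = 1 };

/* Decision made when a PN frame arrives: flow_ctrl 0xf0 only enables
 * credits if the session has not already been forced off (the new
 * disable_cfc case); 0xe0 always enables; anything else disables.
 * The session setting is only fixed while it is still unknown. */
static int apply_pn_cfc(int *session_cfc, unsigned char flow_ctrl)
{
    int dlc_cfc;

    if ((flow_ctrl == 0xf0 && *session_cfc != CFC_DISABLED) ||
        flow_ctrl == 0xe0)
        dlc_cfc = CFC_ENABLED;
    else
        dlc_cfc = CFC_DISABLED;

    if (*session_cfc == CFC_UNKNOWN)
        *session_cfc = dlc_cfc;

    return dlc_cfc;
}

int main(void)
{
    int normal = CFC_UNKNOWN;    /* ordinary session */
    int forced = CFC_DISABLED;   /* session created with disable_cfc=1 */
    int d1 = apply_pn_cfc(&normal, 0xf0);
    int d2 = apply_pn_cfc(&forced, 0xf0);

    printf("normal + 0xf0: dlc=%d session=%d\n", d1, normal);
    printf("forced + 0xf0: dlc=%d session=%d\n", d2, forced);
    return 0;
}
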
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 4e9962c8cfa6..220fee04e7f2 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c | |||
| @@ -944,7 +944,7 @@ int __init rfcomm_init_sockets(void) | |||
| 944 | if (err < 0) | 944 | if (err < 0) |
| 945 | goto error; | 945 | goto error; |
| 946 | 946 | ||
| 947 | class_create_file(&bt_class, &class_attr_rfcomm); | 947 | class_create_file(bt_class, &class_attr_rfcomm); |
| 948 | 948 | ||
| 949 | BT_INFO("RFCOMM socket layer initialized"); | 949 | BT_INFO("RFCOMM socket layer initialized"); |
| 950 | 950 | ||
| @@ -958,7 +958,7 @@ error: | |||
| 958 | 958 | ||
| 959 | void __exit rfcomm_cleanup_sockets(void) | 959 | void __exit rfcomm_cleanup_sockets(void) |
| 960 | { | 960 | { |
| 961 | class_remove_file(&bt_class, &class_attr_rfcomm); | 961 | class_remove_file(bt_class, &class_attr_rfcomm); |
| 962 | 962 | ||
| 963 | if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) | 963 | if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) |
| 964 | BT_ERR("RFCOMM socket layer unregistration failed"); | 964 | BT_ERR("RFCOMM socket layer unregistration failed"); |
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index a5f1e44db5d3..85defccc0287 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
| @@ -969,7 +969,7 @@ static int __init sco_init(void) | |||
| 969 | goto error; | 969 | goto error; |
| 970 | } | 970 | } |
| 971 | 971 | ||
| 972 | class_create_file(&bt_class, &class_attr_sco); | 972 | class_create_file(bt_class, &class_attr_sco); |
| 973 | 973 | ||
| 974 | BT_INFO("SCO (Voice Link) ver %s", VERSION); | 974 | BT_INFO("SCO (Voice Link) ver %s", VERSION); |
| 975 | BT_INFO("SCO socket layer initialized"); | 975 | BT_INFO("SCO socket layer initialized"); |
| @@ -983,7 +983,7 @@ error: | |||
| 983 | 983 | ||
| 984 | static void __exit sco_exit(void) | 984 | static void __exit sco_exit(void) |
| 985 | { | 985 | { |
| 986 | class_remove_file(&bt_class, &class_attr_sco); | 986 | class_remove_file(bt_class, &class_attr_sco); |
| 987 | 987 | ||
| 988 | if (bt_sock_unregister(BTPROTO_SCO) < 0) | 988 | if (bt_sock_unregister(BTPROTO_SCO) < 0) |
| 989 | BT_ERR("SCO socket unregistration failed"); | 989 | BT_ERR("SCO socket unregistration failed"); |
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 881d7d1a732a..06abb6634f5b 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
| @@ -117,12 +117,13 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 117 | continue; | 117 | continue; |
| 118 | 118 | ||
| 119 | if (idx < s_idx) | 119 | if (idx < s_idx) |
| 120 | continue; | 120 | goto cont; |
| 121 | 121 | ||
| 122 | err = br_fill_ifinfo(skb, p, NETLINK_CB(cb->skb).pid, | 122 | err = br_fill_ifinfo(skb, p, NETLINK_CB(cb->skb).pid, |
| 123 | cb->nlh->nlmsg_seq, RTM_NEWLINK, NLM_F_MULTI); | 123 | cb->nlh->nlmsg_seq, RTM_NEWLINK, NLM_F_MULTI); |
| 124 | if (err <= 0) | 124 | if (err <= 0) |
| 125 | break; | 125 | break; |
| 126 | cont: | ||
| 126 | ++idx; | 127 | ++idx; |
| 127 | } | 128 | } |
| 128 | read_unlock(&dev_base_lock); | 129 | read_unlock(&dev_base_lock); |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 7cfbdb215ba2..44f6a181a754 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
| @@ -71,6 +71,13 @@ static kmem_cache_t *skbuff_head_cache __read_mostly; | |||
| 71 | static kmem_cache_t *skbuff_fclone_cache __read_mostly; | 71 | static kmem_cache_t *skbuff_fclone_cache __read_mostly; |
| 72 | 72 | ||
| 73 | /* | 73 | /* |
| 74 | * lockdep: lock class key used by skb_queue_head_init(): | ||
| 75 | */ | ||
| 76 | struct lock_class_key skb_queue_lock_key; | ||
| 77 | |||
| 78 | EXPORT_SYMBOL(skb_queue_lock_key); | ||
| 79 | |||
| 80 | /* | ||
| 74 | * Keep out-of-line to prevent kernel bloat. | 81 | * Keep out-of-line to prevent kernel bloat. |
| 75 | * __builtin_return_address is not used because it is not always | 82 | * __builtin_return_address is not used because it is not always |
| 76 | * reliable. | 83 | * reliable. |
diff --git a/net/core/sock.c b/net/core/sock.c index 533b9317144b..51fcfbc041a7 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
| @@ -129,6 +129,53 @@ | |||
| 129 | #include <net/tcp.h> | 129 | #include <net/tcp.h> |
| 130 | #endif | 130 | #endif |
| 131 | 131 | ||
| 132 | /* | ||
| 133 | * Each address family might have different locking rules, so we have | ||
| 134 | * one slock key per address family: | ||
| 135 | */ | ||
| 136 | static struct lock_class_key af_family_keys[AF_MAX]; | ||
| 137 | static struct lock_class_key af_family_slock_keys[AF_MAX]; | ||
| 138 | |||
| 139 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
| 140 | /* | ||
| 141 | * Make lock validator output more readable. (we pre-construct these | ||
| 142 | * strings build-time, so that runtime initialization of socket | ||
| 143 | * locks is fast): | ||
| 144 | */ | ||
| 145 | static const char *af_family_key_strings[AF_MAX+1] = { | ||
| 146 | "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" , | ||
| 147 | "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK", | ||
| 148 | "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" , | ||
| 149 | "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" , | ||
| 150 | "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" , | ||
| 151 | "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" , | ||
| 152 | "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" , | ||
| 153 | "sk_lock-21" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" , | ||
| 154 | "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" , | ||
| 155 | "sk_lock-27" , "sk_lock-28" , "sk_lock-29" , | ||
| 156 | "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_MAX" | ||
| 157 | }; | ||
| 158 | static const char *af_family_slock_key_strings[AF_MAX+1] = { | ||
| 159 | "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , | ||
| 160 | "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK", | ||
| 161 | "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" , | ||
| 162 | "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" , | ||
| 163 | "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" , | ||
| 164 | "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" , | ||
| 165 | "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" , | ||
| 166 | "slock-21" , "slock-AF_SNA" , "slock-AF_IRDA" , | ||
| 167 | "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" , | ||
| 168 | "slock-27" , "slock-28" , "slock-29" , | ||
| 169 | "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_MAX" | ||
| 170 | }; | ||
| 171 | #endif | ||
| 172 | |||
| 173 | /* | ||
| 174 | * sk_callback_lock locking rules are per-address-family, | ||
| 175 | * so split the lock classes by using a per-AF key: | ||
| 176 | */ | ||
| 177 | static struct lock_class_key af_callback_keys[AF_MAX]; | ||
| 178 | |||
| 132 | /* Take into consideration the size of the struct sk_buff overhead in the | 179 | /* Take into consideration the size of the struct sk_buff overhead in the |
| 133 | * determination of these values, since that is non-constant across | 180 | * determination of these values, since that is non-constant across |
| 134 | * platforms. This makes socket queueing behavior and performance | 181 | * platforms. This makes socket queueing behavior and performance |
| @@ -237,9 +284,16 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb) | |||
| 237 | skb->dev = NULL; | 284 | skb->dev = NULL; |
| 238 | 285 | ||
| 239 | bh_lock_sock(sk); | 286 | bh_lock_sock(sk); |
| 240 | if (!sock_owned_by_user(sk)) | 287 | if (!sock_owned_by_user(sk)) { |
| 288 | /* | ||
| 289 | * trylock + unlock semantics: | ||
| 290 | */ | ||
| 291 | mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_); | ||
| 292 | |||
| 241 | rc = sk->sk_backlog_rcv(sk, skb); | 293 | rc = sk->sk_backlog_rcv(sk, skb); |
| 242 | else | 294 | |
| 295 | mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); | ||
| 296 | } else | ||
| 243 | sk_add_backlog(sk, skb); | 297 | sk_add_backlog(sk, skb); |
| 244 | bh_unlock_sock(sk); | 298 | bh_unlock_sock(sk); |
| 245 | out: | 299 | out: |
| @@ -749,6 +803,33 @@ lenout: | |||
| 749 | return 0; | 803 | return 0; |
| 750 | } | 804 | } |
| 751 | 805 | ||
| 806 | /* | ||
| 807 | * Initialize an sk_lock. | ||
| 808 | * | ||
| 809 | * (We also register the sk_lock with the lock validator.) | ||
| 810 | */ | ||
| 811 | static void inline sock_lock_init(struct sock *sk) | ||
| 812 | { | ||
| 813 | spin_lock_init(&sk->sk_lock.slock); | ||
| 814 | sk->sk_lock.owner = NULL; | ||
| 815 | init_waitqueue_head(&sk->sk_lock.wq); | ||
| 816 | /* | ||
| 817 | * Make sure we are not reinitializing a held lock: | ||
| 818 | */ | ||
| 819 | debug_check_no_locks_freed((void *)&sk->sk_lock, sizeof(sk->sk_lock)); | ||
| 820 | |||
| 821 | /* | ||
| 822 | * Mark both the sk_lock and the sk_lock.slock as a | ||
| 823 | * per-address-family lock class: | ||
| 824 | */ | ||
| 825 | lockdep_set_class_and_name(&sk->sk_lock.slock, | ||
| 826 | af_family_slock_keys + sk->sk_family, | ||
| 827 | af_family_slock_key_strings[sk->sk_family]); | ||
| 828 | lockdep_init_map(&sk->sk_lock.dep_map, | ||
| 829 | af_family_key_strings[sk->sk_family], | ||
| 830 | af_family_keys + sk->sk_family); | ||
| 831 | } | ||
| 832 | |||
| 752 | /** | 833 | /** |
| 753 | * sk_alloc - All socket objects are allocated here | 834 | * sk_alloc - All socket objects are allocated here |
| 754 | * @family: protocol family | 835 | * @family: protocol family |
| @@ -848,6 +929,8 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority) | |||
| 848 | 929 | ||
| 849 | rwlock_init(&newsk->sk_dst_lock); | 930 | rwlock_init(&newsk->sk_dst_lock); |
| 850 | rwlock_init(&newsk->sk_callback_lock); | 931 | rwlock_init(&newsk->sk_callback_lock); |
| 932 | lockdep_set_class(&newsk->sk_callback_lock, | ||
| 933 | af_callback_keys + newsk->sk_family); | ||
| 851 | 934 | ||
| 852 | newsk->sk_dst_cache = NULL; | 935 | newsk->sk_dst_cache = NULL; |
| 853 | newsk->sk_wmem_queued = 0; | 936 | newsk->sk_wmem_queued = 0; |
| @@ -1422,6 +1505,8 @@ void sock_init_data(struct socket *sock, struct sock *sk) | |||
| 1422 | 1505 | ||
| 1423 | rwlock_init(&sk->sk_dst_lock); | 1506 | rwlock_init(&sk->sk_dst_lock); |
| 1424 | rwlock_init(&sk->sk_callback_lock); | 1507 | rwlock_init(&sk->sk_callback_lock); |
| 1508 | lockdep_set_class(&sk->sk_callback_lock, | ||
| 1509 | af_callback_keys + sk->sk_family); | ||
| 1425 | 1510 | ||
| 1426 | sk->sk_state_change = sock_def_wakeup; | 1511 | sk->sk_state_change = sock_def_wakeup; |
| 1427 | sk->sk_data_ready = sock_def_readable; | 1512 | sk->sk_data_ready = sock_def_readable; |
| @@ -1449,24 +1534,34 @@ void sock_init_data(struct socket *sock, struct sock *sk) | |||
| 1449 | void fastcall lock_sock(struct sock *sk) | 1534 | void fastcall lock_sock(struct sock *sk) |
| 1450 | { | 1535 | { |
| 1451 | might_sleep(); | 1536 | might_sleep(); |
| 1452 | spin_lock_bh(&(sk->sk_lock.slock)); | 1537 | spin_lock_bh(&sk->sk_lock.slock); |
| 1453 | if (sk->sk_lock.owner) | 1538 | if (sk->sk_lock.owner) |
| 1454 | __lock_sock(sk); | 1539 | __lock_sock(sk); |
| 1455 | sk->sk_lock.owner = (void *)1; | 1540 | sk->sk_lock.owner = (void *)1; |
| 1456 | spin_unlock_bh(&(sk->sk_lock.slock)); | 1541 | spin_unlock(&sk->sk_lock.slock); |
| 1542 | /* | ||
| 1543 | * The sk_lock has mutex_lock() semantics here: | ||
| 1544 | */ | ||
| 1545 | mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_); | ||
| 1546 | local_bh_enable(); | ||
| 1457 | } | 1547 | } |
| 1458 | 1548 | ||
| 1459 | EXPORT_SYMBOL(lock_sock); | 1549 | EXPORT_SYMBOL(lock_sock); |
| 1460 | 1550 | ||
| 1461 | void fastcall release_sock(struct sock *sk) | 1551 | void fastcall release_sock(struct sock *sk) |
| 1462 | { | 1552 | { |
| 1463 | spin_lock_bh(&(sk->sk_lock.slock)); | 1553 | /* |
| 1554 | * The sk_lock has mutex_unlock() semantics: | ||
| 1555 | */ | ||
| 1556 | mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); | ||
| 1557 | |||
| 1558 | spin_lock_bh(&sk->sk_lock.slock); | ||
| 1464 | if (sk->sk_backlog.tail) | 1559 | if (sk->sk_backlog.tail) |
| 1465 | __release_sock(sk); | 1560 | __release_sock(sk); |
| 1466 | sk->sk_lock.owner = NULL; | 1561 | sk->sk_lock.owner = NULL; |
| 1467 | if (waitqueue_active(&(sk->sk_lock.wq))) | 1562 | if (waitqueue_active(&sk->sk_lock.wq)) |
| 1468 | wake_up(&(sk->sk_lock.wq)); | 1563 | wake_up(&sk->sk_lock.wq); |
| 1469 | spin_unlock_bh(&(sk->sk_lock.slock)); | 1564 | spin_unlock_bh(&sk->sk_lock.slock); |
| 1470 | } | 1565 | } |
| 1471 | EXPORT_SYMBOL(release_sock); | 1566 | EXPORT_SYMBOL(release_sock); |
| 1472 | 1567 | ||
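
Note: lock_sock() and release_sock() build a sleeping, mutex-like lock out of a spinlock, an owner field and a wait queue, which is why the patch annotates them with mutex_acquire()/mutex_release() for lockdep. A rough userspace analogue using pthreads, purely for illustration; the kernel primitives and the lockdep hooks have no direct userspace equivalent:

#include <pthread.h>
#include <stdio.h>

/* Userspace approximation of the sk_lock: a short-held lock (here a
 * pthread mutex standing in for the slock), an owner flag, and a wait
 * queue (a condvar).  Taken as a whole, lock/unlock behave like a
 * sleeping mutex, which is what the lockdep annotations express. */
struct sock_lock {
    pthread_mutex_t slock;
    pthread_cond_t  wq;
    int             owned;
};

static void sock_lock_init(struct sock_lock *l)
{
    pthread_mutex_init(&l->slock, NULL);
    pthread_cond_init(&l->wq, NULL);
    l->owned = 0;
}

static void lock_sock(struct sock_lock *l)
{
    pthread_mutex_lock(&l->slock);
    while (l->owned)                 /* cf. __lock_sock(): sleep until free */
        pthread_cond_wait(&l->wq, &l->slock);
    l->owned = 1;
    pthread_mutex_unlock(&l->slock);
}

static void release_sock(struct sock_lock *l)
{
    pthread_mutex_lock(&l->slock);
    l->owned = 0;
    pthread_cond_broadcast(&l->wq);  /* cf. wake_up(&sk->sk_lock.wq) */
    pthread_mutex_unlock(&l->slock);
}

int main(void)
{
    struct sock_lock l;

    sock_lock_init(&l);
    lock_sock(&l);
    printf("locked\n");
    release_sock(&l);
    printf("released\n");
    return 0;
}
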
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 8d157157bf8e..318d4674faa1 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
| @@ -1106,7 +1106,15 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) | |||
| 1106 | int ihl; | 1106 | int ihl; |
| 1107 | int id; | 1107 | int id; |
| 1108 | 1108 | ||
| 1109 | if (!pskb_may_pull(skb, sizeof(*iph))) | 1109 | if (unlikely(skb_shinfo(skb)->gso_type & |
| 1110 | ~(SKB_GSO_TCPV4 | | ||
| 1111 | SKB_GSO_UDP | | ||
| 1112 | SKB_GSO_DODGY | | ||
| 1113 | SKB_GSO_TCP_ECN | | ||
| 1114 | 0))) | ||
| 1115 | goto out; | ||
| 1116 | |||
| 1117 | if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) | ||
| 1110 | goto out; | 1118 | goto out; |
| 1111 | 1119 | ||
| 1112 | iph = skb->nh.iph; | 1120 | iph = skb->nh.iph; |
| @@ -1114,7 +1122,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) | |||
| 1114 | if (ihl < sizeof(*iph)) | 1122 | if (ihl < sizeof(*iph)) |
| 1115 | goto out; | 1123 | goto out; |
| 1116 | 1124 | ||
| 1117 | if (!pskb_may_pull(skb, ihl)) | 1125 | if (unlikely(!pskb_may_pull(skb, ihl))) |
| 1118 | goto out; | 1126 | goto out; |
| 1119 | 1127 | ||
| 1120 | skb->h.raw = __skb_pull(skb, ihl); | 1128 | skb->h.raw = __skb_pull(skb, ihl); |
| @@ -1125,7 +1133,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) | |||
| 1125 | 1133 | ||
| 1126 | rcu_read_lock(); | 1134 | rcu_read_lock(); |
| 1127 | ops = rcu_dereference(inet_protos[proto]); | 1135 | ops = rcu_dereference(inet_protos[proto]); |
| 1128 | if (ops && ops->gso_segment) | 1136 | if (likely(ops && ops->gso_segment)) |
| 1129 | segs = ops->gso_segment(skb, features); | 1137 | segs = ops->gso_segment(skb, features); |
| 1130 | rcu_read_unlock(); | 1138 | rcu_read_unlock(); |
| 1131 | 1139 | ||
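
Note: the added gso_type test rejects any offload type bit the IPv4 GSO path does not understand before the headers are touched. The same mask check in plain C, with made-up flag values standing in for the kernel's SKB_GSO_* constants:

#include <stdio.h>

/* Stand-in flag values; the real SKB_GSO_* constants live in skbuff.h. */
#define GSO_TCPV4   (1u << 0)
#define GSO_UDP     (1u << 1)
#define GSO_DODGY   (1u << 2)
#define GSO_TCP_ECN (1u << 3)
#define GSO_TCPV6   (1u << 4)

/* Accept only the offload types the IPv4 GSO path can segment. */
static int ipv4_gso_type_ok(unsigned int type)
{
    return !(type & ~(GSO_TCPV4 | GSO_UDP | GSO_DODGY | GSO_TCP_ECN));
}

int main(void)
{
    printf("TCPv4|ECN: %s\n",
           ipv4_gso_type_ok(GSO_TCPV4 | GSO_TCP_ECN) ? "ok" : "drop");
    printf("TCPv6:     %s\n",
           ipv4_gso_type_ok(GSO_TCPV6) ? "ok" : "drop");
    return 0;
}
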
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index da44fabf4dc5..2dc6dbb28467 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
| @@ -205,21 +205,27 @@ __u8 ip_tos2prio[16] = { | |||
| 205 | struct rt_hash_bucket { | 205 | struct rt_hash_bucket { |
| 206 | struct rtable *chain; | 206 | struct rtable *chain; |
| 207 | }; | 207 | }; |
| 208 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | 208 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \ |
| 209 | defined(CONFIG_PROVE_LOCKING) | ||
| 209 | /* | 210 | /* |
| 210 | * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks | 211 | * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks |
| 211 | * The size of this table is a power of two and depends on the number of CPUS. | 212 | * The size of this table is a power of two and depends on the number of CPUS. |
| 213 | * (on lockdep we have a quite big spinlock_t, so keep the size down there) | ||
| 212 | */ | 214 | */ |
| 213 | #if NR_CPUS >= 32 | 215 | #ifdef CONFIG_LOCKDEP |
| 214 | #define RT_HASH_LOCK_SZ 4096 | 216 | # define RT_HASH_LOCK_SZ 256 |
| 215 | #elif NR_CPUS >= 16 | ||
| 216 | #define RT_HASH_LOCK_SZ 2048 | ||
| 217 | #elif NR_CPUS >= 8 | ||
| 218 | #define RT_HASH_LOCK_SZ 1024 | ||
| 219 | #elif NR_CPUS >= 4 | ||
| 220 | #define RT_HASH_LOCK_SZ 512 | ||
| 221 | #else | 217 | #else |
| 222 | #define RT_HASH_LOCK_SZ 256 | 218 | # if NR_CPUS >= 32 |
| 219 | # define RT_HASH_LOCK_SZ 4096 | ||
| 220 | # elif NR_CPUS >= 16 | ||
| 221 | # define RT_HASH_LOCK_SZ 2048 | ||
| 222 | # elif NR_CPUS >= 8 | ||
| 223 | # define RT_HASH_LOCK_SZ 1024 | ||
| 224 | # elif NR_CPUS >= 4 | ||
| 225 | # define RT_HASH_LOCK_SZ 512 | ||
| 226 | # else | ||
| 227 | # define RT_HASH_LOCK_SZ 256 | ||
| 228 | # endif | ||
| 223 | #endif | 229 | #endif |
| 224 | 230 | ||
| 225 | static spinlock_t *rt_hash_locks; | 231 | static spinlock_t *rt_hash_locks; |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 804458712d88..f6a2d9223d07 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
| @@ -2170,8 +2170,19 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) | |||
| 2170 | 2170 | ||
| 2171 | if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { | 2171 | if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { |
| 2172 | /* Packet is from an untrusted source, reset gso_segs. */ | 2172 | /* Packet is from an untrusted source, reset gso_segs. */ |
| 2173 | int mss = skb_shinfo(skb)->gso_size; | 2173 | int type = skb_shinfo(skb)->gso_type; |
| 2174 | int mss; | ||
| 2175 | |||
| 2176 | if (unlikely(type & | ||
| 2177 | ~(SKB_GSO_TCPV4 | | ||
| 2178 | SKB_GSO_DODGY | | ||
| 2179 | SKB_GSO_TCP_ECN | | ||
| 2180 | SKB_GSO_TCPV6 | | ||
| 2181 | 0) || | ||
| 2182 | !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))) | ||
| 2183 | goto out; | ||
| 2174 | 2184 | ||
| 2185 | mss = skb_shinfo(skb)->gso_size; | ||
| 2175 | skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss; | 2186 | skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss; |
| 2176 | 2187 | ||
| 2177 | segs = NULL; | 2188 | segs = NULL; |
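
Note: once the type check passes, gso_segs for an untrusted packet is recomputed as the number of MSS-sized pieces the payload splits into, i.e. an integer ceiling division. A quick check of that arithmetic:

#include <stdio.h>

/* gso_segs for an untrusted source: ceil(len / mss) in integer math. */
static unsigned int gso_segs(unsigned int len, unsigned int mss)
{
    return (len + mss - 1) / mss;
}

int main(void)
{
    printf("%u\n", gso_segs(4000, 1460));   /* 3: two full MSS plus a tail */
    printf("%u\n", gso_segs(2920, 1460));   /* 2: divides exactly */
    return 0;
}
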
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 8355b729fa95..5a886e6efbbe 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -90,7 +90,7 @@ static struct socket *tcp_socket; | |||
| 90 | void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); | 90 | void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); |
| 91 | 91 | ||
| 92 | struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { | 92 | struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { |
| 93 | .lhash_lock = RW_LOCK_UNLOCKED, | 93 | .lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock), |
| 94 | .lhash_users = ATOMIC_INIT(0), | 94 | .lhash_users = ATOMIC_INIT(0), |
| 95 | .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait), | 95 | .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait), |
| 96 | }; | 96 | }; |
| @@ -1090,7 +1090,7 @@ process: | |||
| 1090 | 1090 | ||
| 1091 | skb->dev = NULL; | 1091 | skb->dev = NULL; |
| 1092 | 1092 | ||
| 1093 | bh_lock_sock(sk); | 1093 | bh_lock_sock_nested(sk); |
| 1094 | ret = 0; | 1094 | ret = 0; |
| 1095 | if (!sock_owned_by_user(sk)) { | 1095 | if (!sock_owned_by_user(sk)) { |
| 1096 | #ifdef CONFIG_NET_DMA | 1096 | #ifdef CONFIG_NET_DMA |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index e0851697ad5e..0ccb7cb22b15 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
| @@ -40,7 +40,7 @@ int sysctl_tcp_abort_on_overflow; | |||
| 40 | struct inet_timewait_death_row tcp_death_row = { | 40 | struct inet_timewait_death_row tcp_death_row = { |
| 41 | .sysctl_max_tw_buckets = NR_FILE * 2, | 41 | .sysctl_max_tw_buckets = NR_FILE * 2, |
| 42 | .period = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS, | 42 | .period = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS, |
| 43 | .death_lock = SPIN_LOCK_UNLOCKED, | 43 | .death_lock = __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock), |
| 44 | .hashinfo = &tcp_hashinfo, | 44 | .hashinfo = &tcp_hashinfo, |
| 45 | .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, | 45 | .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, |
| 46 | (unsigned long)&tcp_death_row), | 46 | (unsigned long)&tcp_death_row), |
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index c28e5c287447..0c17dec11c8d 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
| @@ -64,6 +64,14 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) | |||
| 64 | struct inet6_protocol *ops; | 64 | struct inet6_protocol *ops; |
| 65 | int proto; | 65 | int proto; |
| 66 | 66 | ||
| 67 | if (unlikely(skb_shinfo(skb)->gso_type & | ||
| 68 | ~(SKB_GSO_UDP | | ||
| 69 | SKB_GSO_DODGY | | ||
| 70 | SKB_GSO_TCP_ECN | | ||
| 71 | SKB_GSO_TCPV6 | | ||
| 72 | 0))) | ||
| 73 | goto out; | ||
| 74 | |||
| 67 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) | 75 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) |
| 68 | goto out; | 76 | goto out; |
| 69 | 77 | ||
| @@ -111,7 +119,8 @@ unlock: | |||
| 111 | 119 | ||
| 112 | for (skb = segs; skb; skb = skb->next) { | 120 | for (skb = segs; skb; skb = skb->next) { |
| 113 | ipv6h = skb->nh.ipv6h; | 121 | ipv6h = skb->nh.ipv6h; |
| 114 | ipv6h->payload_len = htons(skb->len - skb->mac_len); | 122 | ipv6h->payload_len = htons(skb->len - skb->mac_len - |
| 123 | sizeof(*ipv6h)); | ||
| 115 | } | 124 | } |
| 116 | 125 | ||
| 117 | out: | 126 | out: |
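
Note: the payload_len fix accounts for the fixed 40-byte IPv6 header: the value written into each segment is the frame length minus the MAC header and minus sizeof(*ipv6h). A worked example with an assumed Ethernet frame size:

#include <stdio.h>

#define ETH_HLEN  14   /* assumed Ethernet MAC header */
#define IPV6_HLEN 40   /* fixed IPv6 header, sizeof(struct ipv6hdr) */

int main(void)
{
    unsigned int skb_len = 1514;   /* whole frame as seen by the GSO path */
    unsigned int payload_len = skb_len - ETH_HLEN - IPV6_HLEN;

    /* 1514 - 14 - 40 = 1460 bytes of actual IPv6 payload */
    printf("payload_len = %u\n", payload_len);
    return 0;
}
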
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 7ef143c0ebf6..f26898b00347 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/vmalloc.h> | 25 | #include <linux/vmalloc.h> |
| 26 | #include <linux/netdevice.h> | 26 | #include <linux/netdevice.h> |
| 27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
| 28 | #include <linux/poison.h> | ||
| 28 | #include <linux/icmpv6.h> | 29 | #include <linux/icmpv6.h> |
| 29 | #include <net/ipv6.h> | 30 | #include <net/ipv6.h> |
| 30 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
| @@ -376,7 +377,7 @@ ip6t_do_table(struct sk_buff **pskb, | |||
| 376 | } while (!hotdrop); | 377 | } while (!hotdrop); |
| 377 | 378 | ||
| 378 | #ifdef CONFIG_NETFILTER_DEBUG | 379 | #ifdef CONFIG_NETFILTER_DEBUG |
| 379 | ((struct ip6t_entry *)table_base)->comefrom = 0xdead57ac; | 380 | ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON; |
| 380 | #endif | 381 | #endif |
| 381 | read_unlock_bh(&table->lock); | 382 | read_unlock_bh(&table->lock); |
| 382 | 383 | ||
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 70cee82a98bf..55c0adc8f115 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
| @@ -156,7 +156,7 @@ static void netlink_sock_destruct(struct sock *sk) | |||
| 156 | 156 | ||
| 157 | static void netlink_table_grab(void) | 157 | static void netlink_table_grab(void) |
| 158 | { | 158 | { |
| 159 | write_lock_bh(&nl_table_lock); | 159 | write_lock_irq(&nl_table_lock); |
| 160 | 160 | ||
| 161 | if (atomic_read(&nl_table_users)) { | 161 | if (atomic_read(&nl_table_users)) { |
| 162 | DECLARE_WAITQUEUE(wait, current); | 162 | DECLARE_WAITQUEUE(wait, current); |
| @@ -166,9 +166,9 @@ static void netlink_table_grab(void) | |||
| 166 | set_current_state(TASK_UNINTERRUPTIBLE); | 166 | set_current_state(TASK_UNINTERRUPTIBLE); |
| 167 | if (atomic_read(&nl_table_users) == 0) | 167 | if (atomic_read(&nl_table_users) == 0) |
| 168 | break; | 168 | break; |
| 169 | write_unlock_bh(&nl_table_lock); | 169 | write_unlock_irq(&nl_table_lock); |
| 170 | schedule(); | 170 | schedule(); |
| 171 | write_lock_bh(&nl_table_lock); | 171 | write_lock_irq(&nl_table_lock); |
| 172 | } | 172 | } |
| 173 | 173 | ||
| 174 | __set_current_state(TASK_RUNNING); | 174 | __set_current_state(TASK_RUNNING); |
| @@ -178,7 +178,7 @@ static void netlink_table_grab(void) | |||
| 178 | 178 | ||
| 179 | static __inline__ void netlink_table_ungrab(void) | 179 | static __inline__ void netlink_table_ungrab(void) |
| 180 | { | 180 | { |
| 181 | write_unlock_bh(&nl_table_lock); | 181 | write_unlock_irq(&nl_table_lock); |
| 182 | wake_up(&nl_table_wait); | 182 | wake_up(&nl_table_wait); |
| 183 | } | 183 | } |
| 184 | 184 | ||
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index eba6df054b1f..389a4119e1b4 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
| @@ -800,7 +800,7 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags) | |||
| 800 | 800 | ||
| 801 | /* Now attach up the new socket */ | 801 | /* Now attach up the new socket */ |
| 802 | kfree_skb(skb); | 802 | kfree_skb(skb); |
| 803 | sk->sk_ack_backlog--; | 803 | sk_acceptq_removed(sk); |
| 804 | newsock->sk = newsk; | 804 | newsock->sk = newsk; |
| 805 | 805 | ||
| 806 | out: | 806 | out: |
| @@ -985,7 +985,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev) | |||
| 985 | nr_make->vr = 0; | 985 | nr_make->vr = 0; |
| 986 | nr_make->vl = 0; | 986 | nr_make->vl = 0; |
| 987 | nr_make->state = NR_STATE_3; | 987 | nr_make->state = NR_STATE_3; |
| 988 | sk->sk_ack_backlog++; | 988 | sk_acceptq_added(sk); |
| 989 | 989 | ||
| 990 | nr_insert_socket(make); | 990 | nr_insert_socket(make); |
| 991 | 991 | ||
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 7799fe82aeb6..d0a67bb31363 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
| @@ -752,7 +752,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le | |||
| 752 | 752 | ||
| 753 | rose_insert_socket(sk); /* Finish the bind */ | 753 | rose_insert_socket(sk); /* Finish the bind */ |
| 754 | } | 754 | } |
| 755 | 755 | rose_try_next_neigh: | |
| 756 | rose->dest_addr = addr->srose_addr; | 756 | rose->dest_addr = addr->srose_addr; |
| 757 | rose->dest_call = addr->srose_call; | 757 | rose->dest_call = addr->srose_call; |
| 758 | rose->rand = ((long)rose & 0xFFFF) + rose->lci; | 758 | rose->rand = ((long)rose & 0xFFFF) + rose->lci; |
| @@ -810,6 +810,11 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le | |||
| 810 | } | 810 | } |
| 811 | 811 | ||
| 812 | if (sk->sk_state != TCP_ESTABLISHED) { | 812 | if (sk->sk_state != TCP_ESTABLISHED) { |
| 813 | /* Try next neighbour */ | ||
| 814 | rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic); | ||
| 815 | if (rose->neighbour) | ||
| 816 | goto rose_try_next_neigh; | ||
| 817 | /* No more neighbour */ | ||
| 813 | sock->state = SS_UNCONNECTED; | 818 | sock->state = SS_UNCONNECTED; |
| 814 | return sock_error(sk); /* Always set at this point */ | 819 | return sock_error(sk); /* Always set at this point */ |
| 815 | } | 820 | } |
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c index 9d0bf2a1ea3f..7c279e2659ec 100644 --- a/net/rose/rose_dev.c +++ b/net/rose/rose_dev.c | |||
| @@ -59,6 +59,7 @@ static int rose_rebuild_header(struct sk_buff *skb) | |||
| 59 | struct net_device_stats *stats = netdev_priv(dev); | 59 | struct net_device_stats *stats = netdev_priv(dev); |
| 60 | unsigned char *bp = (unsigned char *)skb->data; | 60 | unsigned char *bp = (unsigned char *)skb->data; |
| 61 | struct sk_buff *skbn; | 61 | struct sk_buff *skbn; |
| 62 | unsigned int len; | ||
| 62 | 63 | ||
| 63 | #ifdef CONFIG_INET | 64 | #ifdef CONFIG_INET |
| 64 | if (arp_find(bp + 7, skb)) { | 65 | if (arp_find(bp + 7, skb)) { |
| @@ -75,6 +76,8 @@ static int rose_rebuild_header(struct sk_buff *skb) | |||
| 75 | 76 | ||
| 76 | kfree_skb(skb); | 77 | kfree_skb(skb); |
| 77 | 78 | ||
| 79 | len = skbn->len; | ||
| 80 | |||
| 78 | if (!rose_route_frame(skbn, NULL)) { | 81 | if (!rose_route_frame(skbn, NULL)) { |
| 79 | kfree_skb(skbn); | 82 | kfree_skb(skbn); |
| 80 | stats->tx_errors++; | 83 | stats->tx_errors++; |
| @@ -82,7 +85,7 @@ static int rose_rebuild_header(struct sk_buff *skb) | |||
| 82 | } | 85 | } |
| 83 | 86 | ||
| 84 | stats->tx_packets++; | 87 | stats->tx_packets++; |
| 85 | stats->tx_bytes += skbn->len; | 88 | stats->tx_bytes += len; |
| 86 | #endif | 89 | #endif |
| 87 | return 1; | 90 | return 1; |
| 88 | } | 91 | } |
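The rose_rebuild_header() fix caches skbn->len before the buffer is passed to rose_route_frame(): on success the routing code takes ownership of skbn, so dereferencing it afterwards for the byte count would touch memory that may already have been freed or queued elsewhere. The general pattern, mirroring the hunk above (sketch):

    unsigned int len = skbn->len;           /* cache before handing skbn away */

    if (!rose_route_frame(skbn, NULL)) {
            kfree_skb(skbn);                /* routing refused the frame: drop it */
            stats->tx_errors++;
            return 1;                       /* (error path as in the surrounding code) */
    }

    stats->tx_packets++;
    stats->tx_bytes += len;                 /* skbn now belongs to the routing layer */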
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 6db6006616c6..dc6cb93c8830 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
| @@ -515,7 +515,7 @@ rpc_depopulate(struct dentry *parent) | |||
| 515 | struct dentry *dentry, *dvec[10]; | 515 | struct dentry *dentry, *dvec[10]; |
| 516 | int n = 0; | 516 | int n = 0; |
| 517 | 517 | ||
| 518 | mutex_lock(&dir->i_mutex); | 518 | mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD); |
| 519 | repeat: | 519 | repeat: |
| 520 | spin_lock(&dcache_lock); | 520 | spin_lock(&dcache_lock); |
| 521 | list_for_each_safe(pos, next, &parent->d_subdirs) { | 521 | list_for_each_safe(pos, next, &parent->d_subdirs) { |
| @@ -631,7 +631,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd) | |||
| 631 | if ((error = rpc_lookup_parent(path, nd)) != 0) | 631 | if ((error = rpc_lookup_parent(path, nd)) != 0) |
| 632 | return ERR_PTR(error); | 632 | return ERR_PTR(error); |
| 633 | dir = nd->dentry->d_inode; | 633 | dir = nd->dentry->d_inode; |
| 634 | mutex_lock(&dir->i_mutex); | 634 | mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); |
| 635 | dentry = lookup_one_len(nd->last.name, nd->dentry, nd->last.len); | 635 | dentry = lookup_one_len(nd->last.name, nd->dentry, nd->last.len); |
| 636 | if (IS_ERR(dentry)) | 636 | if (IS_ERR(dentry)) |
| 637 | goto out_err; | 637 | goto out_err; |
| @@ -693,7 +693,7 @@ rpc_rmdir(char *path) | |||
| 693 | if ((error = rpc_lookup_parent(path, &nd)) != 0) | 693 | if ((error = rpc_lookup_parent(path, &nd)) != 0) |
| 694 | return error; | 694 | return error; |
| 695 | dir = nd.dentry->d_inode; | 695 | dir = nd.dentry->d_inode; |
| 696 | mutex_lock(&dir->i_mutex); | 696 | mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); |
| 697 | dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); | 697 | dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); |
| 698 | if (IS_ERR(dentry)) { | 698 | if (IS_ERR(dentry)) { |
| 699 | error = PTR_ERR(dentry); | 699 | error = PTR_ERR(dentry); |
| @@ -754,7 +754,7 @@ rpc_unlink(char *path) | |||
| 754 | if ((error = rpc_lookup_parent(path, &nd)) != 0) | 754 | if ((error = rpc_lookup_parent(path, &nd)) != 0) |
| 755 | return error; | 755 | return error; |
| 756 | dir = nd.dentry->d_inode; | 756 | dir = nd.dentry->d_inode; |
| 757 | mutex_lock(&dir->i_mutex); | 757 | mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); |
| 758 | dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); | 758 | dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); |
| 759 | if (IS_ERR(dentry)) { | 759 | if (IS_ERR(dentry)) { |
| 760 | error = PTR_ERR(dentry); | 760 | error = PTR_ERR(dentry); |
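The rpc_pipe changes swap plain mutex_lock() calls for mutex_lock_nested() with explicit I_MUTEX_PARENT and I_MUTEX_CHILD subclasses, so lockdep can tell a parent directory's i_mutex apart from a child's when both are held. A sketch of the nesting these annotations describe (the subclass constants are assumed to come from the inode_i_mutex_lock_class enum in include/linux/fs.h; names here are generic, not the rpc_pipe code):

    /* Parent directory locked first ... */
    mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
    dentry = lookup_one_len(name, parent, strlen(name));
    if (!IS_ERR(dentry) && dentry->d_inode) {
            /* ... then the child, with a distinct lockdep subclass. */
            mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
            /* operate on the child entry */
            mutex_unlock(&dentry->d_inode->i_mutex);
    }
    mutex_unlock(&dir->i_mutex);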
diff --git a/net/tipc/core.h b/net/tipc/core.h index 86f54f3512f1..762aac2572be 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h | |||
| @@ -297,7 +297,10 @@ static inline struct tipc_msg *buf_msg(struct sk_buff *skb) | |||
| 297 | * buf_acquire - creates a TIPC message buffer | 297 | * buf_acquire - creates a TIPC message buffer |
| 298 | * @size: message size (including TIPC header) | 298 | * @size: message size (including TIPC header) |
| 299 | * | 299 | * |
| 300 | * Returns a new buffer. Space is reserved for a data link header. | 300 | * Returns a new buffer with data pointers set to the specified size. |
| 301 | * | ||
| 302 | * NOTE: Headroom is reserved to allow prepending of a data link header. | ||
| 303 | * There may also be unrequested tailroom present at the buffer's end. | ||
| 301 | */ | 304 | */ |
| 302 | 305 | ||
| 303 | static inline struct sk_buff *buf_acquire(u32 size) | 306 | static inline struct sk_buff *buf_acquire(u32 size) |
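The reworded buf_acquire() comment reflects how the helper actually lays the buffer out: headroom for a link-level header is reserved up front and the data pointers are advanced to the requested size, with possible rounding tailroom left at the end. Roughly (a sketch based on the in-tree helper; BUF_HEADROOM is TIPC's reserved headroom constant):

    static inline struct sk_buff *buf_acquire(u32 size)
    {
            struct sk_buff *skb;
            unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;

            skb = alloc_skb(buf_size, GFP_ATOMIC);
            if (skb) {
                    skb_reserve(skb, BUF_HEADROOM); /* room for a data link header */
                    skb_put(skb, size);             /* data pointers cover 'size' bytes */
                    memset(skb->data, 0, size);
            }
            return skb;
    }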
diff --git a/net/tipc/link.c b/net/tipc/link.c index c6831c75cfa4..c10e18a49b96 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
| @@ -998,6 +998,8 @@ static int link_bundle_buf(struct link *l_ptr, | |||
| 998 | return 0; | 998 | return 0; |
| 999 | if (skb_tailroom(bundler) < (pad + size)) | 999 | if (skb_tailroom(bundler) < (pad + size)) |
| 1000 | return 0; | 1000 | return 0; |
| 1001 | if (link_max_pkt(l_ptr) < (to_pos + size)) | ||
| 1002 | return 0; | ||
| 1001 | 1003 | ||
| 1002 | skb_put(bundler, pad + size); | 1004 | skb_put(bundler, pad + size); |
| 1003 | memcpy(bundler->data + to_pos, buf->data, size); | 1005 | memcpy(bundler->data + to_pos, buf->data, size); |
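The extra test in link_bundle_buf() rejects bundling when the combined message would exceed the link's maximum packet size, not just when the bundler skb lacks tailroom. Condensed, the guards now read as follows (names as in the hunk; pad, size and to_pos are computed earlier in the function):

    if (skb_tailroom(bundler) < (pad + size))
            return 0;                       /* no room left in the bundler buffer */
    if (link_max_pkt(l_ptr) < (to_pos + size))
            return 0;                       /* bundled packet would exceed the link MTU */

    skb_put(bundler, pad + size);
    memcpy(bundler->data + to_pos, buf->data, size);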
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index aca650109425..f70475bfb62a 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
| @@ -144,7 +144,7 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) | |||
| 144 | scm->seclen = *UNIXSECLEN(skb); | 144 | scm->seclen = *UNIXSECLEN(skb); |
| 145 | } | 145 | } |
| 146 | #else | 146 | #else |
| 147 | static void unix_get_peersec_dgram(struct sk_buff *skb) | 147 | static inline void unix_get_peersec_dgram(struct sk_buff *skb) |
| 148 | { } | 148 | { } |
| 149 | 149 | ||
| 150 | static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) | 150 | static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) |
| @@ -565,6 +565,14 @@ static struct proto unix_proto = { | |||
| 565 | .obj_size = sizeof(struct unix_sock), | 565 | .obj_size = sizeof(struct unix_sock), |
| 566 | }; | 566 | }; |
| 567 | 567 | ||
| 568 | /* | ||
| 569 | * AF_UNIX sockets do not interact with hardware, hence they | ||
| 570 | * don't trigger interrupts - so it's safe for them to have | |||
| 571 | * bh-unsafe locking for their sk_receive_queue.lock. Split off | ||
| 572 | * this special lock-class by reinitializing the spinlock key: | ||
| 573 | */ | ||
| 574 | static struct lock_class_key af_unix_sk_receive_queue_lock_key; | ||
| 575 | |||
| 568 | static struct sock * unix_create1(struct socket *sock) | 576 | static struct sock * unix_create1(struct socket *sock) |
| 569 | { | 577 | { |
| 570 | struct sock *sk = NULL; | 578 | struct sock *sk = NULL; |
| @@ -580,6 +588,8 @@ static struct sock * unix_create1(struct socket *sock) | |||
| 580 | atomic_inc(&unix_nr_socks); | 588 | atomic_inc(&unix_nr_socks); |
| 581 | 589 | ||
| 582 | sock_init_data(sock,sk); | 590 | sock_init_data(sock,sk); |
| 591 | lockdep_set_class(&sk->sk_receive_queue.lock, | ||
| 592 | &af_unix_sk_receive_queue_lock_key); | ||
| 583 | 593 | ||
| 584 | sk->sk_write_space = unix_write_space; | 594 | sk->sk_write_space = unix_write_space; |
| 585 | sk->sk_max_ack_backlog = sysctl_unix_max_dgram_qlen; | 595 | sk->sk_max_ack_backlog = sysctl_unix_max_dgram_qlen; |
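This is the standard recipe for splitting a lock off into its own lockdep class: declare a static lock_class_key and attach it once the lock has been initialised. A minimal sketch of the pattern (my_receive_queue_lock_key is a hypothetical name; the diff itself uses af_unix_sk_receive_queue_lock_key):

    /* One static key per special-case lock class ... */
    static struct lock_class_key my_receive_queue_lock_key;

    /* ... applied after the lock has been set up by sock_init_data(): */
    sock_init_data(sock, sk);
    lockdep_set_class(&sk->sk_receive_queue.lock,
                      &my_receive_queue_lock_key);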
| @@ -1045,7 +1055,7 @@ restart: | |||
| 1045 | goto out_unlock; | 1055 | goto out_unlock; |
| 1046 | } | 1056 | } |
| 1047 | 1057 | ||
| 1048 | unix_state_wlock(sk); | 1058 | unix_state_wlock_nested(sk); |
| 1049 | 1059 | ||
| 1050 | if (sk->sk_state != st) { | 1060 | if (sk->sk_state != st) { |
| 1051 | unix_state_wunlock(sk); | 1061 | unix_state_wunlock(sk); |
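unix_state_wlock_nested() is used here because the connecting path already holds one socket's state lock when it takes the peer's, so lockdep would otherwise flag a second acquisition within the same lock class. Presumably it is a thin wrapper that passes a nesting subclass to the underlying spinlock, along the lines of (sketch; the exact definition lives in include/net/af_unix.h):

    #define unix_state_wlock_nested(s) \
            spin_lock_nested(&unix_sk(s)->lock, SINGLE_DEPTH_NESTING)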
