author    Linus Torvalds <torvalds@g5.osdl.org>  2006-07-04 00:28:14 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-07-04 00:28:14 -0400
commit    67ab33db8be1cd466c09dfcba334d69d3e2f92e6
tree      3ae6448755977f0bbeea0f8da028b58bb1564580
parent    f7d57e42e7ebd085133506ef6325e70e822196dc
parent    300b93974ff64f1bef1ac8294547c573954f0300
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (27 commits)
[Bluetooth] Add RFCOMM role switch support
[Bluetooth] Allow disabling of credit based flow control
[Bluetooth] Small cleanup of the L2CAP source code
[Bluetooth] Use real devices for host controllers
[Bluetooth] Add platform device for virtual and serial devices
[Bluetooth] Add automatic sniff mode support
[Bluetooth] Correct SCO buffer size on request
[Bluetooth] Add suspend/resume support to the HCI USB driver
[Bluetooth] Use raw mode for the Frontline sniffer device
[BRIDGE]: br_dump_ifinfo index fix
[ATM]: add+use poison defines
[NET]: add+use poison defines
[IOAT]: fix kernel-doc in source files
[IOAT]: fix header file kernel-doc
[TG3]: Add ipv6 TSO feature
[IPV6]: Fix ipv6 GSO payload length
[TIPC] Fixed sk_buff panic caused by tipc_link_bundle_buf (REVISED)
[NET]: Verify gso_type too in gso_segment
[IPVS]: Add sysctl documentation
[ROSE]: Try all routes when establishing a ROSE connections.
...
44 files changed, 1159 insertions, 464 deletions
diff --git a/Documentation/networking/ipvs-sysctl.txt b/Documentation/networking/ipvs-sysctl.txt
new file mode 100644
index 000000000000..4ccdbca03811
--- /dev/null
+++ b/Documentation/networking/ipvs-sysctl.txt
@@ -0,0 +1,143 @@
+/proc/sys/net/ipv4/vs/* Variables:
+
+am_droprate - INTEGER
+        default 10
+
+        It sets the always mode drop rate, which is used in mode 3
+        of the drop_rate defense.
+
+amemthresh - INTEGER
+        default 1024
+
+        It sets the available memory threshold (in pages), which is
+        used in the automatic modes of defense. When there is not
+        enough available memory, the respective strategy will be
+        enabled and the variable is automatically set to 2, otherwise
+        the strategy is disabled and the variable is set to 1.
+
+cache_bypass - BOOLEAN
+        0 - disabled (default)
+        not 0 - enabled
+
+        If it is enabled, forward packets to the original destination
+        directly when no cache server is available and the destination
+        address is not local (iph->daddr is RTN_UNICAST). It is mostly
+        used in transparent web cache clusters.
+
+debug_level - INTEGER
+        0          - transmission error messages (default)
+        1          - non-fatal error messages
+        2          - configuration
+        3          - destination trash
+        4          - drop entry
+        5          - service lookup
+        6          - scheduling
+        7          - connection new/expire, lookup and synchronization
+        8          - state transition
+        9          - binding destination, template checks and applications
+        10         - IPVS packet transmission
+        11         - IPVS packet handling (ip_vs_in/ip_vs_out)
+        12 or more - packet traversal
+
+        Only available when IPVS is compiled with CONFIG_IPVS_DEBUG enabled.
+
+        Higher debugging levels include the messages for lower debugging
+        levels, so setting debug level 2 includes level 0, 1 and 2
+        messages. Thus, logging becomes more and more verbose the higher
+        the level.
+
+drop_entry - INTEGER
+        0  - disabled (default)
+
+        The drop_entry defense is to randomly drop entries in the
+        connection hash table, just in order to collect back some
+        memory for new connections. In the current code, the
+        drop_entry procedure can be activated every second, then it
+        randomly scans 1/32 of the whole table and drops entries that
+        are in the SYN-RECV/SYNACK state, which should be effective
+        against syn-flooding attacks.
+
+        The valid values of drop_entry are from 0 to 3, where 0 means
+        that this strategy is always disabled, 1 and 2 mean automatic
+        modes (when there is not enough available memory, the strategy
+        is enabled and the variable is automatically set to 2,
+        otherwise the strategy is disabled and the variable is set to
+        1), and 3 means that the strategy is always enabled.
+
+drop_packet - INTEGER
+        0  - disabled (default)
+
+        The drop_packet defense is designed to drop 1/rate packets
+        before forwarding them to real servers. If the rate is 1, then
+        drop all the incoming packets.
+
+        The value definition is the same as that of drop_entry. In
+        the automatic mode, the rate is determined by the following
+        formula: rate = amemthresh / (amemthresh - available_memory)
+        when available memory is less than the available memory
+        threshold. When mode 3 is set, the always mode drop rate
+        is controlled by /proc/sys/net/ipv4/vs/am_droprate.
+
+expire_nodest_conn - BOOLEAN
+        0 - disabled (default)
+        not 0 - enabled
+
+        The default value is 0: the load balancer will silently drop
+        packets when its destination server is not available. This may
+        be useful when a user-space monitoring program deletes the
+        destination server (because of server overload or wrong
+        detection) and adds it back later, so that the connections
+        to the server can continue.
+
+        If this feature is enabled, the load balancer will expire the
+        connection immediately when a packet arrives and its
+        destination server is not available; the client program will
+        then be notified that the connection is closed. This is
+        equivalent to the feature some people require to flush
+        connections when their destination is not available.
+
+expire_quiescent_template - BOOLEAN
+        0 - disabled (default)
+        not 0 - enabled
+
+        When set to a non-zero value, the load balancer will expire
+        persistent templates when the destination server is quiescent.
+        This may be useful when a user makes a destination server
+        quiescent by setting its weight to 0 and it is desired that
+        subsequent otherwise persistent connections are sent to a
+        different destination server. By default, new persistent
+        connections are allowed to quiescent destination servers.
+
+        If this feature is enabled, the load balancer will expire the
+        persistence template if it is to be used to schedule a new
+        connection and the destination server is quiescent.
+
+nat_icmp_send - BOOLEAN
+        0 - disabled (default)
+        not 0 - enabled
+
+        It controls sending ICMP error messages (ICMP_DEST_UNREACH)
+        for VS/NAT when the load balancer receives packets from real
+        servers but the connection entries don't exist.
+
+secure_tcp - INTEGER
+        0  - disabled (default)
+
+        The secure_tcp defense is to use a more complicated state
+        transition table and some possible short timeouts of each
+        state. In VS/NAT, it delays entering the ESTABLISHED state
+        until the real server starts to send data and ACK packets
+        (after the 3-way handshake).
+
+        The value definition is the same as that of drop_entry or
+        drop_packet.
+
+sync_threshold - INTEGER
+        default 3
+
+        It sets the synchronization threshold, which is the minimum
+        number of incoming packets that a connection needs to receive
+        before the connection will be synchronized. A connection will
+        be synchronized every time the number of its incoming packets
+        modulo 50 equals the threshold. The range of the threshold is
+        from 0 to 49.
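
As an editor-added illustration of the two calculations described in the new file above (not part of the patch itself), the drop_packet rate and the sync_threshold check can be written out as plain C; the function names here are invented and the real IPVS code is organized differently:

    /* drop_packet automatic mode: drop 1 out of every `rate` packets once
     * available memory falls below amemthresh (both counted in pages). */
    static int drop_rate(int amemthresh, int available_memory)
    {
            if (available_memory >= amemthresh)
                    return 0;       /* defense not active */
            return amemthresh / (amemthresh - available_memory);
    }

    /* sync_threshold: a connection is synchronized each time its incoming
     * packet count modulo 50 equals the threshold (default 3). */
    static int needs_sync(int in_pkts, int threshold)
    {
            return (in_pkts % 50) == threshold;
    }

For example, with amemthresh = 1024 and 768 pages still available, rate = 1024 / (1024 - 768) = 4, so one packet in four is dropped; with the default sync_threshold of 3, a connection is synchronized on its 3rd, 53rd, 103rd, ... incoming packet.
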
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index d3b426313a41..4521a249dd56 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -31,6 +31,7 @@
 #include <linux/atmdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/poison.h>
 
 #include <asm/atomic.h>
 #include <asm/io.h>
@@ -1995,7 +1996,7 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
       }
       i += 1;
     }
-  if (*pointer == 0xdeadbeef) {
+  if (*pointer == ATM_POISON) {
     return loader_start (lb, dev, ucode_start);
   } else {
     // cast needed as there is no %? for pointer differnces
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 5d1c6c95262c..b0369bb20f08 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -35,6 +35,7 @@ static char const rcsid[] =
 
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/poison.h>
 #include <linux/skbuff.h>
 #include <linux/kernel.h>
 #include <linux/vmalloc.h>
@@ -3657,7 +3658,7 @@ probe_sram(struct idt77252_dev *card)
         writel(SAR_CMD_WRITE_SRAM | (0 << 2), SAR_REG_CMD);
 
         for (addr = 0x4000; addr < 0x80000; addr += 0x4000) {
-                writel(0xdeadbeef, SAR_REG_DR0);
+                writel(ATM_POISON, SAR_REG_DR0);
                 writel(SAR_CMD_WRITE_SRAM | (addr << 2), SAR_REG_CMD);
 
                 writel(SAR_CMD_READ_SRAM | (0 << 2), SAR_REG_CMD);
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 2830f58d6f77..8eebf9ca3786 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -739,6 +739,7 @@ static int bluecard_open(bluecard_info_t *info)
 
         hdev->type = HCI_PCCARD;
         hdev->driver_data = info;
+        SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
 
         hdev->open = bluecard_hci_open;
         hdev->close = bluecard_hci_close;
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index c9dba5565cac..df7bb016df49 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -582,6 +582,7 @@ static int bt3c_open(bt3c_info_t *info)
 
         hdev->type = HCI_PCCARD;
         hdev->driver_data = info;
+        SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
 
         hdev->open = bt3c_hci_open;
         hdev->close = bt3c_hci_close;
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index c889bf8109a1..746ccca97f6f 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -502,6 +502,7 @@ static int btuart_open(btuart_info_t *info)
 
         hdev->type = HCI_PCCARD;
         hdev->driver_data = info;
+        SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
 
         hdev->open = btuart_hci_open;
         hdev->close = btuart_hci_close;
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index be6eed175aa3..0e99def8a1e3 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -484,6 +484,7 @@ static int dtl1_open(dtl1_info_t *info)
 
         hdev->type = HCI_PCCARD;
         hdev->driver_data = info;
+        SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
 
         hdev->open = dtl1_hci_open;
         hdev->close = dtl1_hci_close;
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index a7d9d7e99e72..6a0c2230f82f 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -122,6 +122,9 @@ static struct usb_device_id blacklist_ids[] = {
         /* RTX Telecom based adapter with buggy SCO support */
         { USB_DEVICE(0x0400, 0x0807), .driver_info = HCI_BROKEN_ISOC },
 
+        /* Belkin F8T012 */
+        { USB_DEVICE(0x050d, 0x0012), .driver_info = HCI_WRONG_SCO_MTU },
+
         /* Digianswer devices */
         { USB_DEVICE(0x08fd, 0x0001), .driver_info = HCI_DIGIANSWER },
         { USB_DEVICE(0x08fd, 0x0002), .driver_info = HCI_IGNORE },
@@ -129,6 +132,9 @@ static struct usb_device_id blacklist_ids[] = {
         /* CSR BlueCore Bluetooth Sniffer */
         { USB_DEVICE(0x0a12, 0x0002), .driver_info = HCI_SNIFFER },
 
+        /* Frontline ComProbe Bluetooth Sniffer */
+        { USB_DEVICE(0x16d3, 0x0002), .driver_info = HCI_SNIFFER },
+
         { }     /* Terminating entry */
 };
 
@@ -984,6 +990,9 @@ static int hci_usb_probe(struct usb_interface *intf, const struct usb_device_id
         if (reset || id->driver_info & HCI_RESET)
                 set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks);
 
+        if (id->driver_info & HCI_WRONG_SCO_MTU)
+                set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
+
         if (id->driver_info & HCI_SNIFFER) {
                 if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997)
                         set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
@@ -1042,10 +1051,81 @@ static void hci_usb_disconnect(struct usb_interface *intf)
         hci_free_dev(hdev);
 }
 
+static int hci_usb_suspend(struct usb_interface *intf, pm_message_t message)
+{
+        struct hci_usb *husb = usb_get_intfdata(intf);
+        struct list_head killed;
+        unsigned long flags;
+        int i;
+
+        if (!husb || intf == husb->isoc_iface)
+                return 0;
+
+        hci_suspend_dev(husb->hdev);
+
+        INIT_LIST_HEAD(&killed);
+
+        for (i = 0; i < 4; i++) {
+                struct _urb_queue *q = &husb->pending_q[i];
+                struct _urb *_urb, *_tmp;
+
+                while ((_urb = _urb_dequeue(q))) {
+                        /* reset queue since _urb_dequeue sets it to NULL */
+                        _urb->queue = q;
+                        usb_kill_urb(&_urb->urb);
+                        list_add(&_urb->list, &killed);
+                }
+
+                spin_lock_irqsave(&q->lock, flags);
+
+                list_for_each_entry_safe(_urb, _tmp, &killed, list) {
+                        list_move_tail(&_urb->list, &q->head);
+                }
+
+                spin_unlock_irqrestore(&q->lock, flags);
+        }
+
+        return 0;
+}
+
+static int hci_usb_resume(struct usb_interface *intf)
+{
+        struct hci_usb *husb = usb_get_intfdata(intf);
+        unsigned long flags;
+        int i, err = 0;
+
+        if (!husb || intf == husb->isoc_iface)
+                return 0;
+
+        for (i = 0; i < 4; i++) {
+                struct _urb_queue *q = &husb->pending_q[i];
+                struct _urb *_urb;
+
+                spin_lock_irqsave(&q->lock, flags);
+
+                list_for_each_entry(_urb, &q->head, list) {
+                        err = usb_submit_urb(&_urb->urb, GFP_ATOMIC);
+                        if (err)
+                                break;
+                }
+
+                spin_unlock_irqrestore(&q->lock, flags);
+
+                if (err)
+                        return -EIO;
+        }
+
+        hci_resume_dev(husb->hdev);
+
+        return 0;
+}
+
 static struct usb_driver hci_usb_driver = {
         .name       = "hci_usb",
         .probe      = hci_usb_probe,
         .disconnect = hci_usb_disconnect,
+        .suspend    = hci_usb_suspend,
+        .resume     = hci_usb_resume,
         .id_table   = bluetooth_ids,
 };
 
diff --git a/drivers/bluetooth/hci_usb.h b/drivers/bluetooth/hci_usb.h
index 37100a6ea1a8..963fc55cdc85 100644
--- a/drivers/bluetooth/hci_usb.h
+++ b/drivers/bluetooth/hci_usb.h
@@ -35,6 +35,7 @@
 #define HCI_SNIFFER             0x10
 #define HCI_BCM92035            0x20
 #define HCI_BROKEN_ISOC         0x40
+#define HCI_WRONG_SCO_MTU       0x80
 
 #define HCI_MAX_IFACE_NUM       3
 
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index ea589007fa26..aac67a3a6019 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -277,7 +277,6 @@ static int vhci_open(struct inode *inode, struct file *file)
 
         hdev->type = HCI_VHCI;
         hdev->driver_data = vhci;
-        SET_HCIDEV_DEV(hdev, vhci_miscdev.dev);
 
         hdev->open = vhci_open_dev;
         hdev->close = vhci_close_dev;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 5829143558e1..15278044295c 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -166,8 +166,8 @@ static struct dma_chan *dma_client_chan_alloc(struct dma_client *client)
 }
 
 /**
- * dma_client_chan_free - release a DMA channel
- * @chan: &dma_chan
+ * dma_chan_cleanup - release a DMA channel's resources
+ * @kref: kernel reference structure that contains the DMA channel device
  */
 void dma_chan_cleanup(struct kref *kref)
 {
@@ -199,7 +199,7 @@ static void dma_client_chan_free(struct dma_chan *chan)
  * dma_chans_rebalance - reallocate channels to clients
  *
  * When the number of DMA channel in the system changes,
- * channels need to be rebalanced among clients
+ * channels need to be rebalanced among clients.
  */
 static void dma_chans_rebalance(void)
 {
@@ -264,7 +264,7 @@ struct dma_client *dma_async_client_register(dma_event_callback event_callback)
 
 /**
  * dma_async_client_unregister - unregister a client and free the &dma_client
- * @client:
+ * @client: &dma_client to free
  *
  * Force frees any allocated DMA channels, frees the &dma_client memory
 */
@@ -306,7 +306,7 @@ void dma_async_client_chan_request(struct dma_client *client,
 }
 
 /**
- * dma_async_device_register -
+ * dma_async_device_register - registers DMA devices found
  * @device: &dma_device
  */
 int dma_async_device_register(struct dma_device *device)
@@ -348,8 +348,8 @@ int dma_async_device_register(struct dma_device *device)
 }
 
 /**
- * dma_async_device_unregister -
- * @device: &dma_device
+ * dma_async_device_cleanup - function called when all references are released
+ * @kref: kernel reference object
  */
 static void dma_async_device_cleanup(struct kref *kref)
 {
@@ -359,7 +359,11 @@ static void dma_async_device_cleanup(struct kref *kref)
         complete(&device->done);
 }
 
-void dma_async_device_unregister(struct dma_device* device)
+/**
+ * dma_async_device_unregister - unregisters DMA devices
+ * @device: &dma_device
+ */
+void dma_async_device_unregister(struct dma_device *device)
 {
         struct dma_chan *chan;
         unsigned long flags;
diff --git a/drivers/dma/ioatdma.c b/drivers/dma/ioatdma.c
index ecad8f65d2d4..78bf46d917b7 100644
--- a/drivers/dma/ioatdma.c
+++ b/drivers/dma/ioatdma.c
@@ -217,7 +217,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 
 /**
  * do_ioat_dma_memcpy - actual function that initiates a IOAT DMA transaction
- * @chan: IOAT DMA channel handle
+ * @ioat_chan: IOAT DMA channel handle
  * @dest: DMA destination address
  * @src: DMA source address
  * @len: transaction length in bytes
@@ -383,7 +383,7 @@ static dma_cookie_t ioat_dma_memcpy_buf_to_pg(struct dma_chan *chan,
  * @dest_off: offset into that page
  * @src_pg: pointer to the page to copy from
  * @src_off: offset into that page
- * @len: transaction length in bytes. This is guaranteed to not make a copy
+ * @len: transaction length in bytes. This is guaranteed not to make a copy
  * across a page boundary.
  */
 
@@ -407,7 +407,7 @@ static dma_cookie_t ioat_dma_memcpy_pg_to_pg(struct dma_chan *chan,
 }
 
 /**
- * ioat_dma_memcpy_issue_pending - push potentially unrecognoized appended descriptors to hw
+ * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended descriptors to hw
  * @chan: DMA channel handle
  */
 
@@ -510,6 +510,8 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
  * ioat_dma_is_complete - poll the status of a IOAT DMA transaction
  * @chan: IOAT DMA channel handle
  * @cookie: DMA transaction identifier
+ * @done: if not %NULL, updated with last completed transaction
+ * @used: if not %NULL, updated with last used transaction
  */
 
 static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
@@ -826,7 +828,7 @@ static int __init ioat_init_module(void)
         /* if forced, worst case is that rmmod hangs */
         __unsafe(THIS_MODULE);
 
-        pci_module_init(&ioat_pci_drv);
+        return pci_module_init(&ioat_pci_drv);
 }
 
 module_init(ioat_init_module);
diff --git a/drivers/dma/ioatdma_registers.h b/drivers/dma/ioatdma_registers.h
index 41a21ab2b000..a30c7349075a 100644
--- a/drivers/dma/ioatdma_registers.h
+++ b/drivers/dma/ioatdma_registers.h
@@ -76,7 +76,7 @@
 #define IOAT_CHANSTS_OFFSET                     0x04    /* 64-bit Channel Status Register */
 #define IOAT_CHANSTS_OFFSET_LOW                 0x04
 #define IOAT_CHANSTS_OFFSET_HIGH                0x08
-#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR  0xFFFFFFFFFFFFFFC0
+#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR  0xFFFFFFFFFFFFFFC0UL
 #define IOAT_CHANSTS_SOFT_ERR                   0x0000000000000010
 #define IOAT_CHANSTS_DMA_TRANSFER_STATUS        0x0000000000000007
 #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE 0x0
diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c
index 5ed327e453a2..d637555a833b 100644
--- a/drivers/dma/iovlock.c
+++ b/drivers/dma/iovlock.c
@@ -31,7 +31,7 @@
 #include <asm/io.h>
 #include <asm/uaccess.h>
 
-int num_pages_spanned(struct iovec *iov)
+static int num_pages_spanned(struct iovec *iov)
 {
         return
         ((PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index e5e1b2962936..f645921aff8b 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
 
 #define DRV_MODULE_NAME         "tg3"
 #define PFX DRV_MODULE_NAME     ": "
-#define DRV_MODULE_VERSION      "3.61"
-#define DRV_MODULE_RELDATE      "June 29, 2006"
+#define DRV_MODULE_VERSION      "3.62"
+#define DRV_MODULE_RELDATE      "June 30, 2006"
 
 #define TG3_DEF_MAC_MODE        0
 #define TG3_DEF_RX_MODE         0
@@ -3798,18 +3798,24 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                         goto out_unlock;
                 }
 
-                tcp_opt_len = ((skb->h.th->doff - 5) * 4);
-                ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
+                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+                        mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
+                else {
+                        tcp_opt_len = ((skb->h.th->doff - 5) * 4);
+                        ip_tcp_len = (skb->nh.iph->ihl * 4) +
+                                     sizeof(struct tcphdr);
+
+                        skb->nh.iph->check = 0;
+                        skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
+                                                     tcp_opt_len);
+                        mss |= (ip_tcp_len + tcp_opt_len) << 9;
+                }
 
                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                                TXD_FLAG_CPU_POST_DMA);
 
-                skb->nh.iph->check = 0;
-                skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
-
                 skb->h.th->check = 0;
 
-                mss |= (ip_tcp_len + tcp_opt_len) << 9;
         }
         else if (skb->ip_summed == CHECKSUM_HW)
                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
@@ -7887,6 +7893,12 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
                         return -EINVAL;
                 return 0;
         }
+        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) {
+                if (value)
+                        dev->features |= NETIF_F_TSO6;
+                else
+                        dev->features &= ~NETIF_F_TSO6;
+        }
         return ethtool_op_set_tso(dev, value);
 }
 #endif
@@ -11507,8 +11519,11 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
          * Firmware TSO on older chips gives lower performance, so it
          * is off by default, but can be enabled using ethtool.
          */
-        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
+        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                 dev->features |= NETIF_F_TSO;
+                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
+                        dev->features |= NETIF_F_TSO6;
+        }
 
 #endif
 
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 272010a6078a..c94d8f1d62e5 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -44,7 +44,7 @@ enum dma_event {
 };
 
 /**
- * typedef dma_cookie_t
+ * typedef dma_cookie_t - an opaque DMA cookie
  *
  * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
  */
@@ -80,14 +80,14 @@ struct dma_chan_percpu {
 
 /**
  * struct dma_chan - devices supply DMA channels, clients use them
- * @client: ptr to the client user of this chan, will be NULL when unused
- * @device: ptr to the dma device who supplies this channel, always !NULL
+ * @client: ptr to the client user of this chan, will be %NULL when unused
+ * @device: ptr to the dma device who supplies this channel, always !%NULL
  * @cookie: last cookie value returned to client
- * @chan_id:
- * @class_dev:
+ * @chan_id: channel ID for sysfs
+ * @class_dev: class device for sysfs
  * @refcount: kref, used in "bigref" slow-mode
- * @slow_ref:
- * @rcu:
+ * @slow_ref: indicates that the DMA channel is free
+ * @rcu: the DMA channel's RCU head
  * @client_node: used to add this to the client chan list
  * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
@@ -162,10 +162,17 @@ struct dma_client {
  * @chancnt: how many DMA channels are supported
  * @channels: the list of struct dma_chan
  * @global_node: list_head for global dma_device_list
- * @refcount:
- * @done:
- * @dev_id:
- * Other func ptrs: used to make use of this device's capabilities
+ * @refcount: reference count
+ * @done: IO completion struct
+ * @dev_id: unique device ID
+ * @device_alloc_chan_resources: allocate resources and return the
+ *      number of allocated descriptors
+ * @device_free_chan_resources: release DMA channel's resources
+ * @device_memcpy_buf_to_buf: memcpy buf pointer to buf pointer
+ * @device_memcpy_buf_to_pg: memcpy buf pointer to struct page
+ * @device_memcpy_pg_to_pg: memcpy struct page/offset to struct page/offset
+ * @device_memcpy_complete: poll the status of an IOAT DMA transaction
+ * @device_memcpy_issue_pending: push appended descriptors to hardware
  */
 struct dma_device {
 
@@ -211,7 +218,7 @@ void dma_async_client_chan_request(struct dma_client *client,
  * Both @dest and @src must be mappable to a bus address according to the
  * DMA mapping API rules for streaming mappings.
  * Both @dest and @src must stay memory resident (kernel memory or locked
- * user space pages)
+ * user space pages).
  */
 static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
         void *dest, void *src, size_t len)
@@ -225,7 +232,7 @@ static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
 }
 
 /**
- * dma_async_memcpy_buf_to_pg - offloaded copy
+ * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
  * @chan: DMA channel to offload copy to
  * @page: destination page
 * @offset: offset in page to copy to
@@ -250,18 +257,18 @@ static inline dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
 }
 
 /**
- * dma_async_memcpy_buf_to_pg - offloaded copy
+ * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
  * @chan: DMA channel to offload copy to
- * @dest_page: destination page
+ * @dest_pg: destination page
  * @dest_off: offset in page to copy to
- * @src_page: source page
+ * @src_pg: source page
  * @src_off: offset in page to copy from
  * @len: length
  *
  * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
  * address according to the DMA mapping API rules for streaming mappings.
  * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
- * (kernel memory or locked user space pages)
+ * (kernel memory or locked user space pages).
  */
 static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
         struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
@@ -278,7 +285,7 @@ static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
 
 /**
  * dma_async_memcpy_issue_pending - flush pending copies to HW
- * @chan:
+ * @chan: target DMA channel
  *
  * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
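
For orientation, here is a minimal, editor-added sketch of how a client might drive the async memcpy helpers documented in the dmaengine.h changes above, once it has been handed a channel through its dma_event_callback. It only uses calls visible in this header; the helper name example_offload_copy is made up and error handling is reduced to the bare minimum:

    /* Hypothetical usage sketch, not part of this patch set. */
    #include <linux/dmaengine.h>

    static void example_offload_copy(struct dma_chan *chan,
                                     void *dst, void *src, size_t len)
    {
            dma_cookie_t cookie;

            /* queue the copy; both buffers must stay memory resident,
             * per the dma_async_memcpy_buf_to_buf() rules above */
            cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
            if (cookie < 0)
                    return;         /* a real caller would fall back to memcpy() */

            /* push any batched descriptors to the hardware */
            dma_async_memcpy_issue_pending(chan);

            /* the cookie is later used to poll the copy for completion */
    }
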
diff --git a/include/linux/poison.h b/include/linux/poison.h
index a5347c02432e..3e628f990fdf 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -44,6 +44,11 @@
 
 /********** drivers/atm/ **********/
 #define ATM_POISON_FREE         0x12
+#define ATM_POISON              0xdeadbeef
+
+/********** net/ **********/
+#define NEIGHBOR_DEAD           0xdeadbeef
+#define NETFILTER_LINK_POISON   0xdead57ac
 
 /********** kernel/mutexes **********/
 #define MUTEX_DEBUG_INIT        0x11
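
These named poison constants replace the bare 0xdeadbeef literals touched elsewhere in this merge (ambassador.c, idt77252.c, clip.c). As an editor-added sketch of the pattern they support (the struct and functions below are invented, only the define is real), a destroyed object is stamped with a recognizable value so later misuse is easy to spot:

    #include <linux/poison.h>

    struct entry {
            void *vccs;
    };

    /* on teardown, overwrite the pointer with a poison value
     * instead of leaving it dangling */
    static void entry_destroy(struct entry *e)
    {
            e->vccs = (void *) NEIGHBOR_DEAD;
    }

    /* later accesses can then detect use-after-destroy explicitly */
    static int entry_is_dead(const struct entry *e)
    {
            return e->vccs == (void *) NEIGHBOR_DEAD;
    }
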
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 7cd528e9d668..69374cd1a857 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -182,14 +182,26 @@ typedef struct {
 
 typedef struct ax25_route {
         struct ax25_route       *next;
-        atomic_t                ref;
+        atomic_t                refcount;
         ax25_address            callsign;
         struct net_device       *dev;
         ax25_digi               *digipeat;
         char                    ip_mode;
-        struct timer_list       timer;
 } ax25_route;
 
+static inline void ax25_hold_route(ax25_route *ax25_rt)
+{
+        atomic_inc(&ax25_rt->refcount);
+}
+
+extern void __ax25_put_route(ax25_route *ax25_rt);
+
+static inline void ax25_put_route(ax25_route *ax25_rt)
+{
+        if (atomic_dec_and_test(&ax25_rt->refcount))
+                __ax25_put_route(ax25_rt);
+}
+
 typedef struct {
         char                    slave;          /* slave_mode? */
         struct timer_list       slave_timer;    /* timeout timer */
@@ -348,17 +360,11 @@ extern int ax25_check_iframes_acked(ax25_cb *, unsigned short);
 extern void ax25_rt_device_down(struct net_device *);
 extern int ax25_rt_ioctl(unsigned int, void __user *);
 extern struct file_operations ax25_route_fops;
+extern ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev);
 extern int ax25_rt_autobind(ax25_cb *, ax25_address *);
-extern ax25_route *ax25_rt_find_route(ax25_route *, ax25_address *,
-        struct net_device *);
 extern struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *, ax25_address *, ax25_digi *);
 extern void ax25_rt_free(void);
 
-static inline void ax25_put_route(ax25_route *ax25_rt)
-{
-        atomic_dec(&ax25_rt->ref);
-}
-
 /* ax25_std_in.c */
 extern int ax25_std_frame_in(ax25_cb *, struct sk_buff *, int);
 
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 911ceb5cd263..771d17783c18 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -175,6 +175,6 @@ extern int hci_sock_cleanup(void);
 extern int bt_sysfs_init(void);
 extern void bt_sysfs_cleanup(void);
 
-extern struct class bt_class;
+extern struct class *bt_class;
 
 #endif /* __BLUETOOTH_H */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index b06a2d2f63d2..b2bdb1aa0429 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -54,7 +54,8 @@
 /* HCI device quirks */
 enum {
         HCI_QUIRK_RESET_ON_INIT,
-        HCI_QUIRK_RAW_DEVICE
+        HCI_QUIRK_RAW_DEVICE,
+        HCI_QUIRK_FIXUP_BUFFER_SIZE
 };
 
 /* HCI device flags */
@@ -100,9 +101,10 @@ enum {
 #define HCIINQUIRY      _IOR('H', 240, int)
 
 /* HCI timeouts */
-#define HCI_CONN_TIMEOUT        (HZ * 40)
-#define HCI_DISCONN_TIMEOUT     (HZ * 2)
-#define HCI_CONN_IDLE_TIMEOUT   (HZ * 60)
+#define HCI_CONNECT_TIMEOUT     (40000) /* 40 seconds */
+#define HCI_DISCONN_TIMEOUT     (2000)  /* 2 seconds */
+#define HCI_IDLE_TIMEOUT        (6000)  /* 6 seconds */
+#define HCI_INIT_TIMEOUT        (10000) /* 10 seconds */
 
 /* HCI Packet types */
 #define HCI_COMMAND_PKT         0x01
@@ -144,7 +146,7 @@ enum {
 #define LMP_TACCURACY   0x10
 #define LMP_RSWITCH     0x20
 #define LMP_HOLD        0x40
-#define LMP_SNIF        0x80
+#define LMP_SNIFF       0x80
 
 #define LMP_PARK        0x01
 #define LMP_RSSI        0x02
@@ -159,13 +161,21 @@ enum {
 #define LMP_PSCHEME     0x02
 #define LMP_PCONTROL    0x04
 
+#define LMP_SNIFF_SUBR  0x02
+
+/* Connection modes */
+#define HCI_CM_ACTIVE   0x0000
+#define HCI_CM_HOLD     0x0001
+#define HCI_CM_SNIFF    0x0002
+#define HCI_CM_PARK     0x0003
+
 /* Link policies */
 #define HCI_LP_RSWITCH  0x0001
 #define HCI_LP_HOLD     0x0002
 #define HCI_LP_SNIFF    0x0004
 #define HCI_LP_PARK     0x0008
 
-/* Link mode */
+/* Link modes */
 #define HCI_LM_ACCEPT   0x8000
 #define HCI_LM_MASTER   0x0001
 #define HCI_LM_AUTH     0x0002
@@ -191,7 +201,7 @@ struct hci_rp_read_loc_version {
 } __attribute__ ((packed));
 
 #define OCF_READ_LOCAL_FEATURES 0x0003
-struct hci_rp_read_loc_features {
+struct hci_rp_read_local_features {
         __u8 status;
         __u8 features[8];
 } __attribute__ ((packed));
@@ -375,17 +385,32 @@ struct hci_cp_change_conn_link_key {
 } __attribute__ ((packed));
 
 #define OCF_READ_REMOTE_FEATURES 0x001B
-struct hci_cp_read_rmt_features {
+struct hci_cp_read_remote_features {
         __le16   handle;
 } __attribute__ ((packed));
 
 #define OCF_READ_REMOTE_VERSION 0x001D
-struct hci_cp_read_rmt_version {
+struct hci_cp_read_remote_version {
         __le16   handle;
 } __attribute__ ((packed));
 
 /* Link Policy */
 #define OGF_LINK_POLICY 0x02
+
+#define OCF_SNIFF_MODE          0x0003
+struct hci_cp_sniff_mode {
+        __le16   handle;
+        __le16   max_interval;
+        __le16   min_interval;
+        __le16   attempt;
+        __le16   timeout;
+} __attribute__ ((packed));
+
+#define OCF_EXIT_SNIFF_MODE     0x0004
+struct hci_cp_exit_sniff_mode {
+        __le16   handle;
+} __attribute__ ((packed));
+
 #define OCF_ROLE_DISCOVERY      0x0009
 struct hci_cp_role_discovery {
         __le16   handle;
@@ -406,7 +431,7 @@ struct hci_rp_read_link_policy {
         __le16   policy;
 } __attribute__ ((packed));
 
-#define OCF_SWITCH_ROLE 0x000B
+#define OCF_SWITCH_ROLE         0x000B
 struct hci_cp_switch_role {
         bdaddr_t bdaddr;
         __u8     role;
@@ -422,6 +447,14 @@ struct hci_rp_write_link_policy {
         __le16   handle;
 } __attribute__ ((packed));
 
+#define OCF_SNIFF_SUBRATE       0x0011
+struct hci_cp_sniff_subrate {
+        __le16   handle;
+        __le16   max_latency;
+        __le16   min_remote_timeout;
+        __le16   min_local_timeout;
+} __attribute__ ((packed));
+
 /* Status params */
 #define OGF_STATUS_PARAM        0x05
 
@@ -581,15 +614,15 @@ struct hci_ev_link_key_notify {
         __u8     key_type;
 } __attribute__ ((packed));
 
-#define HCI_EV_RMT_FEATURES     0x0B
-struct hci_ev_rmt_features {
+#define HCI_EV_REMOTE_FEATURES  0x0B
+struct hci_ev_remote_features {
         __u8     status;
         __le16   handle;
         __u8     features[8];
 } __attribute__ ((packed));
 
-#define HCI_EV_RMT_VERSION      0x0C
-struct hci_ev_rmt_version {
+#define HCI_EV_REMOTE_VERSION   0x0C
+struct hci_ev_remote_version {
         __u8     status;
         __le16   handle;
         __u8     lmp_ver;
@@ -610,6 +643,16 @@ struct hci_ev_pscan_rep_mode {
         __u8     pscan_rep_mode;
 } __attribute__ ((packed));
 
+#define HCI_EV_SNIFF_SUBRATE    0x2E
+struct hci_ev_sniff_subrate {
+        __u8     status;
+        __le16   handle;
+        __le16   max_tx_latency;
+        __le16   max_rx_latency;
+        __le16   max_remote_timeout;
+        __le16   max_local_timeout;
+} __attribute__ ((packed));
+
 /* Internal events generated by Bluetooth stack */
 #define HCI_EV_STACK_INTERNAL   0xFD
 struct hci_ev_stack_internal {
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index bb9f81dc8723..d84855fe7336 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -31,10 +31,7 @@
 #define HCI_PROTO_L2CAP 0
 #define HCI_PROTO_SCO   1
 
-#define HCI_INIT_TIMEOUT        (HZ * 10)
-
 /* HCI Core structures */
-
 struct inquiry_data {
         bdaddr_t        bdaddr;
         __u8            pscan_rep_mode;
@@ -81,6 +78,10 @@ struct hci_dev {
         __u16           link_policy;
         __u16           link_mode;
 
+        __u32           idle_timeout;
+        __u16           sniff_min_interval;
+        __u16           sniff_max_interval;
+
         unsigned long   quirks;
 
         atomic_t        cmd_cnt;
@@ -123,7 +124,8 @@ struct hci_dev {
 
         atomic_t        promisc;
 
-        struct class_device class_dev;
+        struct device   *parent;
+        struct device   dev;
 
         struct module   *owner;
 
@@ -145,18 +147,24 @@ struct hci_conn {
         bdaddr_t        dst;
         __u16           handle;
         __u16           state;
+        __u8            mode;
         __u8            type;
         __u8            out;
         __u8            dev_class[3];
+        __u8            features[8];
+        __u16           interval;
+        __u16           link_policy;
         __u32           link_mode;
+        __u8            power_save;
         unsigned long   pend;
 
         unsigned int    sent;
 
         struct sk_buff_head data_q;
 
-        struct timer_list timer;
+        struct timer_list disc_timer;
+        struct timer_list idle_timer;
 
         struct hci_dev  *hdev;
         void            *l2cap_data;
         void            *sco_data;
@@ -211,7 +219,8 @@ void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
 enum {
         HCI_CONN_AUTH_PEND,
         HCI_CONN_ENCRYPT_PEND,
-        HCI_CONN_RSWITCH_PEND
+        HCI_CONN_RSWITCH_PEND,
+        HCI_CONN_MODE_CHANGE_PEND,
 };
 
 static inline void hci_conn_hash_init(struct hci_dev *hdev)
@@ -286,31 +295,27 @@ int hci_conn_encrypt(struct hci_conn *conn);
 int hci_conn_change_link_key(struct hci_conn *conn);
 int hci_conn_switch_role(struct hci_conn *conn, uint8_t role);
 
-static inline void hci_conn_set_timer(struct hci_conn *conn, unsigned long timeout)
-{
-        mod_timer(&conn->timer, jiffies + timeout);
-}
-
-static inline void hci_conn_del_timer(struct hci_conn *conn)
-{
-        del_timer(&conn->timer);
-}
+void hci_conn_enter_active_mode(struct hci_conn *conn);
+void hci_conn_enter_sniff_mode(struct hci_conn *conn);
 
 static inline void hci_conn_hold(struct hci_conn *conn)
 {
         atomic_inc(&conn->refcnt);
-        hci_conn_del_timer(conn);
+        del_timer(&conn->disc_timer);
 }
 
 static inline void hci_conn_put(struct hci_conn *conn)
 {
         if (atomic_dec_and_test(&conn->refcnt)) {
+                unsigned long timeo;
                 if (conn->type == ACL_LINK) {
-                        unsigned long timeo = (conn->out) ?
-                                HCI_DISCONN_TIMEOUT : HCI_DISCONN_TIMEOUT * 2;
-                        hci_conn_set_timer(conn, timeo);
+                        timeo = msecs_to_jiffies(HCI_DISCONN_TIMEOUT);
+                        if (!conn->out)
+                                timeo *= 2;
+                        del_timer(&conn->idle_timer);
                 } else
-                        hci_conn_set_timer(conn, HZ / 100);
+                        timeo = msecs_to_jiffies(10);
+                mod_timer(&conn->disc_timer, jiffies + timeo);
         }
 }
 
@@ -408,11 +413,13 @@ static inline int hci_recv_frame(struct sk_buff *skb)
 int hci_register_sysfs(struct hci_dev *hdev);
 void hci_unregister_sysfs(struct hci_dev *hdev);
 
-#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->class_dev.dev = (pdev))
+#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))
 
 /* ----- LMP capabilities ----- */
-#define lmp_rswitch_capable(dev) (dev->features[0] & LMP_RSWITCH)
-#define lmp_encrypt_capable(dev) (dev->features[0] & LMP_ENCRYPT)
+#define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
+#define lmp_encrypt_capable(dev)   ((dev)->features[0] & LMP_ENCRYPT)
+#define lmp_sniff_capable(dev)     ((dev)->features[0] & LMP_SNIFF)
+#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
 
 /* ----- HCI protocols ----- */
 struct hci_proto {
diff --git a/net/atm/clip.c b/net/atm/clip.c index 87a454f5c89c..121bf6f49148 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/if.h> /* for IFF_UP */ | 23 | #include <linux/if.h> /* for IFF_UP */ |
24 | #include <linux/inetdevice.h> | 24 | #include <linux/inetdevice.h> |
25 | #include <linux/bitops.h> | 25 | #include <linux/bitops.h> |
26 | #include <linux/poison.h> | ||
26 | #include <linux/proc_fs.h> | 27 | #include <linux/proc_fs.h> |
27 | #include <linux/seq_file.h> | 28 | #include <linux/seq_file.h> |
28 | #include <linux/rcupdate.h> | 29 | #include <linux/rcupdate.h> |
@@ -266,7 +267,7 @@ static void clip_neigh_destroy(struct neighbour *neigh) | |||
266 | DPRINTK("clip_neigh_destroy (neigh %p)\n", neigh); | 267 | DPRINTK("clip_neigh_destroy (neigh %p)\n", neigh); |
267 | if (NEIGH2ENTRY(neigh)->vccs) | 268 | if (NEIGH2ENTRY(neigh)->vccs) |
268 | printk(KERN_CRIT "clip_neigh_destroy: vccs != NULL !!!\n"); | 269 | printk(KERN_CRIT "clip_neigh_destroy: vccs != NULL !!!\n"); |
269 | NEIGH2ENTRY(neigh)->vccs = (void *) 0xdeadbeef; | 270 | NEIGH2ENTRY(neigh)->vccs = (void *) NEIGHBOR_DEAD; |
270 | } | 271 | } |
271 | 272 | ||
272 | static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb) | 273 | static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb) |
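[Editorial note] The clip.c hunk swaps a bare 0xdeadbeef for a named constant from the newly added <linux/poison.h> include. The sketch below (userspace C, with a made-up poison value standing in for the kernel's define) shows the idea: a dead pointer field is set to a recognizable non-NULL value so a later dereference fails loudly instead of silently reusing stale data.

    #include <stdio.h>

    /* stand-in for the kernel's poison define; the real value lives in
     * <linux/poison.h> and differs per use site */
    #define NEIGHBOR_DEAD ((void *) 0x0000dead)

    struct entry {
        void *vccs;
    };

    static void destroy(struct entry *e)
    {
        if (e->vccs)
            fprintf(stderr, "destroy: vccs != NULL !!!\n");
        /* mark the field as dead instead of leaving a dangling pointer */
        e->vccs = NEIGHBOR_DEAD;
    }

    int main(void)
    {
        struct entry e = { .vccs = NULL };
        destroy(&e);
        printf("vccs is now %p (poisoned)\n", e.vccs);
        return 0;
    }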
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c index 9be5c15e63d3..136c3aefa9de 100644 --- a/net/ax25/ax25_ip.c +++ b/net/ax25/ax25_ip.c | |||
@@ -103,11 +103,13 @@ int ax25_rebuild_header(struct sk_buff *skb) | |||
103 | { | 103 | { |
104 | struct sk_buff *ourskb; | 104 | struct sk_buff *ourskb; |
105 | unsigned char *bp = skb->data; | 105 | unsigned char *bp = skb->data; |
106 | struct net_device *dev; | 106 | ax25_route *route; |
107 | struct net_device *dev = NULL; | ||
107 | ax25_address *src, *dst; | 108 | ax25_address *src, *dst; |
109 | ax25_digi *digipeat = NULL; | ||
108 | ax25_dev *ax25_dev; | 110 | ax25_dev *ax25_dev; |
109 | ax25_route _route, *route = &_route; | ||
110 | ax25_cb *ax25; | 111 | ax25_cb *ax25; |
112 | char ip_mode = ' '; | ||
111 | 113 | ||
112 | dst = (ax25_address *)(bp + 1); | 114 | dst = (ax25_address *)(bp + 1); |
113 | src = (ax25_address *)(bp + 8); | 115 | src = (ax25_address *)(bp + 8); |
@@ -115,8 +117,12 @@ int ax25_rebuild_header(struct sk_buff *skb) | |||
115 | if (arp_find(bp + 1, skb)) | 117 | if (arp_find(bp + 1, skb)) |
116 | return 1; | 118 | return 1; |
117 | 119 | ||
118 | route = ax25_rt_find_route(route, dst, NULL); | 120 | route = ax25_get_route(dst, NULL); |
119 | dev = route->dev; | 121 | if (route) { |
122 | digipeat = route->digipeat; | ||
123 | dev = route->dev; | ||
124 | ip_mode = route->ip_mode; | ||
125 | }; | ||
120 | 126 | ||
121 | if (dev == NULL) | 127 | if (dev == NULL) |
122 | dev = skb->dev; | 128 | dev = skb->dev; |
@@ -126,7 +132,7 @@ int ax25_rebuild_header(struct sk_buff *skb) | |||
126 | } | 132 | } |
127 | 133 | ||
128 | if (bp[16] == AX25_P_IP) { | 134 | if (bp[16] == AX25_P_IP) { |
129 | if (route->ip_mode == 'V' || (route->ip_mode == ' ' && ax25_dev->values[AX25_VALUES_IPDEFMODE])) { | 135 | if (ip_mode == 'V' || (ip_mode == ' ' && ax25_dev->values[AX25_VALUES_IPDEFMODE])) { |
130 | /* | 136 | /* |
131 | * We copy the buffer and release the original thereby | 137 | * We copy the buffer and release the original thereby |
132 | * keeping it straight | 138 | * keeping it straight |
@@ -172,7 +178,7 @@ int ax25_rebuild_header(struct sk_buff *skb) | |||
172 | ourskb, | 178 | ourskb, |
173 | ax25_dev->values[AX25_VALUES_PACLEN], | 179 | ax25_dev->values[AX25_VALUES_PACLEN], |
174 | &src_c, | 180 | &src_c, |
175 | &dst_c, route->digipeat, dev); | 181 | &dst_c, digipeat, dev); |
176 | if (ax25) { | 182 | if (ax25) { |
177 | ax25_cb_put(ax25); | 183 | ax25_cb_put(ax25); |
178 | } | 184 | } |
@@ -190,7 +196,7 @@ int ax25_rebuild_header(struct sk_buff *skb) | |||
190 | 196 | ||
191 | skb_pull(skb, AX25_KISS_HEADER_LEN); | 197 | skb_pull(skb, AX25_KISS_HEADER_LEN); |
192 | 198 | ||
193 | if (route->digipeat != NULL) { | 199 | if (digipeat != NULL) { |
194 | if ((ourskb = ax25_rt_build_path(skb, src, dst, route->digipeat)) == NULL) { | 200 | if ((ourskb = ax25_rt_build_path(skb, src, dst, route->digipeat)) == NULL) { |
195 | kfree_skb(skb); | 201 | kfree_skb(skb); |
196 | goto put; | 202 | goto put; |
@@ -202,7 +208,8 @@ int ax25_rebuild_header(struct sk_buff *skb) | |||
202 | ax25_queue_xmit(skb, dev); | 208 | ax25_queue_xmit(skb, dev); |
203 | 209 | ||
204 | put: | 210 | put: |
205 | ax25_put_route(route); | 211 | if (route) |
212 | ax25_put_route(route); | ||
206 | 213 | ||
207 | return 1; | 214 | return 1; |
208 | } | 215 | } |
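[Editorial note] In the ax25_ip.c change the header rebuild no longer relies on a caller-provided dummy route: it asks ax25_get_route() for a possibly NULL refcounted entry, copies the three fields it actually needs while holding the reference, falls back to defaults otherwise, and only drops the reference if one was taken. A hedged userspace sketch of that pattern, with invented type and function names, follows.

    #include <stddef.h>
    #include <stdio.h>

    struct route {              /* illustrative stand-in for ax25_route */
        const char *dev;
        const char *digipeat;
        char ip_mode;
    };

    /* pretend lookup: returns NULL when nothing matches */
    static struct route *get_route(int have_route)
    {
        static struct route r = { "ax0", "digi-path", 'V' };
        return have_route ? &r : NULL;
    }

    static void put_route(struct route *r) { (void)r; /* drop reference */ }

    static void rebuild(int have_route)
    {
        struct route *route = get_route(have_route);
        const char *dev = NULL, *digipeat = NULL;
        char ip_mode = ' ';

        if (route) {            /* copy what we need while we hold the ref */
            dev = route->dev;
            digipeat = route->digipeat;
            ip_mode = route->ip_mode;
        }

        printf("dev=%s digipeat=%s mode=%c\n",
               dev ? dev : "(default)", digipeat ? digipeat : "(none)", ip_mode);

        if (route)              /* only put what we actually got */
            put_route(route);
    }

    int main(void)
    {
        rebuild(1);
        rebuild(0);
        return 0;
    }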
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c index 5ac98250797b..51b7bdaf27eb 100644 --- a/net/ax25/ax25_route.c +++ b/net/ax25/ax25_route.c | |||
@@ -41,8 +41,6 @@ | |||
41 | static ax25_route *ax25_route_list; | 41 | static ax25_route *ax25_route_list; |
42 | static DEFINE_RWLOCK(ax25_route_lock); | 42 | static DEFINE_RWLOCK(ax25_route_lock); |
43 | 43 | ||
44 | static ax25_route *ax25_get_route(ax25_address *, struct net_device *); | ||
45 | |||
46 | void ax25_rt_device_down(struct net_device *dev) | 44 | void ax25_rt_device_down(struct net_device *dev) |
47 | { | 45 | { |
48 | ax25_route *s, *t, *ax25_rt; | 46 | ax25_route *s, *t, *ax25_rt; |
@@ -115,7 +113,7 @@ static int ax25_rt_add(struct ax25_routes_struct *route) | |||
115 | return -ENOMEM; | 113 | return -ENOMEM; |
116 | } | 114 | } |
117 | 115 | ||
118 | atomic_set(&ax25_rt->ref, 0); | 116 | atomic_set(&ax25_rt->refcount, 1); |
119 | ax25_rt->callsign = route->dest_addr; | 117 | ax25_rt->callsign = route->dest_addr; |
120 | ax25_rt->dev = ax25_dev->dev; | 118 | ax25_rt->dev = ax25_dev->dev; |
121 | ax25_rt->digipeat = NULL; | 119 | ax25_rt->digipeat = NULL; |
@@ -140,23 +138,10 @@ static int ax25_rt_add(struct ax25_routes_struct *route) | |||
140 | return 0; | 138 | return 0; |
141 | } | 139 | } |
142 | 140 | ||
143 | static void ax25_rt_destroy(ax25_route *ax25_rt) | 141 | void __ax25_put_route(ax25_route *ax25_rt) |
144 | { | 142 | { |
145 | if (atomic_read(&ax25_rt->ref) == 0) { | 143 | kfree(ax25_rt->digipeat); |
146 | kfree(ax25_rt->digipeat); | 144 | kfree(ax25_rt); |
147 | kfree(ax25_rt); | ||
148 | return; | ||
149 | } | ||
150 | |||
151 | /* | ||
152 | * Uh... Route is still in use; we can't yet destroy it. Retry later. | ||
153 | */ | ||
154 | init_timer(&ax25_rt->timer); | ||
155 | ax25_rt->timer.data = (unsigned long) ax25_rt; | ||
156 | ax25_rt->timer.function = (void *) ax25_rt_destroy; | ||
157 | ax25_rt->timer.expires = jiffies + 5 * HZ; | ||
158 | |||
159 | add_timer(&ax25_rt->timer); | ||
160 | } | 145 | } |
161 | 146 | ||
162 | static int ax25_rt_del(struct ax25_routes_struct *route) | 147 | static int ax25_rt_del(struct ax25_routes_struct *route) |
@@ -177,12 +162,12 @@ static int ax25_rt_del(struct ax25_routes_struct *route) | |||
177 | ax25cmp(&route->dest_addr, &s->callsign) == 0) { | 162 | ax25cmp(&route->dest_addr, &s->callsign) == 0) { |
178 | if (ax25_route_list == s) { | 163 | if (ax25_route_list == s) { |
179 | ax25_route_list = s->next; | 164 | ax25_route_list = s->next; |
180 | ax25_rt_destroy(s); | 165 | ax25_put_route(s); |
181 | } else { | 166 | } else { |
182 | for (t = ax25_route_list; t != NULL; t = t->next) { | 167 | for (t = ax25_route_list; t != NULL; t = t->next) { |
183 | if (t->next == s) { | 168 | if (t->next == s) { |
184 | t->next = s->next; | 169 | t->next = s->next; |
185 | ax25_rt_destroy(s); | 170 | ax25_put_route(s); |
186 | break; | 171 | break; |
187 | } | 172 | } |
188 | } | 173 | } |
@@ -362,7 +347,7 @@ struct file_operations ax25_route_fops = { | |||
362 | * | 347 | * |
363 | * Only routes with a reference count of zero can be destroyed. | 348 | * Only routes with a reference count of zero can be destroyed. |
364 | */ | 349 | */ |
365 | static ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) | 350 | ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) |
366 | { | 351 | { |
367 | ax25_route *ax25_spe_rt = NULL; | 352 | ax25_route *ax25_spe_rt = NULL; |
368 | ax25_route *ax25_def_rt = NULL; | 353 | ax25_route *ax25_def_rt = NULL; |
@@ -392,7 +377,7 @@ static ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) | |||
392 | ax25_rt = ax25_spe_rt; | 377 | ax25_rt = ax25_spe_rt; |
393 | 378 | ||
394 | if (ax25_rt != NULL) | 379 | if (ax25_rt != NULL) |
395 | atomic_inc(&ax25_rt->ref); | 380 | ax25_hold_route(ax25_rt); |
396 | 381 | ||
397 | read_unlock(&ax25_route_lock); | 382 | read_unlock(&ax25_route_lock); |
398 | 383 | ||
@@ -467,24 +452,6 @@ put: | |||
467 | return 0; | 452 | return 0; |
468 | } | 453 | } |
469 | 454 | ||
470 | ax25_route *ax25_rt_find_route(ax25_route * route, ax25_address *addr, | ||
471 | struct net_device *dev) | ||
472 | { | ||
473 | ax25_route *ax25_rt; | ||
474 | |||
475 | if ((ax25_rt = ax25_get_route(addr, dev))) | ||
476 | return ax25_rt; | ||
477 | |||
478 | route->next = NULL; | ||
479 | atomic_set(&route->ref, 1); | ||
480 | route->callsign = *addr; | ||
481 | route->dev = dev; | ||
482 | route->digipeat = NULL; | ||
483 | route->ip_mode = ' '; | ||
484 | |||
485 | return route; | ||
486 | } | ||
487 | |||
488 | struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src, | 455 | struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src, |
489 | ax25_address *dest, ax25_digi *digi) | 456 | ax25_address *dest, ax25_digi *digi) |
490 | { | 457 | { |
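[Editorial note] The ax25_route.c rework drops the old retry-timer destruction scheme in favour of plain reference counting: routes start at refcount 1, lookups take an extra reference via ax25_hold_route(), and __ax25_put_route() frees the entry once the last user is gone. Below is a minimal, self-contained C sketch of that hold/put idiom using C11 atomics; the names are invented and the route list and locking are omitted.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct route {
        atomic_int refcount;
        char *digipeat;
    };

    static struct route *route_new(void)
    {
        struct route *r = calloc(1, sizeof(*r));
        if (r)
            atomic_init(&r->refcount, 1);   /* creator owns one reference */
        return r;
    }

    static void route_hold(struct route *r)
    {
        atomic_fetch_add(&r->refcount, 1);
    }

    static void route_put(struct route *r)
    {
        /* free only when the last reference is dropped */
        if (atomic_fetch_sub(&r->refcount, 1) == 1) {
            free(r->digipeat);
            free(r);
            puts("route freed");
        }
    }

    int main(void)
    {
        struct route *r = route_new();
        route_hold(r);      /* a lookup takes its own reference */
        route_put(r);       /* lookup user done */
        route_put(r);       /* list removal drops the creator's reference */
        return 0;
    }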
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 51f867062e1d..788ea7a2b744 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c | |||
@@ -48,7 +48,7 @@ | |||
48 | #define BT_DBG(D...) | 48 | #define BT_DBG(D...) |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | #define VERSION "2.8" | 51 | #define VERSION "2.10" |
52 | 52 | ||
53 | /* Bluetooth sockets */ | 53 | /* Bluetooth sockets */ |
54 | #define BT_MAX_PROTO 8 | 54 | #define BT_MAX_PROTO 8 |
@@ -307,13 +307,21 @@ static struct net_proto_family bt_sock_family_ops = { | |||
307 | 307 | ||
308 | static int __init bt_init(void) | 308 | static int __init bt_init(void) |
309 | { | 309 | { |
310 | int err; | ||
311 | |||
310 | BT_INFO("Core ver %s", VERSION); | 312 | BT_INFO("Core ver %s", VERSION); |
311 | 313 | ||
312 | sock_register(&bt_sock_family_ops); | 314 | err = bt_sysfs_init(); |
315 | if (err < 0) | ||
316 | return err; | ||
313 | 317 | ||
314 | BT_INFO("HCI device and connection manager initialized"); | 318 | err = sock_register(&bt_sock_family_ops); |
319 | if (err < 0) { | ||
320 | bt_sysfs_cleanup(); | ||
321 | return err; | ||
322 | } | ||
315 | 323 | ||
316 | bt_sysfs_init(); | 324 | BT_INFO("HCI device and connection manager initialized"); |
317 | 325 | ||
318 | hci_sock_init(); | 326 | hci_sock_init(); |
319 | 327 | ||
@@ -324,9 +332,9 @@ static void __exit bt_exit(void) | |||
324 | { | 332 | { |
325 | hci_sock_cleanup(); | 333 | hci_sock_cleanup(); |
326 | 334 | ||
327 | bt_sysfs_cleanup(); | ||
328 | |||
329 | sock_unregister(PF_BLUETOOTH); | 335 | sock_unregister(PF_BLUETOOTH); |
336 | |||
337 | bt_sysfs_cleanup(); | ||
330 | } | 338 | } |
331 | 339 | ||
332 | subsys_initcall(bt_init); | 340 | subsys_initcall(bt_init); |
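[Editorial note] bt_init() now checks the return values of bt_sysfs_init() and sock_register() and unwinds whatever already succeeded on failure, while bt_exit() tears things down in the reverse order of registration. A generic sketch of that register-then-unwind pattern (plain C, invented subsystem names) follows.

    #include <stdio.h>

    /* pretend subsystems; each init returns 0 on success, negative on error */
    static int  sysfs_init(void)    { puts("sysfs up");     return 0; }
    static void sysfs_cleanup(void) { puts("sysfs down"); }
    static int  sock_init(void)     { puts("sockets up");   return 0; }
    static void sock_cleanup(void)  { puts("sockets down"); }

    static int subsys_init(void)
    {
        int err;

        err = sysfs_init();
        if (err < 0)
            return err;                 /* nothing to undo yet */

        err = sock_init();
        if (err < 0) {
            sysfs_cleanup();            /* undo what already succeeded */
            return err;
        }

        return 0;
    }

    static void subsys_exit(void)
    {
        /* tear down in reverse order of initialization */
        sock_cleanup();
        sysfs_cleanup();
    }

    int main(void)
    {
        if (subsys_init() == 0)
            subsys_exit();
        return 0;
    }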
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 5c0c2b1ef34a..420ed4d7e57e 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c | |||
@@ -115,8 +115,8 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle) | |||
115 | 115 | ||
116 | static void hci_conn_timeout(unsigned long arg) | 116 | static void hci_conn_timeout(unsigned long arg) |
117 | { | 117 | { |
118 | struct hci_conn *conn = (void *)arg; | 118 | struct hci_conn *conn = (void *) arg; |
119 | struct hci_dev *hdev = conn->hdev; | 119 | struct hci_dev *hdev = conn->hdev; |
120 | 120 | ||
121 | BT_DBG("conn %p state %d", conn, conn->state); | 121 | BT_DBG("conn %p state %d", conn, conn->state); |
122 | 122 | ||
@@ -132,11 +132,13 @@ static void hci_conn_timeout(unsigned long arg) | |||
132 | return; | 132 | return; |
133 | } | 133 | } |
134 | 134 | ||
135 | static void hci_conn_init_timer(struct hci_conn *conn) | 135 | static void hci_conn_idle(unsigned long arg) |
136 | { | 136 | { |
137 | init_timer(&conn->timer); | 137 | struct hci_conn *conn = (void *) arg; |
138 | conn->timer.function = hci_conn_timeout; | 138 | |
139 | conn->timer.data = (unsigned long)conn; | 139 | BT_DBG("conn %p mode %d", conn, conn->mode); |
140 | |||
141 | hci_conn_enter_sniff_mode(conn); | ||
140 | } | 142 | } |
141 | 143 | ||
142 | struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) | 144 | struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) |
@@ -145,17 +147,27 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) | |||
145 | 147 | ||
146 | BT_DBG("%s dst %s", hdev->name, batostr(dst)); | 148 | BT_DBG("%s dst %s", hdev->name, batostr(dst)); |
147 | 149 | ||
148 | if (!(conn = kmalloc(sizeof(struct hci_conn), GFP_ATOMIC))) | 150 | conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC); |
151 | if (!conn) | ||
149 | return NULL; | 152 | return NULL; |
150 | memset(conn, 0, sizeof(struct hci_conn)); | ||
151 | 153 | ||
152 | bacpy(&conn->dst, dst); | 154 | bacpy(&conn->dst, dst); |
153 | conn->type = type; | ||
154 | conn->hdev = hdev; | 155 | conn->hdev = hdev; |
156 | conn->type = type; | ||
157 | conn->mode = HCI_CM_ACTIVE; | ||
155 | conn->state = BT_OPEN; | 158 | conn->state = BT_OPEN; |
156 | 159 | ||
160 | conn->power_save = 1; | ||
161 | |||
157 | skb_queue_head_init(&conn->data_q); | 162 | skb_queue_head_init(&conn->data_q); |
158 | hci_conn_init_timer(conn); | 163 | |
164 | init_timer(&conn->disc_timer); | ||
165 | conn->disc_timer.function = hci_conn_timeout; | ||
166 | conn->disc_timer.data = (unsigned long) conn; | ||
167 | |||
168 | init_timer(&conn->idle_timer); | ||
169 | conn->idle_timer.function = hci_conn_idle; | ||
170 | conn->idle_timer.data = (unsigned long) conn; | ||
159 | 171 | ||
160 | atomic_set(&conn->refcnt, 0); | 172 | atomic_set(&conn->refcnt, 0); |
161 | 173 | ||
@@ -178,7 +190,9 @@ int hci_conn_del(struct hci_conn *conn) | |||
178 | 190 | ||
179 | BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle); | 191 | BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle); |
180 | 192 | ||
181 | hci_conn_del_timer(conn); | 193 | del_timer(&conn->idle_timer); |
194 | |||
195 | del_timer(&conn->disc_timer); | ||
182 | 196 | ||
183 | if (conn->type == SCO_LINK) { | 197 | if (conn->type == SCO_LINK) { |
184 | struct hci_conn *acl = conn->link; | 198 | struct hci_conn *acl = conn->link; |
@@ -364,6 +378,70 @@ int hci_conn_switch_role(struct hci_conn *conn, uint8_t role) | |||
364 | } | 378 | } |
365 | EXPORT_SYMBOL(hci_conn_switch_role); | 379 | EXPORT_SYMBOL(hci_conn_switch_role); |
366 | 380 | ||
381 | /* Enter active mode */ | ||
382 | void hci_conn_enter_active_mode(struct hci_conn *conn) | ||
383 | { | ||
384 | struct hci_dev *hdev = conn->hdev; | ||
385 | |||
386 | BT_DBG("conn %p mode %d", conn, conn->mode); | ||
387 | |||
388 | if (test_bit(HCI_RAW, &hdev->flags)) | ||
389 | return; | ||
390 | |||
391 | if (conn->mode != HCI_CM_SNIFF || !conn->power_save) | ||
392 | goto timer; | ||
393 | |||
394 | if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { | ||
395 | struct hci_cp_exit_sniff_mode cp; | ||
396 | cp.handle = __cpu_to_le16(conn->handle); | ||
397 | hci_send_cmd(hdev, OGF_LINK_POLICY, | ||
398 | OCF_EXIT_SNIFF_MODE, sizeof(cp), &cp); | ||
399 | } | ||
400 | |||
401 | timer: | ||
402 | if (hdev->idle_timeout > 0) | ||
403 | mod_timer(&conn->idle_timer, | ||
404 | jiffies + msecs_to_jiffies(hdev->idle_timeout)); | ||
405 | } | ||
406 | |||
407 | /* Enter sniff mode */ | ||
408 | void hci_conn_enter_sniff_mode(struct hci_conn *conn) | ||
409 | { | ||
410 | struct hci_dev *hdev = conn->hdev; | ||
411 | |||
412 | BT_DBG("conn %p mode %d", conn, conn->mode); | ||
413 | |||
414 | if (test_bit(HCI_RAW, &hdev->flags)) | ||
415 | return; | ||
416 | |||
417 | if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn)) | ||
418 | return; | ||
419 | |||
420 | if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF)) | ||
421 | return; | ||
422 | |||
423 | if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) { | ||
424 | struct hci_cp_sniff_subrate cp; | ||
425 | cp.handle = __cpu_to_le16(conn->handle); | ||
426 | cp.max_latency = __constant_cpu_to_le16(0); | ||
427 | cp.min_remote_timeout = __constant_cpu_to_le16(0); | ||
428 | cp.min_local_timeout = __constant_cpu_to_le16(0); | ||
429 | hci_send_cmd(hdev, OGF_LINK_POLICY, | ||
430 | OCF_SNIFF_SUBRATE, sizeof(cp), &cp); | ||
431 | } | ||
432 | |||
433 | if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { | ||
434 | struct hci_cp_sniff_mode cp; | ||
435 | cp.handle = __cpu_to_le16(conn->handle); | ||
436 | cp.max_interval = __cpu_to_le16(hdev->sniff_max_interval); | ||
437 | cp.min_interval = __cpu_to_le16(hdev->sniff_min_interval); | ||
438 | cp.attempt = __constant_cpu_to_le16(4); | ||
439 | cp.timeout = __constant_cpu_to_le16(1); | ||
440 | hci_send_cmd(hdev, OGF_LINK_POLICY, | ||
441 | OCF_SNIFF_MODE, sizeof(cp), &cp); | ||
442 | } | ||
443 | } | ||
444 | |||
367 | /* Drop all connection on the device */ | 445 | /* Drop all connection on the device */ |
368 | void hci_conn_hash_flush(struct hci_dev *hdev) | 446 | void hci_conn_hash_flush(struct hci_dev *hdev) |
369 | { | 447 | { |
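[Editorial note] hci_conn.c gains two mode helpers: hci_conn_enter_active_mode() issues an exit-sniff command only when the link is actually sniffing with power save enabled, and in any case re-arms the per-connection idle timer; when that timer fires, hci_conn_idle() tries to put the link back into sniff mode. Both paths guard against duplicate requests with a test-and-set of a "mode change pending" bit. The sketch below (userspace C11, invented names, no real HCI commands) illustrates that guard and the mode bookkeeping.

    #include <stdatomic.h>
    #include <stdio.h>

    enum mode { MODE_ACTIVE, MODE_SNIFF };

    struct conn {
        enum mode mode;
        atomic_flag change_pending;   /* set while a mode change is in flight */
    };

    static void request_mode(struct conn *c, enum mode target, const char *cmd)
    {
        if (c->mode == target)
            return;                               /* already there */
        if (atomic_flag_test_and_set(&c->change_pending)) {
            puts("mode change already pending, skipping");
            return;                               /* don't queue a duplicate */
        }
        printf("send %s\n", cmd);                 /* would issue the HCI command */
    }

    static void mode_change_event(struct conn *c, enum mode new_mode)
    {
        c->mode = new_mode;                       /* controller confirmed it */
        atomic_flag_clear(&c->change_pending);
    }

    int main(void)
    {
        struct conn c = { .mode = MODE_ACTIVE, .change_pending = ATOMIC_FLAG_INIT };

        request_mode(&c, MODE_SNIFF, "SNIFF_MODE");       /* idle timer fired */
        request_mode(&c, MODE_SNIFF, "SNIFF_MODE");       /* duplicate, suppressed */
        mode_change_event(&c, MODE_SNIFF);

        request_mode(&c, MODE_ACTIVE, "EXIT_SNIFF_MODE"); /* traffic resumed */
        mode_change_event(&c, MODE_ACTIVE);
        return 0;
    }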
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index f67240beb0dd..54e8e5ea2154 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c | |||
@@ -411,7 +411,7 @@ int hci_inquiry(void __user *arg) | |||
411 | } | 411 | } |
412 | hci_dev_unlock_bh(hdev); | 412 | hci_dev_unlock_bh(hdev); |
413 | 413 | ||
414 | timeo = ir.length * 2 * HZ; | 414 | timeo = ir.length * msecs_to_jiffies(2000); |
415 | if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0) | 415 | if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0) |
416 | goto done; | 416 | goto done; |
417 | 417 | ||
@@ -479,7 +479,8 @@ int hci_dev_open(__u16 dev) | |||
479 | set_bit(HCI_INIT, &hdev->flags); | 479 | set_bit(HCI_INIT, &hdev->flags); |
480 | 480 | ||
481 | //__hci_request(hdev, hci_reset_req, 0, HZ); | 481 | //__hci_request(hdev, hci_reset_req, 0, HZ); |
482 | ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT); | 482 | ret = __hci_request(hdev, hci_init_req, 0, |
483 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); | ||
483 | 484 | ||
484 | clear_bit(HCI_INIT, &hdev->flags); | 485 | clear_bit(HCI_INIT, &hdev->flags); |
485 | } | 486 | } |
@@ -546,7 +547,8 @@ static int hci_dev_do_close(struct hci_dev *hdev) | |||
546 | atomic_set(&hdev->cmd_cnt, 1); | 547 | atomic_set(&hdev->cmd_cnt, 1); |
547 | if (!test_bit(HCI_RAW, &hdev->flags)) { | 548 | if (!test_bit(HCI_RAW, &hdev->flags)) { |
548 | set_bit(HCI_INIT, &hdev->flags); | 549 | set_bit(HCI_INIT, &hdev->flags); |
549 | __hci_request(hdev, hci_reset_req, 0, HZ/4); | 550 | __hci_request(hdev, hci_reset_req, 0, |
551 | msecs_to_jiffies(250)); | ||
550 | clear_bit(HCI_INIT, &hdev->flags); | 552 | clear_bit(HCI_INIT, &hdev->flags); |
551 | } | 553 | } |
552 | 554 | ||
@@ -619,7 +621,8 @@ int hci_dev_reset(__u16 dev) | |||
619 | hdev->acl_cnt = 0; hdev->sco_cnt = 0; | 621 | hdev->acl_cnt = 0; hdev->sco_cnt = 0; |
620 | 622 | ||
621 | if (!test_bit(HCI_RAW, &hdev->flags)) | 623 | if (!test_bit(HCI_RAW, &hdev->flags)) |
622 | ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT); | 624 | ret = __hci_request(hdev, hci_reset_req, 0, |
625 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); | ||
623 | 626 | ||
624 | done: | 627 | done: |
625 | tasklet_enable(&hdev->tx_task); | 628 | tasklet_enable(&hdev->tx_task); |
@@ -657,7 +660,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) | |||
657 | 660 | ||
658 | switch (cmd) { | 661 | switch (cmd) { |
659 | case HCISETAUTH: | 662 | case HCISETAUTH: |
660 | err = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT); | 663 | err = hci_request(hdev, hci_auth_req, dr.dev_opt, |
664 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); | ||
661 | break; | 665 | break; |
662 | 666 | ||
663 | case HCISETENCRYPT: | 667 | case HCISETENCRYPT: |
@@ -668,18 +672,19 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) | |||
668 | 672 | ||
669 | if (!test_bit(HCI_AUTH, &hdev->flags)) { | 673 | if (!test_bit(HCI_AUTH, &hdev->flags)) { |
670 | /* Auth must be enabled first */ | 674 | /* Auth must be enabled first */ |
671 | err = hci_request(hdev, hci_auth_req, | 675 | err = hci_request(hdev, hci_auth_req, dr.dev_opt, |
672 | dr.dev_opt, HCI_INIT_TIMEOUT); | 676 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); |
673 | if (err) | 677 | if (err) |
674 | break; | 678 | break; |
675 | } | 679 | } |
676 | 680 | ||
677 | err = hci_request(hdev, hci_encrypt_req, | 681 | err = hci_request(hdev, hci_encrypt_req, dr.dev_opt, |
678 | dr.dev_opt, HCI_INIT_TIMEOUT); | 682 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); |
679 | break; | 683 | break; |
680 | 684 | ||
681 | case HCISETSCAN: | 685 | case HCISETSCAN: |
682 | err = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT); | 686 | err = hci_request(hdev, hci_scan_req, dr.dev_opt, |
687 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); | ||
683 | break; | 688 | break; |
684 | 689 | ||
685 | case HCISETPTYPE: | 690 | case HCISETPTYPE: |
@@ -812,8 +817,8 @@ void hci_free_dev(struct hci_dev *hdev) | |||
812 | { | 817 | { |
813 | skb_queue_purge(&hdev->driver_init); | 818 | skb_queue_purge(&hdev->driver_init); |
814 | 819 | ||
815 | /* will free via class release */ | 820 | /* will free via device release */ |
816 | class_device_put(&hdev->class_dev); | 821 | put_device(&hdev->dev); |
817 | } | 822 | } |
818 | EXPORT_SYMBOL(hci_free_dev); | 823 | EXPORT_SYMBOL(hci_free_dev); |
819 | 824 | ||
@@ -848,6 +853,10 @@ int hci_register_dev(struct hci_dev *hdev) | |||
848 | hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); | 853 | hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); |
849 | hdev->link_mode = (HCI_LM_ACCEPT); | 854 | hdev->link_mode = (HCI_LM_ACCEPT); |
850 | 855 | ||
856 | hdev->idle_timeout = 0; | ||
857 | hdev->sniff_max_interval = 800; | ||
858 | hdev->sniff_min_interval = 80; | ||
859 | |||
851 | tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev); | 860 | tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev); |
852 | tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev); | 861 | tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev); |
853 | tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev); | 862 | tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev); |
@@ -1220,6 +1229,9 @@ static inline void hci_sched_acl(struct hci_dev *hdev) | |||
1220 | while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) { | 1229 | while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1221 | while (quote-- && (skb = skb_dequeue(&conn->data_q))) { | 1230 | while (quote-- && (skb = skb_dequeue(&conn->data_q))) { |
1222 | BT_DBG("skb %p len %d", skb, skb->len); | 1231 | BT_DBG("skb %p len %d", skb, skb->len); |
1232 | |||
1233 | hci_conn_enter_active_mode(conn); | ||
1234 | |||
1223 | hci_send_frame(skb); | 1235 | hci_send_frame(skb); |
1224 | hdev->acl_last_tx = jiffies; | 1236 | hdev->acl_last_tx = jiffies; |
1225 | 1237 | ||
@@ -1298,6 +1310,8 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
1298 | if (conn) { | 1310 | if (conn) { |
1299 | register struct hci_proto *hp; | 1311 | register struct hci_proto *hp; |
1300 | 1312 | ||
1313 | hci_conn_enter_active_mode(conn); | ||
1314 | |||
1301 | /* Send to upper protocol */ | 1315 | /* Send to upper protocol */ |
1302 | if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) { | 1316 | if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) { |
1303 | hp->recv_acldata(conn, skb, flags); | 1317 | hp->recv_acldata(conn, skb, flags); |
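[Editorial note] Besides switching the HZ-based timeouts over to msecs_to_jiffies(), hci_register_dev() now seeds an idle_timeout of 0 (disabled) and default sniff interval bounds of 80 and 800. Assuming the usual HCI convention that these intervals are counted in 0.625 ms baseband slots (an assumption, not stated in the patch), the defaults work out as shown in this small sketch.

    #include <stdio.h>

    /* HCI sniff intervals are commonly expressed in 0.625 ms baseband slots
     * (assumed here; the patch itself only sets the raw numbers) */
    static double slots_to_ms(unsigned int slots)
    {
        return slots * 0.625;
    }

    int main(void)
    {
        unsigned int sniff_min_interval = 80;   /* default from hci_register_dev() */
        unsigned int sniff_max_interval = 800;  /* default from hci_register_dev() */

        printf("sniff min: %u slots = %.1f ms\n",
               sniff_min_interval, slots_to_ms(sniff_min_interval));
        printf("sniff max: %u slots = %.1f ms\n",
               sniff_max_interval, slots_to_ms(sniff_max_interval));
        return 0;
    }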
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 618bacee1b1c..3896dabab11d 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c | |||
@@ -83,6 +83,8 @@ static void hci_cc_link_policy(struct hci_dev *hdev, __u16 ocf, struct sk_buff * | |||
83 | { | 83 | { |
84 | struct hci_conn *conn; | 84 | struct hci_conn *conn; |
85 | struct hci_rp_role_discovery *rd; | 85 | struct hci_rp_role_discovery *rd; |
86 | struct hci_rp_write_link_policy *lp; | ||
87 | void *sent; | ||
86 | 88 | ||
87 | BT_DBG("%s ocf 0x%x", hdev->name, ocf); | 89 | BT_DBG("%s ocf 0x%x", hdev->name, ocf); |
88 | 90 | ||
@@ -106,6 +108,27 @@ static void hci_cc_link_policy(struct hci_dev *hdev, __u16 ocf, struct sk_buff * | |||
106 | hci_dev_unlock(hdev); | 108 | hci_dev_unlock(hdev); |
107 | break; | 109 | break; |
108 | 110 | ||
111 | case OCF_WRITE_LINK_POLICY: | ||
112 | sent = hci_sent_cmd_data(hdev, OGF_LINK_POLICY, OCF_WRITE_LINK_POLICY); | ||
113 | if (!sent) | ||
114 | break; | ||
115 | |||
116 | lp = (struct hci_rp_write_link_policy *) skb->data; | ||
117 | |||
118 | if (lp->status) | ||
119 | break; | ||
120 | |||
121 | hci_dev_lock(hdev); | ||
122 | |||
123 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(lp->handle)); | ||
124 | if (conn) { | ||
125 | __le16 policy = get_unaligned((__le16 *) (sent + 2)); | ||
126 | conn->link_policy = __le16_to_cpu(policy); | ||
127 | } | ||
128 | |||
129 | hci_dev_unlock(hdev); | ||
130 | break; | ||
131 | |||
109 | default: | 132 | default: |
110 | BT_DBG("%s: Command complete: ogf LINK_POLICY ocf %x", | 133 | BT_DBG("%s: Command complete: ogf LINK_POLICY ocf %x", |
111 | hdev->name, ocf); | 134 | hdev->name, ocf); |
@@ -274,7 +297,7 @@ static void hci_cc_host_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb | |||
274 | /* Command Complete OGF INFO_PARAM */ | 297 | /* Command Complete OGF INFO_PARAM */ |
275 | static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb) | 298 | static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb) |
276 | { | 299 | { |
277 | struct hci_rp_read_loc_features *lf; | 300 | struct hci_rp_read_local_features *lf; |
278 | struct hci_rp_read_buffer_size *bs; | 301 | struct hci_rp_read_buffer_size *bs; |
279 | struct hci_rp_read_bd_addr *ba; | 302 | struct hci_rp_read_bd_addr *ba; |
280 | 303 | ||
@@ -282,7 +305,7 @@ static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *s | |||
282 | 305 | ||
283 | switch (ocf) { | 306 | switch (ocf) { |
284 | case OCF_READ_LOCAL_FEATURES: | 307 | case OCF_READ_LOCAL_FEATURES: |
285 | lf = (struct hci_rp_read_loc_features *) skb->data; | 308 | lf = (struct hci_rp_read_local_features *) skb->data; |
286 | 309 | ||
287 | if (lf->status) { | 310 | if (lf->status) { |
288 | BT_DBG("%s READ_LOCAL_FEATURES failed %d", hdev->name, lf->status); | 311 | BT_DBG("%s READ_LOCAL_FEATURES failed %d", hdev->name, lf->status); |
@@ -319,9 +342,17 @@ static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *s | |||
319 | } | 342 | } |
320 | 343 | ||
321 | hdev->acl_mtu = __le16_to_cpu(bs->acl_mtu); | 344 | hdev->acl_mtu = __le16_to_cpu(bs->acl_mtu); |
322 | hdev->sco_mtu = bs->sco_mtu ? bs->sco_mtu : 64; | 345 | hdev->sco_mtu = bs->sco_mtu; |
323 | hdev->acl_pkts = hdev->acl_cnt = __le16_to_cpu(bs->acl_max_pkt); | 346 | hdev->acl_pkts = __le16_to_cpu(bs->acl_max_pkt); |
324 | hdev->sco_pkts = hdev->sco_cnt = __le16_to_cpu(bs->sco_max_pkt); | 347 | hdev->sco_pkts = __le16_to_cpu(bs->sco_max_pkt); |
348 | |||
349 | if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) { | ||
350 | hdev->sco_mtu = 64; | ||
351 | hdev->sco_pkts = 8; | ||
352 | } | ||
353 | |||
354 | hdev->acl_cnt = hdev->acl_pkts; | ||
355 | hdev->sco_cnt = hdev->sco_pkts; | ||
325 | 356 | ||
326 | BT_DBG("%s mtu: acl %d, sco %d max_pkt: acl %d, sco %d", hdev->name, | 357 | BT_DBG("%s mtu: acl %d, sco %d max_pkt: acl %d, sco %d", hdev->name, |
327 | hdev->acl_mtu, hdev->sco_mtu, hdev->acl_pkts, hdev->sco_pkts); | 358 | hdev->acl_mtu, hdev->sco_mtu, hdev->acl_pkts, hdev->sco_pkts); |
@@ -439,8 +470,46 @@ static void hci_cs_link_policy(struct hci_dev *hdev, __u16 ocf, __u8 status) | |||
439 | BT_DBG("%s ocf 0x%x", hdev->name, ocf); | 470 | BT_DBG("%s ocf 0x%x", hdev->name, ocf); |
440 | 471 | ||
441 | switch (ocf) { | 472 | switch (ocf) { |
473 | case OCF_SNIFF_MODE: | ||
474 | if (status) { | ||
475 | struct hci_conn *conn; | ||
476 | struct hci_cp_sniff_mode *cp = hci_sent_cmd_data(hdev, OGF_LINK_POLICY, OCF_SNIFF_MODE); | ||
477 | |||
478 | if (!cp) | ||
479 | break; | ||
480 | |||
481 | hci_dev_lock(hdev); | ||
482 | |||
483 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); | ||
484 | if (conn) { | ||
485 | clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); | ||
486 | } | ||
487 | |||
488 | hci_dev_unlock(hdev); | ||
489 | } | ||
490 | break; | ||
491 | |||
492 | case OCF_EXIT_SNIFF_MODE: | ||
493 | if (status) { | ||
494 | struct hci_conn *conn; | ||
495 | struct hci_cp_exit_sniff_mode *cp = hci_sent_cmd_data(hdev, OGF_LINK_POLICY, OCF_EXIT_SNIFF_MODE); | ||
496 | |||
497 | if (!cp) | ||
498 | break; | ||
499 | |||
500 | hci_dev_lock(hdev); | ||
501 | |||
502 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); | ||
503 | if (conn) { | ||
504 | clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); | ||
505 | } | ||
506 | |||
507 | hci_dev_unlock(hdev); | ||
508 | } | ||
509 | break; | ||
510 | |||
442 | default: | 511 | default: |
443 | BT_DBG("%s Command status: ogf HOST_POLICY ocf %x", hdev->name, ocf); | 512 | BT_DBG("%s Command status: ogf LINK_POLICY ocf %x", hdev->name, ocf); |
444 | break; | 513 | break; |
445 | } | 514 | } |
446 | } | 515 | } |
@@ -622,14 +691,16 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk | |||
622 | else | 691 | else |
623 | cp.role = 0x01; /* Remain slave */ | 692 | cp.role = 0x01; /* Remain slave */ |
624 | 693 | ||
625 | hci_send_cmd(hdev, OGF_LINK_CTL, OCF_ACCEPT_CONN_REQ, sizeof(cp), &cp); | 694 | hci_send_cmd(hdev, OGF_LINK_CTL, |
695 | OCF_ACCEPT_CONN_REQ, sizeof(cp), &cp); | ||
626 | } else { | 696 | } else { |
627 | /* Connection rejected */ | 697 | /* Connection rejected */ |
628 | struct hci_cp_reject_conn_req cp; | 698 | struct hci_cp_reject_conn_req cp; |
629 | 699 | ||
630 | bacpy(&cp.bdaddr, &ev->bdaddr); | 700 | bacpy(&cp.bdaddr, &ev->bdaddr); |
631 | cp.reason = 0x0f; | 701 | cp.reason = 0x0f; |
632 | hci_send_cmd(hdev, OGF_LINK_CTL, OCF_REJECT_CONN_REQ, sizeof(cp), &cp); | 702 | hci_send_cmd(hdev, OGF_LINK_CTL, |
703 | OCF_REJECT_CONN_REQ, sizeof(cp), &cp); | ||
633 | } | 704 | } |
634 | } | 705 | } |
635 | 706 | ||
@@ -637,7 +708,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk | |||
637 | static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 708 | static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
638 | { | 709 | { |
639 | struct hci_ev_conn_complete *ev = (struct hci_ev_conn_complete *) skb->data; | 710 | struct hci_ev_conn_complete *ev = (struct hci_ev_conn_complete *) skb->data; |
640 | struct hci_conn *conn = NULL; | 711 | struct hci_conn *conn; |
641 | 712 | ||
642 | BT_DBG("%s", hdev->name); | 713 | BT_DBG("%s", hdev->name); |
643 | 714 | ||
@@ -659,12 +730,21 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
659 | if (test_bit(HCI_ENCRYPT, &hdev->flags)) | 730 | if (test_bit(HCI_ENCRYPT, &hdev->flags)) |
660 | conn->link_mode |= HCI_LM_ENCRYPT; | 731 | conn->link_mode |= HCI_LM_ENCRYPT; |
661 | 732 | ||
733 | /* Get remote features */ | ||
734 | if (conn->type == ACL_LINK) { | ||
735 | struct hci_cp_read_remote_features cp; | ||
736 | cp.handle = ev->handle; | ||
737 | hci_send_cmd(hdev, OGF_LINK_CTL, | ||
738 | OCF_READ_REMOTE_FEATURES, sizeof(cp), &cp); | ||
739 | } | ||
740 | |||
662 | /* Set link policy */ | 741 | /* Set link policy */ |
663 | if (conn->type == ACL_LINK && hdev->link_policy) { | 742 | if (conn->type == ACL_LINK && hdev->link_policy) { |
664 | struct hci_cp_write_link_policy cp; | 743 | struct hci_cp_write_link_policy cp; |
665 | cp.handle = ev->handle; | 744 | cp.handle = ev->handle; |
666 | cp.policy = __cpu_to_le16(hdev->link_policy); | 745 | cp.policy = __cpu_to_le16(hdev->link_policy); |
667 | hci_send_cmd(hdev, OGF_LINK_POLICY, OCF_WRITE_LINK_POLICY, sizeof(cp), &cp); | 746 | hci_send_cmd(hdev, OGF_LINK_POLICY, |
747 | OCF_WRITE_LINK_POLICY, sizeof(cp), &cp); | ||
668 | } | 748 | } |
669 | 749 | ||
670 | /* Set packet type for incoming connection */ | 750 | /* Set packet type for incoming connection */ |
@@ -675,7 +755,8 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
675 | __cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK): | 755 | __cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK): |
676 | __cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK); | 756 | __cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK); |
677 | 757 | ||
678 | hci_send_cmd(hdev, OGF_LINK_CTL, OCF_CHANGE_CONN_PTYPE, sizeof(cp), &cp); | 758 | hci_send_cmd(hdev, OGF_LINK_CTL, |
759 | OCF_CHANGE_CONN_PTYPE, sizeof(cp), &cp); | ||
679 | } | 760 | } |
680 | } else | 761 | } else |
681 | conn->state = BT_CLOSED; | 762 | conn->state = BT_CLOSED; |
@@ -703,8 +784,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
703 | static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 784 | static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
704 | { | 785 | { |
705 | struct hci_ev_disconn_complete *ev = (struct hci_ev_disconn_complete *) skb->data; | 786 | struct hci_ev_disconn_complete *ev = (struct hci_ev_disconn_complete *) skb->data; |
706 | struct hci_conn *conn = NULL; | 787 | struct hci_conn *conn; |
707 | __u16 handle = __le16_to_cpu(ev->handle); | ||
708 | 788 | ||
709 | BT_DBG("%s status %d", hdev->name, ev->status); | 789 | BT_DBG("%s status %d", hdev->name, ev->status); |
710 | 790 | ||
@@ -713,7 +793,7 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff | |||
713 | 793 | ||
714 | hci_dev_lock(hdev); | 794 | hci_dev_lock(hdev); |
715 | 795 | ||
716 | conn = hci_conn_hash_lookup_handle(hdev, handle); | 796 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); |
717 | if (conn) { | 797 | if (conn) { |
718 | conn->state = BT_CLOSED; | 798 | conn->state = BT_CLOSED; |
719 | hci_proto_disconn_ind(conn, ev->reason); | 799 | hci_proto_disconn_ind(conn, ev->reason); |
@@ -770,7 +850,7 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s | |||
770 | static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) | 850 | static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) |
771 | { | 851 | { |
772 | struct hci_ev_role_change *ev = (struct hci_ev_role_change *) skb->data; | 852 | struct hci_ev_role_change *ev = (struct hci_ev_role_change *) skb->data; |
773 | struct hci_conn *conn = NULL; | 853 | struct hci_conn *conn; |
774 | 854 | ||
775 | BT_DBG("%s status %d", hdev->name, ev->status); | 855 | BT_DBG("%s status %d", hdev->name, ev->status); |
776 | 856 | ||
@@ -793,18 +873,43 @@ static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb | |||
793 | hci_dev_unlock(hdev); | 873 | hci_dev_unlock(hdev); |
794 | } | 874 | } |
795 | 875 | ||
876 | /* Mode Change */ | ||
877 | static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
878 | { | ||
879 | struct hci_ev_mode_change *ev = (struct hci_ev_mode_change *) skb->data; | ||
880 | struct hci_conn *conn; | ||
881 | |||
882 | BT_DBG("%s status %d", hdev->name, ev->status); | ||
883 | |||
884 | hci_dev_lock(hdev); | ||
885 | |||
886 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); | ||
887 | if (conn) { | ||
888 | conn->mode = ev->mode; | ||
889 | conn->interval = __le16_to_cpu(ev->interval); | ||
890 | |||
891 | if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { | ||
892 | if (conn->mode == HCI_CM_ACTIVE) | ||
893 | conn->power_save = 1; | ||
894 | else | ||
895 | conn->power_save = 0; | ||
896 | } | ||
897 | } | ||
898 | |||
899 | hci_dev_unlock(hdev); | ||
900 | } | ||
901 | |||
796 | /* Authentication Complete */ | 902 | /* Authentication Complete */ |
797 | static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 903 | static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
798 | { | 904 | { |
799 | struct hci_ev_auth_complete *ev = (struct hci_ev_auth_complete *) skb->data; | 905 | struct hci_ev_auth_complete *ev = (struct hci_ev_auth_complete *) skb->data; |
800 | struct hci_conn *conn = NULL; | 906 | struct hci_conn *conn; |
801 | __u16 handle = __le16_to_cpu(ev->handle); | ||
802 | 907 | ||
803 | BT_DBG("%s status %d", hdev->name, ev->status); | 908 | BT_DBG("%s status %d", hdev->name, ev->status); |
804 | 909 | ||
805 | hci_dev_lock(hdev); | 910 | hci_dev_lock(hdev); |
806 | 911 | ||
807 | conn = hci_conn_hash_lookup_handle(hdev, handle); | 912 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); |
808 | if (conn) { | 913 | if (conn) { |
809 | if (!ev->status) | 914 | if (!ev->status) |
810 | conn->link_mode |= HCI_LM_AUTH; | 915 | conn->link_mode |= HCI_LM_AUTH; |
@@ -819,8 +924,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
819 | cp.handle = __cpu_to_le16(conn->handle); | 924 | cp.handle = __cpu_to_le16(conn->handle); |
820 | cp.encrypt = 1; | 925 | cp.encrypt = 1; |
821 | hci_send_cmd(conn->hdev, OGF_LINK_CTL, | 926 | hci_send_cmd(conn->hdev, OGF_LINK_CTL, |
822 | OCF_SET_CONN_ENCRYPT, | 927 | OCF_SET_CONN_ENCRYPT, sizeof(cp), &cp); |
823 | sizeof(cp), &cp); | ||
824 | } else { | 928 | } else { |
825 | clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); | 929 | clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); |
826 | hci_encrypt_cfm(conn, ev->status, 0x00); | 930 | hci_encrypt_cfm(conn, ev->status, 0x00); |
@@ -835,14 +939,13 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
835 | static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) | 939 | static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) |
836 | { | 940 | { |
837 | struct hci_ev_encrypt_change *ev = (struct hci_ev_encrypt_change *) skb->data; | 941 | struct hci_ev_encrypt_change *ev = (struct hci_ev_encrypt_change *) skb->data; |
838 | struct hci_conn *conn = NULL; | 942 | struct hci_conn *conn; |
839 | __u16 handle = __le16_to_cpu(ev->handle); | ||
840 | 943 | ||
841 | BT_DBG("%s status %d", hdev->name, ev->status); | 944 | BT_DBG("%s status %d", hdev->name, ev->status); |
842 | 945 | ||
843 | hci_dev_lock(hdev); | 946 | hci_dev_lock(hdev); |
844 | 947 | ||
845 | conn = hci_conn_hash_lookup_handle(hdev, handle); | 948 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); |
846 | if (conn) { | 949 | if (conn) { |
847 | if (!ev->status) { | 950 | if (!ev->status) { |
848 | if (ev->encrypt) | 951 | if (ev->encrypt) |
@@ -863,14 +966,13 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff * | |||
863 | static inline void hci_change_conn_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 966 | static inline void hci_change_conn_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
864 | { | 967 | { |
865 | struct hci_ev_change_conn_link_key_complete *ev = (struct hci_ev_change_conn_link_key_complete *) skb->data; | 968 | struct hci_ev_change_conn_link_key_complete *ev = (struct hci_ev_change_conn_link_key_complete *) skb->data; |
866 | struct hci_conn *conn = NULL; | 969 | struct hci_conn *conn; |
867 | __u16 handle = __le16_to_cpu(ev->handle); | ||
868 | 970 | ||
869 | BT_DBG("%s status %d", hdev->name, ev->status); | 971 | BT_DBG("%s status %d", hdev->name, ev->status); |
870 | 972 | ||
871 | hci_dev_lock(hdev); | 973 | hci_dev_lock(hdev); |
872 | 974 | ||
873 | conn = hci_conn_hash_lookup_handle(hdev, handle); | 975 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); |
874 | if (conn) { | 976 | if (conn) { |
875 | if (!ev->status) | 977 | if (!ev->status) |
876 | conn->link_mode |= HCI_LM_SECURE; | 978 | conn->link_mode |= HCI_LM_SECURE; |
@@ -898,18 +1000,35 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff | |||
898 | { | 1000 | { |
899 | } | 1001 | } |
900 | 1002 | ||
1003 | /* Remote Features */ | ||
1004 | static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
1005 | { | ||
1006 | struct hci_ev_remote_features *ev = (struct hci_ev_remote_features *) skb->data; | ||
1007 | struct hci_conn *conn; | ||
1008 | |||
1009 | BT_DBG("%s status %d", hdev->name, ev->status); | ||
1010 | |||
1011 | hci_dev_lock(hdev); | ||
1012 | |||
1013 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); | ||
1014 | if (conn && !ev->status) { | ||
1015 | memcpy(conn->features, ev->features, sizeof(conn->features)); | ||
1016 | } | ||
1017 | |||
1018 | hci_dev_unlock(hdev); | ||
1019 | } | ||
1020 | |||
901 | /* Clock Offset */ | 1021 | /* Clock Offset */ |
902 | static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) | 1022 | static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) |
903 | { | 1023 | { |
904 | struct hci_ev_clock_offset *ev = (struct hci_ev_clock_offset *) skb->data; | 1024 | struct hci_ev_clock_offset *ev = (struct hci_ev_clock_offset *) skb->data; |
905 | struct hci_conn *conn = NULL; | 1025 | struct hci_conn *conn; |
906 | __u16 handle = __le16_to_cpu(ev->handle); | ||
907 | 1026 | ||
908 | BT_DBG("%s status %d", hdev->name, ev->status); | 1027 | BT_DBG("%s status %d", hdev->name, ev->status); |
909 | 1028 | ||
910 | hci_dev_lock(hdev); | 1029 | hci_dev_lock(hdev); |
911 | 1030 | ||
912 | conn = hci_conn_hash_lookup_handle(hdev, handle); | 1031 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); |
913 | if (conn && !ev->status) { | 1032 | if (conn && !ev->status) { |
914 | struct inquiry_entry *ie; | 1033 | struct inquiry_entry *ie; |
915 | 1034 | ||
@@ -940,6 +1059,23 @@ static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff * | |||
940 | hci_dev_unlock(hdev); | 1059 | hci_dev_unlock(hdev); |
941 | } | 1060 | } |
942 | 1061 | ||
1062 | /* Sniff Subrate */ | ||
1063 | static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
1064 | { | ||
1065 | struct hci_ev_sniff_subrate *ev = (struct hci_ev_sniff_subrate *) skb->data; | ||
1066 | struct hci_conn *conn; | ||
1067 | |||
1068 | BT_DBG("%s status %d", hdev->name, ev->status); | ||
1069 | |||
1070 | hci_dev_lock(hdev); | ||
1071 | |||
1072 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); | ||
1073 | if (conn) { | ||
1074 | } | ||
1075 | |||
1076 | hci_dev_unlock(hdev); | ||
1077 | } | ||
1078 | |||
943 | void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | 1079 | void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) |
944 | { | 1080 | { |
945 | struct hci_event_hdr *hdr = (struct hci_event_hdr *) skb->data; | 1081 | struct hci_event_hdr *hdr = (struct hci_event_hdr *) skb->data; |
@@ -988,6 +1124,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
988 | hci_role_change_evt(hdev, skb); | 1124 | hci_role_change_evt(hdev, skb); |
989 | break; | 1125 | break; |
990 | 1126 | ||
1127 | case HCI_EV_MODE_CHANGE: | ||
1128 | hci_mode_change_evt(hdev, skb); | ||
1129 | break; | ||
1130 | |||
991 | case HCI_EV_AUTH_COMPLETE: | 1131 | case HCI_EV_AUTH_COMPLETE: |
992 | hci_auth_complete_evt(hdev, skb); | 1132 | hci_auth_complete_evt(hdev, skb); |
993 | break; | 1133 | break; |
@@ -1012,6 +1152,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
1012 | hci_link_key_notify_evt(hdev, skb); | 1152 | hci_link_key_notify_evt(hdev, skb); |
1013 | break; | 1153 | break; |
1014 | 1154 | ||
1155 | case HCI_EV_REMOTE_FEATURES: | ||
1156 | hci_remote_features_evt(hdev, skb); | ||
1157 | break; | ||
1158 | |||
1015 | case HCI_EV_CLOCK_OFFSET: | 1159 | case HCI_EV_CLOCK_OFFSET: |
1016 | hci_clock_offset_evt(hdev, skb); | 1160 | hci_clock_offset_evt(hdev, skb); |
1017 | break; | 1161 | break; |
@@ -1020,6 +1164,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
1020 | hci_pscan_rep_mode_evt(hdev, skb); | 1164 | hci_pscan_rep_mode_evt(hdev, skb); |
1021 | break; | 1165 | break; |
1022 | 1166 | ||
1167 | case HCI_EV_SNIFF_SUBRATE: | ||
1168 | hci_sniff_subrate_evt(hdev, skb); | ||
1169 | break; | ||
1170 | |||
1023 | case HCI_EV_CMD_STATUS: | 1171 | case HCI_EV_CMD_STATUS: |
1024 | cs = (struct hci_ev_cmd_status *) skb->data; | 1172 | cs = (struct hci_ev_cmd_status *) skb->data; |
1025 | skb_pull(skb, sizeof(cs)); | 1173 | skb_pull(skb, sizeof(cs)); |
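[Editorial note] Several of the reworked event handlers above follow the same shape: cast skb->data to the event structure, convert the little-endian connection handle with __le16_to_cpu(), look the connection up under hci_dev_lock(), and update its state. The fragment below (plain C, a hypothetical event layout, no locking) shows just the endian-safe handle extraction that all of them rely on.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* hypothetical wire layout: status byte, little-endian handle, mode byte */
    struct mode_change_ev {
        uint8_t status;
        uint8_t handle_le[2];
        uint8_t mode;
    };

    /* portable little-endian 16-bit read, the job __le16_to_cpu does in-kernel */
    static uint16_t le16_read(const uint8_t *p)
    {
        return (uint16_t)(p[0] | (p[1] << 8));
    }

    int main(void)
    {
        /* pretend this arrived in skb->data */
        uint8_t data[] = { 0x00, 0x2a, 0x01, 0x02 };   /* handle 0x012a, mode 2 */
        struct mode_change_ev ev;

        memcpy(&ev, data, sizeof(ev));
        printf("status %u handle 0x%04x mode %u\n",
               ev.status, le16_read(ev.handle_le), ev.mode);
        return 0;
    }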
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index 19b234c86f33..3987d167f04e 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c | |||
@@ -3,6 +3,8 @@ | |||
3 | #include <linux/kernel.h> | 3 | #include <linux/kernel.h> |
4 | #include <linux/init.h> | 4 | #include <linux/init.h> |
5 | 5 | ||
6 | #include <linux/platform_device.h> | ||
7 | |||
6 | #include <net/bluetooth/bluetooth.h> | 8 | #include <net/bluetooth/bluetooth.h> |
7 | #include <net/bluetooth/hci_core.h> | 9 | #include <net/bluetooth/hci_core.h> |
8 | 10 | ||
@@ -11,35 +13,35 @@ | |||
11 | #define BT_DBG(D...) | 13 | #define BT_DBG(D...) |
12 | #endif | 14 | #endif |
13 | 15 | ||
14 | static ssize_t show_name(struct class_device *cdev, char *buf) | 16 | static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) |
15 | { | 17 | { |
16 | struct hci_dev *hdev = class_get_devdata(cdev); | 18 | struct hci_dev *hdev = dev_get_drvdata(dev); |
17 | return sprintf(buf, "%s\n", hdev->name); | 19 | return sprintf(buf, "%s\n", hdev->name); |
18 | } | 20 | } |
19 | 21 | ||
20 | static ssize_t show_type(struct class_device *cdev, char *buf) | 22 | static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf) |
21 | { | 23 | { |
22 | struct hci_dev *hdev = class_get_devdata(cdev); | 24 | struct hci_dev *hdev = dev_get_drvdata(dev); |
23 | return sprintf(buf, "%d\n", hdev->type); | 25 | return sprintf(buf, "%d\n", hdev->type); |
24 | } | 26 | } |
25 | 27 | ||
26 | static ssize_t show_address(struct class_device *cdev, char *buf) | 28 | static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf) |
27 | { | 29 | { |
28 | struct hci_dev *hdev = class_get_devdata(cdev); | 30 | struct hci_dev *hdev = dev_get_drvdata(dev); |
29 | bdaddr_t bdaddr; | 31 | bdaddr_t bdaddr; |
30 | baswap(&bdaddr, &hdev->bdaddr); | 32 | baswap(&bdaddr, &hdev->bdaddr); |
31 | return sprintf(buf, "%s\n", batostr(&bdaddr)); | 33 | return sprintf(buf, "%s\n", batostr(&bdaddr)); |
32 | } | 34 | } |
33 | 35 | ||
34 | static ssize_t show_flags(struct class_device *cdev, char *buf) | 36 | static ssize_t show_flags(struct device *dev, struct device_attribute *attr, char *buf) |
35 | { | 37 | { |
36 | struct hci_dev *hdev = class_get_devdata(cdev); | 38 | struct hci_dev *hdev = dev_get_drvdata(dev); |
37 | return sprintf(buf, "0x%lx\n", hdev->flags); | 39 | return sprintf(buf, "0x%lx\n", hdev->flags); |
38 | } | 40 | } |
39 | 41 | ||
40 | static ssize_t show_inquiry_cache(struct class_device *cdev, char *buf) | 42 | static ssize_t show_inquiry_cache(struct device *dev, struct device_attribute *attr, char *buf) |
41 | { | 43 | { |
42 | struct hci_dev *hdev = class_get_devdata(cdev); | 44 | struct hci_dev *hdev = dev_get_drvdata(dev); |
43 | struct inquiry_cache *cache = &hdev->inq_cache; | 45 | struct inquiry_cache *cache = &hdev->inq_cache; |
44 | struct inquiry_entry *e; | 46 | struct inquiry_entry *e; |
45 | int n = 0; | 47 | int n = 0; |
@@ -61,94 +63,193 @@ static ssize_t show_inquiry_cache(struct class_device *cdev, char *buf) | |||
61 | return n; | 63 | return n; |
62 | } | 64 | } |
63 | 65 | ||
64 | static CLASS_DEVICE_ATTR(name, S_IRUGO, show_name, NULL); | 66 | static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf) |
65 | static CLASS_DEVICE_ATTR(type, S_IRUGO, show_type, NULL); | 67 | { |
66 | static CLASS_DEVICE_ATTR(address, S_IRUGO, show_address, NULL); | 68 | struct hci_dev *hdev = dev_get_drvdata(dev); |
67 | static CLASS_DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL); | 69 | return sprintf(buf, "%d\n", hdev->idle_timeout); |
68 | static CLASS_DEVICE_ATTR(inquiry_cache, S_IRUGO, show_inquiry_cache, NULL); | 70 | } |
69 | 71 | ||
70 | static struct class_device_attribute *bt_attrs[] = { | 72 | static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) |
71 | &class_device_attr_name, | 73 | { |
72 | &class_device_attr_type, | 74 | struct hci_dev *hdev = dev_get_drvdata(dev); |
73 | &class_device_attr_address, | 75 | char *ptr; |
74 | &class_device_attr_flags, | 76 | __u32 val; |
75 | &class_device_attr_inquiry_cache, | 77 | |
76 | NULL | 78 | val = simple_strtoul(buf, &ptr, 10); |
77 | }; | 79 | if (ptr == buf) |
80 | return -EINVAL; | ||
78 | 81 | ||
79 | #ifdef CONFIG_HOTPLUG | 82 | if (val != 0 && (val < 500 || val > 3600000)) |
80 | static int bt_uevent(struct class_device *cdev, char **envp, int num_envp, char *buf, int size) | 83 | return -EINVAL; |
84 | |||
85 | hdev->idle_timeout = val; | ||
86 | |||
87 | return count; | ||
88 | } | ||
89 | |||
90 | static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf) | ||
81 | { | 91 | { |
82 | struct hci_dev *hdev = class_get_devdata(cdev); | 92 | struct hci_dev *hdev = dev_get_drvdata(dev); |
83 | int n, i = 0; | 93 | return sprintf(buf, "%d\n", hdev->sniff_max_interval); |
94 | } | ||
84 | 95 | ||
85 | envp[i++] = buf; | 96 | static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) |
86 | n = snprintf(buf, size, "INTERFACE=%s", hdev->name) + 1; | 97 | { |
87 | buf += n; | 98 | struct hci_dev *hdev = dev_get_drvdata(dev); |
88 | size -= n; | 99 | char *ptr; |
100 | __u16 val; | ||
89 | 101 | ||
90 | if ((size <= 0) || (i >= num_envp)) | 102 | val = simple_strtoul(buf, &ptr, 10); |
91 | return -ENOMEM; | 103 | if (ptr == buf) |
104 | return -EINVAL; | ||
92 | 105 | ||
93 | envp[i] = NULL; | 106 | if (val < 0x0002 || val > 0xFFFE || val % 2) |
94 | return 0; | 107 | return -EINVAL; |
108 | |||
109 | if (val < hdev->sniff_min_interval) | ||
110 | return -EINVAL; | ||
111 | |||
112 | hdev->sniff_max_interval = val; | ||
113 | |||
114 | return count; | ||
115 | } | ||
116 | |||
117 | static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf) | ||
118 | { | ||
119 | struct hci_dev *hdev = dev_get_drvdata(dev); | ||
120 | return sprintf(buf, "%d\n", hdev->sniff_min_interval); | ||
95 | } | 121 | } |
96 | #endif | ||
97 | 122 | ||
98 | static void bt_release(struct class_device *cdev) | 123 | static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) |
99 | { | 124 | { |
100 | struct hci_dev *hdev = class_get_devdata(cdev); | 125 | struct hci_dev *hdev = dev_get_drvdata(dev); |
126 | char *ptr; | ||
127 | __u16 val; | ||
101 | 128 | ||
102 | kfree(hdev); | 129 | val = simple_strtoul(buf, &ptr, 10); |
130 | if (ptr == buf) | ||
131 | return -EINVAL; | ||
132 | |||
133 | if (val < 0x0002 || val > 0xFFFE || val % 2) | ||
134 | return -EINVAL; | ||
135 | |||
136 | if (val > hdev->sniff_max_interval) | ||
137 | return -EINVAL; | ||
138 | |||
139 | hdev->sniff_min_interval = val; | ||
140 | |||
141 | return count; | ||
103 | } | 142 | } |
104 | 143 | ||
105 | struct class bt_class = { | 144 | static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); |
106 | .name = "bluetooth", | 145 | static DEVICE_ATTR(type, S_IRUGO, show_type, NULL); |
107 | .release = bt_release, | 146 | static DEVICE_ATTR(address, S_IRUGO, show_address, NULL); |
108 | #ifdef CONFIG_HOTPLUG | 147 | static DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL); |
109 | .uevent = bt_uevent, | 148 | static DEVICE_ATTR(inquiry_cache, S_IRUGO, show_inquiry_cache, NULL); |
110 | #endif | 149 | |
150 | static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR, | ||
151 | show_idle_timeout, store_idle_timeout); | ||
152 | static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR, | ||
153 | show_sniff_max_interval, store_sniff_max_interval); | ||
154 | static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR, | ||
155 | show_sniff_min_interval, store_sniff_min_interval); | ||
156 | |||
157 | static struct device_attribute *bt_attrs[] = { | ||
158 | &dev_attr_name, | ||
159 | &dev_attr_type, | ||
160 | &dev_attr_address, | ||
161 | &dev_attr_flags, | ||
162 | &dev_attr_inquiry_cache, | ||
163 | &dev_attr_idle_timeout, | ||
164 | &dev_attr_sniff_max_interval, | ||
165 | &dev_attr_sniff_min_interval, | ||
166 | NULL | ||
111 | }; | 167 | }; |
112 | 168 | ||
169 | struct class *bt_class = NULL; | ||
113 | EXPORT_SYMBOL_GPL(bt_class); | 170 | EXPORT_SYMBOL_GPL(bt_class); |
114 | 171 | ||
172 | static struct bus_type bt_bus = { | ||
173 | .name = "bluetooth", | ||
174 | }; | ||
175 | |||
176 | static struct platform_device *bt_platform; | ||
177 | |||
178 | static void bt_release(struct device *dev) | ||
179 | { | ||
180 | struct hci_dev *hdev = dev_get_drvdata(dev); | ||
181 | kfree(hdev); | ||
182 | } | ||
183 | |||
115 | int hci_register_sysfs(struct hci_dev *hdev) | 184 | int hci_register_sysfs(struct hci_dev *hdev) |
116 | { | 185 | { |
117 | struct class_device *cdev = &hdev->class_dev; | 186 | struct device *dev = &hdev->dev; |
118 | unsigned int i; | 187 | unsigned int i; |
119 | int err; | 188 | int err; |
120 | 189 | ||
121 | BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); | 190 | BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); |
122 | 191 | ||
123 | cdev->class = &bt_class; | 192 | dev->class = bt_class; |
124 | class_set_devdata(cdev, hdev); | 193 | |
194 | if (hdev->parent) | ||
195 | dev->parent = hdev->parent; | ||
196 | else | ||
197 | dev->parent = &bt_platform->dev; | ||
198 | |||
199 | strlcpy(dev->bus_id, hdev->name, BUS_ID_SIZE); | ||
200 | |||
201 | dev->release = bt_release; | ||
125 | 202 | ||
126 | strlcpy(cdev->class_id, hdev->name, BUS_ID_SIZE); | 203 | dev_set_drvdata(dev, hdev); |
127 | err = class_device_register(cdev); | 204 | |
205 | err = device_register(dev); | ||
128 | if (err < 0) | 206 | if (err < 0) |
129 | return err; | 207 | return err; |
130 | 208 | ||
131 | for (i = 0; bt_attrs[i]; i++) | 209 | for (i = 0; bt_attrs[i]; i++) |
132 | class_device_create_file(cdev, bt_attrs[i]); | 210 | device_create_file(dev, bt_attrs[i]); |
133 | 211 | ||
134 | return 0; | 212 | return 0; |
135 | } | 213 | } |
136 | 214 | ||
137 | void hci_unregister_sysfs(struct hci_dev *hdev) | 215 | void hci_unregister_sysfs(struct hci_dev *hdev) |
138 | { | 216 | { |
139 | struct class_device * cdev = &hdev->class_dev; | 217 | struct device *dev = &hdev->dev; |
140 | 218 | ||
141 | BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); | 219 | BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); |
142 | 220 | ||
143 | class_device_del(cdev); | 221 | device_del(dev); |
144 | } | 222 | } |
145 | 223 | ||
146 | int __init bt_sysfs_init(void) | 224 | int __init bt_sysfs_init(void) |
147 | { | 225 | { |
148 | return class_register(&bt_class); | 226 | int err; |
227 | |||
228 | bt_platform = platform_device_register_simple("bluetooth", -1, NULL, 0); | ||
229 | if (IS_ERR(bt_platform)) | ||
230 | return PTR_ERR(bt_platform); | ||
231 | |||
232 | err = bus_register(&bt_bus); | ||
233 | if (err < 0) { | ||
234 | platform_device_unregister(bt_platform); | ||
235 | return err; | ||
236 | } | ||
237 | |||
238 | bt_class = class_create(THIS_MODULE, "bluetooth"); | ||
239 | if (IS_ERR(bt_class)) { | ||
240 | bus_unregister(&bt_bus); | ||
241 | platform_device_unregister(bt_platform); | ||
242 | return PTR_ERR(bt_class); | ||
243 | } | ||
244 | |||
245 | return 0; | ||
149 | } | 246 | } |
150 | 247 | ||
151 | void __exit bt_sysfs_cleanup(void) | 248 | void __exit bt_sysfs_cleanup(void) |
152 | { | 249 | { |
153 | class_unregister(&bt_class); | 250 | class_destroy(bt_class); |
251 | |||
252 | bus_unregister(&bt_bus); | ||
253 | |||
254 | platform_device_unregister(bt_platform); | ||
154 | } | 255 | } |
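[Editorial note] The new sysfs attributes above parse their input with simple_strtoul() and reject out-of-range values before touching the device: idle_timeout must be 0 or between 500 and 3600000 ms, and the sniff intervals must be even, lie within 0x0002..0xFFFE, and keep the minimum no larger than the maximum. The standalone C sketch below mirrors those checks (userspace, strtoul in place of simple_strtoul, invented struct).

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dev_params {
        unsigned int idle_timeout;        /* milliseconds, 0 disables */
        unsigned int sniff_min_interval;  /* slots */
        unsigned int sniff_max_interval;  /* slots */
    };

    static int store_idle_timeout(struct dev_params *d, const char *buf)
    {
        char *end;
        unsigned long val = strtoul(buf, &end, 10);

        if (end == buf)
            return -EINVAL;                       /* not a number at all */
        if (val != 0 && (val < 500 || val > 3600000))
            return -EINVAL;                       /* 0, or 0.5 s .. 1 h */

        d->idle_timeout = (unsigned int)val;
        return 0;
    }

    static int store_sniff_max(struct dev_params *d, const char *buf)
    {
        char *end;
        unsigned long val = strtoul(buf, &end, 10);

        if (end == buf)
            return -EINVAL;
        if (val < 0x0002 || val > 0xFFFE || val % 2)
            return -EINVAL;                       /* even, within HCI range */
        if (val < d->sniff_min_interval)
            return -EINVAL;                       /* keep min <= max */

        d->sniff_max_interval = (unsigned int)val;
        return 0;
    }

    int main(void)
    {
        struct dev_params d = { 0, 80, 800 };

        printf("idle_timeout \"2000\": %d\n", store_idle_timeout(&d, "2000"));
        printf("idle_timeout \"100\":  %d\n", store_idle_timeout(&d, "100"));
        printf("sniff_max \"801\":     %d\n", store_sniff_max(&d, "801"));
        printf("sniff_max \"1024\":    %d\n", store_sniff_max(&d, "1024"));
        return 0;
    }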
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 770101177da1..eaaad658d11d 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c | |||
@@ -63,11 +63,6 @@ static struct bt_sock_list l2cap_sk_list = { | |||
63 | .lock = RW_LOCK_UNLOCKED | 63 | .lock = RW_LOCK_UNLOCKED |
64 | }; | 64 | }; |
65 | 65 | ||
66 | static int l2cap_conn_del(struct hci_conn *conn, int err); | ||
67 | |||
68 | static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent); | ||
69 | static void l2cap_chan_del(struct sock *sk, int err); | ||
70 | |||
71 | static void __l2cap_sock_close(struct sock *sk, int reason); | 66 | static void __l2cap_sock_close(struct sock *sk, int reason); |
72 | static void l2cap_sock_close(struct sock *sk); | 67 | static void l2cap_sock_close(struct sock *sk); |
73 | static void l2cap_sock_kill(struct sock *sk); | 68 | static void l2cap_sock_kill(struct sock *sk); |
@@ -109,24 +104,177 @@ static void l2cap_sock_init_timer(struct sock *sk) | |||
109 | sk->sk_timer.data = (unsigned long)sk; | 104 | sk->sk_timer.data = (unsigned long)sk; |
110 | } | 105 | } |
111 | 106 | ||
107 | /* ---- L2CAP channels ---- */ | ||
108 | static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid) | ||
109 | { | ||
110 | struct sock *s; | ||
111 | for (s = l->head; s; s = l2cap_pi(s)->next_c) { | ||
112 | if (l2cap_pi(s)->dcid == cid) | ||
113 | break; | ||
114 | } | ||
115 | return s; | ||
116 | } | ||
117 | |||
118 | static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid) | ||
119 | { | ||
120 | struct sock *s; | ||
121 | for (s = l->head; s; s = l2cap_pi(s)->next_c) { | ||
122 | if (l2cap_pi(s)->scid == cid) | ||
123 | break; | ||
124 | } | ||
125 | return s; | ||
126 | } | ||
127 | |||
128 | /* Find channel with given SCID. | ||
129 | * Returns locked socket */ | ||
130 | static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid) | ||
131 | { | ||
132 | struct sock *s; | ||
133 | read_lock(&l->lock); | ||
134 | s = __l2cap_get_chan_by_scid(l, cid); | ||
135 | if (s) bh_lock_sock(s); | ||
136 | read_unlock(&l->lock); | ||
137 | return s; | ||
138 | } | ||
139 | |||
140 | static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident) | ||
141 | { | ||
142 | struct sock *s; | ||
143 | for (s = l->head; s; s = l2cap_pi(s)->next_c) { | ||
144 | if (l2cap_pi(s)->ident == ident) | ||
145 | break; | ||
146 | } | ||
147 | return s; | ||
148 | } | ||
149 | |||
150 | static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident) | ||
151 | { | ||
152 | struct sock *s; | ||
153 | read_lock(&l->lock); | ||
154 | s = __l2cap_get_chan_by_ident(l, ident); | ||
155 | if (s) bh_lock_sock(s); | ||
156 | read_unlock(&l->lock); | ||
157 | return s; | ||
158 | } | ||
159 | |||
160 | static u16 l2cap_alloc_cid(struct l2cap_chan_list *l) | ||
161 | { | ||
162 | u16 cid = 0x0040; | ||
163 | |||
164 | for (; cid < 0xffff; cid++) { | ||
165 | if(!__l2cap_get_chan_by_scid(l, cid)) | ||
166 | return cid; | ||
167 | } | ||
168 | |||
169 | return 0; | ||
170 | } | ||
171 | |||
172 | static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk) | ||
173 | { | ||
174 | sock_hold(sk); | ||
175 | |||
176 | if (l->head) | ||
177 | l2cap_pi(l->head)->prev_c = sk; | ||
178 | |||
179 | l2cap_pi(sk)->next_c = l->head; | ||
180 | l2cap_pi(sk)->prev_c = NULL; | ||
181 | l->head = sk; | ||
182 | } | ||
183 | |||
184 | static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk) | ||
185 | { | ||
186 | struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c; | ||
187 | |||
188 | write_lock(&l->lock); | ||
189 | if (sk == l->head) | ||
190 | l->head = next; | ||
191 | |||
192 | if (next) | ||
193 | l2cap_pi(next)->prev_c = prev; | ||
194 | if (prev) | ||
195 | l2cap_pi(prev)->next_c = next; | ||
196 | write_unlock(&l->lock); | ||
197 | |||
198 | __sock_put(sk); | ||
199 | } | ||
200 | |||
201 | static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent) | ||
202 | { | ||
203 | struct l2cap_chan_list *l = &conn->chan_list; | ||
204 | |||
205 | BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid); | ||
206 | |||
207 | l2cap_pi(sk)->conn = conn; | ||
208 | |||
209 | if (sk->sk_type == SOCK_SEQPACKET) { | ||
210 | /* Alloc CID for connection-oriented socket */ | ||
211 | l2cap_pi(sk)->scid = l2cap_alloc_cid(l); | ||
212 | } else if (sk->sk_type == SOCK_DGRAM) { | ||
213 | /* Connectionless socket */ | ||
214 | l2cap_pi(sk)->scid = 0x0002; | ||
215 | l2cap_pi(sk)->dcid = 0x0002; | ||
216 | l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU; | ||
217 | } else { | ||
218 | /* Raw socket can send/recv signalling messages only */ | ||
219 | l2cap_pi(sk)->scid = 0x0001; | ||
220 | l2cap_pi(sk)->dcid = 0x0001; | ||
221 | l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU; | ||
222 | } | ||
223 | |||
224 | __l2cap_chan_link(l, sk); | ||
225 | |||
226 | if (parent) | ||
227 | bt_accept_enqueue(parent, sk); | ||
228 | } | ||
229 | |||
230 | /* Delete channel. | ||
231 | * Must be called on the locked socket. */ | ||
232 | static void l2cap_chan_del(struct sock *sk, int err) | ||
233 | { | ||
234 | struct l2cap_conn *conn = l2cap_pi(sk)->conn; | ||
235 | struct sock *parent = bt_sk(sk)->parent; | ||
236 | |||
237 | l2cap_sock_clear_timer(sk); | ||
238 | |||
239 | BT_DBG("sk %p, conn %p, err %d", sk, conn, err); | ||
240 | |||
241 | if (conn) { | ||
242 | /* Unlink from channel list */ | ||
243 | l2cap_chan_unlink(&conn->chan_list, sk); | ||
244 | l2cap_pi(sk)->conn = NULL; | ||
245 | hci_conn_put(conn->hcon); | ||
246 | } | ||
247 | |||
248 | sk->sk_state = BT_CLOSED; | ||
249 | sock_set_flag(sk, SOCK_ZAPPED); | ||
250 | |||
251 | if (err) | ||
252 | sk->sk_err = err; | ||
253 | |||
254 | if (parent) { | ||
255 | bt_accept_unlink(sk); | ||
256 | parent->sk_data_ready(parent, 0); | ||
257 | } else | ||
258 | sk->sk_state_change(sk); | ||
259 | } | ||
260 | |||
112 | /* ---- L2CAP connections ---- */ | 261 | /* ---- L2CAP connections ---- */ |
113 | static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) | 262 | static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) |
114 | { | 263 | { |
115 | struct l2cap_conn *conn; | 264 | struct l2cap_conn *conn = hcon->l2cap_data; |
116 | |||
117 | if ((conn = hcon->l2cap_data)) | ||
118 | return conn; | ||
119 | 265 | ||
120 | if (status) | 266 | if (conn || status) |
121 | return conn; | 267 | return conn; |
122 | 268 | ||
123 | if (!(conn = kmalloc(sizeof(struct l2cap_conn), GFP_ATOMIC))) | 269 | conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC); |
270 | if (!conn) | ||
124 | return NULL; | 271 | return NULL; |
125 | memset(conn, 0, sizeof(struct l2cap_conn)); | ||
126 | 272 | ||
127 | hcon->l2cap_data = conn; | 273 | hcon->l2cap_data = conn; |
128 | conn->hcon = hcon; | 274 | conn->hcon = hcon; |
129 | 275 | ||
276 | BT_DBG("hcon %p conn %p", hcon, conn); | ||
277 | |||
130 | conn->mtu = hcon->hdev->acl_mtu; | 278 | conn->mtu = hcon->hdev->acl_mtu; |
131 | conn->src = &hcon->hdev->bdaddr; | 279 | conn->src = &hcon->hdev->bdaddr; |
132 | conn->dst = &hcon->dst; | 280 | conn->dst = &hcon->dst; |
@@ -134,17 +282,16 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) | |||
134 | spin_lock_init(&conn->lock); | 282 | spin_lock_init(&conn->lock); |
135 | rwlock_init(&conn->chan_list.lock); | 283 | rwlock_init(&conn->chan_list.lock); |
136 | 284 | ||
137 | BT_DBG("hcon %p conn %p", hcon, conn); | ||
138 | return conn; | 285 | return conn; |
139 | } | 286 | } |
140 | 287 | ||
141 | static int l2cap_conn_del(struct hci_conn *hcon, int err) | 288 | static void l2cap_conn_del(struct hci_conn *hcon, int err) |
142 | { | 289 | { |
143 | struct l2cap_conn *conn; | 290 | struct l2cap_conn *conn = hcon->l2cap_data; |
144 | struct sock *sk; | 291 | struct sock *sk; |
145 | 292 | ||
146 | if (!(conn = hcon->l2cap_data)) | 293 | if (!conn) |
147 | return 0; | 294 | return; |
148 | 295 | ||
149 | BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); | 296 | BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); |
150 | 297 | ||
@@ -161,7 +308,6 @@ static int l2cap_conn_del(struct hci_conn *hcon, int err) | |||
161 | 308 | ||
162 | hcon->l2cap_data = NULL; | 309 | hcon->l2cap_data = NULL; |
163 | kfree(conn); | 310 | kfree(conn); |
164 | return 0; | ||
165 | } | 311 | } |
166 | 312 | ||
167 | static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent) | 313 | static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent) |
@@ -925,160 +1071,6 @@ static int l2cap_sock_release(struct socket *sock) | |||
925 | return err; | 1071 | return err; |
926 | } | 1072 | } |
927 | 1073 | ||
928 | /* ---- L2CAP channels ---- */ | ||
929 | static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid) | ||
930 | { | ||
931 | struct sock *s; | ||
932 | for (s = l->head; s; s = l2cap_pi(s)->next_c) { | ||
933 | if (l2cap_pi(s)->dcid == cid) | ||
934 | break; | ||
935 | } | ||
936 | return s; | ||
937 | } | ||
938 | |||
939 | static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid) | ||
940 | { | ||
941 | struct sock *s; | ||
942 | for (s = l->head; s; s = l2cap_pi(s)->next_c) { | ||
943 | if (l2cap_pi(s)->scid == cid) | ||
944 | break; | ||
945 | } | ||
946 | return s; | ||
947 | } | ||
948 | |||
949 | /* Find channel with given SCID. | ||
950 | * Returns locked socket */ | ||
951 | static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid) | ||
952 | { | ||
953 | struct sock *s; | ||
954 | read_lock(&l->lock); | ||
955 | s = __l2cap_get_chan_by_scid(l, cid); | ||
956 | if (s) bh_lock_sock(s); | ||
957 | read_unlock(&l->lock); | ||
958 | return s; | ||
959 | } | ||
960 | |||
961 | static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident) | ||
962 | { | ||
963 | struct sock *s; | ||
964 | for (s = l->head; s; s = l2cap_pi(s)->next_c) { | ||
965 | if (l2cap_pi(s)->ident == ident) | ||
966 | break; | ||
967 | } | ||
968 | return s; | ||
969 | } | ||
970 | |||
971 | static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident) | ||
972 | { | ||
973 | struct sock *s; | ||
974 | read_lock(&l->lock); | ||
975 | s = __l2cap_get_chan_by_ident(l, ident); | ||
976 | if (s) bh_lock_sock(s); | ||
977 | read_unlock(&l->lock); | ||
978 | return s; | ||
979 | } | ||
980 | |||
981 | static u16 l2cap_alloc_cid(struct l2cap_chan_list *l) | ||
982 | { | ||
983 | u16 cid = 0x0040; | ||
984 | |||
985 | for (; cid < 0xffff; cid++) { | ||
986 | if(!__l2cap_get_chan_by_scid(l, cid)) | ||
987 | return cid; | ||
988 | } | ||
989 | |||
990 | return 0; | ||
991 | } | ||
992 | |||
993 | static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk) | ||
994 | { | ||
995 | sock_hold(sk); | ||
996 | |||
997 | if (l->head) | ||
998 | l2cap_pi(l->head)->prev_c = sk; | ||
999 | |||
1000 | l2cap_pi(sk)->next_c = l->head; | ||
1001 | l2cap_pi(sk)->prev_c = NULL; | ||
1002 | l->head = sk; | ||
1003 | } | ||
1004 | |||
1005 | static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk) | ||
1006 | { | ||
1007 | struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c; | ||
1008 | |||
1009 | write_lock(&l->lock); | ||
1010 | if (sk == l->head) | ||
1011 | l->head = next; | ||
1012 | |||
1013 | if (next) | ||
1014 | l2cap_pi(next)->prev_c = prev; | ||
1015 | if (prev) | ||
1016 | l2cap_pi(prev)->next_c = next; | ||
1017 | write_unlock(&l->lock); | ||
1018 | |||
1019 | __sock_put(sk); | ||
1020 | } | ||
1021 | |||
1022 | static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent) | ||
1023 | { | ||
1024 | struct l2cap_chan_list *l = &conn->chan_list; | ||
1025 | |||
1026 | BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid); | ||
1027 | |||
1028 | l2cap_pi(sk)->conn = conn; | ||
1029 | |||
1030 | if (sk->sk_type == SOCK_SEQPACKET) { | ||
1031 | /* Alloc CID for connection-oriented socket */ | ||
1032 | l2cap_pi(sk)->scid = l2cap_alloc_cid(l); | ||
1033 | } else if (sk->sk_type == SOCK_DGRAM) { | ||
1034 | /* Connectionless socket */ | ||
1035 | l2cap_pi(sk)->scid = 0x0002; | ||
1036 | l2cap_pi(sk)->dcid = 0x0002; | ||
1037 | l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU; | ||
1038 | } else { | ||
1039 | /* Raw socket can send/recv signalling messages only */ | ||
1040 | l2cap_pi(sk)->scid = 0x0001; | ||
1041 | l2cap_pi(sk)->dcid = 0x0001; | ||
1042 | l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU; | ||
1043 | } | ||
1044 | |||
1045 | __l2cap_chan_link(l, sk); | ||
1046 | |||
1047 | if (parent) | ||
1048 | bt_accept_enqueue(parent, sk); | ||
1049 | } | ||
1050 | |||
1051 | /* Delete channel. | ||
1052 | * Must be called on the locked socket. */ | ||
1053 | static void l2cap_chan_del(struct sock *sk, int err) | ||
1054 | { | ||
1055 | struct l2cap_conn *conn = l2cap_pi(sk)->conn; | ||
1056 | struct sock *parent = bt_sk(sk)->parent; | ||
1057 | |||
1058 | l2cap_sock_clear_timer(sk); | ||
1059 | |||
1060 | BT_DBG("sk %p, conn %p, err %d", sk, conn, err); | ||
1061 | |||
1062 | if (conn) { | ||
1063 | /* Unlink from channel list */ | ||
1064 | l2cap_chan_unlink(&conn->chan_list, sk); | ||
1065 | l2cap_pi(sk)->conn = NULL; | ||
1066 | hci_conn_put(conn->hcon); | ||
1067 | } | ||
1068 | |||
1069 | sk->sk_state = BT_CLOSED; | ||
1070 | sock_set_flag(sk, SOCK_ZAPPED); | ||
1071 | |||
1072 | if (err) | ||
1073 | sk->sk_err = err; | ||
1074 | |||
1075 | if (parent) { | ||
1076 | bt_accept_unlink(sk); | ||
1077 | parent->sk_data_ready(parent, 0); | ||
1078 | } else | ||
1079 | sk->sk_state_change(sk); | ||
1080 | } | ||
1081 | |||
1082 | static void l2cap_conn_ready(struct l2cap_conn *conn) | 1074 | static void l2cap_conn_ready(struct l2cap_conn *conn) |
1083 | { | 1075 | { |
1084 | struct l2cap_chan_list *l = &conn->chan_list; | 1076 | struct l2cap_chan_list *l = &conn->chan_list; |
@@ -1834,7 +1826,9 @@ drop: | |||
1834 | kfree_skb(skb); | 1826 | kfree_skb(skb); |
1835 | 1827 | ||
1836 | done: | 1828 | done: |
1837 | if (sk) bh_unlock_sock(sk); | 1829 | if (sk) |
1830 | bh_unlock_sock(sk); | ||
1831 | |||
1838 | return 0; | 1832 | return 0; |
1839 | } | 1833 | } |
1840 | 1834 | ||
@@ -1925,18 +1919,18 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) | |||
1925 | 1919 | ||
1926 | static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status) | 1920 | static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status) |
1927 | { | 1921 | { |
1922 | struct l2cap_conn *conn; | ||
1923 | |||
1928 | BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); | 1924 | BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); |
1929 | 1925 | ||
1930 | if (hcon->type != ACL_LINK) | 1926 | if (hcon->type != ACL_LINK) |
1931 | return 0; | 1927 | return 0; |
1932 | 1928 | ||
1933 | if (!status) { | 1929 | if (!status) { |
1934 | struct l2cap_conn *conn; | ||
1935 | |||
1936 | conn = l2cap_conn_add(hcon, status); | 1930 | conn = l2cap_conn_add(hcon, status); |
1937 | if (conn) | 1931 | if (conn) |
1938 | l2cap_conn_ready(conn); | 1932 | l2cap_conn_ready(conn); |
1939 | } else | 1933 | } else |
1940 | l2cap_conn_del(hcon, bt_err(status)); | 1934 | l2cap_conn_del(hcon, bt_err(status)); |
1941 | 1935 | ||
1942 | return 0; | 1936 | return 0; |
@@ -1950,19 +1944,21 @@ static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason) | |||
1950 | return 0; | 1944 | return 0; |
1951 | 1945 | ||
1952 | l2cap_conn_del(hcon, bt_err(reason)); | 1946 | l2cap_conn_del(hcon, bt_err(reason)); |
1947 | |||
1953 | return 0; | 1948 | return 0; |
1954 | } | 1949 | } |
1955 | 1950 | ||
1956 | static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status) | 1951 | static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status) |
1957 | { | 1952 | { |
1958 | struct l2cap_chan_list *l; | 1953 | struct l2cap_chan_list *l; |
1959 | struct l2cap_conn *conn; | 1954 | struct l2cap_conn *conn = hcon->l2cap_data; |
1960 | struct l2cap_conn_rsp rsp; | 1955 | struct l2cap_conn_rsp rsp; |
1961 | struct sock *sk; | 1956 | struct sock *sk; |
1962 | int result; | 1957 | int result; |
1963 | 1958 | ||
1964 | if (!(conn = hcon->l2cap_data)) | 1959 | if (!conn) |
1965 | return 0; | 1960 | return 0; |
1961 | |||
1966 | l = &conn->chan_list; | 1962 | l = &conn->chan_list; |
1967 | 1963 | ||
1968 | BT_DBG("conn %p", conn); | 1964 | BT_DBG("conn %p", conn); |
@@ -2005,13 +2001,14 @@ static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status) | |||
2005 | static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status) | 2001 | static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status) |
2006 | { | 2002 | { |
2007 | struct l2cap_chan_list *l; | 2003 | struct l2cap_chan_list *l; |
2008 | struct l2cap_conn *conn; | 2004 | struct l2cap_conn *conn = hcon->l2cap_data; |
2009 | struct l2cap_conn_rsp rsp; | 2005 | struct l2cap_conn_rsp rsp; |
2010 | struct sock *sk; | 2006 | struct sock *sk; |
2011 | int result; | 2007 | int result; |
2012 | 2008 | ||
2013 | if (!(conn = hcon->l2cap_data)) | 2009 | if (!conn) |
2014 | return 0; | 2010 | return 0; |
2011 | |||
2015 | l = &conn->chan_list; | 2012 | l = &conn->chan_list; |
2016 | 2013 | ||
2017 | BT_DBG("conn %p", conn); | 2014 | BT_DBG("conn %p", conn); |
@@ -2219,7 +2216,7 @@ static int __init l2cap_init(void) | |||
2219 | goto error; | 2216 | goto error; |
2220 | } | 2217 | } |
2221 | 2218 | ||
2222 | class_create_file(&bt_class, &class_attr_l2cap); | 2219 | class_create_file(bt_class, &class_attr_l2cap); |
2223 | 2220 | ||
2224 | BT_INFO("L2CAP ver %s", VERSION); | 2221 | BT_INFO("L2CAP ver %s", VERSION); |
2225 | BT_INFO("L2CAP socket layer initialized"); | 2222 | BT_INFO("L2CAP socket layer initialized"); |
@@ -2233,7 +2230,7 @@ error: | |||
2233 | 2230 | ||
2234 | static void __exit l2cap_exit(void) | 2231 | static void __exit l2cap_exit(void) |
2235 | { | 2232 | { |
2236 | class_remove_file(&bt_class, &class_attr_l2cap); | 2233 | class_remove_file(bt_class, &class_attr_l2cap); |
2237 | 2234 | ||
2238 | if (bt_sock_unregister(BTPROTO_L2CAP) < 0) | 2235 | if (bt_sock_unregister(BTPROTO_L2CAP) < 0) |
2239 | BT_ERR("L2CAP socket unregistration failed"); | 2236 | BT_ERR("L2CAP socket unregistration failed"); |
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index bd46e8927f29..155a2b93760e 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c | |||
@@ -52,8 +52,9 @@ | |||
52 | #define BT_DBG(D...) | 52 | #define BT_DBG(D...) |
53 | #endif | 53 | #endif |
54 | 54 | ||
55 | #define VERSION "1.7" | 55 | #define VERSION "1.8" |
56 | 56 | ||
57 | static int disable_cfc = 0; | ||
57 | static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU; | 58 | static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU; |
58 | 59 | ||
59 | static struct task_struct *rfcomm_thread; | 60 | static struct task_struct *rfcomm_thread; |
@@ -533,7 +534,7 @@ static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state) | |||
533 | s->sock = sock; | 534 | s->sock = sock; |
534 | 535 | ||
535 | s->mtu = RFCOMM_DEFAULT_MTU; | 536 | s->mtu = RFCOMM_DEFAULT_MTU; |
536 | s->cfc = RFCOMM_CFC_UNKNOWN; | 537 | s->cfc = disable_cfc ? RFCOMM_CFC_DISABLED : RFCOMM_CFC_UNKNOWN; |
537 | 538 | ||
538 | /* Do not increment module usage count for listening sessions. | 539 | /* Do not increment module usage count for listening sessions. |
539 | * Otherwise we won't be able to unload the module. */ | 540 | * Otherwise we won't be able to unload the module. */ |
@@ -1149,6 +1150,8 @@ static inline int rfcomm_check_link_mode(struct rfcomm_dlc *d) | |||
1149 | 1150 | ||
1150 | static void rfcomm_dlc_accept(struct rfcomm_dlc *d) | 1151 | static void rfcomm_dlc_accept(struct rfcomm_dlc *d) |
1151 | { | 1152 | { |
1153 | struct sock *sk = d->session->sock->sk; | ||
1154 | |||
1152 | BT_DBG("dlc %p", d); | 1155 | BT_DBG("dlc %p", d); |
1153 | 1156 | ||
1154 | rfcomm_send_ua(d->session, d->dlci); | 1157 | rfcomm_send_ua(d->session, d->dlci); |
@@ -1158,6 +1161,9 @@ static void rfcomm_dlc_accept(struct rfcomm_dlc *d) | |||
1158 | d->state_change(d, 0); | 1161 | d->state_change(d, 0); |
1159 | rfcomm_dlc_unlock(d); | 1162 | rfcomm_dlc_unlock(d); |
1160 | 1163 | ||
1164 | if (d->link_mode & RFCOMM_LM_MASTER) | ||
1165 | hci_conn_switch_role(l2cap_pi(sk)->conn->hcon, 0x00); | ||
1166 | |||
1161 | rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig); | 1167 | rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig); |
1162 | } | 1168 | } |
1163 | 1169 | ||
@@ -1222,14 +1228,18 @@ static int rfcomm_apply_pn(struct rfcomm_dlc *d, int cr, struct rfcomm_pn *pn) | |||
1222 | BT_DBG("dlc %p state %ld dlci %d mtu %d fc 0x%x credits %d", | 1228 | BT_DBG("dlc %p state %ld dlci %d mtu %d fc 0x%x credits %d", |
1223 | d, d->state, d->dlci, pn->mtu, pn->flow_ctrl, pn->credits); | 1229 | d, d->state, d->dlci, pn->mtu, pn->flow_ctrl, pn->credits); |
1224 | 1230 | ||
1225 | if (pn->flow_ctrl == 0xf0 || pn->flow_ctrl == 0xe0) { | 1231 | if ((pn->flow_ctrl == 0xf0 && s->cfc != RFCOMM_CFC_DISABLED) || |
1226 | d->cfc = s->cfc = RFCOMM_CFC_ENABLED; | 1232 | pn->flow_ctrl == 0xe0) { |
1233 | d->cfc = RFCOMM_CFC_ENABLED; | ||
1227 | d->tx_credits = pn->credits; | 1234 | d->tx_credits = pn->credits; |
1228 | } else { | 1235 | } else { |
1229 | d->cfc = s->cfc = RFCOMM_CFC_DISABLED; | 1236 | d->cfc = RFCOMM_CFC_DISABLED; |
1230 | set_bit(RFCOMM_TX_THROTTLED, &d->flags); | 1237 | set_bit(RFCOMM_TX_THROTTLED, &d->flags); |
1231 | } | 1238 | } |
1232 | 1239 | ||
1240 | if (s->cfc == RFCOMM_CFC_UNKNOWN) | ||
1241 | s->cfc = d->cfc; | ||
1242 | |||
1233 | d->priority = pn->priority; | 1243 | d->priority = pn->priority; |
1234 | 1244 | ||
1235 | d->mtu = s->mtu = btohs(pn->mtu); | 1245 | d->mtu = s->mtu = btohs(pn->mtu); |
@@ -2035,7 +2045,7 @@ static int __init rfcomm_init(void) | |||
2035 | 2045 | ||
2036 | kernel_thread(rfcomm_run, NULL, CLONE_KERNEL); | 2046 | kernel_thread(rfcomm_run, NULL, CLONE_KERNEL); |
2037 | 2047 | ||
2038 | class_create_file(&bt_class, &class_attr_rfcomm_dlc); | 2048 | class_create_file(bt_class, &class_attr_rfcomm_dlc); |
2039 | 2049 | ||
2040 | rfcomm_init_sockets(); | 2050 | rfcomm_init_sockets(); |
2041 | 2051 | ||
@@ -2050,7 +2060,7 @@ static int __init rfcomm_init(void) | |||
2050 | 2060 | ||
2051 | static void __exit rfcomm_exit(void) | 2061 | static void __exit rfcomm_exit(void) |
2052 | { | 2062 | { |
2053 | class_remove_file(&bt_class, &class_attr_rfcomm_dlc); | 2063 | class_remove_file(bt_class, &class_attr_rfcomm_dlc); |
2054 | 2064 | ||
2055 | hci_unregister_cb(&rfcomm_cb); | 2065 | hci_unregister_cb(&rfcomm_cb); |
2056 | 2066 | ||
@@ -2073,6 +2083,9 @@ static void __exit rfcomm_exit(void) | |||
2073 | module_init(rfcomm_init); | 2083 | module_init(rfcomm_init); |
2074 | module_exit(rfcomm_exit); | 2084 | module_exit(rfcomm_exit); |
2075 | 2085 | ||
2086 | module_param(disable_cfc, bool, 0644); | ||
2087 | MODULE_PARM_DESC(disable_cfc, "Disable credit based flow control"); | ||
2088 | |||
2076 | module_param(l2cap_mtu, uint, 0644); | 2089 | module_param(l2cap_mtu, uint, 0644); |
2077 | MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection"); | 2090 | MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection"); |
2078 | 2091 | ||
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 4e9962c8cfa6..220fee04e7f2 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c | |||
@@ -944,7 +944,7 @@ int __init rfcomm_init_sockets(void) | |||
944 | if (err < 0) | 944 | if (err < 0) |
945 | goto error; | 945 | goto error; |
946 | 946 | ||
947 | class_create_file(&bt_class, &class_attr_rfcomm); | 947 | class_create_file(bt_class, &class_attr_rfcomm); |
948 | 948 | ||
949 | BT_INFO("RFCOMM socket layer initialized"); | 949 | BT_INFO("RFCOMM socket layer initialized"); |
950 | 950 | ||
@@ -958,7 +958,7 @@ error: | |||
958 | 958 | ||
959 | void __exit rfcomm_cleanup_sockets(void) | 959 | void __exit rfcomm_cleanup_sockets(void) |
960 | { | 960 | { |
961 | class_remove_file(&bt_class, &class_attr_rfcomm); | 961 | class_remove_file(bt_class, &class_attr_rfcomm); |
962 | 962 | ||
963 | if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) | 963 | if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) |
964 | BT_ERR("RFCOMM socket layer unregistration failed"); | 964 | BT_ERR("RFCOMM socket layer unregistration failed"); |
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index a5f1e44db5d3..85defccc0287 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
@@ -969,7 +969,7 @@ static int __init sco_init(void) | |||
969 | goto error; | 969 | goto error; |
970 | } | 970 | } |
971 | 971 | ||
972 | class_create_file(&bt_class, &class_attr_sco); | 972 | class_create_file(bt_class, &class_attr_sco); |
973 | 973 | ||
974 | BT_INFO("SCO (Voice Link) ver %s", VERSION); | 974 | BT_INFO("SCO (Voice Link) ver %s", VERSION); |
975 | BT_INFO("SCO socket layer initialized"); | 975 | BT_INFO("SCO socket layer initialized"); |
@@ -983,7 +983,7 @@ error: | |||
983 | 983 | ||
984 | static void __exit sco_exit(void) | 984 | static void __exit sco_exit(void) |
985 | { | 985 | { |
986 | class_remove_file(&bt_class, &class_attr_sco); | 986 | class_remove_file(bt_class, &class_attr_sco); |
987 | 987 | ||
988 | if (bt_sock_unregister(BTPROTO_SCO) < 0) | 988 | if (bt_sock_unregister(BTPROTO_SCO) < 0) |
989 | BT_ERR("SCO socket unregistration failed"); | 989 | BT_ERR("SCO socket unregistration failed"); |
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 881d7d1a732a..06abb6634f5b 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
@@ -117,12 +117,13 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | |||
117 | continue; | 117 | continue; |
118 | 118 | ||
119 | if (idx < s_idx) | 119 | if (idx < s_idx) |
120 | continue; | 120 | goto cont; |
121 | 121 | ||
122 | err = br_fill_ifinfo(skb, p, NETLINK_CB(cb->skb).pid, | 122 | err = br_fill_ifinfo(skb, p, NETLINK_CB(cb->skb).pid, |
123 | cb->nlh->nlmsg_seq, RTM_NEWLINK, NLM_F_MULTI); | 123 | cb->nlh->nlmsg_seq, RTM_NEWLINK, NLM_F_MULTI); |
124 | if (err <= 0) | 124 | if (err <= 0) |
125 | break; | 125 | break; |
126 | cont: | ||
126 | ++idx; | 127 | ++idx; |
127 | } | 128 | } |
128 | read_unlock(&dev_base_lock); | 129 | read_unlock(&dev_base_lock); |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 8d157157bf8e..318d4674faa1 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -1106,7 +1106,15 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) | |||
1106 | int ihl; | 1106 | int ihl; |
1107 | int id; | 1107 | int id; |
1108 | 1108 | ||
1109 | if (!pskb_may_pull(skb, sizeof(*iph))) | 1109 | if (unlikely(skb_shinfo(skb)->gso_type & |
1110 | ~(SKB_GSO_TCPV4 | | ||
1111 | SKB_GSO_UDP | | ||
1112 | SKB_GSO_DODGY | | ||
1113 | SKB_GSO_TCP_ECN | | ||
1114 | 0))) | ||
1115 | goto out; | ||
1116 | |||
1117 | if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) | ||
1110 | goto out; | 1118 | goto out; |
1111 | 1119 | ||
1112 | iph = skb->nh.iph; | 1120 | iph = skb->nh.iph; |
@@ -1114,7 +1122,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) | |||
1114 | if (ihl < sizeof(*iph)) | 1122 | if (ihl < sizeof(*iph)) |
1115 | goto out; | 1123 | goto out; |
1116 | 1124 | ||
1117 | if (!pskb_may_pull(skb, ihl)) | 1125 | if (unlikely(!pskb_may_pull(skb, ihl))) |
1118 | goto out; | 1126 | goto out; |
1119 | 1127 | ||
1120 | skb->h.raw = __skb_pull(skb, ihl); | 1128 | skb->h.raw = __skb_pull(skb, ihl); |
@@ -1125,7 +1133,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) | |||
1125 | 1133 | ||
1126 | rcu_read_lock(); | 1134 | rcu_read_lock(); |
1127 | ops = rcu_dereference(inet_protos[proto]); | 1135 | ops = rcu_dereference(inet_protos[proto]); |
1128 | if (ops && ops->gso_segment) | 1136 | if (likely(ops && ops->gso_segment)) |
1129 | segs = ops->gso_segment(skb, features); | 1137 | segs = ops->gso_segment(skb, features); |
1130 | rcu_read_unlock(); | 1138 | rcu_read_unlock(); |
1131 | 1139 | ||
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 804458712d88..f6a2d9223d07 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -2170,8 +2170,19 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) | |||
2170 | 2170 | ||
2171 | if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { | 2171 | if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { |
2172 | /* Packet is from an untrusted source, reset gso_segs. */ | 2172 | /* Packet is from an untrusted source, reset gso_segs. */ |
2173 | int mss = skb_shinfo(skb)->gso_size; | 2173 | int type = skb_shinfo(skb)->gso_type; |
2174 | int mss; | ||
2175 | |||
2176 | if (unlikely(type & | ||
2177 | ~(SKB_GSO_TCPV4 | | ||
2178 | SKB_GSO_DODGY | | ||
2179 | SKB_GSO_TCP_ECN | | ||
2180 | SKB_GSO_TCPV6 | | ||
2181 | 0) || | ||
2182 | !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))) | ||
2183 | goto out; | ||
2174 | 2184 | ||
2185 | mss = skb_shinfo(skb)->gso_size; | ||
2175 | skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss; | 2186 | skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss; |
2176 | 2187 | ||
2177 | segs = NULL; | 2188 | segs = NULL; |
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index c28e5c287447..0c17dec11c8d 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -64,6 +64,14 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) | |||
64 | struct inet6_protocol *ops; | 64 | struct inet6_protocol *ops; |
65 | int proto; | 65 | int proto; |
66 | 66 | ||
67 | if (unlikely(skb_shinfo(skb)->gso_type & | ||
68 | ~(SKB_GSO_UDP | | ||
69 | SKB_GSO_DODGY | | ||
70 | SKB_GSO_TCP_ECN | | ||
71 | SKB_GSO_TCPV6 | | ||
72 | 0))) | ||
73 | goto out; | ||
74 | |||
67 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) | 75 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) |
68 | goto out; | 76 | goto out; |
69 | 77 | ||
@@ -111,7 +119,8 @@ unlock: | |||
111 | 119 | ||
112 | for (skb = segs; skb; skb = skb->next) { | 120 | for (skb = segs; skb; skb = skb->next) { |
113 | ipv6h = skb->nh.ipv6h; | 121 | ipv6h = skb->nh.ipv6h; |
114 | ipv6h->payload_len = htons(skb->len - skb->mac_len); | 122 | ipv6h->payload_len = htons(skb->len - skb->mac_len - |
123 | sizeof(*ipv6h)); | ||
115 | } | 124 | } |
116 | 125 | ||
117 | out: | 126 | out: |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 7ef143c0ebf6..f26898b00347 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/vmalloc.h> | 25 | #include <linux/vmalloc.h> |
26 | #include <linux/netdevice.h> | 26 | #include <linux/netdevice.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/poison.h> | ||
28 | #include <linux/icmpv6.h> | 29 | #include <linux/icmpv6.h> |
29 | #include <net/ipv6.h> | 30 | #include <net/ipv6.h> |
30 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
@@ -376,7 +377,7 @@ ip6t_do_table(struct sk_buff **pskb, | |||
376 | } while (!hotdrop); | 377 | } while (!hotdrop); |
377 | 378 | ||
378 | #ifdef CONFIG_NETFILTER_DEBUG | 379 | #ifdef CONFIG_NETFILTER_DEBUG |
379 | ((struct ip6t_entry *)table_base)->comefrom = 0xdead57ac; | 380 | ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON; |
380 | #endif | 381 | #endif |
381 | read_unlock_bh(&table->lock); | 382 | read_unlock_bh(&table->lock); |
382 | 383 | ||
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index eba6df054b1f..389a4119e1b4 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
@@ -800,7 +800,7 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags) | |||
800 | 800 | ||
801 | /* Now attach up the new socket */ | 801 | /* Now attach up the new socket */ |
802 | kfree_skb(skb); | 802 | kfree_skb(skb); |
803 | sk->sk_ack_backlog--; | 803 | sk_acceptq_removed(sk); |
804 | newsock->sk = newsk; | 804 | newsock->sk = newsk; |
805 | 805 | ||
806 | out: | 806 | out: |
@@ -985,7 +985,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev) | |||
985 | nr_make->vr = 0; | 985 | nr_make->vr = 0; |
986 | nr_make->vl = 0; | 986 | nr_make->vl = 0; |
987 | nr_make->state = NR_STATE_3; | 987 | nr_make->state = NR_STATE_3; |
988 | sk->sk_ack_backlog++; | 988 | sk_acceptq_added(sk); |
989 | 989 | ||
990 | nr_insert_socket(make); | 990 | nr_insert_socket(make); |
991 | 991 | ||
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 7799fe82aeb6..d0a67bb31363 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
@@ -752,7 +752,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le | |||
752 | 752 | ||
753 | rose_insert_socket(sk); /* Finish the bind */ | 753 | rose_insert_socket(sk); /* Finish the bind */ |
754 | } | 754 | } |
755 | 755 | rose_try_next_neigh: | |
756 | rose->dest_addr = addr->srose_addr; | 756 | rose->dest_addr = addr->srose_addr; |
757 | rose->dest_call = addr->srose_call; | 757 | rose->dest_call = addr->srose_call; |
758 | rose->rand = ((long)rose & 0xFFFF) + rose->lci; | 758 | rose->rand = ((long)rose & 0xFFFF) + rose->lci; |
@@ -810,6 +810,11 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le | |||
810 | } | 810 | } |
811 | 811 | ||
812 | if (sk->sk_state != TCP_ESTABLISHED) { | 812 | if (sk->sk_state != TCP_ESTABLISHED) { |
813 | /* Try next neighbour */ | ||
814 | rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic); | ||
815 | if (rose->neighbour) | ||
816 | goto rose_try_next_neigh; | ||
817 | /* No more neighbours to try */ | ||
813 | sock->state = SS_UNCONNECTED; | 818 | sock->state = SS_UNCONNECTED; |
814 | return sock_error(sk); /* Always set at this point */ | 819 | return sock_error(sk); /* Always set at this point */ |
815 | } | 820 | } |
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c index 9d0bf2a1ea3f..7c279e2659ec 100644 --- a/net/rose/rose_dev.c +++ b/net/rose/rose_dev.c | |||
@@ -59,6 +59,7 @@ static int rose_rebuild_header(struct sk_buff *skb) | |||
59 | struct net_device_stats *stats = netdev_priv(dev); | 59 | struct net_device_stats *stats = netdev_priv(dev); |
60 | unsigned char *bp = (unsigned char *)skb->data; | 60 | unsigned char *bp = (unsigned char *)skb->data; |
61 | struct sk_buff *skbn; | 61 | struct sk_buff *skbn; |
62 | unsigned int len; | ||
62 | 63 | ||
63 | #ifdef CONFIG_INET | 64 | #ifdef CONFIG_INET |
64 | if (arp_find(bp + 7, skb)) { | 65 | if (arp_find(bp + 7, skb)) { |
@@ -75,6 +76,8 @@ static int rose_rebuild_header(struct sk_buff *skb) | |||
75 | 76 | ||
76 | kfree_skb(skb); | 77 | kfree_skb(skb); |
77 | 78 | ||
79 | len = skbn->len; | ||
80 | |||
78 | if (!rose_route_frame(skbn, NULL)) { | 81 | if (!rose_route_frame(skbn, NULL)) { |
79 | kfree_skb(skbn); | 82 | kfree_skb(skbn); |
80 | stats->tx_errors++; | 83 | stats->tx_errors++; |
@@ -82,7 +85,7 @@ static int rose_rebuild_header(struct sk_buff *skb) | |||
82 | } | 85 | } |
83 | 86 | ||
84 | stats->tx_packets++; | 87 | stats->tx_packets++; |
85 | stats->tx_bytes += skbn->len; | 88 | stats->tx_bytes += len; |
86 | #endif | 89 | #endif |
87 | return 1; | 90 | return 1; |
88 | } | 91 | } |
diff --git a/net/tipc/core.h b/net/tipc/core.h index 86f54f3512f1..762aac2572be 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h | |||
@@ -297,7 +297,10 @@ static inline struct tipc_msg *buf_msg(struct sk_buff *skb) | |||
297 | * buf_acquire - creates a TIPC message buffer | 297 | * buf_acquire - creates a TIPC message buffer |
298 | * @size: message size (including TIPC header) | 298 | * @size: message size (including TIPC header) |
299 | * | 299 | * |
300 | * Returns a new buffer. Space is reserved for a data link header. | 300 | * Returns a new buffer with data pointers set to the specified size. |
301 | * | ||
302 | * NOTE: Headroom is reserved to allow prepending of a data link header. | ||
303 | * There may also be unrequested tailroom present at the buffer's end. | ||
301 | */ | 304 | */ |
302 | 305 | ||
303 | static inline struct sk_buff *buf_acquire(u32 size) | 306 | static inline struct sk_buff *buf_acquire(u32 size) |
diff --git a/net/tipc/link.c b/net/tipc/link.c index c6831c75cfa4..c10e18a49b96 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -998,6 +998,8 @@ static int link_bundle_buf(struct link *l_ptr, | |||
998 | return 0; | 998 | return 0; |
999 | if (skb_tailroom(bundler) < (pad + size)) | 999 | if (skb_tailroom(bundler) < (pad + size)) |
1000 | return 0; | 1000 | return 0; |
1001 | if (link_max_pkt(l_ptr) < (to_pos + size)) | ||
1002 | return 0; | ||
1001 | 1003 | ||
1002 | skb_put(bundler, pad + size); | 1004 | skb_put(bundler, pad + size); |
1003 | memcpy(bundler->data + to_pos, buf->data, size); | 1005 | memcpy(bundler->data + to_pos, buf->data, size); |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index e9a287bc3142..f70475bfb62a 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -144,7 +144,7 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) | |||
144 | scm->seclen = *UNIXSECLEN(skb); | 144 | scm->seclen = *UNIXSECLEN(skb); |
145 | } | 145 | } |
146 | #else | 146 | #else |
147 | static void unix_get_peersec_dgram(struct sk_buff *skb) | 147 | static inline void unix_get_peersec_dgram(struct sk_buff *skb) |
148 | { } | 148 | { } |
149 | 149 | ||
150 | static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) | 150 | static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) |