author     David S. Miller <davem@davemloft.net>  2014-08-05 16:18:20 -0400
committer  David S. Miller <davem@davemloft.net>  2014-08-05 16:18:20 -0400
commit     aef4f5b6db654e512ebcccab2a6e50424c05d2f9
tree       4daaee5ac85d1128233a45908dac5212f38ec7aa /net
parent     61ab9efddf51cbc0d57356a4d650785cf5721fbe
parent     dc6be9f54a4ecb0a09765d1f515ed947d86b7528
Merge tag 'master-2014-07-31' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next
Conflicts:
net/6lowpan/iphc.c
Minor conflicts in iphc.c were due to changes overlapping with some
style cleanups.
John W. Linville says:
====================
Please pull this last(?) batch of wireless changes intended for the
3.17 stream...
For the NFC bits, Samuel says:
"This is a rather quiet one, we have:
- A new driver from ST Microelectronics for their NCI ST21NFCB,
including device tree support.
- p2p support for the ST21NFCA driver
- A few fixes and enhancements for the NFC digital layer"
For the Atheros bits, Kalle says:
"Michal and Janusz did some important RX aggregation fixes, basically we
were missing RX reordering altogether. The 10.1 firmware doesn't support
Ad-Hoc mode and Michal fixed ath10k so that it doesn't advertise Ad-Hoc
support with that firmware. Also he implemented a workaround for a KVM
issue."
For the Bluetooth bits, Gustavo and Johan say:
"To quote Gustavo from his previous request:
'Some last minute fixes for -next. We have a fix for a use after free in
RFCOMM, another fix to an issue with ADV_DIRECT_IND and one for ADV_IND with
auto-connection handling. Last, we added support for reading the codec and
MWS setting for controllers that support these features.'
Additionally, there are fixes to LE scanning, an update to conform to the 4.1
core specification, as well as fixes for tracking the page scan state. All
of these fixes are important for 3.17."
And,
"We've got:
- 6lowpan fixes/cleanups
- A couple crash fixes, one for the Marvell HCI driver and another in LE SMP.
- Fix for an incorrect connected state check
- Fix for the bondable requirement during pairing (an issue which had
crept in because of using "pairable" when in fact the actual meaning
was "bondable" (these have different meanings in Bluetooth)"
Along with those are some late-breaking hardware support patches in
brcmfmac and b43, as well as a stray ath9k patch.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--  net/6lowpan/iphc.c            | 212
-rw-r--r--  net/bluetooth/hci_core.c      | 185
-rw-r--r--  net/bluetooth/hci_event.c     |  50
-rw-r--r--  net/bluetooth/hidp/core.c     |   2
-rw-r--r--  net/bluetooth/mgmt.c          |  57
-rw-r--r--  net/bluetooth/rfcomm/core.c   |   7
-rw-r--r--  net/bluetooth/smp.c           |  33
-rw-r--r--  net/nfc/digital.h             |   3
-rw-r--r--  net/nfc/digital_core.c        |  27
-rw-r--r--  net/nfc/digital_dep.c         |  11
-rw-r--r--  net/nfc/digital_technology.c  |  96
-rw-r--r--  net/nfc/hci/core.c            |   7
-rw-r--r--  net/nfc/nci/ntf.c             |   4
13 files changed, 520 insertions(+), 174 deletions(-)
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c
index a1b7117a9600..142eef55c9e2 100644
--- a/net/6lowpan/iphc.c
+++ b/net/6lowpan/iphc.c
@@ -177,8 +177,8 @@ static int skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr, | |||
177 | struct sk_buff *new; | 177 | struct sk_buff *new; |
178 | int stat; | 178 | int stat; |
179 | 179 | ||
180 | new = skb_copy_expand(skb, sizeof(struct ipv6hdr), | 180 | new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb), |
181 | skb_tailroom(skb), GFP_ATOMIC); | 181 | GFP_ATOMIC); |
182 | kfree_skb(skb); | 182 | kfree_skb(skb); |
183 | 183 | ||
184 | if (!new) | 184 | if (!new) |
@@ -205,10 +205,9 @@ static int skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr, | |||
205 | /* Uncompress function for multicast destination address, | 205 | /* Uncompress function for multicast destination address, |
206 | * when M bit is set. | 206 | * when M bit is set. |
207 | */ | 207 | */ |
208 | static int | 208 | static int lowpan_uncompress_multicast_daddr(struct sk_buff *skb, |
209 | lowpan_uncompress_multicast_daddr(struct sk_buff *skb, | 209 | struct in6_addr *ipaddr, |
210 | struct in6_addr *ipaddr, | 210 | const u8 dam) |
211 | const u8 dam) | ||
212 | { | 211 | { |
213 | bool fail; | 212 | bool fail; |
214 | 213 | ||
@@ -254,41 +253,41 @@ lowpan_uncompress_multicast_daddr(struct sk_buff *skb, | |||
254 | } | 253 | } |
255 | 254 | ||
256 | raw_dump_inline(NULL, "Reconstructed ipv6 multicast addr is", | 255 | raw_dump_inline(NULL, "Reconstructed ipv6 multicast addr is", |
257 | ipaddr->s6_addr, 16); | 256 | ipaddr->s6_addr, 16); |
258 | 257 | ||
259 | return 0; | 258 | return 0; |
260 | } | 259 | } |
261 | 260 | ||
262 | static int | 261 | static int uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh) |
263 | uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh) | ||
264 | { | 262 | { |
265 | bool fail; | 263 | bool fail; |
266 | u8 tmp = 0, val = 0; | 264 | u8 tmp = 0, val = 0; |
267 | 265 | ||
268 | if (!uh) | 266 | fail = lowpan_fetch_skb(skb, &tmp, sizeof(tmp)); |
269 | goto err; | ||
270 | |||
271 | fail = lowpan_fetch_skb(skb, &tmp, 1); | ||
272 | 267 | ||
273 | if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) { | 268 | if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) { |
274 | pr_debug("UDP header uncompression\n"); | 269 | pr_debug("UDP header uncompression\n"); |
275 | switch (tmp & LOWPAN_NHC_UDP_CS_P_11) { | 270 | switch (tmp & LOWPAN_NHC_UDP_CS_P_11) { |
276 | case LOWPAN_NHC_UDP_CS_P_00: | 271 | case LOWPAN_NHC_UDP_CS_P_00: |
277 | fail |= lowpan_fetch_skb(skb, &uh->source, 2); | 272 | fail |= lowpan_fetch_skb(skb, &uh->source, |
278 | fail |= lowpan_fetch_skb(skb, &uh->dest, 2); | 273 | sizeof(uh->source)); |
274 | fail |= lowpan_fetch_skb(skb, &uh->dest, | ||
275 | sizeof(uh->dest)); | ||
279 | break; | 276 | break; |
280 | case LOWPAN_NHC_UDP_CS_P_01: | 277 | case LOWPAN_NHC_UDP_CS_P_01: |
281 | fail |= lowpan_fetch_skb(skb, &uh->source, 2); | 278 | fail |= lowpan_fetch_skb(skb, &uh->source, |
282 | fail |= lowpan_fetch_skb(skb, &val, 1); | 279 | sizeof(uh->source)); |
280 | fail |= lowpan_fetch_skb(skb, &val, sizeof(val)); | ||
283 | uh->dest = htons(val + LOWPAN_NHC_UDP_8BIT_PORT); | 281 | uh->dest = htons(val + LOWPAN_NHC_UDP_8BIT_PORT); |
284 | break; | 282 | break; |
285 | case LOWPAN_NHC_UDP_CS_P_10: | 283 | case LOWPAN_NHC_UDP_CS_P_10: |
286 | fail |= lowpan_fetch_skb(skb, &val, 1); | 284 | fail |= lowpan_fetch_skb(skb, &val, sizeof(val)); |
287 | uh->source = htons(val + LOWPAN_NHC_UDP_8BIT_PORT); | 285 | uh->source = htons(val + LOWPAN_NHC_UDP_8BIT_PORT); |
288 | fail |= lowpan_fetch_skb(skb, &uh->dest, 2); | 286 | fail |= lowpan_fetch_skb(skb, &uh->dest, |
287 | sizeof(uh->dest)); | ||
289 | break; | 288 | break; |
290 | case LOWPAN_NHC_UDP_CS_P_11: | 289 | case LOWPAN_NHC_UDP_CS_P_11: |
291 | fail |= lowpan_fetch_skb(skb, &val, 1); | 290 | fail |= lowpan_fetch_skb(skb, &val, sizeof(val)); |
292 | uh->source = htons(LOWPAN_NHC_UDP_4BIT_PORT + | 291 | uh->source = htons(LOWPAN_NHC_UDP_4BIT_PORT + |
293 | (val >> 4)); | 292 | (val >> 4)); |
294 | uh->dest = htons(LOWPAN_NHC_UDP_4BIT_PORT + | 293 | uh->dest = htons(LOWPAN_NHC_UDP_4BIT_PORT + |
@@ -307,10 +306,11 @@ uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh) | |||
307 | pr_debug_ratelimited("checksum elided currently not supported\n"); | 306 | pr_debug_ratelimited("checksum elided currently not supported\n"); |
308 | goto err; | 307 | goto err; |
309 | } else { | 308 | } else { |
310 | fail |= lowpan_fetch_skb(skb, &uh->check, 2); | 309 | fail |= lowpan_fetch_skb(skb, &uh->check, |
310 | sizeof(uh->check)); | ||
311 | } | 311 | } |
312 | 312 | ||
313 | /* UDP lenght needs to be infered from the lower layers | 313 | /* UDP length needs to be infered from the lower layers |
314 | * here, we obtain the hint from the remaining size of the | 314 | * here, we obtain the hint from the remaining size of the |
315 | * frame | 315 | * frame |
316 | */ | 316 | */ |
@@ -333,9 +333,8 @@ err: | |||
333 | static const u8 lowpan_ttl_values[] = { 0, 1, 64, 255 }; | 333 | static const u8 lowpan_ttl_values[] = { 0, 1, 64, 255 }; |
334 | 334 | ||
335 | int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, | 335 | int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, |
336 | const u8 *saddr, const u8 saddr_type, | 336 | const u8 *saddr, const u8 saddr_type, const u8 saddr_len, |
337 | const u8 saddr_len, const u8 *daddr, | 337 | const u8 *daddr, const u8 daddr_type, const u8 daddr_len, |
338 | const u8 daddr_type, const u8 daddr_len, | ||
339 | u8 iphc0, u8 iphc1, skb_delivery_cb deliver_skb) | 338 | u8 iphc0, u8 iphc1, skb_delivery_cb deliver_skb) |
340 | { | 339 | { |
341 | struct ipv6hdr hdr = {}; | 340 | struct ipv6hdr hdr = {}; |
@@ -348,7 +347,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, | |||
348 | /* another if the CID flag is set */ | 347 | /* another if the CID flag is set */ |
349 | if (iphc1 & LOWPAN_IPHC_CID) { | 348 | if (iphc1 & LOWPAN_IPHC_CID) { |
350 | pr_debug("CID flag is set, increase header with one\n"); | 349 | pr_debug("CID flag is set, increase header with one\n"); |
351 | if (lowpan_fetch_skb_u8(skb, &num_context)) | 350 | if (lowpan_fetch_skb(skb, &num_context, sizeof(num_context))) |
352 | goto drop; | 351 | goto drop; |
353 | } | 352 | } |
354 | 353 | ||
@@ -360,7 +359,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, | |||
360 | * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes) | 359 | * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes) |
361 | */ | 360 | */ |
362 | case 0: /* 00b */ | 361 | case 0: /* 00b */ |
363 | if (lowpan_fetch_skb_u8(skb, &tmp)) | 362 | if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp))) |
364 | goto drop; | 363 | goto drop; |
365 | 364 | ||
366 | memcpy(&hdr.flow_lbl, &skb->data[0], 3); | 365 | memcpy(&hdr.flow_lbl, &skb->data[0], 3); |
@@ -373,7 +372,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, | |||
373 | * ECN + DSCP (1 byte), Flow Label is elided | 372 | * ECN + DSCP (1 byte), Flow Label is elided |
374 | */ | 373 | */ |
375 | case 2: /* 10b */ | 374 | case 2: /* 10b */ |
376 | if (lowpan_fetch_skb_u8(skb, &tmp)) | 375 | if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp))) |
377 | goto drop; | 376 | goto drop; |
378 | 377 | ||
379 | hdr.priority = ((tmp >> 2) & 0x0f); | 378 | hdr.priority = ((tmp >> 2) & 0x0f); |
@@ -383,7 +382,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, | |||
383 | * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided | 382 | * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided |
384 | */ | 383 | */ |
385 | case 1: /* 01b */ | 384 | case 1: /* 01b */ |
386 | if (lowpan_fetch_skb_u8(skb, &tmp)) | 385 | if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp))) |
387 | goto drop; | 386 | goto drop; |
388 | 387 | ||
389 | hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30); | 388 | hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30); |
@@ -400,7 +399,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, | |||
400 | /* Next Header */ | 399 | /* Next Header */ |
401 | if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) { | 400 | if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) { |
402 | /* Next header is carried inline */ | 401 | /* Next header is carried inline */ |
403 | if (lowpan_fetch_skb_u8(skb, &(hdr.nexthdr))) | 402 | if (lowpan_fetch_skb(skb, &hdr.nexthdr, sizeof(hdr.nexthdr))) |
404 | goto drop; | 403 | goto drop; |
405 | 404 | ||
406 | pr_debug("NH flag is set, next header carried inline: %02x\n", | 405 | pr_debug("NH flag is set, next header carried inline: %02x\n", |
@@ -411,7 +410,8 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, | |||
411 | if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I) { | 410 | if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I) { |
412 | hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03]; | 411 | hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03]; |
413 | } else { | 412 | } else { |
414 | if (lowpan_fetch_skb_u8(skb, &(hdr.hop_limit))) | 413 | if (lowpan_fetch_skb(skb, &hdr.hop_limit, |
414 | sizeof(hdr.hop_limit))) | ||
415 | goto drop; | 415 | goto drop; |
416 | } | 416 | } |
417 | 417 | ||
@@ -421,8 +421,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, | |||
421 | if (iphc1 & LOWPAN_IPHC_SAC) { | 421 | if (iphc1 & LOWPAN_IPHC_SAC) { |
422 | /* Source address context based uncompression */ | 422 | /* Source address context based uncompression */ |
423 | pr_debug("SAC bit is set. Handle context based source address.\n"); | 423 | pr_debug("SAC bit is set. Handle context based source address.\n"); |
424 | err = uncompress_context_based_src_addr( | 424 | err = uncompress_context_based_src_addr(skb, &hdr.saddr, tmp); |
425 | skb, &hdr.saddr, tmp); | ||
426 | } else { | 425 | } else { |
427 | /* Source address uncompression */ | 426 | /* Source address uncompression */ |
428 | pr_debug("source address stateless compression\n"); | 427 | pr_debug("source address stateless compression\n"); |
@@ -443,8 +442,9 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, | |||
443 | pr_debug("dest: context-based mcast compression\n"); | 442 | pr_debug("dest: context-based mcast compression\n"); |
444 | /* TODO: implement this */ | 443 | /* TODO: implement this */ |
445 | } else { | 444 | } else { |
446 | err = lowpan_uncompress_multicast_daddr( | 445 | err = lowpan_uncompress_multicast_daddr(skb, &hdr.daddr, |
447 | skb, &hdr.daddr, tmp); | 446 | tmp); |
447 | |||
448 | if (err) | 448 | if (err) |
449 | goto drop; | 449 | goto drop; |
450 | } | 450 | } |
@@ -497,8 +497,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, | |||
497 | hdr.version, ntohs(hdr.payload_len), hdr.nexthdr, | 497 | hdr.version, ntohs(hdr.payload_len), hdr.nexthdr, |
498 | hdr.hop_limit, &hdr.daddr); | 498 | hdr.hop_limit, &hdr.daddr); |
499 | 499 | ||
500 | raw_dump_table(__func__, "raw header dump", | 500 | raw_dump_table(__func__, "raw header dump", (u8 *)&hdr, sizeof(hdr)); |
501 | (u8 *)&hdr, sizeof(hdr)); | ||
502 | 501 | ||
503 | return skb_deliver(skb, &hdr, dev, deliver_skb); | 502 | return skb_deliver(skb, &hdr, dev, deliver_skb); |
504 | 503 | ||
@@ -508,7 +507,7 @@ drop: | |||
508 | } | 507 | } |
509 | EXPORT_SYMBOL_GPL(lowpan_process_data); | 508 | EXPORT_SYMBOL_GPL(lowpan_process_data); |
510 | 509 | ||
511 | static u8 lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, | 510 | static u8 lowpan_compress_addr_64(u8 **hc_ptr, u8 shift, |
512 | const struct in6_addr *ipaddr, | 511 | const struct in6_addr *ipaddr, |
513 | const unsigned char *lladdr) | 512 | const unsigned char *lladdr) |
514 | { | 513 | { |
@@ -519,24 +518,22 @@ static u8 lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, | |||
519 | pr_debug("address compression 0 bits\n"); | 518 | pr_debug("address compression 0 bits\n"); |
520 | } else if (lowpan_is_iid_16_bit_compressable(ipaddr)) { | 519 | } else if (lowpan_is_iid_16_bit_compressable(ipaddr)) { |
521 | /* compress IID to 16 bits xxxx::XXXX */ | 520 | /* compress IID to 16 bits xxxx::XXXX */ |
522 | memcpy(*hc06_ptr, &ipaddr->s6_addr16[7], 2); | 521 | lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr16[7], 2); |
523 | *hc06_ptr += 2; | ||
524 | val = 2; /* 16-bits */ | 522 | val = 2; /* 16-bits */ |
525 | raw_dump_inline(NULL, "Compressed ipv6 addr is (16 bits)", | 523 | raw_dump_inline(NULL, "Compressed ipv6 addr is (16 bits)", |
526 | *hc06_ptr - 2, 2); | 524 | *hc_ptr - 2, 2); |
527 | } else { | 525 | } else { |
528 | /* do not compress IID => xxxx::IID */ | 526 | /* do not compress IID => xxxx::IID */ |
529 | memcpy(*hc06_ptr, &ipaddr->s6_addr16[4], 8); | 527 | lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr16[4], 8); |
530 | *hc06_ptr += 8; | ||
531 | val = 1; /* 64-bits */ | 528 | val = 1; /* 64-bits */ |
532 | raw_dump_inline(NULL, "Compressed ipv6 addr is (64 bits)", | 529 | raw_dump_inline(NULL, "Compressed ipv6 addr is (64 bits)", |
533 | *hc06_ptr - 8, 8); | 530 | *hc_ptr - 8, 8); |
534 | } | 531 | } |
535 | 532 | ||
536 | return rol8(val, shift); | 533 | return rol8(val, shift); |
537 | } | 534 | } |
538 | 535 | ||
539 | static void compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb) | 536 | static void compress_udp_header(u8 **hc_ptr, struct sk_buff *skb) |
540 | { | 537 | { |
541 | struct udphdr *uh = udp_hdr(skb); | 538 | struct udphdr *uh = udp_hdr(skb); |
542 | u8 tmp; | 539 | u8 tmp; |
@@ -548,46 +545,46 @@ static void compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb) | |||
548 | pr_debug("UDP header: both ports compression to 4 bits\n"); | 545 | pr_debug("UDP header: both ports compression to 4 bits\n"); |
549 | /* compression value */ | 546 | /* compression value */ |
550 | tmp = LOWPAN_NHC_UDP_CS_P_11; | 547 | tmp = LOWPAN_NHC_UDP_CS_P_11; |
551 | lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp)); | 548 | lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp)); |
552 | /* source and destination port */ | 549 | /* source and destination port */ |
553 | tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_4BIT_PORT + | 550 | tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_4BIT_PORT + |
554 | ((ntohs(uh->source) - LOWPAN_NHC_UDP_4BIT_PORT) << 4); | 551 | ((ntohs(uh->source) - LOWPAN_NHC_UDP_4BIT_PORT) << 4); |
555 | lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp)); | 552 | lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp)); |
556 | } else if ((ntohs(uh->dest) & LOWPAN_NHC_UDP_8BIT_MASK) == | 553 | } else if ((ntohs(uh->dest) & LOWPAN_NHC_UDP_8BIT_MASK) == |
557 | LOWPAN_NHC_UDP_8BIT_PORT) { | 554 | LOWPAN_NHC_UDP_8BIT_PORT) { |
558 | pr_debug("UDP header: remove 8 bits of dest\n"); | 555 | pr_debug("UDP header: remove 8 bits of dest\n"); |
559 | /* compression value */ | 556 | /* compression value */ |
560 | tmp = LOWPAN_NHC_UDP_CS_P_01; | 557 | tmp = LOWPAN_NHC_UDP_CS_P_01; |
561 | lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp)); | 558 | lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp)); |
562 | /* source port */ | 559 | /* source port */ |
563 | lowpan_push_hc_data(hc06_ptr, &uh->source, sizeof(uh->source)); | 560 | lowpan_push_hc_data(hc_ptr, &uh->source, sizeof(uh->source)); |
564 | /* destination port */ | 561 | /* destination port */ |
565 | tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_8BIT_PORT; | 562 | tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_8BIT_PORT; |
566 | lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp)); | 563 | lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp)); |
567 | } else if ((ntohs(uh->source) & LOWPAN_NHC_UDP_8BIT_MASK) == | 564 | } else if ((ntohs(uh->source) & LOWPAN_NHC_UDP_8BIT_MASK) == |
568 | LOWPAN_NHC_UDP_8BIT_PORT) { | 565 | LOWPAN_NHC_UDP_8BIT_PORT) { |
569 | pr_debug("UDP header: remove 8 bits of source\n"); | 566 | pr_debug("UDP header: remove 8 bits of source\n"); |
570 | /* compression value */ | 567 | /* compression value */ |
571 | tmp = LOWPAN_NHC_UDP_CS_P_10; | 568 | tmp = LOWPAN_NHC_UDP_CS_P_10; |
572 | lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp)); | 569 | lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp)); |
573 | /* source port */ | 570 | /* source port */ |
574 | tmp = ntohs(uh->source) - LOWPAN_NHC_UDP_8BIT_PORT; | 571 | tmp = ntohs(uh->source) - LOWPAN_NHC_UDP_8BIT_PORT; |
575 | lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp)); | 572 | lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp)); |
576 | /* destination port */ | 573 | /* destination port */ |
577 | lowpan_push_hc_data(hc06_ptr, &uh->dest, sizeof(uh->dest)); | 574 | lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest)); |
578 | } else { | 575 | } else { |
579 | pr_debug("UDP header: can't compress\n"); | 576 | pr_debug("UDP header: can't compress\n"); |
580 | /* compression value */ | 577 | /* compression value */ |
581 | tmp = LOWPAN_NHC_UDP_CS_P_00; | 578 | tmp = LOWPAN_NHC_UDP_CS_P_00; |
582 | lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp)); | 579 | lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp)); |
583 | /* source port */ | 580 | /* source port */ |
584 | lowpan_push_hc_data(hc06_ptr, &uh->source, sizeof(uh->source)); | 581 | lowpan_push_hc_data(hc_ptr, &uh->source, sizeof(uh->source)); |
585 | /* destination port */ | 582 | /* destination port */ |
586 | lowpan_push_hc_data(hc06_ptr, &uh->dest, sizeof(uh->dest)); | 583 | lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest)); |
587 | } | 584 | } |
588 | 585 | ||
589 | /* checksum is always inline */ | 586 | /* checksum is always inline */ |
590 | lowpan_push_hc_data(hc06_ptr, &uh->check, sizeof(uh->check)); | 587 | lowpan_push_hc_data(hc_ptr, &uh->check, sizeof(uh->check)); |
591 | 588 | ||
592 | /* skip the UDP header */ | 589 | /* skip the UDP header */ |
593 | skb_pull(skb, sizeof(struct udphdr)); | 590 | skb_pull(skb, sizeof(struct udphdr)); |
@@ -597,15 +594,16 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev, | |||
597 | unsigned short type, const void *_daddr, | 594 | unsigned short type, const void *_daddr, |
598 | const void *_saddr, unsigned int len) | 595 | const void *_saddr, unsigned int len) |
599 | { | 596 | { |
600 | u8 tmp, iphc0, iphc1, *hc06_ptr; | 597 | u8 tmp, iphc0, iphc1, *hc_ptr; |
601 | struct ipv6hdr *hdr; | 598 | struct ipv6hdr *hdr; |
602 | u8 head[100] = {}; | 599 | u8 head[100] = {}; |
600 | int addr_type; | ||
603 | 601 | ||
604 | if (type != ETH_P_IPV6) | 602 | if (type != ETH_P_IPV6) |
605 | return -EINVAL; | 603 | return -EINVAL; |
606 | 604 | ||
607 | hdr = ipv6_hdr(skb); | 605 | hdr = ipv6_hdr(skb); |
608 | hc06_ptr = head + 2; | 606 | hc_ptr = head + 2; |
609 | 607 | ||
610 | pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n" | 608 | pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n" |
611 | "\tnexthdr = 0x%02x\n\thop_lim = %d\n\tdest = %pI6c\n", | 609 | "\tnexthdr = 0x%02x\n\thop_lim = %d\n\tdest = %pI6c\n", |
@@ -630,8 +628,7 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev, | |||
630 | raw_dump_inline(__func__, "daddr", | 628 | raw_dump_inline(__func__, "daddr", |
631 | (unsigned char *)_daddr, IEEE802154_ADDR_LEN); | 629 | (unsigned char *)_daddr, IEEE802154_ADDR_LEN); |
632 | 630 | ||
633 | raw_dump_table(__func__, | 631 | raw_dump_table(__func__, "sending raw skb network uncompressed packet", |
634 | "sending raw skb network uncompressed packet", | ||
635 | skb->data, skb->len); | 632 | skb->data, skb->len); |
636 | 633 | ||
637 | /* Traffic class, flow label | 634 | /* Traffic class, flow label |
@@ -640,7 +637,7 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev, | |||
640 | * class depends on the presence of version and flow label | 637 | * class depends on the presence of version and flow label |
641 | */ | 638 | */ |
642 | 639 | ||
643 | /* hc06 format of TC is ECN | DSCP , original one is DSCP | ECN */ | 640 | /* hc format of TC is ECN | DSCP , original one is DSCP | ECN */ |
644 | tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4); | 641 | tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4); |
645 | tmp = ((tmp & 0x03) << 6) | (tmp >> 2); | 642 | tmp = ((tmp & 0x03) << 6) | (tmp >> 2); |
646 | 643 | ||
@@ -654,8 +651,8 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev, | |||
654 | iphc0 |= LOWPAN_IPHC_TC_C; | 651 | iphc0 |= LOWPAN_IPHC_TC_C; |
655 | } else { | 652 | } else { |
656 | /* compress only the flow label */ | 653 | /* compress only the flow label */ |
657 | *hc06_ptr = tmp; | 654 | *hc_ptr = tmp; |
658 | hc06_ptr += 1; | 655 | hc_ptr += 1; |
659 | } | 656 | } |
660 | } else { | 657 | } else { |
661 | /* Flow label cannot be compressed */ | 658 | /* Flow label cannot be compressed */ |
@@ -663,15 +660,15 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev, | |||
663 | ((hdr->flow_lbl[0] & 0xF0) == 0)) { | 660 | ((hdr->flow_lbl[0] & 0xF0) == 0)) { |
664 | /* compress only traffic class */ | 661 | /* compress only traffic class */ |
665 | iphc0 |= LOWPAN_IPHC_TC_C; | 662 | iphc0 |= LOWPAN_IPHC_TC_C; |
666 | *hc06_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F); | 663 | *hc_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F); |
667 | memcpy(hc06_ptr + 1, &hdr->flow_lbl[1], 2); | 664 | memcpy(hc_ptr + 1, &hdr->flow_lbl[1], 2); |
668 | hc06_ptr += 3; | 665 | hc_ptr += 3; |
669 | } else { | 666 | } else { |
670 | /* compress nothing */ | 667 | /* compress nothing */ |
671 | memcpy(hc06_ptr, hdr, 4); | 668 | memcpy(hc_ptr, hdr, 4); |
672 | /* replace the top byte with new ECN | DSCP format */ | 669 | /* replace the top byte with new ECN | DSCP format */ |
673 | *hc06_ptr = tmp; | 670 | *hc_ptr = tmp; |
674 | hc06_ptr += 4; | 671 | hc_ptr += 4; |
675 | } | 672 | } |
676 | } | 673 | } |
677 | 674 | ||
@@ -681,10 +678,9 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev, | |||
681 | if (hdr->nexthdr == UIP_PROTO_UDP) | 678 | if (hdr->nexthdr == UIP_PROTO_UDP) |
682 | iphc0 |= LOWPAN_IPHC_NH_C; | 679 | iphc0 |= LOWPAN_IPHC_NH_C; |
683 | 680 | ||
684 | if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) { | 681 | if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) |
685 | *hc06_ptr = hdr->nexthdr; | 682 | lowpan_push_hc_data(&hc_ptr, &hdr->nexthdr, |
686 | hc06_ptr += 1; | 683 | sizeof(hdr->nexthdr)); |
687 | } | ||
688 | 684 | ||
689 | /* Hop limit | 685 | /* Hop limit |
690 | * if 1: compress, encoding is 01 | 686 | * if 1: compress, encoding is 01 |
@@ -703,84 +699,86 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev, | |||
703 | iphc0 |= LOWPAN_IPHC_TTL_255; | 699 | iphc0 |= LOWPAN_IPHC_TTL_255; |
704 | break; | 700 | break; |
705 | default: | 701 | default: |
706 | *hc06_ptr = hdr->hop_limit; | 702 | lowpan_push_hc_data(&hc_ptr, &hdr->hop_limit, |
707 | hc06_ptr += 1; | 703 | sizeof(hdr->hop_limit)); |
708 | break; | ||
709 | } | 704 | } |
710 | 705 | ||
706 | addr_type = ipv6_addr_type(&hdr->saddr); | ||
711 | /* source address compression */ | 707 | /* source address compression */ |
712 | if (is_addr_unspecified(&hdr->saddr)) { | 708 | if (addr_type == IPV6_ADDR_ANY) { |
713 | pr_debug("source address is unspecified, setting SAC\n"); | 709 | pr_debug("source address is unspecified, setting SAC\n"); |
714 | iphc1 |= LOWPAN_IPHC_SAC; | 710 | iphc1 |= LOWPAN_IPHC_SAC; |
715 | /* TODO: context lookup */ | ||
716 | } else if (is_addr_link_local(&hdr->saddr)) { | ||
717 | iphc1 |= lowpan_compress_addr_64(&hc06_ptr, | ||
718 | LOWPAN_IPHC_SAM_BIT, &hdr->saddr, _saddr); | ||
719 | pr_debug("source address unicast link-local %pI6c " | ||
720 | "iphc1 0x%02x\n", &hdr->saddr, iphc1); | ||
721 | } else { | 711 | } else { |
722 | pr_debug("send the full source address\n"); | 712 | if (addr_type & IPV6_ADDR_LINKLOCAL) { |
723 | memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16); | 713 | iphc1 |= lowpan_compress_addr_64(&hc_ptr, |
724 | hc06_ptr += 16; | 714 | LOWPAN_IPHC_SAM_BIT, |
715 | &hdr->saddr, _saddr); | ||
716 | pr_debug("source address unicast link-local %pI6c iphc1 0x%02x\n", | ||
717 | &hdr->saddr, iphc1); | ||
718 | } else { | ||
719 | pr_debug("send the full source address\n"); | ||
720 | lowpan_push_hc_data(&hc_ptr, hdr->saddr.s6_addr, 16); | ||
721 | } | ||
725 | } | 722 | } |
726 | 723 | ||
724 | addr_type = ipv6_addr_type(&hdr->daddr); | ||
727 | /* destination address compression */ | 725 | /* destination address compression */ |
728 | if (is_addr_mcast(&hdr->daddr)) { | 726 | if (addr_type & IPV6_ADDR_MULTICAST) { |
729 | pr_debug("destination address is multicast: "); | 727 | pr_debug("destination address is multicast: "); |
730 | iphc1 |= LOWPAN_IPHC_M; | 728 | iphc1 |= LOWPAN_IPHC_M; |
731 | if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) { | 729 | if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) { |
732 | pr_debug("compressed to 1 octet\n"); | 730 | pr_debug("compressed to 1 octet\n"); |
733 | iphc1 |= LOWPAN_IPHC_DAM_11; | 731 | iphc1 |= LOWPAN_IPHC_DAM_11; |
734 | /* use last byte */ | 732 | /* use last byte */ |
735 | *hc06_ptr = hdr->daddr.s6_addr[15]; | 733 | lowpan_push_hc_data(&hc_ptr, |
736 | hc06_ptr += 1; | 734 | &hdr->daddr.s6_addr[15], 1); |
737 | } else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) { | 735 | } else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) { |
738 | pr_debug("compressed to 4 octets\n"); | 736 | pr_debug("compressed to 4 octets\n"); |
739 | iphc1 |= LOWPAN_IPHC_DAM_10; | 737 | iphc1 |= LOWPAN_IPHC_DAM_10; |
740 | /* second byte + the last three */ | 738 | /* second byte + the last three */ |
741 | *hc06_ptr = hdr->daddr.s6_addr[1]; | 739 | lowpan_push_hc_data(&hc_ptr, |
742 | memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[13], 3); | 740 | &hdr->daddr.s6_addr[1], 1); |
743 | hc06_ptr += 4; | 741 | lowpan_push_hc_data(&hc_ptr, |
742 | &hdr->daddr.s6_addr[13], 3); | ||
744 | } else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) { | 743 | } else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) { |
745 | pr_debug("compressed to 6 octets\n"); | 744 | pr_debug("compressed to 6 octets\n"); |
746 | iphc1 |= LOWPAN_IPHC_DAM_01; | 745 | iphc1 |= LOWPAN_IPHC_DAM_01; |
747 | /* second byte + the last five */ | 746 | /* second byte + the last five */ |
748 | *hc06_ptr = hdr->daddr.s6_addr[1]; | 747 | lowpan_push_hc_data(&hc_ptr, |
749 | memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[11], 5); | 748 | &hdr->daddr.s6_addr[1], 1); |
750 | hc06_ptr += 6; | 749 | lowpan_push_hc_data(&hc_ptr, |
750 | &hdr->daddr.s6_addr[11], 5); | ||
751 | } else { | 751 | } else { |
752 | pr_debug("using full address\n"); | 752 | pr_debug("using full address\n"); |
753 | iphc1 |= LOWPAN_IPHC_DAM_00; | 753 | iphc1 |= LOWPAN_IPHC_DAM_00; |
754 | memcpy(hc06_ptr, &hdr->daddr.s6_addr[0], 16); | 754 | lowpan_push_hc_data(&hc_ptr, hdr->daddr.s6_addr, 16); |
755 | hc06_ptr += 16; | ||
756 | } | 755 | } |
757 | } else { | 756 | } else { |
758 | /* TODO: context lookup */ | 757 | if (addr_type & IPV6_ADDR_LINKLOCAL) { |
759 | if (is_addr_link_local(&hdr->daddr)) { | 758 | /* TODO: context lookup */ |
760 | iphc1 |= lowpan_compress_addr_64(&hc06_ptr, | 759 | iphc1 |= lowpan_compress_addr_64(&hc_ptr, |
761 | LOWPAN_IPHC_DAM_BIT, &hdr->daddr, _daddr); | 760 | LOWPAN_IPHC_DAM_BIT, &hdr->daddr, _daddr); |
762 | pr_debug("dest address unicast link-local %pI6c " | 761 | pr_debug("dest address unicast link-local %pI6c " |
763 | "iphc1 0x%02x\n", &hdr->daddr, iphc1); | 762 | "iphc1 0x%02x\n", &hdr->daddr, iphc1); |
764 | } else { | 763 | } else { |
765 | pr_debug("dest address unicast %pI6c\n", &hdr->daddr); | 764 | pr_debug("dest address unicast %pI6c\n", &hdr->daddr); |
766 | memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16); | 765 | lowpan_push_hc_data(&hc_ptr, hdr->daddr.s6_addr, 16); |
767 | hc06_ptr += 16; | ||
768 | } | 766 | } |
769 | } | 767 | } |
770 | 768 | ||
771 | /* UDP header compression */ | 769 | /* UDP header compression */ |
772 | if (hdr->nexthdr == UIP_PROTO_UDP) | 770 | if (hdr->nexthdr == UIP_PROTO_UDP) |
773 | compress_udp_header(&hc06_ptr, skb); | 771 | compress_udp_header(&hc_ptr, skb); |
774 | 772 | ||
775 | head[0] = iphc0; | 773 | head[0] = iphc0; |
776 | head[1] = iphc1; | 774 | head[1] = iphc1; |
777 | 775 | ||
778 | skb_pull(skb, sizeof(struct ipv6hdr)); | 776 | skb_pull(skb, sizeof(struct ipv6hdr)); |
779 | skb_reset_transport_header(skb); | 777 | skb_reset_transport_header(skb); |
780 | memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head); | 778 | memcpy(skb_push(skb, hc_ptr - head), head, hc_ptr - head); |
781 | skb_reset_network_header(skb); | 779 | skb_reset_network_header(skb); |
782 | 780 | ||
783 | pr_debug("header len %d skb %u\n", (int)(hc06_ptr - head), skb->len); | 781 | pr_debug("header len %d skb %u\n", (int)(hc_ptr - head), skb->len); |
784 | 782 | ||
785 | raw_dump_table(__func__, "raw skb data dump compressed", | 783 | raw_dump_table(__func__, "raw skb data dump compressed", |
786 | skb->data, skb->len); | 784 | skb->data, skb->len); |
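Most of the iphc.c churn above swaps open-coded memcpy() plus manual pointer
advancement for lowpan_push_hc_data(), and replaces hard-coded fetch lengths
with sizeof()-based lowpan_fetch_skb() calls so the length always tracks the
field type. A minimal sketch of what such helpers look like follows; the
bodies are an illustrative assumption, not copied from the kernel headers.

#include <linux/skbuff.h>
#include <linux/string.h>

/* Illustrative equivalents of the helpers used above, not the kernel's
 * exact definitions.
 */
static inline void lowpan_push_hc_data(u8 **hc_ptr, const void *data,
                                       size_t len)
{
        /* Append 'len' bytes to the compressed header and advance the cursor. */
        memcpy(*hc_ptr, data, len);
        *hc_ptr += len;
}

static inline bool lowpan_fetch_skb(struct sk_buff *skb, void *data,
                                    unsigned int len)
{
        /* Returns true on failure so callers can OR several fetches together. */
        if (unlikely(!pskb_may_pull(skb, len)))
                return true;

        skb_copy_from_linear_data(skb, data, len);
        skb_pull(skb, len);

        return false;
}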
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index cfcb6055ced8..32b96f1aaf42 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -970,6 +970,62 @@ static int adv_channel_map_get(void *data, u64 *val) | |||
970 | DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get, | 970 | DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get, |
971 | adv_channel_map_set, "%llu\n"); | 971 | adv_channel_map_set, "%llu\n"); |
972 | 972 | ||
973 | static int adv_min_interval_set(void *data, u64 val) | ||
974 | { | ||
975 | struct hci_dev *hdev = data; | ||
976 | |||
977 | if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval) | ||
978 | return -EINVAL; | ||
979 | |||
980 | hci_dev_lock(hdev); | ||
981 | hdev->le_adv_min_interval = val; | ||
982 | hci_dev_unlock(hdev); | ||
983 | |||
984 | return 0; | ||
985 | } | ||
986 | |||
987 | static int adv_min_interval_get(void *data, u64 *val) | ||
988 | { | ||
989 | struct hci_dev *hdev = data; | ||
990 | |||
991 | hci_dev_lock(hdev); | ||
992 | *val = hdev->le_adv_min_interval; | ||
993 | hci_dev_unlock(hdev); | ||
994 | |||
995 | return 0; | ||
996 | } | ||
997 | |||
998 | DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get, | ||
999 | adv_min_interval_set, "%llu\n"); | ||
1000 | |||
1001 | static int adv_max_interval_set(void *data, u64 val) | ||
1002 | { | ||
1003 | struct hci_dev *hdev = data; | ||
1004 | |||
1005 | if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval) | ||
1006 | return -EINVAL; | ||
1007 | |||
1008 | hci_dev_lock(hdev); | ||
1009 | hdev->le_adv_max_interval = val; | ||
1010 | hci_dev_unlock(hdev); | ||
1011 | |||
1012 | return 0; | ||
1013 | } | ||
1014 | |||
1015 | static int adv_max_interval_get(void *data, u64 *val) | ||
1016 | { | ||
1017 | struct hci_dev *hdev = data; | ||
1018 | |||
1019 | hci_dev_lock(hdev); | ||
1020 | *val = hdev->le_adv_max_interval; | ||
1021 | hci_dev_unlock(hdev); | ||
1022 | |||
1023 | return 0; | ||
1024 | } | ||
1025 | |||
1026 | DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get, | ||
1027 | adv_max_interval_set, "%llu\n"); | ||
1028 | |||
973 | static int device_list_show(struct seq_file *f, void *ptr) | 1029 | static int device_list_show(struct seq_file *f, void *ptr) |
974 | { | 1030 | { |
975 | struct hci_dev *hdev = f->private; | 1031 | struct hci_dev *hdev = f->private; |
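The 0x0020..0x4000 bounds enforced in adv_min_interval_set() and
adv_max_interval_set() above are the LE advertising interval limits, expressed
in units of 0.625 ms; the 0x0800 default assigned in hci_alloc_dev() further
down corresponds to 1.28 s. A standalone sketch of the unit conversion (the
helper name is illustrative, not a kernel function):

#include <stdio.h>

/* LE advertising intervals are expressed in 0.625 ms units,
 * valid range 0x0020..0x4000. Illustrative helper, not kernel code.
 */
static unsigned int le_adv_interval_to_us(unsigned int val)
{
        return val * 625;       /* 0.625 ms expressed in microseconds */
}

int main(void)
{
        printf("0x0020 -> %u us (20 ms)\n", le_adv_interval_to_us(0x0020));
        printf("0x0800 -> %u us (1.28 s)\n", le_adv_interval_to_us(0x0800));
        printf("0x4000 -> %u us (10.24 s)\n", le_adv_interval_to_us(0x4000));
        return 0;
}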
@@ -1567,7 +1623,7 @@ static void hci_set_le_support(struct hci_request *req) | |||
1567 | 1623 | ||
1568 | if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { | 1624 | if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { |
1569 | cp.le = 0x01; | 1625 | cp.le = 0x01; |
1570 | cp.simul = lmp_le_br_capable(hdev); | 1626 | cp.simul = 0x00; |
1571 | } | 1627 | } |
1572 | 1628 | ||
1573 | if (cp.le != lmp_host_le_capable(hdev)) | 1629 | if (cp.le != lmp_host_le_capable(hdev)) |
@@ -1686,6 +1742,14 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt) | |||
1686 | if (hdev->commands[22] & 0x04) | 1742 | if (hdev->commands[22] & 0x04) |
1687 | hci_set_event_mask_page_2(req); | 1743 | hci_set_event_mask_page_2(req); |
1688 | 1744 | ||
1745 | /* Read local codec list if the HCI command is supported */ | ||
1746 | if (hdev->commands[29] & 0x20) | ||
1747 | hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL); | ||
1748 | |||
1749 | /* Get MWS transport configuration if the HCI command is supported */ | ||
1750 | if (hdev->commands[30] & 0x08) | ||
1751 | hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL); | ||
1752 | |||
1689 | /* Check for Synchronization Train support */ | 1753 | /* Check for Synchronization Train support */ |
1690 | if (lmp_sync_train_capable(hdev)) | 1754 | if (lmp_sync_train_capable(hdev)) |
1691 | hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL); | 1755 | hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL); |
@@ -1825,6 +1889,10 @@ static int __hci_init(struct hci_dev *hdev) | |||
1825 | hdev, &supervision_timeout_fops); | 1889 | hdev, &supervision_timeout_fops); |
1826 | debugfs_create_file("adv_channel_map", 0644, hdev->debugfs, | 1890 | debugfs_create_file("adv_channel_map", 0644, hdev->debugfs, |
1827 | hdev, &adv_channel_map_fops); | 1891 | hdev, &adv_channel_map_fops); |
1892 | debugfs_create_file("adv_min_interval", 0644, hdev->debugfs, | ||
1893 | hdev, &adv_min_interval_fops); | ||
1894 | debugfs_create_file("adv_max_interval", 0644, hdev->debugfs, | ||
1895 | hdev, &adv_max_interval_fops); | ||
1828 | debugfs_create_file("device_list", 0444, hdev->debugfs, hdev, | 1896 | debugfs_create_file("device_list", 0444, hdev->debugfs, hdev, |
1829 | &device_list_fops); | 1897 | &device_list_fops); |
1830 | debugfs_create_u16("discov_interleaved_timeout", 0644, | 1898 | debugfs_create_u16("discov_interleaved_timeout", 0644, |
@@ -2453,14 +2521,14 @@ int hci_dev_open(__u16 dev) | |||
2453 | flush_workqueue(hdev->req_workqueue); | 2521 | flush_workqueue(hdev->req_workqueue); |
2454 | 2522 | ||
2455 | /* For controllers not using the management interface and that | 2523 | /* For controllers not using the management interface and that |
2456 | * are brought up using legacy ioctl, set the HCI_PAIRABLE bit | 2524 | * are brought up using legacy ioctl, set the HCI_BONDABLE bit |
2457 | * so that pairing works for them. Once the management interface | 2525 | * so that pairing works for them. Once the management interface |
2458 | * is in use this bit will be cleared again and userspace has | 2526 | * is in use this bit will be cleared again and userspace has |
2459 | * to explicitly enable it. | 2527 | * to explicitly enable it. |
2460 | */ | 2528 | */ |
2461 | if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) && | 2529 | if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) && |
2462 | !test_bit(HCI_MGMT, &hdev->dev_flags)) | 2530 | !test_bit(HCI_MGMT, &hdev->dev_flags)) |
2463 | set_bit(HCI_PAIRABLE, &hdev->dev_flags); | 2531 | set_bit(HCI_BONDABLE, &hdev->dev_flags); |
2464 | 2532 | ||
2465 | err = hci_dev_do_open(hdev); | 2533 | err = hci_dev_do_open(hdev); |
2466 | 2534 | ||
@@ -3639,6 +3707,7 @@ int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type, | |||
3639 | list_add(&params->action, &hdev->pend_le_reports); | 3707 | list_add(&params->action, &hdev->pend_le_reports); |
3640 | hci_update_background_scan(hdev); | 3708 | hci_update_background_scan(hdev); |
3641 | break; | 3709 | break; |
3710 | case HCI_AUTO_CONN_DIRECT: | ||
3642 | case HCI_AUTO_CONN_ALWAYS: | 3711 | case HCI_AUTO_CONN_ALWAYS: |
3643 | if (!is_connected(hdev, addr, addr_type)) { | 3712 | if (!is_connected(hdev, addr, addr_type)) { |
3644 | list_add(&params->action, &hdev->pend_le_conns); | 3713 | list_add(&params->action, &hdev->pend_le_conns); |
@@ -3914,6 +3983,8 @@ struct hci_dev *hci_alloc_dev(void) | |||
3914 | hdev->sniff_min_interval = 80; | 3983 | hdev->sniff_min_interval = 80; |
3915 | 3984 | ||
3916 | hdev->le_adv_channel_map = 0x07; | 3985 | hdev->le_adv_channel_map = 0x07; |
3986 | hdev->le_adv_min_interval = 0x0800; | ||
3987 | hdev->le_adv_max_interval = 0x0800; | ||
3917 | hdev->le_scan_interval = 0x0060; | 3988 | hdev->le_scan_interval = 0x0060; |
3918 | hdev->le_scan_window = 0x0030; | 3989 | hdev->le_scan_window = 0x0030; |
3919 | hdev->le_conn_min_interval = 0x0028; | 3990 | hdev->le_conn_min_interval = 0x0028; |
@@ -5397,12 +5468,113 @@ void hci_req_add_le_scan_disable(struct hci_request *req) | |||
5397 | hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); | 5468 | hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); |
5398 | } | 5469 | } |
5399 | 5470 | ||
5471 | static void add_to_white_list(struct hci_request *req, | ||
5472 | struct hci_conn_params *params) | ||
5473 | { | ||
5474 | struct hci_cp_le_add_to_white_list cp; | ||
5475 | |||
5476 | cp.bdaddr_type = params->addr_type; | ||
5477 | bacpy(&cp.bdaddr, &params->addr); | ||
5478 | |||
5479 | hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp); | ||
5480 | } | ||
5481 | |||
5482 | static u8 update_white_list(struct hci_request *req) | ||
5483 | { | ||
5484 | struct hci_dev *hdev = req->hdev; | ||
5485 | struct hci_conn_params *params; | ||
5486 | struct bdaddr_list *b; | ||
5487 | uint8_t white_list_entries = 0; | ||
5488 | |||
5489 | /* Go through the current white list programmed into the | ||
5490 | * controller one by one and check if that address is still | ||
5491 | * in the list of pending connections or list of devices to | ||
5492 | * report. If not present in either list, then queue the | ||
5493 | * command to remove it from the controller. | ||
5494 | */ | ||
5495 | list_for_each_entry(b, &hdev->le_white_list, list) { | ||
5496 | struct hci_cp_le_del_from_white_list cp; | ||
5497 | |||
5498 | if (hci_pend_le_action_lookup(&hdev->pend_le_conns, | ||
5499 | &b->bdaddr, b->bdaddr_type) || | ||
5500 | hci_pend_le_action_lookup(&hdev->pend_le_reports, | ||
5501 | &b->bdaddr, b->bdaddr_type)) { | ||
5502 | white_list_entries++; | ||
5503 | continue; | ||
5504 | } | ||
5505 | |||
5506 | cp.bdaddr_type = b->bdaddr_type; | ||
5507 | bacpy(&cp.bdaddr, &b->bdaddr); | ||
5508 | |||
5509 | hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, | ||
5510 | sizeof(cp), &cp); | ||
5511 | } | ||
5512 | |||
5513 | /* Since all no longer valid white list entries have been | ||
5514 | * removed, walk through the list of pending connections | ||
5515 | * and ensure that any new device gets programmed into | ||
5516 | * the controller. | ||
5517 | * | ||
5518 | * If the list of the devices is larger than the list of | ||
5519 | * available white list entries in the controller, then | ||
5520 | * just abort and return filer policy value to not use the | ||
5521 | * white list. | ||
5522 | */ | ||
5523 | list_for_each_entry(params, &hdev->pend_le_conns, action) { | ||
5524 | if (hci_bdaddr_list_lookup(&hdev->le_white_list, | ||
5525 | &params->addr, params->addr_type)) | ||
5526 | continue; | ||
5527 | |||
5528 | if (white_list_entries >= hdev->le_white_list_size) { | ||
5529 | /* Select filter policy to accept all advertising */ | ||
5530 | return 0x00; | ||
5531 | } | ||
5532 | |||
5533 | if (hci_find_irk_by_addr(hdev, &params->addr, | ||
5534 | params->addr_type)) { | ||
5535 | /* White list can not be used with RPAs */ | ||
5536 | return 0x00; | ||
5537 | } | ||
5538 | |||
5539 | white_list_entries++; | ||
5540 | add_to_white_list(req, params); | ||
5541 | } | ||
5542 | |||
5543 | /* After adding all new pending connections, walk through | ||
5544 | * the list of pending reports and also add these to the | ||
5545 | * white list if there is still space. | ||
5546 | */ | ||
5547 | list_for_each_entry(params, &hdev->pend_le_reports, action) { | ||
5548 | if (hci_bdaddr_list_lookup(&hdev->le_white_list, | ||
5549 | &params->addr, params->addr_type)) | ||
5550 | continue; | ||
5551 | |||
5552 | if (white_list_entries >= hdev->le_white_list_size) { | ||
5553 | /* Select filter policy to accept all advertising */ | ||
5554 | return 0x00; | ||
5555 | } | ||
5556 | |||
5557 | if (hci_find_irk_by_addr(hdev, &params->addr, | ||
5558 | params->addr_type)) { | ||
5559 | /* White list can not be used with RPAs */ | ||
5560 | return 0x00; | ||
5561 | } | ||
5562 | |||
5563 | white_list_entries++; | ||
5564 | add_to_white_list(req, params); | ||
5565 | } | ||
5566 | |||
5567 | /* Select filter policy to use white list */ | ||
5568 | return 0x01; | ||
5569 | } | ||
5570 | |||
5400 | void hci_req_add_le_passive_scan(struct hci_request *req) | 5571 | void hci_req_add_le_passive_scan(struct hci_request *req) |
5401 | { | 5572 | { |
5402 | struct hci_cp_le_set_scan_param param_cp; | 5573 | struct hci_cp_le_set_scan_param param_cp; |
5403 | struct hci_cp_le_set_scan_enable enable_cp; | 5574 | struct hci_cp_le_set_scan_enable enable_cp; |
5404 | struct hci_dev *hdev = req->hdev; | 5575 | struct hci_dev *hdev = req->hdev; |
5405 | u8 own_addr_type; | 5576 | u8 own_addr_type; |
5577 | u8 filter_policy; | ||
5406 | 5578 | ||
5407 | /* Set require_privacy to false since no SCAN_REQ are send | 5579 | /* Set require_privacy to false since no SCAN_REQ are send |
5408 | * during passive scanning. Not using an unresolvable address | 5580 | * during passive scanning. Not using an unresolvable address |
@@ -5413,11 +5585,18 @@ void hci_req_add_le_passive_scan(struct hci_request *req) | |||
5413 | if (hci_update_random_address(req, false, &own_addr_type)) | 5585 | if (hci_update_random_address(req, false, &own_addr_type)) |
5414 | return; | 5586 | return; |
5415 | 5587 | ||
5588 | /* Adding or removing entries from the white list must | ||
5589 | * happen before enabling scanning. The controller does | ||
5590 | * not allow white list modification while scanning. | ||
5591 | */ | ||
5592 | filter_policy = update_white_list(req); | ||
5593 | |||
5416 | memset(&param_cp, 0, sizeof(param_cp)); | 5594 | memset(&param_cp, 0, sizeof(param_cp)); |
5417 | param_cp.type = LE_SCAN_PASSIVE; | 5595 | param_cp.type = LE_SCAN_PASSIVE; |
5418 | param_cp.interval = cpu_to_le16(hdev->le_scan_interval); | 5596 | param_cp.interval = cpu_to_le16(hdev->le_scan_interval); |
5419 | param_cp.window = cpu_to_le16(hdev->le_scan_window); | 5597 | param_cp.window = cpu_to_le16(hdev->le_scan_window); |
5420 | param_cp.own_address_type = own_addr_type; | 5598 | param_cp.own_address_type = own_addr_type; |
5599 | param_cp.filter_policy = filter_policy; | ||
5421 | hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), | 5600 | hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), |
5422 | &param_cp); | 5601 | &param_cp); |
5423 | 5602 | ||
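Boiled down, update_white_list() above only returns filter policy 0x01 ("use
the white list") when every device on the pend_le_conns and pend_le_reports
lists fits into the controller's white list and none of them has an IRK on
file (such devices may advertise under resolvable private addresses, which the
white list cannot match); otherwise it returns 0x00 and leaves the filtering
to the host. A compressed sketch of that decision, with placeholder names
rather than kernel code:

/* Placeholder sketch of the filter-policy decision encoded in
 * update_white_list(); not the kernel implementation.
 */
#define FILTER_POLICY_ACCEPT_ALL        0x00    /* host filters reports */
#define FILTER_POLICY_WHITE_LIST        0x01    /* controller filters */

static unsigned char pick_filter_policy(unsigned int devices_wanted,
                                        unsigned int white_list_size,
                                        int any_device_uses_rpa)
{
        /* The controller must hold every device of interest, and it cannot
         * match devices behind resolvable private addresses, so either
         * condition forces the accept-all policy.
         */
        if (devices_wanted > white_list_size || any_device_uses_rpa)
                return FILTER_POLICY_ACCEPT_ALL;

        return FILTER_POLICY_WHITE_LIST;
}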
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 4c41774aa556..be35598984d9 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -317,7 +317,7 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) | |||
317 | if (param & SCAN_PAGE) | 317 | if (param & SCAN_PAGE) |
318 | set_bit(HCI_PSCAN, &hdev->flags); | 318 | set_bit(HCI_PSCAN, &hdev->flags); |
319 | else | 319 | else |
320 | clear_bit(HCI_ISCAN, &hdev->flags); | 320 | clear_bit(HCI_PSCAN, &hdev->flags); |
321 | 321 | ||
322 | done: | 322 | done: |
323 | hci_dev_unlock(hdev); | 323 | hci_dev_unlock(hdev); |
@@ -2259,6 +2259,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
2259 | break; | 2259 | break; |
2260 | /* Fall through */ | 2260 | /* Fall through */ |
2261 | 2261 | ||
2262 | case HCI_AUTO_CONN_DIRECT: | ||
2262 | case HCI_AUTO_CONN_ALWAYS: | 2263 | case HCI_AUTO_CONN_ALWAYS: |
2263 | list_del_init(&params->action); | 2264 | list_del_init(&params->action); |
2264 | list_add(&params->action, &hdev->pend_le_conns); | 2265 | list_add(&params->action, &hdev->pend_le_conns); |
@@ -3118,7 +3119,7 @@ static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
3118 | hci_conn_drop(conn); | 3119 | hci_conn_drop(conn); |
3119 | } | 3120 | } |
3120 | 3121 | ||
3121 | if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags) && | 3122 | if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) && |
3122 | !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) { | 3123 | !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) { |
3123 | hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, | 3124 | hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, |
3124 | sizeof(ev->bdaddr), &ev->bdaddr); | 3125 | sizeof(ev->bdaddr), &ev->bdaddr); |
@@ -3651,7 +3652,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
3651 | /* Allow pairing if we're pairable, the initiators of the | 3652 | /* Allow pairing if we're pairable, the initiators of the |
3652 | * pairing or if the remote is not requesting bonding. | 3653 | * pairing or if the remote is not requesting bonding. |
3653 | */ | 3654 | */ |
3654 | if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) || | 3655 | if (test_bit(HCI_BONDABLE, &hdev->dev_flags) || |
3655 | test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) || | 3656 | test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) || |
3656 | (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { | 3657 | (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { |
3657 | struct hci_cp_io_capability_reply cp; | 3658 | struct hci_cp_io_capability_reply cp; |
@@ -3670,13 +3671,18 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
3670 | if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && | 3671 | if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && |
3671 | conn->auth_type != HCI_AT_NO_BONDING) | 3672 | conn->auth_type != HCI_AT_NO_BONDING) |
3672 | conn->auth_type |= 0x01; | 3673 | conn->auth_type |= 0x01; |
3673 | |||
3674 | cp.authentication = conn->auth_type; | ||
3675 | } else { | 3674 | } else { |
3676 | conn->auth_type = hci_get_auth_req(conn); | 3675 | conn->auth_type = hci_get_auth_req(conn); |
3677 | cp.authentication = conn->auth_type; | ||
3678 | } | 3676 | } |
3679 | 3677 | ||
3678 | /* If we're not bondable, force one of the non-bondable | ||
3679 | * authentication requirement values. | ||
3680 | */ | ||
3681 | if (!test_bit(HCI_BONDABLE, &hdev->dev_flags)) | ||
3682 | conn->auth_type &= HCI_AT_NO_BONDING_MITM; | ||
3683 | |||
3684 | cp.authentication = conn->auth_type; | ||
3685 | |||
3680 | if (hci_find_remote_oob_data(hdev, &conn->dst) && | 3686 | if (hci_find_remote_oob_data(hdev, &conn->dst) && |
3681 | (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags))) | 3687 | (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags))) |
3682 | cp.oob_data = 0x01; | 3688 | cp.oob_data = 0x01; |
@@ -4251,6 +4257,7 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, | |||
4251 | u8 addr_type, u8 adv_type) | 4257 | u8 addr_type, u8 adv_type) |
4252 | { | 4258 | { |
4253 | struct hci_conn *conn; | 4259 | struct hci_conn *conn; |
4260 | struct hci_conn_params *params; | ||
4254 | 4261 | ||
4255 | /* If the event is not connectable don't proceed further */ | 4262 | /* If the event is not connectable don't proceed further */ |
4256 | if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND) | 4263 | if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND) |
@@ -4266,18 +4273,35 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, | |||
4266 | if (hdev->conn_hash.le_num_slave > 0) | 4273 | if (hdev->conn_hash.le_num_slave > 0) |
4267 | return; | 4274 | return; |
4268 | 4275 | ||
4269 | /* If we're connectable, always connect any ADV_DIRECT_IND event */ | ||
4270 | if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) && | ||
4271 | adv_type == LE_ADV_DIRECT_IND) | ||
4272 | goto connect; | ||
4273 | |||
4274 | /* If we're not connectable only connect devices that we have in | 4276 | /* If we're not connectable only connect devices that we have in |
4275 | * our pend_le_conns list. | 4277 | * our pend_le_conns list. |
4276 | */ | 4278 | */ |
4277 | if (!hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, addr_type)) | 4279 | params = hci_pend_le_action_lookup(&hdev->pend_le_conns, |
4280 | addr, addr_type); | ||
4281 | if (!params) | ||
4282 | return; | ||
4283 | |||
4284 | switch (params->auto_connect) { | ||
4285 | case HCI_AUTO_CONN_DIRECT: | ||
4286 | /* Only devices advertising with ADV_DIRECT_IND are | ||
4287 | * triggering a connection attempt. This is allowing | ||
4288 | * incoming connections from slave devices. | ||
4289 | */ | ||
4290 | if (adv_type != LE_ADV_DIRECT_IND) | ||
4291 | return; | ||
4292 | break; | ||
4293 | case HCI_AUTO_CONN_ALWAYS: | ||
4294 | /* Devices advertising with ADV_IND or ADV_DIRECT_IND | ||
4295 | * are triggering a connection attempt. This means | ||
4296 | * that incoming connectioms from slave device are | ||
4297 | * accepted and also outgoing connections to slave | ||
4298 | * devices are established when found. | ||
4299 | */ | ||
4300 | break; | ||
4301 | default: | ||
4278 | return; | 4302 | return; |
4303 | } | ||
4279 | 4304 | ||
4280 | connect: | ||
4281 | conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW, | 4305 | conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW, |
4282 | HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER); | 4306 | HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER); |
4283 | if (!IS_ERR(conn)) | 4307 | if (!IS_ERR(conn)) |
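The conn->auth_type &= HCI_AT_NO_BONDING_MITM line in the IO capability
handler above is what enforces the bondable requirement mentioned in the pull
text: assuming the usual HCI authentication-requirements encoding (0x00..0x05,
low bit selecting MITM, upper bits selecting no/dedicated/general bonding),
masking with 0x01 keeps the MITM bit while downgrading any bonding request to
"no bonding". A small sketch under that assumption about the constant values:

#include <stdio.h>

/* Assumed HCI authentication requirement values (standard encoding);
 * verify against include/net/bluetooth/hci.h before relying on them.
 */
#define HCI_AT_NO_BONDING               0x00
#define HCI_AT_NO_BONDING_MITM          0x01
#define HCI_AT_DEDICATED_BONDING        0x02
#define HCI_AT_DEDICATED_BONDING_MITM   0x03
#define HCI_AT_GENERAL_BONDING          0x04
#define HCI_AT_GENERAL_BONDING_MITM     0x05

int main(void)
{
        unsigned int auth;

        /* Non-bondable: clear the bonding bits, preserve the MITM bit. */
        for (auth = HCI_AT_NO_BONDING; auth <= HCI_AT_GENERAL_BONDING_MITM; auth++)
                printf("auth_type 0x%02x -> 0x%02x\n",
                       auth, auth & HCI_AT_NO_BONDING_MITM);

        return 0;
}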
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 8181ea4bc2f2..6c7ecf116e74 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -154,7 +154,7 @@ static int hidp_input_event(struct input_dev *dev, unsigned int type, | |||
154 | (!!test_bit(LED_COMPOSE, dev->led) << 3) | | 154 | (!!test_bit(LED_COMPOSE, dev->led) << 3) | |
155 | (!!test_bit(LED_SCROLLL, dev->led) << 2) | | 155 | (!!test_bit(LED_SCROLLL, dev->led) << 2) | |
156 | (!!test_bit(LED_CAPSL, dev->led) << 1) | | 156 | (!!test_bit(LED_CAPSL, dev->led) << 1) | |
157 | (!!test_bit(LED_NUML, dev->led)); | 157 | (!!test_bit(LED_NUML, dev->led) << 0); |
158 | 158 | ||
159 | if (session->leds == newleds) | 159 | if (session->leds == newleds) |
160 | return 0; | 160 | return 0; |
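The hidp change above is a cosmetic cleanup: the LED bitmap already followed
the HID boot keyboard output-report layout (bit 0 Num Lock, bit 1 Caps Lock,
bit 2 Scroll Lock, bit 3 Compose, bit 4 Kana), and the added "<< 0" only makes
the Num Lock position explicit. A standalone sketch of the same packing, with
illustrative names rather than the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Illustrative packing of keyboard LED state into the HID boot-protocol
 * output report byte; struct and field names are placeholders.
 */
struct kbd_leds {
        int numlock, capslock, scrolllock, compose, kana;
};

static uint8_t pack_led_report(const struct kbd_leds *l)
{
        return (uint8_t)((!!l->kana       << 4) |
                         (!!l->compose    << 3) |
                         (!!l->scrolllock << 2) |
                         (!!l->capslock   << 1) |
                         (!!l->numlock    << 0));
}

int main(void)
{
        struct kbd_leds leds = { .numlock = 1, .capslock = 1 };

        printf("LED report byte: 0x%02x\n", pack_led_report(&leds)); /* 0x03 */
        return 0;
}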
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 190668367e42..b8554d429d88 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -44,7 +44,7 @@ static const u16 mgmt_commands[] = { | |||
44 | MGMT_OP_SET_DISCOVERABLE, | 44 | MGMT_OP_SET_DISCOVERABLE, |
45 | MGMT_OP_SET_CONNECTABLE, | 45 | MGMT_OP_SET_CONNECTABLE, |
46 | MGMT_OP_SET_FAST_CONNECTABLE, | 46 | MGMT_OP_SET_FAST_CONNECTABLE, |
47 | MGMT_OP_SET_PAIRABLE, | 47 | MGMT_OP_SET_BONDABLE, |
48 | MGMT_OP_SET_LINK_SECURITY, | 48 | MGMT_OP_SET_LINK_SECURITY, |
49 | MGMT_OP_SET_SSP, | 49 | MGMT_OP_SET_SSP, |
50 | MGMT_OP_SET_HS, | 50 | MGMT_OP_SET_HS, |
@@ -553,7 +553,7 @@ static u32 get_supported_settings(struct hci_dev *hdev) | |||
553 | u32 settings = 0; | 553 | u32 settings = 0; |
554 | 554 | ||
555 | settings |= MGMT_SETTING_POWERED; | 555 | settings |= MGMT_SETTING_POWERED; |
556 | settings |= MGMT_SETTING_PAIRABLE; | 556 | settings |= MGMT_SETTING_BONDABLE; |
557 | settings |= MGMT_SETTING_DEBUG_KEYS; | 557 | settings |= MGMT_SETTING_DEBUG_KEYS; |
558 | settings |= MGMT_SETTING_CONNECTABLE; | 558 | settings |= MGMT_SETTING_CONNECTABLE; |
559 | settings |= MGMT_SETTING_DISCOVERABLE; | 559 | settings |= MGMT_SETTING_DISCOVERABLE; |
@@ -603,8 +603,8 @@ static u32 get_current_settings(struct hci_dev *hdev) | |||
603 | if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) | 603 | if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) |
604 | settings |= MGMT_SETTING_DISCOVERABLE; | 604 | settings |= MGMT_SETTING_DISCOVERABLE; |
605 | 605 | ||
606 | if (test_bit(HCI_PAIRABLE, &hdev->dev_flags)) | 606 | if (test_bit(HCI_BONDABLE, &hdev->dev_flags)) |
607 | settings |= MGMT_SETTING_PAIRABLE; | 607 | settings |= MGMT_SETTING_BONDABLE; |
608 | 608 | ||
609 | if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) | 609 | if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) |
610 | settings |= MGMT_SETTING_BREDR; | 610 | settings |= MGMT_SETTING_BREDR; |
@@ -1086,8 +1086,8 @@ static void enable_advertising(struct hci_request *req) | |||
1086 | return; | 1086 | return; |
1087 | 1087 | ||
1088 | memset(&cp, 0, sizeof(cp)); | 1088 | memset(&cp, 0, sizeof(cp)); |
1089 | cp.min_interval = cpu_to_le16(0x0800); | 1089 | cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval); |
1090 | cp.max_interval = cpu_to_le16(0x0800); | 1090 | cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval); |
1091 | cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND; | 1091 | cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND; |
1092 | cp.own_address_type = own_addr_type; | 1092 | cp.own_address_type = own_addr_type; |
1093 | cp.channel_map = hdev->le_adv_channel_map; | 1093 | cp.channel_map = hdev->le_adv_channel_map; |
@@ -1152,7 +1152,7 @@ static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev) | |||
1152 | * for mgmt we require user-space to explicitly enable | 1152 | * for mgmt we require user-space to explicitly enable |
1153 | * it | 1153 | * it |
1154 | */ | 1154 | */ |
1155 | clear_bit(HCI_PAIRABLE, &hdev->dev_flags); | 1155 | clear_bit(HCI_BONDABLE, &hdev->dev_flags); |
1156 | } | 1156 | } |
1157 | 1157 | ||
1158 | static int read_controller_info(struct sock *sk, struct hci_dev *hdev, | 1158 | static int read_controller_info(struct sock *sk, struct hci_dev *hdev, |
@@ -1881,7 +1881,18 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, | |||
1881 | if (cp->val) { | 1881 | if (cp->val) { |
1882 | scan = SCAN_PAGE; | 1882 | scan = SCAN_PAGE; |
1883 | } else { | 1883 | } else { |
1884 | scan = 0; | 1884 | /* If we don't have any whitelist entries just |
1885 | * disable all scanning. If there are entries | ||
1886 | * and we had both page and inquiry scanning | ||
1887 | * enabled then fall back to only page scanning. | ||
1888 | * Otherwise no changes are needed. | ||
1889 | */ | ||
1890 | if (list_empty(&hdev->whitelist)) | ||
1891 | scan = SCAN_DISABLED; | ||
1892 | else if (test_bit(HCI_ISCAN, &hdev->flags)) | ||
1893 | scan = SCAN_PAGE; | ||
1894 | else | ||
1895 | goto no_scan_update; | ||
1885 | 1896 | ||
1886 | if (test_bit(HCI_ISCAN, &hdev->flags) && | 1897 | if (test_bit(HCI_ISCAN, &hdev->flags) && |
1887 | hdev->discov_timeout > 0) | 1898 | hdev->discov_timeout > 0) |
@@ -1891,6 +1902,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, | |||
1891 | hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); | 1902 | hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); |
1892 | } | 1903 | } |
1893 | 1904 | ||
1905 | no_scan_update: | ||
1894 | /* If we're going from non-connectable to connectable or | 1906 | /* If we're going from non-connectable to connectable or |
1895 | * vice-versa when fast connectable is enabled ensure that fast | 1907 | * vice-versa when fast connectable is enabled ensure that fast |
1896 | * connectable gets disabled. write_fast_connectable won't do | 1908 | * connectable gets disabled. write_fast_connectable won't do |
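The set_connectable() hunk above replaces the unconditional scan = 0 with a whitelist-aware fallback. Reduced to a pure decision function, the logic looks roughly like the sketch below; SCAN_DISABLED/SCAN_PAGE are defined locally with the usual HCI scan-enable values, and the -1 return models the goto no_scan_update path.

#include <stdbool.h>
#include <stdio.h>

#define SCAN_DISABLED 0x00
#define SCAN_PAGE     0x02

/* Returns the scan mode to program, or -1 when no HCI write is needed. */
static int scan_mode_when_leaving_connectable(bool whitelist_empty,
					      bool inquiry_scan_enabled)
{
	if (whitelist_empty)
		return SCAN_DISABLED;	/* nothing to accept, stop scanning */
	if (inquiry_scan_enabled)
		return SCAN_PAGE;	/* keep page scan for whitelisted devices */
	return -1;			/* already page-scan only, leave as is */
}

int main(void)
{
	printf("%d %d %d\n",
	       scan_mode_when_leaving_connectable(true, true),
	       scan_mode_when_leaving_connectable(false, true),
	       scan_mode_when_leaving_connectable(false, false));
	return 0;
}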
@@ -1918,7 +1930,7 @@ failed: | |||
1918 | return err; | 1930 | return err; |
1919 | } | 1931 | } |
1920 | 1932 | ||
1921 | static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data, | 1933 | static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data, |
1922 | u16 len) | 1934 | u16 len) |
1923 | { | 1935 | { |
1924 | struct mgmt_mode *cp = data; | 1936 | struct mgmt_mode *cp = data; |
@@ -1928,17 +1940,17 @@ static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data, | |||
1928 | BT_DBG("request for %s", hdev->name); | 1940 | BT_DBG("request for %s", hdev->name); |
1929 | 1941 | ||
1930 | if (cp->val != 0x00 && cp->val != 0x01) | 1942 | if (cp->val != 0x00 && cp->val != 0x01) |
1931 | return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE, | 1943 | return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE, |
1932 | MGMT_STATUS_INVALID_PARAMS); | 1944 | MGMT_STATUS_INVALID_PARAMS); |
1933 | 1945 | ||
1934 | hci_dev_lock(hdev); | 1946 | hci_dev_lock(hdev); |
1935 | 1947 | ||
1936 | if (cp->val) | 1948 | if (cp->val) |
1937 | changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags); | 1949 | changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags); |
1938 | else | 1950 | else |
1939 | changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags); | 1951 | changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags); |
1940 | 1952 | ||
1941 | err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev); | 1953 | err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev); |
1942 | if (err < 0) | 1954 | if (err < 0) |
1943 | goto unlock; | 1955 | goto unlock; |
1944 | 1956 | ||
@@ -2264,7 +2276,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) | |||
2264 | 2276 | ||
2265 | if (val) { | 2277 | if (val) { |
2266 | hci_cp.le = val; | 2278 | hci_cp.le = val; |
2267 | hci_cp.simul = lmp_le_br_capable(hdev); | 2279 | hci_cp.simul = 0x00; |
2268 | } else { | 2280 | } else { |
2269 | if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) | 2281 | if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) |
2270 | disable_advertising(&req); | 2282 | disable_advertising(&req); |
@@ -3201,7 +3213,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, | |||
3201 | conn->io_capability = cp->io_cap; | 3213 | conn->io_capability = cp->io_cap; |
3202 | cmd->user_data = conn; | 3214 | cmd->user_data = conn; |
3203 | 3215 | ||
3204 | if (conn->state == BT_CONNECTED && | 3216 | if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) && |
3205 | hci_conn_security(conn, sec_level, auth_type, true)) | 3217 | hci_conn_security(conn, sec_level, auth_type, true)) |
3206 | pairing_complete(cmd, 0); | 3218 | pairing_complete(cmd, 0); |
3207 | 3219 | ||
@@ -5271,7 +5283,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, | |||
5271 | MGMT_STATUS_INVALID_PARAMS, | 5283 | MGMT_STATUS_INVALID_PARAMS, |
5272 | &cp->addr, sizeof(cp->addr)); | 5284 | &cp->addr, sizeof(cp->addr)); |
5273 | 5285 | ||
5274 | if (cp->action != 0x00 && cp->action != 0x01) | 5286 | if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02) |
5275 | return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, | 5287 | return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, |
5276 | MGMT_STATUS_INVALID_PARAMS, | 5288 | MGMT_STATUS_INVALID_PARAMS, |
5277 | &cp->addr, sizeof(cp->addr)); | 5289 | &cp->addr, sizeof(cp->addr)); |
@@ -5281,7 +5293,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, | |||
5281 | if (cp->addr.type == BDADDR_BREDR) { | 5293 | if (cp->addr.type == BDADDR_BREDR) { |
5282 | bool update_scan; | 5294 | bool update_scan; |
5283 | 5295 | ||
5284 | /* Only "connect" action supported for now */ | 5296 | /* Only incoming connections action is supported for now */ |
5285 | if (cp->action != 0x01) { | 5297 | if (cp->action != 0x01) { |
5286 | err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, | 5298 | err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, |
5287 | MGMT_STATUS_INVALID_PARAMS, | 5299 | MGMT_STATUS_INVALID_PARAMS, |
@@ -5307,8 +5319,10 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, | |||
5307 | else | 5319 | else |
5308 | addr_type = ADDR_LE_DEV_RANDOM; | 5320 | addr_type = ADDR_LE_DEV_RANDOM; |
5309 | 5321 | ||
5310 | if (cp->action) | 5322 | if (cp->action == 0x02) |
5311 | auto_conn = HCI_AUTO_CONN_ALWAYS; | 5323 | auto_conn = HCI_AUTO_CONN_ALWAYS; |
5324 | else if (cp->action == 0x01) | ||
5325 | auto_conn = HCI_AUTO_CONN_DIRECT; | ||
5312 | else | 5326 | else |
5313 | auto_conn = HCI_AUTO_CONN_REPORT; | 5327 | auto_conn = HCI_AUTO_CONN_REPORT; |
5314 | 5328 | ||
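The add_device() changes above accept a third action value and map it onto the new direct-connect policy. A stand-alone sketch of that mapping, with a local enum standing in for the kernel's HCI_AUTO_CONN_* constants:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's auto-connect policies. */
enum auto_conn { AUTO_CONN_REPORT, AUTO_CONN_DIRECT, AUTO_CONN_ALWAYS };

static int action_to_auto_conn(uint8_t action, enum auto_conn *out)
{
	switch (action) {
	case 0x02: *out = AUTO_CONN_ALWAYS; return 0;	/* connect whenever seen */
	case 0x01: *out = AUTO_CONN_DIRECT; return 0;	/* connect on directed adv */
	case 0x00: *out = AUTO_CONN_REPORT; return 0;	/* only report the device */
	default:   return -1;	/* rejected as MGMT_STATUS_INVALID_PARAMS */
	}
}

int main(void)
{
	enum auto_conn policy;
	for (uint8_t a = 0; a <= 3; a++)
		printf("action 0x%02x -> %d\n", a,
		       action_to_auto_conn(a, &policy) ? -1 : (int)policy);
	return 0;
}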
@@ -5665,7 +5679,7 @@ static const struct mgmt_handler { | |||
5665 | { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE }, | 5679 | { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE }, |
5666 | { set_connectable, false, MGMT_SETTING_SIZE }, | 5680 | { set_connectable, false, MGMT_SETTING_SIZE }, |
5667 | { set_fast_connectable, false, MGMT_SETTING_SIZE }, | 5681 | { set_fast_connectable, false, MGMT_SETTING_SIZE }, |
5668 | { set_pairable, false, MGMT_SETTING_SIZE }, | 5682 | { set_bondable, false, MGMT_SETTING_SIZE }, |
5669 | { set_link_security, false, MGMT_SETTING_SIZE }, | 5683 | { set_link_security, false, MGMT_SETTING_SIZE }, |
5670 | { set_ssp, false, MGMT_SETTING_SIZE }, | 5684 | { set_ssp, false, MGMT_SETTING_SIZE }, |
5671 | { set_hs, false, MGMT_SETTING_SIZE }, | 5685 | { set_hs, false, MGMT_SETTING_SIZE }, |
@@ -5870,6 +5884,7 @@ static void restart_le_actions(struct hci_dev *hdev) | |||
5870 | list_del_init(&p->action); | 5884 | list_del_init(&p->action); |
5871 | 5885 | ||
5872 | switch (p->auto_connect) { | 5886 | switch (p->auto_connect) { |
5887 | case HCI_AUTO_CONN_DIRECT: | ||
5873 | case HCI_AUTO_CONN_ALWAYS: | 5888 | case HCI_AUTO_CONN_ALWAYS: |
5874 | list_add(&p->action, &hdev->pend_le_conns); | 5889 | list_add(&p->action, &hdev->pend_le_conns); |
5875 | break; | 5890 | break; |
@@ -5922,8 +5937,8 @@ static int powered_update_hci(struct hci_dev *hdev) | |||
5922 | lmp_bredr_capable(hdev)) { | 5937 | lmp_bredr_capable(hdev)) { |
5923 | struct hci_cp_write_le_host_supported cp; | 5938 | struct hci_cp_write_le_host_supported cp; |
5924 | 5939 | ||
5925 | cp.le = 1; | 5940 | cp.le = 0x01; |
5926 | cp.simul = lmp_le_br_capable(hdev); | 5941 | cp.simul = 0x00; |
5927 | 5942 | ||
5928 | /* Check first if we already have the right | 5943 | /* Check first if we already have the right |
5929 | * host state (host features set) | 5944 | * host state (host features set) |
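Both the set_le() and powered_update_hci() hunks above now build HCI_Write_LE_Host_Supported with the "Simultaneous LE and BR/EDR" bit cleared, in line with the 4.1 conformance work mentioned in the merge description. A tiny sketch of assembling that two-byte parameter block, using a local struct rather than the kernel's hci_cp_write_le_host_supported:

#include <stdint.h>
#include <stdio.h>

struct le_host_supported {
	uint8_t le;	/* LE Supported (Host) */
	uint8_t simul;	/* Simultaneous LE and BR/EDR (Host) */
};

static struct le_host_supported le_host_params(int enable_le)
{
	struct le_host_supported cp = {
		.le    = enable_le ? 0x01 : 0x00,
		.simul = 0x00,	/* never advertise simultaneous LE/BR-EDR */
	};
	return cp;
}

int main(void)
{
	struct le_host_supported cp = le_host_params(1);
	printf("le=0x%02x simul=0x%02x\n", cp.le, cp.simul);
	return 0;
}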
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index a0690a84f3e9..af73bc3acb40 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c | |||
@@ -1910,10 +1910,13 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s) | |||
1910 | /* Get data directly from socket receive queue without copying it. */ | 1910 | /* Get data directly from socket receive queue without copying it. */ |
1911 | while ((skb = skb_dequeue(&sk->sk_receive_queue))) { | 1911 | while ((skb = skb_dequeue(&sk->sk_receive_queue))) { |
1912 | skb_orphan(skb); | 1912 | skb_orphan(skb); |
1913 | if (!skb_linearize(skb)) | 1913 | if (!skb_linearize(skb)) { |
1914 | s = rfcomm_recv_frame(s, skb); | 1914 | s = rfcomm_recv_frame(s, skb); |
1915 | else | 1915 | if (!s) |
1916 | break; | ||
1917 | } else { | ||
1916 | kfree_skb(skb); | 1918 | kfree_skb(skb); |
1919 | } | ||
1917 | } | 1920 | } |
1918 | 1921 | ||
1919 | if (s && (sk->sk_state == BT_CLOSED)) | 1922 | if (s && (sk->sk_state == BT_CLOSED)) |
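The rfcomm_process_rx() fix above stops the receive loop as soon as rfcomm_recv_frame() returns NULL, i.e. once the session has been freed, instead of dereferencing it on the next iteration. The pattern, reduced to a self-contained sketch with a hypothetical process_frame() that may release the session:

#include <stdio.h>
#include <stdlib.h>

struct session { int refs; };
struct frame   { int closes_session; };

/* Hypothetical receive hook: may tear the session down and return NULL. */
static struct session *process_frame(struct session *s, struct frame *f)
{
	if (f->closes_session) {
		free(s);
		return NULL;
	}
	return s;
}

static struct session *drain(struct session *s, struct frame *q, int n)
{
	for (int i = 0; i < n && s; i++) {
		s = process_frame(s, &q[i]);
		if (!s)
			break;	/* session gone: stop, never touch it again */
	}
	return s;
}

int main(void)
{
	struct session *s = calloc(1, sizeof(*s));
	struct frame q[3] = { {0}, {1}, {0} };
	s = drain(s, q, 3);
	printf("session %s\n", s ? "alive" : "closed");
	free(s);
	return 0;
}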
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index e49c83d8b957..fd3294300803 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c | |||
@@ -307,7 +307,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn, | |||
307 | struct hci_dev *hdev = hcon->hdev; | 307 | struct hci_dev *hdev = hcon->hdev; |
308 | u8 local_dist = 0, remote_dist = 0; | 308 | u8 local_dist = 0, remote_dist = 0; |
309 | 309 | ||
310 | if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->dev_flags)) { | 310 | if (test_bit(HCI_BONDABLE, &conn->hcon->hdev->dev_flags)) { |
311 | local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; | 311 | local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; |
312 | remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; | 312 | remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; |
313 | authreq |= SMP_AUTH_BONDING; | 313 | authreq |= SMP_AUTH_BONDING; |
@@ -579,13 +579,16 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) | |||
579 | struct smp_chan *smp; | 579 | struct smp_chan *smp; |
580 | 580 | ||
581 | smp = kzalloc(sizeof(*smp), GFP_ATOMIC); | 581 | smp = kzalloc(sizeof(*smp), GFP_ATOMIC); |
582 | if (!smp) | 582 | if (!smp) { |
583 | clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags); | ||
583 | return NULL; | 584 | return NULL; |
585 | } | ||
584 | 586 | ||
585 | smp->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC); | 587 | smp->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC); |
586 | if (IS_ERR(smp->tfm_aes)) { | 588 | if (IS_ERR(smp->tfm_aes)) { |
587 | BT_ERR("Unable to create ECB crypto context"); | 589 | BT_ERR("Unable to create ECB crypto context"); |
588 | kfree(smp); | 590 | kfree(smp); |
591 | clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags); | ||
589 | return NULL; | 592 | return NULL; |
590 | } | 593 | } |
591 | 594 | ||
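The smp_chan_create() hunk above makes sure HCI_CONN_LE_SMP_PEND is cleared on both failure paths, so a later pairing attempt is not blocked by a stale flag. The shape of the fix, as a hedged sketch with stand-in types:

#include <stdio.h>
#include <stdlib.h>

struct conn { int smp_pending; };
struct smp  { int placeholder; };

static struct smp *smp_create(struct conn *c, int fail_alloc)
{
	struct smp *smp = fail_alloc ? NULL : calloc(1, sizeof(*smp));

	if (!smp) {
		/* the caller set smp_pending before calling us; undo it so
		 * a later security request can start pairing again */
		c->smp_pending = 0;
		return NULL;
	}
	return smp;
}

int main(void)
{
	struct conn c = { .smp_pending = 1 };
	struct smp *s = smp_create(&c, 1);
	printf("smp=%p pending=%d\n", (void *)s, c.smp_pending);
	free(s);
	return 0;
}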
@@ -701,7 +704,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) | |||
701 | if (!smp) | 704 | if (!smp) |
702 | return SMP_UNSPECIFIED; | 705 | return SMP_UNSPECIFIED; |
703 | 706 | ||
704 | if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags) && | 707 | if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) && |
705 | (req->auth_req & SMP_AUTH_BONDING)) | 708 | (req->auth_req & SMP_AUTH_BONDING)) |
706 | return SMP_PAIRING_NOTSUPP; | 709 | return SMP_PAIRING_NOTSUPP; |
707 | 710 | ||
@@ -923,14 +926,14 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) | |||
923 | if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) | 926 | if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) |
924 | return 0; | 927 | return 0; |
925 | 928 | ||
926 | if (!test_bit(HCI_PAIRABLE, &hcon->hdev->dev_flags) && | ||
927 | (rp->auth_req & SMP_AUTH_BONDING)) | ||
928 | return SMP_PAIRING_NOTSUPP; | ||
929 | |||
930 | smp = smp_chan_create(conn); | 929 | smp = smp_chan_create(conn); |
931 | if (!smp) | 930 | if (!smp) |
932 | return SMP_UNSPECIFIED; | 931 | return SMP_UNSPECIFIED; |
933 | 932 | ||
933 | if (!test_bit(HCI_BONDABLE, &hcon->hdev->dev_flags) && | ||
934 | (rp->auth_req & SMP_AUTH_BONDING)) | ||
935 | return SMP_PAIRING_NOTSUPP; | ||
936 | |||
934 | skb_pull(skb, sizeof(*rp)); | 937 | skb_pull(skb, sizeof(*rp)); |
935 | 938 | ||
936 | memset(&cp, 0, sizeof(cp)); | 939 | memset(&cp, 0, sizeof(cp)); |
@@ -1291,6 +1294,22 @@ static void smp_notify_keys(struct l2cap_conn *conn) | |||
1291 | bacpy(&hcon->dst, &smp->remote_irk->bdaddr); | 1294 | bacpy(&hcon->dst, &smp->remote_irk->bdaddr); |
1292 | hcon->dst_type = smp->remote_irk->addr_type; | 1295 | hcon->dst_type = smp->remote_irk->addr_type; |
1293 | l2cap_conn_update_id_addr(hcon); | 1296 | l2cap_conn_update_id_addr(hcon); |
1297 | |||
1298 | /* When receiving an identity resolving key for | ||
1299 | * a remote device that does not use a resolvable | ||
1300 | * private address, just remove the key so that | ||
1301 | * it is possible to use the controller white | ||
1302 | * list for scanning. | ||
1303 | * | ||
1304 | * Userspace will have been told to not store | ||
1305 | * this key at this point. So it is safe to | ||
1306 | * just remove it. | ||
1307 | */ | ||
1308 | if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) { | ||
1309 | list_del(&smp->remote_irk->list); | ||
1310 | kfree(smp->remote_irk); | ||
1311 | smp->remote_irk = NULL; | ||
1312 | } | ||
1294 | } | 1313 | } |
1295 | 1314 | ||
1296 | /* The LTKs and CSRKs should be persistent only if both sides | 1315 | /* The LTKs and CSRKs should be persistent only if both sides |
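The smp_notify_keys() addition above drops a received IRK when the remote device's stored RPA is the all-zero BDADDR_ANY address, i.e. when the device never used a resolvable private address, so the controller whitelist stays usable for scanning. A minimal stand-alone illustration of that check, with a six-byte struct standing in for bdaddr_t:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef struct { unsigned char b[6]; } bdaddr;

static const bdaddr ADDR_ANY = { { 0, 0, 0, 0, 0, 0 } };

/* True when the IRK belongs to a device whose stored RPA is all zeroes,
 * i.e. a device that does not use resolvable private addresses. */
static bool irk_should_be_dropped(const bdaddr *rpa)
{
	return memcmp(rpa, &ADDR_ANY, sizeof(*rpa)) == 0;
}

int main(void)
{
	bdaddr rpa = { { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc } };
	printf("%d %d\n", irk_should_be_dropped(&ADDR_ANY),
	       irk_should_be_dropped(&rpa));	/* 1 0 */
	return 0;
}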
diff --git a/net/nfc/digital.h b/net/nfc/digital.h index 71ad7eefddd4..3c39c72eb038 100644 --- a/net/nfc/digital.h +++ b/net/nfc/digital.h | |||
@@ -29,6 +29,7 @@ | |||
29 | #define DIGITAL_CMD_TG_SEND 1 | 29 | #define DIGITAL_CMD_TG_SEND 1 |
30 | #define DIGITAL_CMD_TG_LISTEN 2 | 30 | #define DIGITAL_CMD_TG_LISTEN 2 |
31 | #define DIGITAL_CMD_TG_LISTEN_MDAA 3 | 31 | #define DIGITAL_CMD_TG_LISTEN_MDAA 3 |
32 | #define DIGITAL_CMD_TG_LISTEN_MD 4 | ||
32 | 33 | ||
33 | #define DIGITAL_MAX_HEADER_LEN 7 | 34 | #define DIGITAL_MAX_HEADER_LEN 7 |
34 | #define DIGITAL_CRC_LEN 2 | 35 | #define DIGITAL_CRC_LEN 2 |
@@ -121,6 +122,8 @@ int digital_tg_send_dep_res(struct nfc_digital_dev *ddev, struct sk_buff *skb); | |||
121 | 122 | ||
122 | int digital_tg_listen_nfca(struct nfc_digital_dev *ddev, u8 rf_tech); | 123 | int digital_tg_listen_nfca(struct nfc_digital_dev *ddev, u8 rf_tech); |
123 | int digital_tg_listen_nfcf(struct nfc_digital_dev *ddev, u8 rf_tech); | 124 | int digital_tg_listen_nfcf(struct nfc_digital_dev *ddev, u8 rf_tech); |
125 | void digital_tg_recv_md_req(struct nfc_digital_dev *ddev, void *arg, | ||
126 | struct sk_buff *resp); | ||
124 | 127 | ||
125 | typedef u16 (*crc_func_t)(u16, const u8 *, size_t); | 128 | typedef u16 (*crc_func_t)(u16, const u8 *, size_t); |
126 | 129 | ||
diff --git a/net/nfc/digital_core.c b/net/nfc/digital_core.c index a6ce3c627e4e..009bcf317101 100644 --- a/net/nfc/digital_core.c +++ b/net/nfc/digital_core.c | |||
@@ -201,6 +201,11 @@ static void digital_wq_cmd(struct work_struct *work) | |||
201 | digital_send_cmd_complete, cmd); | 201 | digital_send_cmd_complete, cmd); |
202 | break; | 202 | break; |
203 | 203 | ||
204 | case DIGITAL_CMD_TG_LISTEN_MD: | ||
205 | rc = ddev->ops->tg_listen_md(ddev, cmd->timeout, | ||
206 | digital_send_cmd_complete, cmd); | ||
207 | break; | ||
208 | |||
204 | default: | 209 | default: |
205 | pr_err("Unknown cmd type %d\n", cmd->type); | 210 | pr_err("Unknown cmd type %d\n", cmd->type); |
206 | return; | 211 | return; |
@@ -293,12 +298,19 @@ static int digital_tg_listen_mdaa(struct nfc_digital_dev *ddev, u8 rf_tech) | |||
293 | 500, digital_tg_recv_atr_req, NULL); | 298 | 500, digital_tg_recv_atr_req, NULL); |
294 | } | 299 | } |
295 | 300 | ||
301 | static int digital_tg_listen_md(struct nfc_digital_dev *ddev, u8 rf_tech) | ||
302 | { | ||
303 | return digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN_MD, NULL, NULL, 500, | ||
304 | digital_tg_recv_md_req, NULL); | ||
305 | } | ||
306 | |||
296 | int digital_target_found(struct nfc_digital_dev *ddev, | 307 | int digital_target_found(struct nfc_digital_dev *ddev, |
297 | struct nfc_target *target, u8 protocol) | 308 | struct nfc_target *target, u8 protocol) |
298 | { | 309 | { |
299 | int rc; | 310 | int rc; |
300 | u8 framing; | 311 | u8 framing; |
301 | u8 rf_tech; | 312 | u8 rf_tech; |
313 | u8 poll_tech_count; | ||
302 | int (*check_crc)(struct sk_buff *skb); | 314 | int (*check_crc)(struct sk_buff *skb); |
303 | void (*add_crc)(struct sk_buff *skb); | 315 | void (*add_crc)(struct sk_buff *skb); |
304 | 316 | ||
@@ -375,12 +387,16 @@ int digital_target_found(struct nfc_digital_dev *ddev, | |||
375 | return rc; | 387 | return rc; |
376 | 388 | ||
377 | target->supported_protocols = (1 << protocol); | 389 | target->supported_protocols = (1 << protocol); |
378 | rc = nfc_targets_found(ddev->nfc_dev, target, 1); | ||
379 | if (rc) | ||
380 | return rc; | ||
381 | 390 | ||
391 | poll_tech_count = ddev->poll_tech_count; | ||
382 | ddev->poll_tech_count = 0; | 392 | ddev->poll_tech_count = 0; |
383 | 393 | ||
394 | rc = nfc_targets_found(ddev->nfc_dev, target, 1); | ||
395 | if (rc) { | ||
396 | ddev->poll_tech_count = poll_tech_count; | ||
397 | return rc; | ||
398 | } | ||
399 | |||
384 | return 0; | 400 | return 0; |
385 | } | 401 | } |
386 | 402 | ||
@@ -505,6 +521,9 @@ static int digital_start_poll(struct nfc_dev *nfc_dev, __u32 im_protocols, | |||
505 | if (ddev->ops->tg_listen_mdaa) { | 521 | if (ddev->ops->tg_listen_mdaa) { |
506 | digital_add_poll_tech(ddev, 0, | 522 | digital_add_poll_tech(ddev, 0, |
507 | digital_tg_listen_mdaa); | 523 | digital_tg_listen_mdaa); |
524 | } else if (ddev->ops->tg_listen_md) { | ||
525 | digital_add_poll_tech(ddev, 0, | ||
526 | digital_tg_listen_md); | ||
508 | } else { | 527 | } else { |
509 | digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106A, | 528 | digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106A, |
510 | digital_tg_listen_nfca); | 529 | digital_tg_listen_nfca); |
@@ -732,7 +751,7 @@ struct nfc_digital_dev *nfc_digital_allocate_device(struct nfc_digital_ops *ops, | |||
732 | 751 | ||
733 | if (!ops->in_configure_hw || !ops->in_send_cmd || !ops->tg_listen || | 752 | if (!ops->in_configure_hw || !ops->in_send_cmd || !ops->tg_listen || |
734 | !ops->tg_configure_hw || !ops->tg_send_cmd || !ops->abort_cmd || | 753 | !ops->tg_configure_hw || !ops->tg_send_cmd || !ops->abort_cmd || |
735 | !ops->switch_rf) | 754 | !ops->switch_rf || (ops->tg_listen_md && !ops->tg_get_rf_tech)) |
736 | return NULL; | 755 | return NULL; |
737 | 756 | ||
738 | ddev = kzalloc(sizeof(struct nfc_digital_dev), GFP_KERNEL); | 757 | ddev = kzalloc(sizeof(struct nfc_digital_dev), GFP_KERNEL); |
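The digital_start_poll() change in digital_core.c above slots the new tg_listen_md callback between tg_listen_mdaa and the per-technology listeners. A compact model of that priority order, with plain ints standing in for the presence of the driver callbacks:

#include <stdio.h>

enum listen_mode { LISTEN_MDAA, LISTEN_MD, LISTEN_PER_TECH };

static enum listen_mode pick_listen_mode(int has_tg_listen_mdaa, int has_tg_listen_md)
{
	if (has_tg_listen_mdaa)
		return LISTEN_MDAA;	/* mode detection + automatic anticollision */
	if (has_tg_listen_md)
		return LISTEN_MD;	/* mode detection only, added by this series */
	return LISTEN_PER_TECH;		/* register NFC-A and NFC-F listeners */
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_listen_mode(1, 1),
	       pick_listen_mode(0, 1),
	       pick_listen_mode(0, 0));
	return 0;
}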
diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c index 37deb173c956..e1638dab076d 100644 --- a/net/nfc/digital_dep.c +++ b/net/nfc/digital_dep.c | |||
@@ -671,6 +671,7 @@ void digital_tg_recv_atr_req(struct nfc_digital_dev *ddev, void *arg, | |||
671 | int rc; | 671 | int rc; |
672 | struct digital_atr_req *atr_req; | 672 | struct digital_atr_req *atr_req; |
673 | size_t gb_len, min_size; | 673 | size_t gb_len, min_size; |
674 | u8 poll_tech_count; | ||
674 | 675 | ||
675 | if (IS_ERR(resp)) { | 676 | if (IS_ERR(resp)) { |
676 | rc = PTR_ERR(resp); | 677 | rc = PTR_ERR(resp); |
@@ -728,12 +729,16 @@ void digital_tg_recv_atr_req(struct nfc_digital_dev *ddev, void *arg, | |||
728 | goto exit; | 729 | goto exit; |
729 | 730 | ||
730 | gb_len = resp->len - sizeof(struct digital_atr_req); | 731 | gb_len = resp->len - sizeof(struct digital_atr_req); |
732 | |||
733 | poll_tech_count = ddev->poll_tech_count; | ||
734 | ddev->poll_tech_count = 0; | ||
735 | |||
731 | rc = nfc_tm_activated(ddev->nfc_dev, NFC_PROTO_NFC_DEP_MASK, | 736 | rc = nfc_tm_activated(ddev->nfc_dev, NFC_PROTO_NFC_DEP_MASK, |
732 | NFC_COMM_PASSIVE, atr_req->gb, gb_len); | 737 | NFC_COMM_PASSIVE, atr_req->gb, gb_len); |
733 | if (rc) | 738 | if (rc) { |
739 | ddev->poll_tech_count = poll_tech_count; | ||
734 | goto exit; | 740 | goto exit; |
735 | 741 | } | |
736 | ddev->poll_tech_count = 0; | ||
737 | 742 | ||
738 | rc = 0; | 743 | rc = 0; |
739 | exit: | 744 | exit: |
diff --git a/net/nfc/digital_technology.c b/net/nfc/digital_technology.c index c2c1c0189b7c..fb58ed2dd41d 100644 --- a/net/nfc/digital_technology.c +++ b/net/nfc/digital_technology.c | |||
@@ -318,6 +318,8 @@ static void digital_in_recv_sel_res(struct nfc_digital_dev *ddev, void *arg, | |||
318 | 318 | ||
319 | if (DIGITAL_SEL_RES_IS_T2T(sel_res)) { | 319 | if (DIGITAL_SEL_RES_IS_T2T(sel_res)) { |
320 | nfc_proto = NFC_PROTO_MIFARE; | 320 | nfc_proto = NFC_PROTO_MIFARE; |
321 | } else if (DIGITAL_SEL_RES_IS_NFC_DEP(sel_res)) { | ||
322 | nfc_proto = NFC_PROTO_NFC_DEP; | ||
321 | } else if (DIGITAL_SEL_RES_IS_T4T(sel_res)) { | 323 | } else if (DIGITAL_SEL_RES_IS_T4T(sel_res)) { |
322 | rc = digital_in_send_rats(ddev, target); | 324 | rc = digital_in_send_rats(ddev, target); |
323 | if (rc) | 325 | if (rc) |
@@ -327,8 +329,6 @@ static void digital_in_recv_sel_res(struct nfc_digital_dev *ddev, void *arg, | |||
327 | * done when receiving the ATS | 329 | * done when receiving the ATS |
328 | */ | 330 | */ |
329 | goto exit_free_skb; | 331 | goto exit_free_skb; |
330 | } else if (DIGITAL_SEL_RES_IS_NFC_DEP(sel_res)) { | ||
331 | nfc_proto = NFC_PROTO_NFC_DEP; | ||
332 | } else { | 332 | } else { |
333 | rc = -EOPNOTSUPP; | 333 | rc = -EOPNOTSUPP; |
334 | goto exit; | 334 | goto exit; |
@@ -944,6 +944,13 @@ static int digital_tg_send_sel_res(struct nfc_digital_dev *ddev) | |||
944 | if (!DIGITAL_DRV_CAPS_TG_CRC(ddev)) | 944 | if (!DIGITAL_DRV_CAPS_TG_CRC(ddev)) |
945 | digital_skb_add_crc_a(skb); | 945 | digital_skb_add_crc_a(skb); |
946 | 946 | ||
947 | rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, | ||
948 | NFC_DIGITAL_FRAMING_NFCA_ANTICOL_COMPLETE); | ||
949 | if (rc) { | ||
950 | kfree_skb(skb); | ||
951 | return rc; | ||
952 | } | ||
953 | |||
947 | rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_atr_req, | 954 | rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_atr_req, |
948 | NULL); | 955 | NULL); |
949 | if (rc) | 956 | if (rc) |
@@ -1002,6 +1009,13 @@ static int digital_tg_send_sdd_res(struct nfc_digital_dev *ddev) | |||
1002 | for (i = 0; i < 4; i++) | 1009 | for (i = 0; i < 4; i++) |
1003 | sdd_res->bcc ^= sdd_res->nfcid1[i]; | 1010 | sdd_res->bcc ^= sdd_res->nfcid1[i]; |
1004 | 1011 | ||
1012 | rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, | ||
1013 | NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A); | ||
1014 | if (rc) { | ||
1015 | kfree_skb(skb); | ||
1016 | return rc; | ||
1017 | } | ||
1018 | |||
1005 | rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_sel_req, | 1019 | rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_sel_req, |
1006 | NULL); | 1020 | NULL); |
1007 | if (rc) | 1021 | if (rc) |
@@ -1054,6 +1068,13 @@ static int digital_tg_send_sens_res(struct nfc_digital_dev *ddev) | |||
1054 | sens_res[0] = (DIGITAL_SENS_RES_NFC_DEP >> 8) & 0xFF; | 1068 | sens_res[0] = (DIGITAL_SENS_RES_NFC_DEP >> 8) & 0xFF; |
1055 | sens_res[1] = DIGITAL_SENS_RES_NFC_DEP & 0xFF; | 1069 | sens_res[1] = DIGITAL_SENS_RES_NFC_DEP & 0xFF; |
1056 | 1070 | ||
1071 | rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, | ||
1072 | NFC_DIGITAL_FRAMING_NFCA_STANDARD); | ||
1073 | if (rc) { | ||
1074 | kfree_skb(skb); | ||
1075 | return rc; | ||
1076 | } | ||
1077 | |||
1057 | rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_sdd_req, | 1078 | rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_sdd_req, |
1058 | NULL); | 1079 | NULL); |
1059 | if (rc) | 1080 | if (rc) |
@@ -1197,33 +1218,48 @@ exit: | |||
1197 | dev_kfree_skb(resp); | 1218 | dev_kfree_skb(resp); |
1198 | } | 1219 | } |
1199 | 1220 | ||
1200 | int digital_tg_listen_nfca(struct nfc_digital_dev *ddev, u8 rf_tech) | 1221 | static int digital_tg_config_nfca(struct nfc_digital_dev *ddev) |
1201 | { | 1222 | { |
1202 | int rc; | 1223 | int rc; |
1203 | 1224 | ||
1204 | rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, rf_tech); | 1225 | rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, |
1226 | NFC_DIGITAL_RF_TECH_106A); | ||
1205 | if (rc) | 1227 | if (rc) |
1206 | return rc; | 1228 | return rc; |
1207 | 1229 | ||
1208 | rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, | 1230 | return digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, |
1209 | NFC_DIGITAL_FRAMING_NFCA_NFC_DEP); | 1231 | NFC_DIGITAL_FRAMING_NFCA_NFC_DEP); |
1232 | } | ||
1233 | |||
1234 | int digital_tg_listen_nfca(struct nfc_digital_dev *ddev, u8 rf_tech) | ||
1235 | { | ||
1236 | int rc; | ||
1237 | |||
1238 | rc = digital_tg_config_nfca(ddev); | ||
1210 | if (rc) | 1239 | if (rc) |
1211 | return rc; | 1240 | return rc; |
1212 | 1241 | ||
1213 | return digital_tg_listen(ddev, 300, digital_tg_recv_sens_req, NULL); | 1242 | return digital_tg_listen(ddev, 300, digital_tg_recv_sens_req, NULL); |
1214 | } | 1243 | } |
1215 | 1244 | ||
1216 | int digital_tg_listen_nfcf(struct nfc_digital_dev *ddev, u8 rf_tech) | 1245 | static int digital_tg_config_nfcf(struct nfc_digital_dev *ddev, u8 rf_tech) |
1217 | { | 1246 | { |
1218 | int rc; | 1247 | int rc; |
1219 | u8 *nfcid2; | ||
1220 | 1248 | ||
1221 | rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, rf_tech); | 1249 | rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, rf_tech); |
1222 | if (rc) | 1250 | if (rc) |
1223 | return rc; | 1251 | return rc; |
1224 | 1252 | ||
1225 | rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, | 1253 | return digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, |
1226 | NFC_DIGITAL_FRAMING_NFCF_NFC_DEP); | 1254 | NFC_DIGITAL_FRAMING_NFCF_NFC_DEP); |
1255 | } | ||
1256 | |||
1257 | int digital_tg_listen_nfcf(struct nfc_digital_dev *ddev, u8 rf_tech) | ||
1258 | { | ||
1259 | int rc; | ||
1260 | u8 *nfcid2; | ||
1261 | |||
1262 | rc = digital_tg_config_nfcf(ddev, rf_tech); | ||
1227 | if (rc) | 1263 | if (rc) |
1228 | return rc; | 1264 | return rc; |
1229 | 1265 | ||
@@ -1237,3 +1273,43 @@ int digital_tg_listen_nfcf(struct nfc_digital_dev *ddev, u8 rf_tech) | |||
1237 | 1273 | ||
1238 | return digital_tg_listen(ddev, 300, digital_tg_recv_sensf_req, nfcid2); | 1274 | return digital_tg_listen(ddev, 300, digital_tg_recv_sensf_req, nfcid2); |
1239 | } | 1275 | } |
1276 | |||
1277 | void digital_tg_recv_md_req(struct nfc_digital_dev *ddev, void *arg, | ||
1278 | struct sk_buff *resp) | ||
1279 | { | ||
1280 | u8 rf_tech; | ||
1281 | int rc; | ||
1282 | |||
1283 | if (IS_ERR(resp)) { | ||
1284 | resp = NULL; | ||
1285 | goto exit_free_skb; | ||
1286 | } | ||
1287 | |||
1288 | rc = ddev->ops->tg_get_rf_tech(ddev, &rf_tech); | ||
1289 | if (rc) | ||
1290 | goto exit_free_skb; | ||
1291 | |||
1292 | switch (rf_tech) { | ||
1293 | case NFC_DIGITAL_RF_TECH_106A: | ||
1294 | rc = digital_tg_config_nfca(ddev); | ||
1295 | if (rc) | ||
1296 | goto exit_free_skb; | ||
1297 | digital_tg_recv_sens_req(ddev, arg, resp); | ||
1298 | break; | ||
1299 | case NFC_DIGITAL_RF_TECH_212F: | ||
1300 | case NFC_DIGITAL_RF_TECH_424F: | ||
1301 | rc = digital_tg_config_nfcf(ddev, rf_tech); | ||
1302 | if (rc) | ||
1303 | goto exit_free_skb; | ||
1304 | digital_tg_recv_sensf_req(ddev, arg, resp); | ||
1305 | break; | ||
1306 | default: | ||
1307 | goto exit_free_skb; | ||
1308 | } | ||
1309 | |||
1310 | return; | ||
1311 | |||
1312 | exit_free_skb: | ||
1313 | digital_poll_next_tech(ddev); | ||
1314 | dev_kfree_skb(resp); | ||
1315 | } | ||
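The new digital_tg_recv_md_req() above asks the driver which RF technology the frame arrived on, reconfigures framing, and then hands the frame to the matching NFC-A or NFC-F handler. The skeleton of that dispatch, rewritten as a self-contained function with hypothetical constants in place of the NFC_DIGITAL_RF_TECH_* values:

#include <stdio.h>

enum rf_tech { RF_TECH_106A, RF_TECH_212F, RF_TECH_424F, RF_TECH_OTHER };

/* Returns 0 when the frame can be handed to a per-technology handler,
 * -1 when the poll cycle should just move on to the next technology. */
static int dispatch_md_req(enum rf_tech tech)
{
	switch (tech) {
	case RF_TECH_106A:
		/* configure NFC-A framing, treat the frame as a SENS_REQ */
		return 0;
	case RF_TECH_212F:
	case RF_TECH_424F:
		/* configure NFC-F framing, treat the frame as a SENSF_REQ */
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	printf("%d %d\n", dispatch_md_req(RF_TECH_106A), dispatch_md_req(RF_TECH_OTHER));
	return 0;
}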
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c index 47403705197e..117708263ced 100644 --- a/net/nfc/hci/core.c +++ b/net/nfc/hci/core.c | |||
@@ -553,8 +553,11 @@ static void hci_stop_poll(struct nfc_dev *nfc_dev) | |||
553 | { | 553 | { |
554 | struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); | 554 | struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); |
555 | 555 | ||
556 | nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, | 556 | if (hdev->ops->stop_poll) |
557 | NFC_HCI_EVT_END_OPERATION, NULL, 0); | 557 | hdev->ops->stop_poll(hdev); |
558 | else | ||
559 | nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, | ||
560 | NFC_HCI_EVT_END_OPERATION, NULL, 0); | ||
558 | } | 561 | } |
559 | 562 | ||
560 | static int hci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, | 563 | static int hci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, |
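The hci_stop_poll() change above prefers a driver-supplied stop_poll callback and only falls back to the generic END_OPERATION event when the driver does not provide one. The usual optional-callback pattern, sketched with a hypothetical ops table:

#include <stdio.h>

struct poll_ops {
	void (*stop_poll)(void *dev);	/* optional driver hook */
};

static void generic_stop(void *dev)
{
	(void)dev;
	printf("send END_OPERATION event\n");
}

static void stop_poll(const struct poll_ops *ops, void *dev)
{
	if (ops && ops->stop_poll)
		ops->stop_poll(dev);	/* driver knows how to stop its RF */
	else
		generic_stop(dev);	/* generic HCI fallback */
}

static void driver_stop(void *dev) { (void)dev; printf("driver stop_poll\n"); }

int main(void)
{
	struct poll_ops with_hook = { .stop_poll = driver_stop };
	struct poll_ops without_hook = { 0 };

	stop_poll(&with_hook, NULL);
	stop_poll(&without_hook, NULL);
	return 0;
}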
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c index f8f6af231381..df91bb95b12a 100644 --- a/net/nfc/nci/ntf.c +++ b/net/nfc/nci/ntf.c | |||
@@ -166,7 +166,9 @@ static int nci_add_new_protocol(struct nci_dev *ndev, | |||
166 | struct rf_tech_specific_params_nfcf_poll *nfcf_poll; | 166 | struct rf_tech_specific_params_nfcf_poll *nfcf_poll; |
167 | __u32 protocol; | 167 | __u32 protocol; |
168 | 168 | ||
169 | if (rf_protocol == NCI_RF_PROTOCOL_T2T) | 169 | if (rf_protocol == NCI_RF_PROTOCOL_T1T) |
170 | protocol = NFC_PROTO_JEWEL_MASK; | ||
171 | else if (rf_protocol == NCI_RF_PROTOCOL_T2T) | ||
170 | protocol = NFC_PROTO_MIFARE_MASK; | 172 | protocol = NFC_PROTO_MIFARE_MASK; |
171 | else if (rf_protocol == NCI_RF_PROTOCOL_ISO_DEP) | 173 | else if (rf_protocol == NCI_RF_PROTOCOL_ISO_DEP) |
172 | if (rf_tech_and_mode == NCI_NFC_A_PASSIVE_POLL_MODE) | 174 | if (rf_tech_and_mode == NCI_NFC_A_PASSIVE_POLL_MODE) |
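Finally, the nci/ntf.c hunk extends the RF-protocol-to-NFC-protocol mapping with Type 1 (Jewel/Topaz) tags. Simplified to a single lookup, with local constants in place of the NCI_RF_PROTOCOL_* codes and NFC_PROTO_*_MASK bits (the real function also inspects the RF technology and mode for ISO-DEP):

#include <stdint.h>
#include <stdio.h>

enum { RF_PROTO_T1T, RF_PROTO_T2T, RF_PROTO_ISO_DEP, RF_PROTO_UNKNOWN };
enum {
	PROTO_JEWEL_MASK    = 1 << 0,
	PROTO_MIFARE_MASK   = 1 << 1,
	PROTO_ISO14443_MASK = 1 << 2,
};

static uint32_t rf_proto_to_mask(int rf_protocol)
{
	switch (rf_protocol) {
	case RF_PROTO_T1T:     return PROTO_JEWEL_MASK;		/* new in this hunk */
	case RF_PROTO_T2T:     return PROTO_MIFARE_MASK;
	case RF_PROTO_ISO_DEP: return PROTO_ISO14443_MASK;
	default:               return 0;
	}
}

int main(void)
{
	printf("0x%x 0x%x\n", rf_proto_to_mask(RF_PROTO_T1T),
	       rf_proto_to_mask(RF_PROTO_UNKNOWN));
	return 0;
}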