author      Alexander Aring <aar@pengutronix.de>     2016-04-11 05:04:18 -0400
committer   Marcel Holtmann <marcel@holtmann.org>    2016-04-13 04:41:09 -0400
commit      2e4d60cbcfc2d16a2a2efaae3fe08f2e457d59a1 (patch)
tree        1ecef44ca224fa5e64dc0dbba6f19d9c2826045c
parent      5a7f97e570fbe0ae7e6fd035f7af0cd6a1a9baa1 (diff)
6lowpan: change naming for lowpan private data
This patch changes the naming for interface private data for lowpan
interfaces. The current private data scheme is:
-------------------------------------------------
| 6LoWPAN Generic | LinkLayer 6LoWPAN |
-------------------------------------------------
the current naming schemes are:
- 6LoWPAN Generic:
  - lowpan_priv
- LinkLayer 6LoWPAN:
  - BTLE:
    - lowpan_dev
  - 802.15.4:
    - lowpan_dev_info
the new naming scheme with this patch will be (a standalone sketch of the
resulting layout follows this list):
- 6LoWPAN Generic:
  - lowpan_dev
- LinkLayer 6LoWPAN:
  - BTLE:
    - lowpan_btle_dev
  - 802.15.4:
    - lowpan_802154_dev
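The layout described above is one netdev private area that starts with the
generic struct lowpan_dev and carries the link-layer struct directly behind
it, sized via LOWPAN_PRIV_SIZE() and reached through the ->priv member. The
snippet below is an illustrative, self-contained userspace model of that
scheme, not the kernel definitions: the structs are stripped down to one or
two fields each, and calloc() stands in for alloc_netdev()/netdev_priv().

    #include <stdio.h>
    #include <stdlib.h>

    enum lowpan_lltypes { LOWPAN_LLTYPE_BTLE, LOWPAN_LLTYPE_IEEE802154 };

    /* generic part shared by every 6LoWPAN interface (was "lowpan_priv") */
    struct lowpan_dev {
            enum lowpan_lltypes lltype;
            unsigned char priv[];   /* link-layer private area follows */
    };

    /* 802.15.4 link-layer part (was "lowpan_dev_info"), reduced to one field */
    struct lowpan_802154_dev {
            unsigned short fragment_tag;
    };

    /* total private area = generic part + link-layer part */
    #define LOWPAN_PRIV_SIZE(llpriv_size) \
            (sizeof(struct lowpan_dev) + (llpriv_size))

    /* mirrors lowpan_802154_dev(): step over the generic part */
    static struct lowpan_802154_dev *lowpan_802154_dev(struct lowpan_dev *ldev)
    {
            return (struct lowpan_802154_dev *)ldev->priv;
    }

    int main(void)
    {
            struct lowpan_dev *ldev =
                    calloc(1, LOWPAN_PRIV_SIZE(sizeof(struct lowpan_802154_dev)));

            if (!ldev)
                    return 1;

            ldev->lltype = LOWPAN_LLTYPE_IEEE802154;
            lowpan_802154_dev(ldev)->fragment_tag = 42;
            printf("lltype=%d fragment_tag=%hu\n",
                   ldev->lltype, lowpan_802154_dev(ldev)->fragment_tag);
            free(ldev);
            return 0;
    }

With the generic struct now claiming the lowpan_dev name, the link-layer
structs and accessors take the lowpan_btle_dev and lowpan_802154_dev names,
which is exactly the rename the hunks below perform.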
Signed-off-by: Alexander Aring <aar@pengutronix.de>
Reviewed-by: Stefan Schmidt <stefan@osg.samsung.com>
Acked-by: Jukka Rissanen <jukka.rissanen@linux.intel.com>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
 include/net/6lowpan.h              |  6
 net/6lowpan/core.c                 |  8
 net/6lowpan/debugfs.c              | 22
 net/6lowpan/iphc.c                 | 38
 net/6lowpan/nhc_udp.c              |  2
 net/bluetooth/6lowpan.c            | 82
 net/ieee802154/6lowpan/6lowpan_i.h |  6
 net/ieee802154/6lowpan/core.c      |  6
 net/ieee802154/6lowpan/tx.c        | 14
 9 files changed, 94 insertions, 90 deletions
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index da3a77d25fcb..f204664f37ab 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -93,7 +93,7 @@ static inline bool lowpan_is_iphc(u8 dispatch)
 }
 
 #define LOWPAN_PRIV_SIZE(llpriv_size) \
-        (sizeof(struct lowpan_priv) + llpriv_size)
+        (sizeof(struct lowpan_dev) + llpriv_size)
 
 enum lowpan_lltypes {
         LOWPAN_LLTYPE_BTLE,
@@ -129,7 +129,7 @@ lowpan_iphc_ctx_is_compression(const struct lowpan_iphc_ctx *ctx)
         return test_bit(LOWPAN_IPHC_CTX_FLAG_COMPRESSION, &ctx->flags);
 }
 
-struct lowpan_priv {
+struct lowpan_dev {
         enum lowpan_lltypes lltype;
         struct dentry *iface_debugfs;
         struct lowpan_iphc_ctx_table ctx;
@@ -139,7 +139,7 @@ struct lowpan_priv {
 };
 
 static inline
-struct lowpan_priv *lowpan_priv(const struct net_device *dev)
+struct lowpan_dev *lowpan_dev(const struct net_device *dev)
 {
         return netdev_priv(dev);
 }
diff --git a/net/6lowpan/core.c b/net/6lowpan/core.c
index 34e44c0c0836..7a240b3eaed1 100644
--- a/net/6lowpan/core.c
+++ b/net/6lowpan/core.c
@@ -27,11 +27,11 @@ int lowpan_register_netdevice(struct net_device *dev,
         dev->mtu = IPV6_MIN_MTU;
         dev->priv_flags |= IFF_NO_QUEUE;
 
-        lowpan_priv(dev)->lltype = lltype;
+        lowpan_dev(dev)->lltype = lltype;
 
-        spin_lock_init(&lowpan_priv(dev)->ctx.lock);
+        spin_lock_init(&lowpan_dev(dev)->ctx.lock);
         for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++)
-                lowpan_priv(dev)->ctx.table[i].id = i;
+                lowpan_dev(dev)->ctx.table[i].id = i;
 
         ret = register_netdevice(dev);
         if (ret < 0)
@@ -85,7 +85,7 @@ static int lowpan_event(struct notifier_block *unused,
         case NETDEV_DOWN:
                 for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++)
                         clear_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE,
                                   &lowpan_priv(dev)->ctx.table[i].flags);
                 break;
         default:
                 return NOTIFY_DONE;
diff --git a/net/6lowpan/debugfs.c b/net/6lowpan/debugfs.c
index 0793a8157472..acbaa3db493b 100644
--- a/net/6lowpan/debugfs.c
+++ b/net/6lowpan/debugfs.c
@@ -172,7 +172,7 @@ static const struct file_operations lowpan_ctx_pfx_fops = {
 static int lowpan_dev_debugfs_ctx_init(struct net_device *dev,
                                        struct dentry *ctx, u8 id)
 {
-        struct lowpan_priv *lpriv = lowpan_priv(dev);
+        struct lowpan_dev *ldev = lowpan_dev(dev);
         struct dentry *dentry, *root;
         char buf[32];
 
@@ -185,25 +185,25 @@ static int lowpan_dev_debugfs_ctx_init(struct net_device *dev,
                 return -EINVAL;
 
         dentry = debugfs_create_file("active", 0644, root,
-                                     &lpriv->ctx.table[id],
+                                     &ldev->ctx.table[id],
                                      &lowpan_ctx_flag_active_fops);
         if (!dentry)
                 return -EINVAL;
 
         dentry = debugfs_create_file("compression", 0644, root,
-                                     &lpriv->ctx.table[id],
+                                     &ldev->ctx.table[id],
                                      &lowpan_ctx_flag_c_fops);
         if (!dentry)
                 return -EINVAL;
 
         dentry = debugfs_create_file("prefix", 0644, root,
-                                     &lpriv->ctx.table[id],
+                                     &ldev->ctx.table[id],
                                      &lowpan_ctx_pfx_fops);
         if (!dentry)
                 return -EINVAL;
 
         dentry = debugfs_create_file("prefix_len", 0644, root,
-                                     &lpriv->ctx.table[id],
+                                     &ldev->ctx.table[id],
                                      &lowpan_ctx_plen_fops);
         if (!dentry)
                 return -EINVAL;
@@ -247,21 +247,21 @@ static const struct file_operations lowpan_context_fops = {
 
 int lowpan_dev_debugfs_init(struct net_device *dev)
 {
-        struct lowpan_priv *lpriv = lowpan_priv(dev);
+        struct lowpan_dev *ldev = lowpan_dev(dev);
         struct dentry *contexts, *dentry;
         int ret, i;
 
         /* creating the root */
-        lpriv->iface_debugfs = debugfs_create_dir(dev->name, lowpan_debugfs);
-        if (!lpriv->iface_debugfs)
+        ldev->iface_debugfs = debugfs_create_dir(dev->name, lowpan_debugfs);
+        if (!ldev->iface_debugfs)
                 goto fail;
 
-        contexts = debugfs_create_dir("contexts", lpriv->iface_debugfs);
+        contexts = debugfs_create_dir("contexts", ldev->iface_debugfs);
         if (!contexts)
                 goto remove_root;
 
         dentry = debugfs_create_file("show", 0644, contexts,
-                                     &lowpan_priv(dev)->ctx,
+                                     &lowpan_dev(dev)->ctx,
                                      &lowpan_context_fops);
         if (!dentry)
                 goto remove_root;
@@ -282,7 +282,7 @@ fail:
 
 void lowpan_dev_debugfs_exit(struct net_device *dev)
 {
-        debugfs_remove_recursive(lowpan_priv(dev)->iface_debugfs);
+        debugfs_remove_recursive(lowpan_dev(dev)->iface_debugfs);
 }
 
 int __init lowpan_debugfs_init(void)
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c
index 68c80f3c9add..5fb764e45d80 100644
--- a/net/6lowpan/iphc.c
+++ b/net/6lowpan/iphc.c
@@ -207,7 +207,7 @@ static inline void iphc_uncompress_802154_lladdr(struct in6_addr *ipaddr,
 static struct lowpan_iphc_ctx *
 lowpan_iphc_ctx_get_by_id(const struct net_device *dev, u8 id)
 {
-        struct lowpan_iphc_ctx *ret = &lowpan_priv(dev)->ctx.table[id];
+        struct lowpan_iphc_ctx *ret = &lowpan_dev(dev)->ctx.table[id];
 
         if (!lowpan_iphc_ctx_is_active(ret))
                 return NULL;
@@ -219,7 +219,7 @@ static struct lowpan_iphc_ctx *
 lowpan_iphc_ctx_get_by_addr(const struct net_device *dev,
                             const struct in6_addr *addr)
 {
-        struct lowpan_iphc_ctx *table = lowpan_priv(dev)->ctx.table;
+        struct lowpan_iphc_ctx *table = lowpan_dev(dev)->ctx.table;
         struct lowpan_iphc_ctx *ret = NULL;
         struct in6_addr addr_pfx;
         u8 addr_plen;
@@ -263,7 +263,7 @@ static struct lowpan_iphc_ctx *
 lowpan_iphc_ctx_get_by_mcast_addr(const struct net_device *dev,
                                   const struct in6_addr *addr)
 {
-        struct lowpan_iphc_ctx *table = lowpan_priv(dev)->ctx.table;
+        struct lowpan_iphc_ctx *table = lowpan_dev(dev)->ctx.table;
         struct lowpan_iphc_ctx *ret = NULL;
         struct in6_addr addr_mcast, network_pfx = {};
         int i;
@@ -332,7 +332,7 @@ static int uncompress_addr(struct sk_buff *skb, const struct net_device *dev,
         case LOWPAN_IPHC_SAM_11:
         case LOWPAN_IPHC_DAM_11:
                 fail = false;
-                switch (lowpan_priv(dev)->lltype) {
+                switch (lowpan_dev(dev)->lltype) {
                 case LOWPAN_LLTYPE_IEEE802154:
                         iphc_uncompress_802154_lladdr(ipaddr, lladdr);
                         break;
@@ -393,7 +393,7 @@ static int uncompress_ctx_addr(struct sk_buff *skb,
         case LOWPAN_IPHC_SAM_11:
         case LOWPAN_IPHC_DAM_11:
                 fail = false;
-                switch (lowpan_priv(dev)->lltype) {
+                switch (lowpan_dev(dev)->lltype) {
                 case LOWPAN_LLTYPE_IEEE802154:
                         iphc_uncompress_802154_lladdr(ipaddr, lladdr);
                         break;
@@ -657,17 +657,17 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
         }
 
         if (iphc1 & LOWPAN_IPHC_SAC) {
-                spin_lock_bh(&lowpan_priv(dev)->ctx.lock);
+                spin_lock_bh(&lowpan_dev(dev)->ctx.lock);
                 ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_SCI(cid));
                 if (!ci) {
-                        spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+                        spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
                         return -EINVAL;
                 }
 
                 pr_debug("SAC bit is set. Handle context based source address.\n");
                 err = uncompress_ctx_addr(skb, dev, ci, &hdr.saddr,
                                           iphc1 & LOWPAN_IPHC_SAM_MASK, saddr);
-                spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+                spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
         } else {
                 /* Source address uncompression */
                 pr_debug("source address stateless compression\n");
@@ -681,10 +681,10 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
 
         switch (iphc1 & (LOWPAN_IPHC_M | LOWPAN_IPHC_DAC)) {
         case LOWPAN_IPHC_M | LOWPAN_IPHC_DAC:
-                spin_lock_bh(&lowpan_priv(dev)->ctx.lock);
+                spin_lock_bh(&lowpan_dev(dev)->ctx.lock);
                 ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_DCI(cid));
                 if (!ci) {
-                        spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+                        spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
                         return -EINVAL;
                 }
 
@@ -693,7 +693,7 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
                 err = lowpan_uncompress_multicast_ctx_daddr(skb, ci,
                                                             &hdr.daddr,
                                                             iphc1 & LOWPAN_IPHC_DAM_MASK);
-                spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+                spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
                 break;
         case LOWPAN_IPHC_M:
                 /* multicast */
@@ -701,10 +701,10 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
                                                         iphc1 & LOWPAN_IPHC_DAM_MASK);
                 break;
         case LOWPAN_IPHC_DAC:
-                spin_lock_bh(&lowpan_priv(dev)->ctx.lock);
+                spin_lock_bh(&lowpan_dev(dev)->ctx.lock);
                 ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_DCI(cid));
                 if (!ci) {
-                        spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+                        spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
                         return -EINVAL;
                 }
 
@@ -712,7 +712,7 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
                 pr_debug("DAC bit is set. Handle context based destination address.\n");
                 err = uncompress_ctx_addr(skb, dev, ci, &hdr.daddr,
                                           iphc1 & LOWPAN_IPHC_DAM_MASK, daddr);
-                spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+                spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
                 break;
         default:
                 err = uncompress_addr(skb, dev, &hdr.daddr,
@@ -736,7 +736,7 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
                 return err;
         }
 
-        switch (lowpan_priv(dev)->lltype) {
+        switch (lowpan_dev(dev)->lltype) {
         case LOWPAN_LLTYPE_IEEE802154:
                 if (lowpan_802154_cb(skb)->d_size)
                         hdr.payload_len = htons(lowpan_802154_cb(skb)->d_size -
@@ -1033,7 +1033,7 @@ int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
                           skb->data, skb->len);
 
         ipv6_daddr_type = ipv6_addr_type(&hdr->daddr);
-        spin_lock_bh(&lowpan_priv(dev)->ctx.lock);
+        spin_lock_bh(&lowpan_dev(dev)->ctx.lock);
         if (ipv6_daddr_type & IPV6_ADDR_MULTICAST)
                 dci = lowpan_iphc_ctx_get_by_mcast_addr(dev, &hdr->daddr);
         else
@@ -1042,15 +1042,15 @@ int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
                 memcpy(&dci_entry, dci, sizeof(*dci));
                 cid |= dci->id;
         }
-        spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+        spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
 
-        spin_lock_bh(&lowpan_priv(dev)->ctx.lock);
+        spin_lock_bh(&lowpan_dev(dev)->ctx.lock);
         sci = lowpan_iphc_ctx_get_by_addr(dev, &hdr->saddr);
         if (sci) {
                 memcpy(&sci_entry, sci, sizeof(*sci));
                 cid |= (sci->id << 4);
         }
-        spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+        spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
 
         /* if cid is zero it will be compressed */
         if (cid) {
diff --git a/net/6lowpan/nhc_udp.c b/net/6lowpan/nhc_udp.c
index 69537a2eaab1..225d91906dfa 100644
--- a/net/6lowpan/nhc_udp.c
+++ b/net/6lowpan/nhc_udp.c
@@ -91,7 +91,7 @@ static int udp_uncompress(struct sk_buff *skb, size_t needed)
                  * here, we obtain the hint from the remaining size of the
                  * frame
                  */
-                switch (lowpan_priv(skb->dev)->lltype) {
+                switch (lowpan_dev(skb->dev)->lltype) {
                 case LOWPAN_LLTYPE_IEEE802154:
                         if (lowpan_802154_cb(skb)->d_size)
                                 uh.len = htons(lowpan_802154_cb(skb)->d_size -
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 8a4cc2f7f0db..38e82ddd7ccd 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -68,7 +68,7 @@ struct lowpan_peer {
         struct in6_addr peer_addr;
 };
 
-struct lowpan_dev {
+struct lowpan_btle_dev {
         struct list_head list;
 
         struct hci_dev *hdev;
@@ -80,18 +80,21 @@ struct lowpan_dev {
         struct delayed_work notify_peers;
 };
 
-static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
+static inline struct lowpan_btle_dev *
+lowpan_btle_dev(const struct net_device *netdev)
 {
-        return (struct lowpan_dev *)lowpan_priv(netdev)->priv;
+        return (struct lowpan_btle_dev *)lowpan_dev(netdev)->priv;
 }
 
-static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
+static inline void peer_add(struct lowpan_btle_dev *dev,
+                            struct lowpan_peer *peer)
 {
         list_add_rcu(&peer->list, &dev->peers);
         atomic_inc(&dev->peer_count);
 }
 
-static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
+static inline bool peer_del(struct lowpan_btle_dev *dev,
+                            struct lowpan_peer *peer)
 {
         list_del_rcu(&peer->list);
         kfree_rcu(peer, rcu);
@@ -106,7 +109,7 @@ static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
         return false;
 }
 
-static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
+static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_btle_dev *dev,
                                                  bdaddr_t *ba, __u8 type)
 {
         struct lowpan_peer *peer;
@@ -134,8 +137,8 @@ static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
         return NULL;
 }
 
-static inline struct lowpan_peer *__peer_lookup_chan(struct lowpan_dev *dev,
-                                                     struct l2cap_chan *chan)
+static inline struct lowpan_peer *
+__peer_lookup_chan(struct lowpan_btle_dev *dev, struct l2cap_chan *chan)
 {
         struct lowpan_peer *peer;
 
@@ -147,8 +150,8 @@ static inline struct lowpan_peer *__peer_lookup_chan(struct lowpan_dev *dev,
         return NULL;
 }
 
-static inline struct lowpan_peer *__peer_lookup_conn(struct lowpan_dev *dev,
-                                                     struct l2cap_conn *conn)
+static inline struct lowpan_peer *
+__peer_lookup_conn(struct lowpan_btle_dev *dev, struct l2cap_conn *conn)
 {
         struct lowpan_peer *peer;
 
@@ -160,7 +163,7 @@ static inline struct lowpan_peer *__peer_lookup_conn(struct lowpan_dev *dev,
         return NULL;
 }
 
-static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev,
+static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
                                                   struct in6_addr *daddr,
                                                   struct sk_buff *skb)
 {
@@ -220,7 +223,7 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev,
 
 static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
 {
-        struct lowpan_dev *entry;
+        struct lowpan_btle_dev *entry;
         struct lowpan_peer *peer = NULL;
 
         rcu_read_lock();
@@ -236,10 +239,10 @@ static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
         return peer;
 }
 
-static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn)
+static struct lowpan_btle_dev *lookup_dev(struct l2cap_conn *conn)
 {
-        struct lowpan_dev *entry;
-        struct lowpan_dev *dev = NULL;
+        struct lowpan_btle_dev *entry;
+        struct lowpan_btle_dev *dev = NULL;
 
         rcu_read_lock();
 
@@ -270,10 +273,10 @@ static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
                            struct l2cap_chan *chan)
 {
         const u8 *saddr, *daddr;
-        struct lowpan_dev *dev;
+        struct lowpan_btle_dev *dev;
         struct lowpan_peer *peer;
 
-        dev = lowpan_dev(netdev);
+        dev = lowpan_btle_dev(netdev);
 
         rcu_read_lock();
         peer = __peer_lookup_chan(dev, chan);
@@ -375,7 +378,7 @@ drop:
 /* Packet from BT LE device */
 static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
 {
-        struct lowpan_dev *dev;
+        struct lowpan_btle_dev *dev;
         struct lowpan_peer *peer;
         int err;
 
@@ -431,13 +434,13 @@ static int setup_header(struct sk_buff *skb, struct net_device *netdev,
                         bdaddr_t *peer_addr, u8 *peer_addr_type)
 {
         struct in6_addr ipv6_daddr;
-        struct lowpan_dev *dev;
+        struct lowpan_btle_dev *dev;
         struct lowpan_peer *peer;
         bdaddr_t addr, *any = BDADDR_ANY;
         u8 *daddr = any->b;
         int err, status = 0;
 
-        dev = lowpan_dev(netdev);
+        dev = lowpan_btle_dev(netdev);
 
         memcpy(&ipv6_daddr, &lowpan_cb(skb)->addr, sizeof(ipv6_daddr));
 
@@ -543,19 +546,19 @@ static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
 static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
 {
         struct sk_buff *local_skb;
-        struct lowpan_dev *entry;
+        struct lowpan_btle_dev *entry;
         int err = 0;
 
         rcu_read_lock();
 
         list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
                 struct lowpan_peer *pentry;
-                struct lowpan_dev *dev;
+                struct lowpan_btle_dev *dev;
 
                 if (entry->netdev != netdev)
                         continue;
 
-                dev = lowpan_dev(entry->netdev);
+                dev = lowpan_btle_dev(entry->netdev);
 
                 list_for_each_entry_rcu(pentry, &dev->peers, list) {
                         int ret;
@@ -723,8 +726,8 @@ static void ifdown(struct net_device *netdev)
 
 static void do_notify_peers(struct work_struct *work)
 {
-        struct lowpan_dev *dev = container_of(work, struct lowpan_dev,
-                                              notify_peers.work);
+        struct lowpan_btle_dev *dev = container_of(work, struct lowpan_btle_dev,
+                                                   notify_peers.work);
 
         netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
 }
@@ -766,7 +769,7 @@ static void set_ip_addr_bits(u8 addr_type, u8 *addr)
 }
 
 static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
-                                        struct lowpan_dev *dev)
+                                        struct lowpan_btle_dev *dev)
 {
         struct lowpan_peer *peer;
 
@@ -803,12 +806,12 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
         return peer->chan;
 }
 
-static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
+static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev)
 {
         struct net_device *netdev;
         int err = 0;
 
-        netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev)),
+        netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_btle_dev)),
                               IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
                               netdev_setup);
         if (!netdev)
@@ -820,7 +823,7 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
         SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
         SET_NETDEV_DEVTYPE(netdev, &bt_type);
 
-        *dev = lowpan_dev(netdev);
+        *dev = lowpan_btle_dev(netdev);
         (*dev)->netdev = netdev;
         (*dev)->hdev = chan->conn->hcon->hdev;
         INIT_LIST_HEAD(&(*dev)->peers);
@@ -853,7 +856,7 @@ out:
 
 static inline void chan_ready_cb(struct l2cap_chan *chan)
 {
-        struct lowpan_dev *dev;
+        struct lowpan_btle_dev *dev;
 
         dev = lookup_dev(chan->conn);
 
@@ -890,8 +893,9 @@ static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan)
 
 static void delete_netdev(struct work_struct *work)
 {
-        struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
-                                                delete_netdev);
+        struct lowpan_btle_dev *entry = container_of(work,
+                                                     struct lowpan_btle_dev,
+                                                     delete_netdev);
 
         lowpan_unregister_netdev(entry->netdev);
 
@@ -900,8 +904,8 @@ static void delete_netdev(struct work_struct *work)
 
 static void chan_close_cb(struct l2cap_chan *chan)
 {
-        struct lowpan_dev *entry;
-        struct lowpan_dev *dev = NULL;
+        struct lowpan_btle_dev *entry;
+        struct lowpan_btle_dev *dev = NULL;
         struct lowpan_peer *peer;
         int err = -ENOENT;
         bool last = false, remove = true;
@@ -921,7 +925,7 @@ static void chan_close_cb(struct l2cap_chan *chan)
         spin_lock(&devices_lock);
 
         list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
-                dev = lowpan_dev(entry->netdev);
+                dev = lowpan_btle_dev(entry->netdev);
                 peer = __peer_lookup_chan(dev, chan);
                 if (peer) {
                         last = peer_del(dev, peer);
@@ -1131,7 +1135,7 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
 
 static void disconnect_all_peers(void)
 {
-        struct lowpan_dev *entry;
+        struct lowpan_btle_dev *entry;
         struct lowpan_peer *peer, *tmp_peer, *new_peer;
         struct list_head peers;
 
@@ -1291,7 +1295,7 @@ static ssize_t lowpan_control_write(struct file *fp,
 
 static int lowpan_control_show(struct seq_file *f, void *ptr)
 {
-        struct lowpan_dev *entry;
+        struct lowpan_btle_dev *entry;
         struct lowpan_peer *peer;
 
         spin_lock(&devices_lock);
@@ -1322,7 +1326,7 @@ static const struct file_operations lowpan_control_fops = {
 
 static void disconnect_devices(void)
 {
-        struct lowpan_dev *entry, *tmp, *new_dev;
+        struct lowpan_btle_dev *entry, *tmp, *new_dev;
         struct list_head devices;
 
         INIT_LIST_HEAD(&devices);
@@ -1360,7 +1364,7 @@ static int device_event(struct notifier_block *unused,
                         unsigned long event, void *ptr)
 {
         struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
-        struct lowpan_dev *entry;
+        struct lowpan_btle_dev *entry;
 
         if (netdev->type != ARPHRD_6LOWPAN)
                 return NOTIFY_DONE;
diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h
index b4092a9ff24a..b02b74de8ffa 100644
--- a/net/ieee802154/6lowpan/6lowpan_i.h
+++ b/net/ieee802154/6lowpan/6lowpan_i.h
@@ -48,15 +48,15 @@ static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
 }
 
 /* private device info */
-struct lowpan_dev_info {
+struct lowpan_802154_dev {
         struct net_device *wdev; /* wpan device ptr */
         u16 fragment_tag;
 };
 
 static inline struct
-lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
+lowpan_802154_dev *lowpan_802154_dev(const struct net_device *dev)
 {
-        return (struct lowpan_dev_info *)lowpan_priv(dev)->priv;
+        return (struct lowpan_802154_dev *)lowpan_dev(dev)->priv;
 }
 
 int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 0023c9048812..dd085db8580e 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -148,7 +148,7 @@ static int lowpan_newlink(struct net *src_net, struct net_device *ldev,
                 return -EBUSY;
         }
 
-        lowpan_dev_info(ldev)->wdev = wdev;
+        lowpan_802154_dev(ldev)->wdev = wdev;
         /* Set the lowpan hardware address to the wpan hardware address. */
         memcpy(ldev->dev_addr, wdev->dev_addr, IEEE802154_ADDR_LEN);
         /* We need headroom for possible wpan_dev_hard_header call and tailroom
@@ -173,7 +173,7 @@
 
 static void lowpan_dellink(struct net_device *ldev, struct list_head *head)
 {
-        struct net_device *wdev = lowpan_dev_info(ldev)->wdev;
+        struct net_device *wdev = lowpan_802154_dev(ldev)->wdev;
 
         ASSERT_RTNL();
 
@@ -184,7 +184,7 @@ static void lowpan_dellink(struct net_device *ldev, struct list_head *head)
 
 static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
         .kind = "lowpan",
-        .priv_size = LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev_info)),
+        .priv_size = LOWPAN_PRIV_SIZE(sizeof(struct lowpan_802154_dev)),
         .setup = lowpan_setup,
         .newlink = lowpan_newlink,
         .dellink = lowpan_dellink,
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
index d4353faced35..e459afd16bb3 100644
--- a/net/ieee802154/6lowpan/tx.c
+++ b/net/ieee802154/6lowpan/tx.c
@@ -84,7 +84,7 @@ static struct sk_buff*
 lowpan_alloc_frag(struct sk_buff *skb, int size,
                   const struct ieee802154_hdr *master_hdr, bool frag1)
 {
-        struct net_device *wdev = lowpan_dev_info(skb->dev)->wdev;
+        struct net_device *wdev = lowpan_802154_dev(skb->dev)->wdev;
         struct sk_buff *frag;
         int rc;
 
@@ -148,8 +148,8 @@ lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *ldev,
         int frag_cap, frag_len, payload_cap, rc;
         int skb_unprocessed, skb_offset;
 
-        frag_tag = htons(lowpan_dev_info(ldev)->fragment_tag);
-        lowpan_dev_info(ldev)->fragment_tag++;
+        frag_tag = htons(lowpan_802154_dev(ldev)->fragment_tag);
+        lowpan_802154_dev(ldev)->fragment_tag++;
 
         frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
         frag_hdr[1] = dgram_size & 0xff;
@@ -208,7 +208,7 @@ err:
 static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
                          u16 *dgram_size, u16 *dgram_offset)
 {
-        struct wpan_dev *wpan_dev = lowpan_dev_info(ldev)->wdev->ieee802154_ptr;
+        struct wpan_dev *wpan_dev = lowpan_802154_dev(ldev)->wdev->ieee802154_ptr;
         struct ieee802154_addr sa, da;
         struct ieee802154_mac_cb *cb = mac_cb_init(skb);
         struct lowpan_addr_info info;
@@ -248,8 +248,8 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
                 cb->ackreq = wpan_dev->ackreq;
         }
 
-        return wpan_dev_hard_header(skb, lowpan_dev_info(ldev)->wdev, &da, &sa,
-                                    0);
+        return wpan_dev_hard_header(skb, lowpan_802154_dev(ldev)->wdev, &da,
+                                    &sa, 0);
 }
 
 netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
@@ -283,7 +283,7 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
         max_single = ieee802154_max_payload(&wpan_hdr);
 
         if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
-                skb->dev = lowpan_dev_info(ldev)->wdev;
+                skb->dev = lowpan_802154_dev(ldev)->wdev;
                 ldev->stats.tx_packets++;
                 ldev->stats.tx_bytes += dgram_size;
                 return dev_queue_xmit(skb);