author	Alexander Aring <aar@pengutronix.de>	2016-03-04 04:10:20 -0500
committer	Marcel Holtmann <marcel@holtmann.org>	2016-03-10 13:51:28 -0500
commit	f16089209e1029d45ae78dd238b6ab9b2c9a886c (patch)
tree	1695e58aca8a4fbb3b6277c62876600519d0b826
parent	75c6aca4765dbe3d0c1507ab5052f2e373dc2331 (diff)
mac802154: use put and get unaligned functions
This patch removes the swap pointer and memmove functionality. Instead, we
use the well-known put/get unaligned accessors with explicit byte-order
handling.

Signed-off-by: Alexander Aring <aar@pengutronix.de>
Suggested-by: Marc Kleine-Budde <mkl@pengutronix.de>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
-rw-r--r--	include/net/mac802154.h	10
1 file changed, 5 insertions(+), 5 deletions(-)
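For illustration only, a minimal userspace C sketch of what the new helpers boil down to: load 64 bits big-endian from a possibly unaligned buffer and store them back little-endian, which has the same net effect as the removed swab64p()/__put_unaligned_memmove64() pair. The names demo_get_be64 and demo_put_le64 are hypothetical stand-ins, not the kernel's asm/unaligned.h implementations.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for get_unaligned_be64(): assemble a 64-bit
 * value from 8 big-endian bytes at an arbitrary (possibly unaligned)
 * address, byte by byte, so alignment never matters. */
static uint64_t demo_get_be64(const void *p)
{
	const uint8_t *b = p;
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | b[i];
	return v;
}

/* Hypothetical stand-in for put_unaligned_le64(): store a 64-bit value
 * little-endian, lowest byte first, at an arbitrary address. */
static void demo_put_le64(uint64_t v, void *p)
{
	uint8_t *b = p;
	int i;

	for (i = 0; i < 8; i++)
		b[i] = (v >> (8 * i)) & 0xff;
}

int main(void)
{
	/* An EUI-64 address as it appears on the wire (big-endian). */
	const uint8_t be_src[8] = { 0x00, 0x11, 0x22, 0x33,
				    0x44, 0x55, 0x66, 0x77 };
	uint8_t le_dst[8];
	int i;

	/* Same shape as ieee802154_be64_to_le64() after this patch:
	 * one big-endian load, one little-endian store. */
	demo_put_le64(demo_get_be64(be_src), le_dst);

	for (i = 0; i < 8; i++)
		printf("%02x ", le_dst[i]);
	printf("\n"); /* prints: 77 66 55 44 33 22 11 00 */
	return 0;
}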
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index 2e3cdd2048d2..6cd7a70706a9 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -16,10 +16,10 @@
 #ifndef NET_MAC802154_H
 #define NET_MAC802154_H
 
+#include <asm/unaligned.h>
 #include <net/af_ieee802154.h>
 #include <linux/ieee802154.h>
 #include <linux/skbuff.h>
-#include <linux/unaligned/memmove.h>
 
 #include <net/cfg802154.h>
 
@@ -254,7 +254,7 @@ static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb)
 		return cpu_to_le16(0);
 	}
 
-	return (__force __le16)__get_unaligned_memmove16(skb_mac_header(skb));
+	return get_unaligned_le16(skb_mac_header(skb));
 }
 
 /**
@@ -264,7 +264,7 @@ static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb)
  */
 static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src)
 {
-	__put_unaligned_memmove64(swab64p(be64_src), le64_dst);
+	put_unaligned_le64(get_unaligned_be64(be64_src), le64_dst);
 }
 
 /**
@@ -274,7 +274,7 @@ static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src)
  */
 static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src)
 {
-	__put_unaligned_memmove64(swab64p(le64_src), be64_dst);
+	put_unaligned_be64(get_unaligned_le64(le64_src), be64_dst);
 }
 
 /**
@@ -284,7 +284,7 @@ static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src)
  */
 static inline void ieee802154_le16_to_be16(void *be16_dst, const void *le16_src)
 {
-	__put_unaligned_memmove16(swab16p(le16_src), be16_dst);
+	put_unaligned_be16(get_unaligned_le16(le16_src), be16_dst);
 }
 
 /**