author	Alexander Aring <alex.aring@gmail.com>	2014-10-26 04:37:02 -0400
committer	Marcel Holtmann <marcel@holtmann.org>	2014-10-26 12:23:58 -0400
commit	fe24371d6645b766c59ec664c59d0a9c310ad455 (patch)
tree	bc7f7553e60e9dcaee1827740a500b5227ba81cd
parent	50c6fb9965907732b4f5c45bd3bacf4b4f3463b9 (diff)
mac802154: tx: remove kmalloc in xmit hotpath
This patch removes the kmalloc allocation for the workqueue data in the xmit hotpath and stores that data in the control block of the skb instead. The control block has enough space and isn't used by any other layer in this case.

Signed-off-by: Alexander Aring <alex.aring@gmail.com>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
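The technique applied here is the standard skb control-block pattern: struct sk_buff reserves a 48-byte scratch area, skb->cb, that whichever layer currently owns the skb may use freely. A minimal sketch of the pattern follows; the names (example_xmit_cb) are illustrative, not part of this patch:

#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/bug.h>

/* Illustrative per-packet transmit state, modelled on wpan_xmit_cb
 * from the patch below. It is overlaid on skb->cb instead of being
 * allocated separately.
 */
struct example_xmit_cb {
	struct sk_buff *skb;		/* back-pointer for the worker */
	struct work_struct work;	/* one work item per queued skb */
	u8 chan;
	u8 page;
};

static inline struct example_xmit_cb *example_xmit_cb(struct sk_buff *skb)
{
	/* Fail the build if the state ever outgrows the 48-byte cb. */
	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct example_xmit_cb));

	return (struct example_xmit_cb *)skb->cb;
}

Because the per-packet state now lives inside the skb itself, the hotpath loses both the GFP_ATOMIC allocation and its failure branch, and freeing the skb releases the state with it.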
-rw-r--r--	net/mac802154/tx.c	56
1 file changed, 27 insertions(+), 29 deletions(-)
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index 2eb06c2cf96d..513e760a8557 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -30,7 +30,7 @@
 /* IEEE 802.15.4 transceivers can sleep during the xmit session, so process
  * packets through the workqueue.
  */
-struct xmit_work {
+struct wpan_xmit_cb {
 	struct sk_buff *skb;
 	struct work_struct work;
 	struct ieee802154_local *local;
@@ -38,50 +38,54 @@ struct xmit_work {
 	u8 page;
 };
 
+static inline struct wpan_xmit_cb *wpan_xmit_cb(const struct sk_buff *skb)
+{
+	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct wpan_xmit_cb));
+
+	return (struct wpan_xmit_cb *)skb->cb;
+}
+
 static void mac802154_xmit_worker(struct work_struct *work)
 {
-	struct xmit_work *xw = container_of(work, struct xmit_work, work);
+	struct wpan_xmit_cb *cb = container_of(work, struct wpan_xmit_cb, work);
 	struct ieee802154_sub_if_data *sdata;
 	int res;
 
-	mutex_lock(&xw->local->phy->pib_lock);
-	if (xw->local->phy->current_channel != xw->chan ||
-	    xw->local->phy->current_page != xw->page) {
-		res = xw->local->ops->set_channel(&xw->local->hw,
-						  xw->page,
-						  xw->chan);
+	mutex_lock(&cb->local->phy->pib_lock);
+	if (cb->local->phy->current_channel != cb->chan ||
+	    cb->local->phy->current_page != cb->page) {
+		res = cb->local->ops->set_channel(&cb->local->hw, cb->page,
+						  cb->chan);
 		if (res) {
 			pr_debug("set_channel failed\n");
 			goto out;
 		}
 
-		xw->local->phy->current_channel = xw->chan;
-		xw->local->phy->current_page = xw->page;
+		cb->local->phy->current_channel = cb->chan;
+		cb->local->phy->current_page = cb->page;
 	}
 
-	res = xw->local->ops->xmit(&xw->local->hw, xw->skb);
+	res = cb->local->ops->xmit(&cb->local->hw, cb->skb);
 	if (res)
 		pr_debug("transmission failed\n");
 
 out:
-	mutex_unlock(&xw->local->phy->pib_lock);
+	mutex_unlock(&cb->local->phy->pib_lock);
 
 	/* Restart the netif queue on each sub_if_data object. */
 	rcu_read_lock();
-	list_for_each_entry_rcu(sdata, &xw->local->interfaces, list)
+	list_for_each_entry_rcu(sdata, &cb->local->interfaces, list)
 		netif_wake_queue(sdata->dev);
 	rcu_read_unlock();
 
-	dev_kfree_skb(xw->skb);
-
-	kfree(xw);
+	dev_kfree_skb(cb->skb);
 }
 
 static netdev_tx_t mac802154_tx(struct ieee802154_local *local,
 				struct sk_buff *skb, u8 page, u8 chan)
 {
-	struct xmit_work *work;
 	struct ieee802154_sub_if_data *sdata;
+	struct wpan_xmit_cb *cb = wpan_xmit_cb(skb);
 
 	if (!(local->phy->channels_supported[page] & (1 << chan))) {
 		WARN_ON(1);
@@ -101,25 +105,19 @@ static netdev_tx_t mac802154_tx(struct ieee802154_local *local,
 	if (skb_cow_head(skb, local->hw.extra_tx_headroom))
 		goto err_tx;
 
-	work = kzalloc(sizeof(*work), GFP_ATOMIC);
-	if (!work) {
-		kfree_skb(skb);
-		return NETDEV_TX_BUSY;
-	}
-
 	/* Stop the netif queue on each sub_if_data object. */
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdata, &local->interfaces, list)
 		netif_stop_queue(sdata->dev);
 	rcu_read_unlock();
 
-	INIT_WORK(&work->work, mac802154_xmit_worker);
-	work->skb = skb;
-	work->local = local;
-	work->page = page;
-	work->chan = chan;
+	INIT_WORK(&cb->work, mac802154_xmit_worker);
+	cb->skb = skb;
+	cb->local = local;
+	cb->page = page;
+	cb->chan = chan;
 
-	queue_work(local->workqueue, &work->work);
+	queue_work(local->workqueue, &cb->work);
 
 	return NETDEV_TX_OK;
 