author    Samuel Ortiz <samuel@sortiz.org>        2006-07-21 17:50:41 -0400
committer David S. Miller <davem@davemloft.net>   2006-07-21 17:50:41 -0400
commit    485fb2c998a37d5c3c6aa082aa451e66db90f34a
tree      6adc0e5c935072d82143e7354bc633eac7feed9b /net/irda/irlap_frame.c
parent    b82631581372dc00b3507cedc3ad47af29efe962
[IrDA]: Use alloc_skb() in IrDA TX path
As pointed out by Christoph Hellwig, dev_alloc_skb() is not intended
to be used for allocating TX sk_buffs. The IrDA stack was exclusively
calling dev_alloc_skb() on the TX path, and this patch fixes that.

Signed-off-by: Samuel Ortiz <samuel@sortiz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
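For context: dev_alloc_skb() is an RX-side convenience wrapper. In kernels of this era it expanded to roughly the following (a simplified sketch of the <linux/skbuff.h> helper; the function name below is illustrative, and the 16-byte pad is the then-current NET_SKB_PAD value):

	#include <linux/skbuff.h>

	/* Sketch: roughly what dev_alloc_skb(len) amounted to circa 2.6.x. */
	static inline struct sk_buff *dev_alloc_skb_sketch(unsigned int len)
	{
		/* Over-allocate, then reserve headroom that RX drivers and
		 * the protocol demux expect in front of the payload.
		 * TX callers get no benefit from this reservation. */
		struct sk_buff *skb = alloc_skb(len + 16, GFP_ATOMIC);

		if (skb)
			skb_reserve(skb, 16);
		return skb;
	}

Calling alloc_skb(size, GFP_ATOMIC) directly, as this patch does, skips the RX-only headroom reservation and makes the atomic-allocation requirement explicit at each TX call site (IrLAP frames can be built from timer and softirq context, so a sleeping allocation would not be safe).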
Diffstat (limited to 'net/irda/irlap_frame.c')
 net/irda/irlap_frame.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 3e9a06abbdd0..fa5c144ecc0b 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -117,7 +117,7 @@ void irlap_send_snrm_frame(struct irlap_cb *self, struct qos_info *qos)
 	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 
 	/* Allocate frame */
-	tx_skb = dev_alloc_skb(64);
+	tx_skb = alloc_skb(64, GFP_ATOMIC);
 	if (!tx_skb)
 		return;
 
@@ -210,7 +210,7 @@ void irlap_send_ua_response_frame(struct irlap_cb *self, struct qos_info *qos)
 	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 
 	/* Allocate frame */
-	tx_skb = dev_alloc_skb(64);
+	tx_skb = alloc_skb(64, GFP_ATOMIC);
 	if (!tx_skb)
 		return;
 
@@ -250,7 +250,7 @@ void irlap_send_dm_frame( struct irlap_cb *self)
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 
-	tx_skb = dev_alloc_skb(32);
+	tx_skb = alloc_skb(32, GFP_ATOMIC);
 	if (!tx_skb)
 		return;
 
@@ -282,7 +282,7 @@ void irlap_send_disc_frame(struct irlap_cb *self)
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 
-	tx_skb = dev_alloc_skb(16);
+	tx_skb = alloc_skb(16, GFP_ATOMIC);
 	if (!tx_skb)
 		return;
 
@@ -315,7 +315,7 @@ void irlap_send_discovery_xid_frame(struct irlap_cb *self, int S, __u8 s,
 	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 	IRDA_ASSERT(discovery != NULL, return;);
 
-	tx_skb = dev_alloc_skb(64);
+	tx_skb = alloc_skb(64, GFP_ATOMIC);
 	if (!tx_skb)
 		return;
 
@@ -576,7 +576,7 @@ void irlap_send_rr_frame(struct irlap_cb *self, int command)
 	struct sk_buff *tx_skb;
 	__u8 *frame;
 
-	tx_skb = dev_alloc_skb(16);
+	tx_skb = alloc_skb(16, GFP_ATOMIC);
 	if (!tx_skb)
 		return;
 
@@ -601,7 +601,7 @@ void irlap_send_rd_frame(struct irlap_cb *self)
 	struct sk_buff *tx_skb;
 	__u8 *frame;
 
-	tx_skb = dev_alloc_skb(16);
+	tx_skb = alloc_skb(16, GFP_ATOMIC);
 	if (!tx_skb)
 		return;
 
@@ -1215,7 +1215,7 @@ void irlap_send_test_frame(struct irlap_cb *self, __u8 caddr, __u32 daddr,
 	struct test_frame *frame;
 	__u8 *info;
 
-	tx_skb = dev_alloc_skb(cmd->len+sizeof(struct test_frame));
+	tx_skb = alloc_skb(cmd->len+sizeof(struct test_frame), GFP_ATOMIC);
 	if (!tx_skb)
 		return;
 