author		Karsten Keil <kkeil@linux-pingi.de>	2012-05-15 19:51:05 -0400
committer	David S. Miller <davem@davemloft.net>	2012-05-16 15:23:28 -0400
commit		7206e659f689558b41aa058c3040b081cb281d03 (patch)
tree		2be5bdef5a0bbb9c5763b8c1b1e6fe04b7184cc5	/drivers/isdn/mISDN/hwchannel.c
parent		37952cfa3afdfa5cec39d9d76e80bc3a0e6a910c (diff)
mISDN: Reduce RX buffer allocation for transparent data
We always allocated maxsize buffers, but for transparent data we know the
actual size. Use a common function to calculate the size and to detect
overflows.

Signed-off-by: Karsten Keil <kkeil@linux-pingi.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/isdn/mISDN/hwchannel.c')
-rw-r--r--	drivers/isdn/mISDN/hwchannel.c	75
1 file changed, 63 insertions(+), 12 deletions(-)
diff --git a/drivers/isdn/mISDN/hwchannel.c b/drivers/isdn/mISDN/hwchannel.c
index 5c5ab478f66a..3c2145d8c3f8 100644
--- a/drivers/isdn/mISDN/hwchannel.c
+++ b/drivers/isdn/mISDN/hwchannel.c
@@ -201,20 +201,30 @@ recv_Bchannel(struct bchannel *bch, unsigned int id)
 {
 	struct mISDNhead *hh;
 
-	hh = mISDN_HEAD_P(bch->rx_skb);
-	hh->prim = PH_DATA_IND;
-	hh->id = id;
-	if (bch->rcount >= 64) {
-		printk(KERN_WARNING "B-channel %p receive queue overflow, "
-		       "flushing!\n", bch);
-		skb_queue_purge(&bch->rqueue);
-		bch->rcount = 0;
+	/* if allocation did fail upper functions still may call us */
+	if (unlikely(!bch->rx_skb))
 		return;
+	if (unlikely(!bch->rx_skb->len)) {
+		/* we have no data to send - this may happen after recovery
+		 * from overflow or too small allocation.
+		 * We need to free the buffer here */
+		dev_kfree_skb(bch->rx_skb);
+		bch->rx_skb = NULL;
+	} else {
+		hh = mISDN_HEAD_P(bch->rx_skb);
+		hh->prim = PH_DATA_IND;
+		hh->id = id;
+		if (bch->rcount >= 64) {
+			printk(KERN_WARNING
+			       "B%d receive queue overflow - flushing!\n",
+			       bch->nr);
+			skb_queue_purge(&bch->rqueue);
+		}
+		bch->rcount++;
+		skb_queue_tail(&bch->rqueue, bch->rx_skb);
+		bch->rx_skb = NULL;
+		schedule_event(bch, FLG_RECVQUEUE);
 	}
-	bch->rcount++;
-	skb_queue_tail(&bch->rqueue, bch->rx_skb);
-	bch->rx_skb = NULL;
-	schedule_event(bch, FLG_RECVQUEUE);
 }
 EXPORT_SYMBOL(recv_Bchannel);
 
@@ -399,3 +409,44 @@ bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
 	}
 }
 EXPORT_SYMBOL(bchannel_senddata);
+
+/* The function allocates a new receive skb on demand with a size for the
+ * requirements of the current protocol. It returns the tailroom of the
+ * receive skb or an error.
+ */
+int
+bchannel_get_rxbuf(struct bchannel *bch, int reqlen)
+{
+	int len;
+
+	if (bch->rx_skb) {
+		len = skb_tailroom(bch->rx_skb);
+		if (len < reqlen) {
+			pr_warning("B%d no space for %d (only %d) bytes\n",
+				   bch->nr, reqlen, len);
+			if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
+				/* send what we have now and try a new buffer */
+				recv_Bchannel(bch, 0);
+			} else {
+				/* on HDLC we have to drop too big frames */
+				return -EMSGSIZE;
+			}
+		} else {
+			return len;
+		}
+	}
+	if (unlikely(reqlen > bch->maxlen))
+		return -EMSGSIZE;
+	if (test_bit(FLG_TRANSPARENT, &bch->Flags))
+		len = reqlen;
+	else /* with HDLC we do not know the length yet */
+		len = bch->maxlen;
+	bch->rx_skb = mI_alloc_skb(len, GFP_ATOMIC);
+	if (!bch->rx_skb) {
+		pr_warning("B%d receive no memory for %d bytes\n",
+			   bch->nr, len);
+		len = -ENOMEM;
+	}
+	return len;
+}
+EXPORT_SYMBOL(bchannel_get_rxbuf);
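
For context, below is a minimal sketch of how a card driver's receive path could use the new helper; it is not part of this patch. The function name my_card_rx_fifo() and its data/count parameters are invented for illustration, while bchannel_get_rxbuf(), recv_Bchannel(), FLG_TRANSPARENT and the struct bchannel fields are the real mISDN interfaces touched by the change.

/* Hypothetical driver-side usage sketch (not from this patch); only the
 * mISDN helpers called here are real, the surrounding function is made up.
 */
#include <linux/skbuff.h>
#include <linux/mISDNhw.h>

static void my_card_rx_fifo(struct bchannel *bch, const u8 *data, int count)
{
	int ret;

	/* ask the core for room for "count" bytes; on FLG_TRANSPARENT
	 * channels this now allocates only "count" bytes, on HDLC it
	 * still allocates bch->maxlen because the frame end is unknown */
	ret = bchannel_get_rxbuf(bch, count);
	if (ret < 0) {
		/* -EMSGSIZE (oversized HDLC frame) or -ENOMEM: data is lost */
		pr_warn("B%d: no rx buffer for %d bytes (err %d)\n",
			bch->nr, count, ret);
		return;
	}
	/* ret is the tailroom of bch->rx_skb, so at least "count" bytes */
	memcpy(skb_put(bch->rx_skb, count), data, count);

	if (test_bit(FLG_TRANSPARENT, &bch->Flags))
		/* transparent data can be delivered immediately */
		recv_Bchannel(bch, 0);
	/* an HDLC driver would instead wait for its end-of-frame condition
	 * before calling recv_Bchannel() */
}

The overflow detection mentioned in the commit message is the reqlen > bch->maxlen check and the -EMSGSIZE/-ENOMEM returns; a driver only has to propagate the error and drop the data instead of sizing the buffer itself.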