Diffstat (limited to 'drivers')
30 files changed, 633 insertions, 407 deletions
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 6f907ebed2d5..6d34f405a2f3 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -37,7 +37,7 @@
 #include <linux/wait.h>
 
 #include <linux/skbuff.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <pcmcia/cs_types.h>
 #include <pcmcia/cs.h>
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 40aec0fb8596..42d69d4de05c 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -244,7 +244,7 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
	if (rel) {
		hdr[0] |= 0x80 + bcsp->msgq_txseq;
		BT_DBG("Sending packet with seqno %u", bcsp->msgq_txseq);
-		bcsp->msgq_txseq = ++(bcsp->msgq_txseq) & 0x07;
+		bcsp->msgq_txseq = (bcsp->msgq_txseq + 1) & 0x07;
	}
 
	if (bcsp->use_crc)
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
index c5016bd2d94f..c3b1dc3a13a0 100644
--- a/drivers/isdn/gigaset/asyncdata.c
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -126,26 +126,6 @@ static unsigned lock_loop(unsigned numbytes, struct inbuf_t *inbuf)
	return numbytes;
 }
 
-/* set up next receive skb for data mode
- */
-static void new_rcv_skb(struct bc_state *bcs)
-{
-	struct cardstate *cs = bcs->cs;
-	unsigned short hw_hdr_len = cs->hw_hdr_len;
-
-	if (bcs->ignore) {
-		bcs->skb = NULL;
-		return;
-	}
-
-	bcs->skb = dev_alloc_skb(SBUFSIZE + hw_hdr_len);
-	if (bcs->skb == NULL) {
-		dev_warn(cs->dev, "could not allocate new skb\n");
-		return;
-	}
-	skb_reserve(bcs->skb, hw_hdr_len);
-}
-
 /* process a block of received bytes in HDLC data mode
  * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 == L2_HDLC)
  * Collect HDLC frames, undoing byte stuffing and watching for DLE escapes.
@@ -159,8 +139,8 @@ static unsigned hdlc_loop(unsigned numbytes, struct inbuf_t *inbuf)
	struct cardstate *cs = inbuf->cs;
	struct bc_state *bcs = cs->bcs;
	int inputstate = bcs->inputstate;
-	__u16 fcs = bcs->fcs;
-	struct sk_buff *skb = bcs->skb;
+	__u16 fcs = bcs->rx_fcs;
+	struct sk_buff *skb = bcs->rx_skb;
	unsigned char *src = inbuf->data + inbuf->head;
	unsigned procbytes = 0;
	unsigned char c;
@@ -245,8 +225,7 @@ byte_stuff:
 
			/* prepare reception of next frame */
			inputstate &= ~INS_have_data;
-			new_rcv_skb(bcs);
-			skb = bcs->skb;
+			skb = gigaset_new_rx_skb(bcs);
		} else {
			/* empty frame (7E 7E) */
 #ifdef CONFIG_GIGASET_DEBUG
@@ -255,8 +234,7 @@ byte_stuff:
			if (!skb) {
				/* skipped (?) */
				gigaset_isdn_rcv_err(bcs);
-				new_rcv_skb(bcs);
-				skb = bcs->skb;
+				skb = gigaset_new_rx_skb(bcs);
			}
		}
 
@@ -279,11 +257,11 @@ byte_stuff:
 #endif
			inputstate |= INS_have_data;
			if (skb) {
-				if (skb->len == SBUFSIZE) {
+				if (skb->len >= bcs->rx_bufsize) {
					dev_warn(cs->dev, "received packet too long\n");
					dev_kfree_skb_any(skb);
					/* skip remainder of packet */
-					bcs->skb = skb = NULL;
+					bcs->rx_skb = skb = NULL;
				} else {
					*__skb_put(skb, 1) = c;
					fcs = crc_ccitt_byte(fcs, c);
@@ -292,7 +270,7 @@ byte_stuff:
	}
 
	bcs->inputstate = inputstate;
-	bcs->fcs = fcs;
+	bcs->rx_fcs = fcs;
	return procbytes;
 }
 
@@ -308,18 +286,18 @@ static unsigned iraw_loop(unsigned numbytes, struct inbuf_t *inbuf)
	struct cardstate *cs = inbuf->cs;
	struct bc_state *bcs = cs->bcs;
	int inputstate = bcs->inputstate;
-	struct sk_buff *skb = bcs->skb;
+	struct sk_buff *skb = bcs->rx_skb;
	unsigned char *src = inbuf->data + inbuf->head;
	unsigned procbytes = 0;
	unsigned char c;
 
	if (!skb) {
		/* skip this block */
-		new_rcv_skb(bcs);
+		gigaset_new_rx_skb(bcs);
		return numbytes;
	}
 
-	while (procbytes < numbytes && skb->len < SBUFSIZE) {
+	while (procbytes < numbytes && skb->len < bcs->rx_bufsize) {
		c = *src++;
		procbytes++;
 
@@ -343,7 +321,7 @@ static unsigned iraw_loop(unsigned numbytes, struct inbuf_t *inbuf)
	if (inputstate & INS_have_data) {
		gigaset_skb_rcvd(bcs, skb);
		inputstate &= ~INS_have_data;
-		new_rcv_skb(bcs);
+		gigaset_new_rx_skb(bcs);
	}
 
	bcs->inputstate = inputstate;
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 8f78f15c8ef7..6fbe8999c419 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -70,7 +70,7 @@
 #define MAX_NUMBER_DIGITS 20
 #define MAX_FMT_IE_LEN 20
 
-/* values for gigaset_capi_appl.connected */
+/* values for bcs->apconnstate */
 #define APCONN_NONE 0	/* inactive/listening */
 #define APCONN_SETUP 1	/* connecting */
 #define APCONN_ACTIVE 2	/* B channel up */
@@ -80,10 +80,10 @@ struct gigaset_capi_appl {
	struct list_head ctrlist;
	struct gigaset_capi_appl *bcnext;
	u16 id;
+	struct capi_register_params rp;
	u16 nextMessageNumber;
	u32 listenInfoMask;
	u32 listenCIPmask;
-	int connected;
 };
 
 /* CAPI specific controller data structure */
@@ -319,6 +319,39 @@ static const char *format_ie(const char *ie)
	return result;
 }
 
+/*
+ * emit DATA_B3_CONF message
+ */
+static void send_data_b3_conf(struct cardstate *cs, struct capi_ctr *ctr,
+			      u16 appl, u16 msgid, int channel,
+			      u16 handle, u16 info)
+{
+	struct sk_buff *cskb;
+	u8 *msg;
+
+	cskb = alloc_skb(CAPI_DATA_B3_CONF_LEN, GFP_ATOMIC);
+	if (!cskb) {
+		dev_err(cs->dev, "%s: out of memory\n", __func__);
+		return;
+	}
+	/* frequent message, avoid _cmsg overhead */
+	msg = __skb_put(cskb, CAPI_DATA_B3_CONF_LEN);
+	CAPIMSG_SETLEN(msg, CAPI_DATA_B3_CONF_LEN);
+	CAPIMSG_SETAPPID(msg, appl);
+	CAPIMSG_SETCOMMAND(msg, CAPI_DATA_B3);
+	CAPIMSG_SETSUBCOMMAND(msg, CAPI_CONF);
+	CAPIMSG_SETMSGID(msg, msgid);
+	CAPIMSG_SETCONTROLLER(msg, ctr->cnr);
+	CAPIMSG_SETPLCI_PART(msg, channel);
+	CAPIMSG_SETNCCI_PART(msg, 1);
+	CAPIMSG_SETHANDLE_CONF(msg, handle);
+	CAPIMSG_SETINFO_CONF(msg, info);
+
+	/* emit message */
+	dump_rawmsg(DEBUG_MCMD, __func__, msg);
+	capi_ctr_handle_message(ctr, appl, cskb);
+}
+
 
 /*
  * driver interface functions
@@ -339,7 +372,6 @@ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *dskb)
	struct gigaset_capi_ctr *iif = cs->iif;
	struct gigaset_capi_appl *ap = bcs->ap;
	unsigned char *req = skb_mac_header(dskb);
-	struct sk_buff *cskb;
	u16 flags;
 
	/* update statistics */
@@ -351,39 +383,22 @@ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *dskb)
	}
 
	/* don't send further B3 messages if disconnected */
-	if (ap->connected < APCONN_ACTIVE) {
+	if (bcs->apconnstate < APCONN_ACTIVE) {
		gig_dbg(DEBUG_LLDATA, "disconnected, discarding ack");
		return;
	}
 
-	/* ToDo: honor unset "delivery confirmation" bit */
+	/*
+	 * send DATA_B3_CONF if "delivery confirmation" bit was set in request;
+	 * otherwise it has already been sent by do_data_b3_req()
+	 */
	flags = CAPIMSG_FLAGS(req);
-
-	/* build DATA_B3_CONF message */
-	cskb = alloc_skb(CAPI_DATA_B3_CONF_LEN, GFP_ATOMIC);
-	if (!cskb) {
-		dev_err(cs->dev, "%s: out of memory\n", __func__);
-		return;
-	}
-	/* frequent message, avoid _cmsg overhead */
-	CAPIMSG_SETLEN(cskb->data, CAPI_DATA_B3_CONF_LEN);
-	CAPIMSG_SETAPPID(cskb->data, ap->id);
-	CAPIMSG_SETCOMMAND(cskb->data, CAPI_DATA_B3);
-	CAPIMSG_SETSUBCOMMAND(cskb->data, CAPI_CONF);
-	CAPIMSG_SETMSGID(cskb->data, CAPIMSG_MSGID(req));
-	CAPIMSG_SETCONTROLLER(cskb->data, iif->ctr.cnr);
-	CAPIMSG_SETPLCI_PART(cskb->data, bcs->channel + 1);
-	CAPIMSG_SETNCCI_PART(cskb->data, 1);
-	CAPIMSG_SETHANDLE_CONF(cskb->data, CAPIMSG_HANDLE_REQ(req));
-	if (flags & ~CAPI_FLAGS_DELIVERY_CONFIRMATION)
-		CAPIMSG_SETINFO_CONF(cskb->data,
-				     CapiFlagsNotSupportedByProtocol);
-	else
-		CAPIMSG_SETINFO_CONF(cskb->data, CAPI_NOERROR);
-
-	/* emit message */
-	dump_rawmsg(DEBUG_LLDATA, "DATA_B3_CONF", cskb->data);
-	capi_ctr_handle_message(&iif->ctr, ap->id, cskb);
+	if (flags & CAPI_FLAGS_DELIVERY_CONFIRMATION)
+		send_data_b3_conf(cs, &iif->ctr, ap->id, CAPIMSG_MSGID(req),
+				  bcs->channel + 1, CAPIMSG_HANDLE_REQ(req),
+				  (flags & ~CAPI_FLAGS_DELIVERY_CONFIRMATION) ?
+					CapiFlagsNotSupportedByProtocol :
+					CAPI_NOERROR);
 }
 EXPORT_SYMBOL_GPL(gigaset_skb_sent);
 
@@ -412,7 +427,7 @@ void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
	}
 
	/* don't send further B3 messages if disconnected */
-	if (ap->connected < APCONN_ACTIVE) {
+	if (bcs->apconnstate < APCONN_ACTIVE) {
		gig_dbg(DEBUG_LLDATA, "disconnected, discarding data");
		dev_kfree_skb_any(skb);
		return;
@@ -484,6 +499,7 @@ int gigaset_isdn_icall(struct at_state_t *at_state)
	u32 actCIPmask;
	struct sk_buff *skb;
	unsigned int msgsize;
+	unsigned long flags;
	int i;
 
	/*
@@ -608,7 +624,14 @@ int gigaset_isdn_icall(struct at_state_t *at_state)
		format_ie(iif->hcmsg.CalledPartyNumber));
 
	/* scan application list for matching listeners */
-	bcs->ap = NULL;
+	spin_lock_irqsave(&bcs->aplock, flags);
+	if (bcs->ap != NULL || bcs->apconnstate != APCONN_NONE) {
+		dev_warn(cs->dev, "%s: channel not properly cleared (%p/%d)\n",
+			 __func__, bcs->ap, bcs->apconnstate);
+		bcs->ap = NULL;
+		bcs->apconnstate = APCONN_NONE;
+	}
+	spin_unlock_irqrestore(&bcs->aplock, flags);
	actCIPmask = 1 | (1 << iif->hcmsg.CIPValue);
	list_for_each_entry(ap, &iif->appls, ctrlist)
		if (actCIPmask & ap->listenCIPmask) {
@@ -626,10 +649,12 @@ int gigaset_isdn_icall(struct at_state_t *at_state)
	dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
 
	/* add to listeners on this B channel, update state */
+	spin_lock_irqsave(&bcs->aplock, flags);
	ap->bcnext = bcs->ap;
	bcs->ap = ap;
	bcs->chstate |= CHS_NOTIFY_LL;
-	ap->connected = APCONN_SETUP;
+	bcs->apconnstate = APCONN_SETUP;
+	spin_unlock_irqrestore(&bcs->aplock, flags);
 
	/* emit message */
	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
@@ -654,7 +679,7 @@ static void send_disconnect_ind(struct bc_state *bcs,
	struct gigaset_capi_ctr *iif = cs->iif;
	struct sk_buff *skb;
 
-	if (ap->connected == APCONN_NONE)
+	if (bcs->apconnstate == APCONN_NONE)
		return;
 
	capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT, CAPI_IND,
@@ -668,7 +693,6 @@ static void send_disconnect_ind(struct bc_state *bcs,
	}
	capi_cmsg2message(&iif->hcmsg, __skb_put(skb, CAPI_DISCONNECT_IND_LEN));
	dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
-	ap->connected = APCONN_NONE;
	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
 }
 
@@ -685,9 +709,9 @@ static void send_disconnect_b3_ind(struct bc_state *bcs,
	struct sk_buff *skb;
 
	/* nothing to do if no logical connection active */
-	if (ap->connected < APCONN_ACTIVE)
+	if (bcs->apconnstate < APCONN_ACTIVE)
		return;
-	ap->connected = APCONN_SETUP;
+	bcs->apconnstate = APCONN_SETUP;
 
	capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND,
			 ap->nextMessageNumber++,
@@ -714,14 +738,25 @@ void gigaset_isdn_connD(struct bc_state *bcs)
 {
	struct cardstate *cs = bcs->cs;
	struct gigaset_capi_ctr *iif = cs->iif;
-	struct gigaset_capi_appl *ap = bcs->ap;
+	struct gigaset_capi_appl *ap;
	struct sk_buff *skb;
	unsigned int msgsize;
+	unsigned long flags;
 
+	spin_lock_irqsave(&bcs->aplock, flags);
+	ap = bcs->ap;
	if (!ap) {
+		spin_unlock_irqrestore(&bcs->aplock, flags);
		dev_err(cs->dev, "%s: no application\n", __func__);
		return;
	}
+	if (bcs->apconnstate == APCONN_NONE) {
+		spin_unlock_irqrestore(&bcs->aplock, flags);
+		dev_warn(cs->dev, "%s: application %u not connected\n",
+			 __func__, ap->id);
+		return;
+	}
+	spin_unlock_irqrestore(&bcs->aplock, flags);
	while (ap->bcnext) {
		/* this should never happen */
		dev_warn(cs->dev, "%s: dropping extra application %u\n",
@@ -730,11 +765,6 @@ void gigaset_isdn_connD(struct bc_state *bcs)
				    CapiCallGivenToOtherApplication);
		ap->bcnext = ap->bcnext->bcnext;
	}
-	if (ap->connected == APCONN_NONE) {
-		dev_warn(cs->dev, "%s: application %u not connected\n",
-			 __func__, ap->id);
-		return;
-	}
 
	/* prepare CONNECT_ACTIVE_IND message
	 * Note: LLC not supported by device
@@ -772,17 +802,24 @@
 void gigaset_isdn_hupD(struct bc_state *bcs)
 {
	struct gigaset_capi_appl *ap;
+	unsigned long flags;
 
	/*
	 * ToDo: pass on reason code reported by device
	 * (requires ev-layer state machine extension to collect
	 * ZCAU device reply)
	 */
-	for (ap = bcs->ap; ap != NULL; ap = ap->bcnext) {
+	spin_lock_irqsave(&bcs->aplock, flags);
+	while (bcs->ap != NULL) {
+		ap = bcs->ap;
+		bcs->ap = ap->bcnext;
+		spin_unlock_irqrestore(&bcs->aplock, flags);
		send_disconnect_b3_ind(bcs, ap);
		send_disconnect_ind(bcs, ap, 0);
+		spin_lock_irqsave(&bcs->aplock, flags);
	}
-	bcs->ap = NULL;
+	bcs->apconnstate = APCONN_NONE;
+	spin_unlock_irqrestore(&bcs->aplock, flags);
 }
 
 /**
@@ -796,24 +833,21 @@ void gigaset_isdn_connB(struct bc_state *bcs)
 {
	struct cardstate *cs = bcs->cs;
	struct gigaset_capi_ctr *iif = cs->iif;
-	struct gigaset_capi_appl *ap = bcs->ap;
+	struct gigaset_capi_appl *ap;
	struct sk_buff *skb;
+	unsigned long flags;
	unsigned int msgsize;
	u8 command;
 
+	spin_lock_irqsave(&bcs->aplock, flags);
+	ap = bcs->ap;
	if (!ap) {
+		spin_unlock_irqrestore(&bcs->aplock, flags);
		dev_err(cs->dev, "%s: no application\n", __func__);
		return;
	}
-	while (ap->bcnext) {
-		/* this should never happen */
-		dev_warn(cs->dev, "%s: dropping extra application %u\n",
-			 __func__, ap->bcnext->id);
-		send_disconnect_ind(bcs, ap->bcnext,
-				    CapiCallGivenToOtherApplication);
-		ap->bcnext = ap->bcnext->bcnext;
-	}
-	if (!ap->connected) {
+	if (!bcs->apconnstate) {
+		spin_unlock_irqrestore(&bcs->aplock, flags);
		dev_warn(cs->dev, "%s: application %u not connected\n",
			 __func__, ap->id);
		return;
@@ -825,13 +859,26 @@ void gigaset_isdn_connB(struct bc_state *bcs)
	 * CONNECT_B3_ACTIVE_IND in reply to CONNECT_B3_RESP
	 * Parameters in both cases always: NCCI = 1, NCPI empty
	 */
-	if (ap->connected >= APCONN_ACTIVE) {
+	if (bcs->apconnstate >= APCONN_ACTIVE) {
		command = CAPI_CONNECT_B3_ACTIVE;
		msgsize = CAPI_CONNECT_B3_ACTIVE_IND_BASELEN;
	} else {
		command = CAPI_CONNECT_B3;
		msgsize = CAPI_CONNECT_B3_IND_BASELEN;
	}
+	bcs->apconnstate = APCONN_ACTIVE;
+
+	spin_unlock_irqrestore(&bcs->aplock, flags);
+
+	while (ap->bcnext) {
+		/* this should never happen */
+		dev_warn(cs->dev, "%s: dropping extra application %u\n",
+			 __func__, ap->bcnext->id);
+		send_disconnect_ind(bcs, ap->bcnext,
+				    CapiCallGivenToOtherApplication);
+		ap->bcnext = ap->bcnext->bcnext;
+	}
+
	capi_cmsg_header(&iif->hcmsg, ap->id, command, CAPI_IND,
			 ap->nextMessageNumber++,
			 iif->ctr.cnr | ((bcs->channel + 1) << 8) | (1 << 16));
@@ -842,7 +889,6 @@ void gigaset_isdn_connB(struct bc_state *bcs)
	}
	capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize));
	dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
-	ap->connected = APCONN_ACTIVE;
	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
 }
 
@@ -945,8 +991,64 @@ static void gigaset_register_appl(struct capi_ctr *ctr, u16 appl,
		return;
	}
	ap->id = appl;
+	ap->rp = *rp;
 
	list_add(&ap->ctrlist, &iif->appls);
+	dev_info(cs->dev, "application %u registered\n", ap->id);
+}
+
+/*
+ * remove CAPI application from channel
+ * helper function to keep indentation levels down and stay in 80 columns
+ */
+
+static inline void remove_appl_from_channel(struct bc_state *bcs,
+					    struct gigaset_capi_appl *ap)
+{
+	struct cardstate *cs = bcs->cs;
+	struct gigaset_capi_appl *bcap;
+	unsigned long flags;
+	int prevconnstate;
+
+	spin_lock_irqsave(&bcs->aplock, flags);
+	bcap = bcs->ap;
+	if (bcap == NULL) {
+		spin_unlock_irqrestore(&bcs->aplock, flags);
+		return;
+	}
+
+	/* check first application on channel */
+	if (bcap == ap) {
+		bcs->ap = ap->bcnext;
+		if (bcs->ap != NULL) {
+			spin_unlock_irqrestore(&bcs->aplock, flags);
+			return;
+		}
+
+		/* none left, clear channel state */
+		prevconnstate = bcs->apconnstate;
+		bcs->apconnstate = APCONN_NONE;
+		spin_unlock_irqrestore(&bcs->aplock, flags);
+
+		if (prevconnstate == APCONN_ACTIVE) {
+			dev_notice(cs->dev, "%s: hanging up channel %u\n",
+				   __func__, bcs->channel);
+			gigaset_add_event(cs, &bcs->at_state,
+					  EV_HUP, NULL, 0, NULL);
+			gigaset_schedule_event(cs);
+		}
+		return;
+	}
+
+	/* check remaining list */
+	do {
+		if (bcap->bcnext == ap) {
+			bcap->bcnext = bcap->bcnext->bcnext;
+			return;
+		}
+		bcap = bcap->bcnext;
+	} while (bcap != NULL);
+	spin_unlock_irqrestore(&bcs->aplock, flags);
 }
 
 /*
@@ -958,19 +1060,19 @@ static void gigaset_release_appl(struct capi_ctr *ctr, u16 appl)
		= container_of(ctr, struct gigaset_capi_ctr, ctr);
	struct cardstate *cs = iif->ctr.driverdata;
	struct gigaset_capi_appl *ap, *tmp;
+	unsigned ch;
 
	list_for_each_entry_safe(ap, tmp, &iif->appls, ctrlist)
		if (ap->id == appl) {
-			if (ap->connected != APCONN_NONE) {
-				dev_err(cs->dev,
-					"%s: application %u still connected\n",
-					__func__, ap->id);
-				/* ToDo: clear active connection */
-			}
+			/* remove from any channels */
+			for (ch = 0; ch < cs->channels; ch++)
+				remove_appl_from_channel(&cs->bcs[ch], ap);
+
+			/* remove from registration list */
			list_del(&ap->ctrlist);
			kfree(ap);
+			dev_info(cs->dev, "application %u released\n", appl);
		}
-
 }
 
 /*
@@ -1149,7 +1251,8 @@ static void do_connect_req(struct gigaset_capi_ctr *iif,
	char **commands;
	char *s;
	u8 *pp;
-	int i, l;
+	unsigned long flags;
+	int i, l, lbc, lhlc;
	u16 info;
 
	/* decode message */
@@ -1164,8 +1267,18 @@ static void do_connect_req(struct gigaset_capi_ctr *iif,
		send_conf(iif, ap, skb, CapiNoPlciAvailable);
		return;
	}
+	spin_lock_irqsave(&bcs->aplock, flags);
+	if (bcs->ap != NULL || bcs->apconnstate != APCONN_NONE)
+		dev_warn(cs->dev, "%s: channel not properly cleared (%p/%d)\n",
+			 __func__, bcs->ap, bcs->apconnstate);
	ap->bcnext = NULL;
	bcs->ap = ap;
+	bcs->apconnstate = APCONN_SETUP;
+	spin_unlock_irqrestore(&bcs->aplock, flags);
+
+	bcs->rx_bufsize = ap->rp.datablklen;
+	dev_kfree_skb(bcs->rx_skb);
+	gigaset_new_rx_skb(bcs);
	cmsg->adr.adrPLCI |= (bcs->channel + 1) << 8;
 
	/* build command table */
@@ -1273,42 +1386,59 @@ static void do_connect_req(struct gigaset_capi_ctr *iif,
		goto error;
	}
 
-	/* check/encode parameter: BC */
-	if (cmsg->BC && cmsg->BC[0]) {
-		/* explicit BC overrides CIP */
-		l = 2*cmsg->BC[0] + 7;
+	/*
+	 * check/encode parameters: BC & HLC
+	 * must be encoded together as device doesn't accept HLC separately
+	 * explicit parameters override values derived from CIP
+	 */
+
+	/* determine lengths */
+	if (cmsg->BC && cmsg->BC[0])		/* BC specified explicitly */
+		lbc = 2*cmsg->BC[0];
+	else if (cip2bchlc[cmsg->CIPValue].bc)	/* BC derived from CIP */
+		lbc = strlen(cip2bchlc[cmsg->CIPValue].bc);
+	else					/* no BC */
+		lbc = 0;
+	if (cmsg->HLC && cmsg->HLC[0])		/* HLC specified explicitly */
+		lhlc = 2*cmsg->HLC[0];
+	else if (cip2bchlc[cmsg->CIPValue].hlc)	/* HLC derived from CIP */
+		lhlc = strlen(cip2bchlc[cmsg->CIPValue].hlc);
+	else					/* no HLC */
+		lhlc = 0;
+
+	if (lbc) {
+		/* have BC: allocate and assemble command string */
+		l = lbc + 7;		/* "^SBC=" + value + "\r" + null byte */
+		if (lhlc)
+			l += lhlc + 7;	/* ";^SHLC=" + value */
		commands[AT_BC] = kmalloc(l, GFP_KERNEL);
		if (!commands[AT_BC])
			goto oom;
		strcpy(commands[AT_BC], "^SBC=");
-		decode_ie(cmsg->BC, commands[AT_BC]+5);
+		if (cmsg->BC && cmsg->BC[0])	/* BC specified explicitly */
+			decode_ie(cmsg->BC, commands[AT_BC] + 5);
+		else			/* BC derived from CIP */
+			strcpy(commands[AT_BC] + 5,
+			       cip2bchlc[cmsg->CIPValue].bc);
+		if (lhlc) {
+			strcpy(commands[AT_BC] + lbc + 5, ";^SHLC=");
+			if (cmsg->HLC && cmsg->HLC[0])
+				/* HLC specified explicitly */
+				decode_ie(cmsg->HLC,
+					  commands[AT_BC] + lbc + 12);
+			else		/* HLC derived from CIP */
+				strcpy(commands[AT_BC] + lbc + 12,
+				       cip2bchlc[cmsg->CIPValue].hlc);
+		}
		strcpy(commands[AT_BC] + l - 2, "\r");
-	} else if (cip2bchlc[cmsg->CIPValue].bc) {
-		l = strlen(cip2bchlc[cmsg->CIPValue].bc) + 7;
-		commands[AT_BC] = kmalloc(l, GFP_KERNEL);
-		if (!commands[AT_BC])
-			goto oom;
-		snprintf(commands[AT_BC], l, "^SBC=%s\r",
-			 cip2bchlc[cmsg->CIPValue].bc);
-	}
-
-	/* check/encode parameter: HLC */
-	if (cmsg->HLC && cmsg->HLC[0]) {
-		/* explicit HLC overrides CIP */
-		l = 2*cmsg->HLC[0] + 7;
-		commands[AT_HLC] = kmalloc(l, GFP_KERNEL);
-		if (!commands[AT_HLC])
-			goto oom;
-		strcpy(commands[AT_HLC], "^SHLC=");
-		decode_ie(cmsg->HLC, commands[AT_HLC]+5);
-		strcpy(commands[AT_HLC] + l - 2, "\r");
-	} else if (cip2bchlc[cmsg->CIPValue].hlc) {
-		l = strlen(cip2bchlc[cmsg->CIPValue].hlc) + 7;
-		commands[AT_HLC] = kmalloc(l, GFP_KERNEL);
-		if (!commands[AT_HLC])
-			goto oom;
-		snprintf(commands[AT_HLC], l, "^SHLC=%s\r",
-			 cip2bchlc[cmsg->CIPValue].hlc);
-	}
+	} else {
+		/* no BC */
+		if (lhlc) {
+			dev_notice(cs->dev, "%s: cannot set HLC without BC\n",
+				   "CONNECT_REQ");
+			info = CapiIllMessageParmCoding; /* ? */
+			goto error;
+		}
+	}
 
	/* check/encode parameter: B Protocol */
@@ -1322,13 +1452,13 @@ static void do_connect_req(struct gigaset_capi_ctr *iif,
		bcs->proto2 = L2_HDLC;
		break;
	case 1:
-		bcs->proto2 = L2_BITSYNC;
+		bcs->proto2 = L2_VOICE;
		break;
	default:
		dev_warn(cs->dev,
			 "B1 Protocol %u unsupported, using Transparent\n",
			 cmsg->B1protocol);
-		bcs->proto2 = L2_BITSYNC;
+		bcs->proto2 = L2_VOICE;
	}
	if (cmsg->B2protocol != 1)
		dev_warn(cs->dev,
@@ -1382,7 +1512,6 @@ static void do_connect_req(struct gigaset_capi_ctr *iif,
		goto error;
	}
	gigaset_schedule_event(cs);
-	ap->connected = APCONN_SETUP;
	send_conf(iif, ap, skb, CapiSuccess);
	return;
 
@@ -1410,6 +1539,7 @@ static void do_connect_resp(struct gigaset_capi_ctr *iif,
	_cmsg *cmsg = &iif->acmsg;
	struct bc_state *bcs;
	struct gigaset_capi_appl *oap;
+	unsigned long flags;
	int channel;
 
	/* decode message */
@@ -1429,12 +1559,24 @@ static void do_connect_resp(struct gigaset_capi_ctr *iif,
	switch (cmsg->Reject) {
	case 0:		/* Accept */
		/* drop all competing applications, keep only this one */
-		for (oap = bcs->ap; oap != NULL; oap = oap->bcnext)
-			if (oap != ap)
+		spin_lock_irqsave(&bcs->aplock, flags);
+		while (bcs->ap != NULL) {
+			oap = bcs->ap;
+			bcs->ap = oap->bcnext;
+			if (oap != ap) {
+				spin_unlock_irqrestore(&bcs->aplock, flags);
				send_disconnect_ind(bcs, oap,
						    CapiCallGivenToOtherApplication);
+				spin_lock_irqsave(&bcs->aplock, flags);
+			}
+		}
		ap->bcnext = NULL;
		bcs->ap = ap;
+		spin_unlock_irqrestore(&bcs->aplock, flags);
+
+		bcs->rx_bufsize = ap->rp.datablklen;
+		dev_kfree_skb(bcs->rx_skb);
+		gigaset_new_rx_skb(bcs);
		bcs->chstate |= CHS_NOTIFY_LL;
 
		/* check/encode B channel protocol */
@@ -1448,13 +1590,13 @@ static void do_connect_resp(struct gigaset_capi_ctr *iif,
			bcs->proto2 = L2_HDLC;
			break;
		case 1:
-			bcs->proto2 = L2_BITSYNC;
+			bcs->proto2 = L2_VOICE;
			break;
		default:
			dev_warn(cs->dev,
				 "B1 Protocol %u unsupported, using Transparent\n",
				 cmsg->B1protocol);
-			bcs->proto2 = L2_BITSYNC;
+			bcs->proto2 = L2_VOICE;
		}
		if (cmsg->B2protocol != 1)
			dev_warn(cs->dev,
@@ -1502,31 +1644,45 @@ static void do_connect_resp(struct gigaset_capi_ctr *iif,
			send_disconnect_ind(bcs, ap, 0);
 
		/* remove it from the list of listening apps */
+		spin_lock_irqsave(&bcs->aplock, flags);
		if (bcs->ap == ap) {
			bcs->ap = ap->bcnext;
-			if (bcs->ap == NULL)
+			if (bcs->ap == NULL) {
				/* last one: stop ev-layer hupD notifications */
+				bcs->apconnstate = APCONN_NONE;
				bcs->chstate &= ~CHS_NOTIFY_LL;
+			}
+			spin_unlock_irqrestore(&bcs->aplock, flags);
			return;
		}
		for (oap = bcs->ap; oap != NULL; oap = oap->bcnext) {
			if (oap->bcnext == ap) {
				oap->bcnext = oap->bcnext->bcnext;
+				spin_unlock_irqrestore(&bcs->aplock, flags);
				return;
			}
		}
+		spin_unlock_irqrestore(&bcs->aplock, flags);
		dev_err(cs->dev, "%s: application %u not found\n",
			__func__, ap->id);
		return;
 
	default:		/* Reject */
		/* drop all competing applications, keep only this one */
-		for (oap = bcs->ap; oap != NULL; oap = oap->bcnext)
-			if (oap != ap)
+		spin_lock_irqsave(&bcs->aplock, flags);
+		while (bcs->ap != NULL) {
+			oap = bcs->ap;
+			bcs->ap = oap->bcnext;
+			if (oap != ap) {
+				spin_unlock_irqrestore(&bcs->aplock, flags);
				send_disconnect_ind(bcs, oap,
						    CapiCallGivenToOtherApplication);
+				spin_lock_irqsave(&bcs->aplock, flags);
+			}
+		}
		ap->bcnext = NULL;
		bcs->ap = ap;
+		spin_unlock_irqrestore(&bcs->aplock, flags);
 
		/* reject call - will trigger DISCONNECT_IND for this app */
		dev_info(cs->dev, "%s: Reject=%x\n",
@@ -1549,6 +1705,7 @@ static void do_connect_b3_req(struct gigaset_capi_ctr *iif,
 {
	struct cardstate *cs = iif->ctr.driverdata;
	_cmsg *cmsg = &iif->acmsg;
+	struct bc_state *bcs;
	int channel;
 
	/* decode message */
@@ -1563,9 +1720,10 @@ static void do_connect_b3_req(struct gigaset_capi_ctr *iif,
		send_conf(iif, ap, skb, CapiIllContrPlciNcci);
		return;
	}
+	bcs = &cs->bcs[channel-1];
 
	/* mark logical connection active */
-	ap->connected = APCONN_ACTIVE;
+	bcs->apconnstate = APCONN_ACTIVE;
 
	/* build NCCI: always 1 (one B3 connection only) */
	cmsg->adr.adrNCCI |= 1 << 16;
@@ -1611,7 +1769,7 @@ static void do_connect_b3_resp(struct gigaset_capi_ctr *iif,
 
	if (cmsg->Reject) {
		/* Reject: clear B3 connect received flag */
-		ap->connected = APCONN_SETUP;
+		bcs->apconnstate = APCONN_SETUP;
 
		/* trigger hangup, causing eventual DISCONNECT_IND */
		if (!gigaset_add_event(cs, &bcs->at_state,
@@ -1683,11 +1841,11 @@ static void do_disconnect_req(struct gigaset_capi_ctr *iif,
	}
 
	/* skip if DISCONNECT_IND already sent */
-	if (!ap->connected)
+	if (!bcs->apconnstate)
		return;
 
	/* check for active logical connection */
-	if (ap->connected >= APCONN_ACTIVE) {
+	if (bcs->apconnstate >= APCONN_ACTIVE) {
		/*
		 * emit DISCONNECT_B3_IND with cause 0x3301
		 * use separate cmsg structure, as the content of iif->acmsg
@@ -1736,6 +1894,7 @@ static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif,
 {
	struct cardstate *cs = iif->ctr.driverdata;
	_cmsg *cmsg = &iif->acmsg;
+	struct bc_state *bcs;
	int channel;
 
	/* decode message */
@@ -1751,17 +1910,17 @@ static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif,
		send_conf(iif, ap, skb, CapiIllContrPlciNcci);
		return;
	}
+	bcs = &cs->bcs[channel-1];
 
	/* reject if logical connection not active */
-	if (ap->connected < APCONN_ACTIVE) {
+	if (bcs->apconnstate < APCONN_ACTIVE) {
		send_conf(iif, ap, skb,
			  CapiMessageNotSupportedInCurrentState);
		return;
	}
 
	/* trigger hangup, causing eventual DISCONNECT_B3_IND */
-	if (!gigaset_add_event(cs, &cs->bcs[channel-1].at_state,
-			       EV_HUP, NULL, 0, NULL)) {
+	if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) {
		send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
		return;
	}
@@ -1782,11 +1941,14 @@ static void do_data_b3_req(struct gigaset_capi_ctr *iif,
			   struct sk_buff *skb)
 {
	struct cardstate *cs = iif->ctr.driverdata;
+	struct bc_state *bcs;
	int channel = CAPIMSG_PLCI_PART(skb->data);
	u16 ncci = CAPIMSG_NCCI_PART(skb->data);
	u16 msglen = CAPIMSG_LEN(skb->data);
	u16 datalen = CAPIMSG_DATALEN(skb->data);
	u16 flags = CAPIMSG_FLAGS(skb->data);
+	u16 msgid = CAPIMSG_MSGID(skb->data);
+	u16 handle = CAPIMSG_HANDLE_REQ(skb->data);
 
	/* frequent message, avoid _cmsg overhead */
	dump_rawmsg(DEBUG_LLDATA, "DATA_B3_REQ", skb->data);
@@ -1802,6 +1964,7 @@ static void do_data_b3_req(struct gigaset_capi_ctr *iif,
		send_conf(iif, ap, skb, CapiIllContrPlciNcci);
		return;
	}
+	bcs = &cs->bcs[channel-1];
	if (msglen != CAPI_DATA_B3_REQ_LEN && msglen != CAPI_DATA_B3_REQ_LEN64)
		dev_notice(cs->dev, "%s: unexpected length %d\n",
			   "DATA_B3_REQ", msglen);
@@ -1821,7 +1984,7 @@ static void do_data_b3_req(struct gigaset_capi_ctr *iif,
	}
 
	/* reject if logical connection not active */
-	if (ap->connected < APCONN_ACTIVE) {
+	if (bcs->apconnstate < APCONN_ACTIVE) {
		send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
		return;
	}
@@ -1832,17 +1995,19 @@ static void do_data_b3_req(struct gigaset_capi_ctr *iif,
	skb_pull(skb, msglen);
 
	/* pass to device-specific module */
-	if (cs->ops->send_skb(&cs->bcs[channel-1], skb) < 0) {
+	if (cs->ops->send_skb(bcs, skb) < 0) {
		send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
		return;
	}
 
-	/* DATA_B3_CONF reply will be sent by gigaset_skb_sent() */
-
	/*
-	 * ToDo: honor unset "delivery confirmation" bit
-	 * (send DATA_B3_CONF immediately?)
+	 * DATA_B3_CONF will be sent by gigaset_skb_sent() only if "delivery
+	 * confirmation" bit is set; otherwise we have to send it now
	 */
+	if (!(flags & CAPI_FLAGS_DELIVERY_CONFIRMATION))
+		send_data_b3_conf(cs, &iif->ctr, ap->id, msgid, channel, handle,
+				  flags ? CapiFlagsNotSupportedByProtocol
+					: CAPI_NOERROR);
 }
 
 /*
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index f6f45f221920..5d4befb81057 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -399,8 +399,8 @@ static void gigaset_freebcs(struct bc_state *bcs)
	gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel);
	clear_at_state(&bcs->at_state);
	gig_dbg(DEBUG_INIT, "freeing bcs[%d]->skb", bcs->channel);
-	dev_kfree_skb(bcs->skb);
-	bcs->skb = NULL;
+	dev_kfree_skb(bcs->rx_skb);
+	bcs->rx_skb = NULL;
 
	for (i = 0; i < AT_NUM; ++i) {
		kfree(bcs->commands[i]);
@@ -634,19 +634,10 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
	bcs->emptycount = 0;
 #endif
 
-	gig_dbg(DEBUG_INIT, "allocating bcs[%d]->skb", channel);
-	bcs->fcs = PPP_INITFCS;
+	bcs->rx_bufsize = 0;
+	bcs->rx_skb = NULL;
+	bcs->rx_fcs = PPP_INITFCS;
	bcs->inputstate = 0;
-	if (cs->ignoreframes) {
-		bcs->skb = NULL;
-	} else {
-		bcs->skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
-		if (bcs->skb != NULL)
-			skb_reserve(bcs->skb, cs->hw_hdr_len);
-		else
-			pr_err("out of memory\n");
-	}
-
	bcs->channel = channel;
	bcs->cs = cs;
 
@@ -658,16 +649,15 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
	for (i = 0; i < AT_NUM; ++i)
		bcs->commands[i] = NULL;
 
+	spin_lock_init(&bcs->aplock);
+	bcs->ap = NULL;
+	bcs->apconnstate = 0;
+
	gig_dbg(DEBUG_INIT, " setting up bcs[%d]->hw", channel);
	if (cs->ops->initbcshw(bcs))
		return bcs;
 
	gig_dbg(DEBUG_INIT, " failed");
-
-	gig_dbg(DEBUG_INIT, " freeing bcs[%d]->skb", channel);
-	dev_kfree_skb(bcs->skb);
-	bcs->skb = NULL;
-
	return NULL;
 }
 
@@ -839,14 +829,12 @@ void gigaset_bcs_reinit(struct bc_state *bcs)
	bcs->emptycount = 0;
 #endif
 
-	bcs->fcs = PPP_INITFCS;
+	bcs->rx_fcs = PPP_INITFCS;
	bcs->chstate = 0;
 
	bcs->ignore = cs->ignoreframes;
-	if (bcs->ignore) {
-		dev_kfree_skb(bcs->skb);
-		bcs->skb = NULL;
-	}
+	dev_kfree_skb(bcs->rx_skb);
+	bcs->rx_skb = NULL;
 
	cs->ops->reinitbcshw(bcs);
 }
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index 206c380c5235..ceaef9a04a42 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -282,9 +282,7 @@ struct reply_t gigaset_tab_cid[] =
 /* dial */
 {EV_DIAL, -1, -1, -1, -1, -1, {ACT_DIAL} },
 {RSP_INIT, 0, 0, SEQ_DIAL, 601, 5, {ACT_CMD+AT_BC} },
-{RSP_OK, 601, 601, -1, 602, 5, {ACT_CMD+AT_HLC} },
-{RSP_NULL, 602, 602, -1, 603, 5, {ACT_CMD+AT_PROTO} },
-{RSP_OK, 602, 602, -1, 603, 5, {ACT_CMD+AT_PROTO} },
+{RSP_OK, 601, 601, -1, 603, 5, {ACT_CMD+AT_PROTO} },
 {RSP_OK, 603, 603, -1, 604, 5, {ACT_CMD+AT_TYPE} },
 {RSP_OK, 604, 604, -1, 605, 5, {ACT_CMD+AT_MSN} },
 {RSP_NULL, 605, 605, -1, 606, 5, {ACT_CMD+AT_CLIP} },
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index 05947f9c1849..8738b0821fc9 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -45,10 +45,6 @@
 #define MAX_EVENTS 64		/* size of event queue */
 
 #define RBUFSIZE 8192
-#define SBUFSIZE 4096		/* sk_buff payload size */
-
-#define TRANSBUFSIZE 768	/* bytes per skb for transparent receive */
-#define MAX_BUF_SIZE (SBUFSIZE - 2)	/* Max. size of a data packet from LL */
 
 /* compile time options */
 #define GIG_MAJOR 0
@@ -190,10 +186,9 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
 #define AT_BC 3
 #define AT_PROTO 4
 #define AT_TYPE 5
-#define AT_HLC 6
-#define AT_CLIP 7
+#define AT_CLIP 6
 /* total number */
-#define AT_NUM 8
+#define AT_NUM 7
 
 /* variables in struct at_state_t */
 #define VAR_ZSAU 0
@@ -380,8 +375,10 @@ struct bc_state {
 
	struct at_state_t at_state;
 
-	__u16 fcs;
-	struct sk_buff *skb;
+	/* receive buffer */
+	unsigned rx_bufsize;		/* max size accepted by application */
+	struct sk_buff *rx_skb;
+	__u16 rx_fcs;
	int inputstate;			/* see INS_XXXX */
 
	int channel;
@@ -406,7 +403,9 @@ struct bc_state {
		struct bas_bc_state *bas;	/* usb hardware driver (base) */
	} hw;
 
-	void *ap;			/* LL application structure */
+	void *ap;			/* associated LL application */
+	int apconnstate;		/* LL application connection state */
+	spinlock_t aplock;
 };
 
 struct cardstate {
@@ -801,8 +800,23 @@ static inline void gigaset_bchannel_up(struct bc_state *bcs)
	gigaset_schedule_event(bcs->cs);
 }
 
-/* handling routines for sk_buff */
-/* ============================= */
+/* set up next receive skb for data mode */
+static inline struct sk_buff *gigaset_new_rx_skb(struct bc_state *bcs)
+{
+	struct cardstate *cs = bcs->cs;
+	unsigned short hw_hdr_len = cs->hw_hdr_len;
+
+	if (bcs->ignore) {
+		bcs->rx_skb = NULL;
+	} else {
+		bcs->rx_skb = dev_alloc_skb(bcs->rx_bufsize + hw_hdr_len);
+		if (bcs->rx_skb == NULL)
+			dev_warn(cs->dev, "could not allocate skb\n");
+		else
+			skb_reserve(bcs->rx_skb, hw_hdr_len);
+	}
+	return bcs->rx_skb;
+}
 
 /* append received bytes to inbuf */
 int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c index c22e5ace8276..f01c3c2e2e46 100644 --- a/drivers/isdn/gigaset/i4l.c +++ b/drivers/isdn/gigaset/i4l.c | |||
@@ -16,7 +16,10 @@ | |||
16 | #include "gigaset.h" | 16 | #include "gigaset.h" |
17 | #include <linux/isdnif.h> | 17 | #include <linux/isdnif.h> |
18 | 18 | ||
19 | #define SBUFSIZE 4096 /* sk_buff payload size */ | ||
20 | #define TRANSBUFSIZE 768 /* bytes per skb for transparent receive */ | ||
19 | #define HW_HDR_LEN 2 /* Header size used to store ack info */ | 21 | #define HW_HDR_LEN 2 /* Header size used to store ack info */ |
22 | #define MAX_BUF_SIZE (SBUFSIZE - HW_HDR_LEN) /* max data packet from LL */ | ||
20 | 23 | ||
21 | /* == Handling of I4L IO =====================================================*/ | 24 | /* == Handling of I4L IO =====================================================*/ |
22 | 25 | ||
@@ -231,6 +234,15 @@ static int command_from_LL(isdn_ctrl *cntrl) | |||
231 | dev_err(cs->dev, "ISDN_CMD_DIAL: channel not free\n"); | 234 | dev_err(cs->dev, "ISDN_CMD_DIAL: channel not free\n"); |
232 | return -EBUSY; | 235 | return -EBUSY; |
233 | } | 236 | } |
237 | switch (bcs->proto2) { | ||
238 | case L2_HDLC: | ||
239 | bcs->rx_bufsize = SBUFSIZE; | ||
240 | break; | ||
241 | default: /* assume transparent */ | ||
242 | bcs->rx_bufsize = TRANSBUFSIZE; | ||
243 | } | ||
244 | dev_kfree_skb(bcs->rx_skb); | ||
245 | gigaset_new_rx_skb(bcs); | ||
234 | 246 | ||
235 | commands = kzalloc(AT_NUM*(sizeof *commands), GFP_ATOMIC); | 247 | commands = kzalloc(AT_NUM*(sizeof *commands), GFP_ATOMIC); |
236 | if (!commands) { | 248 | if (!commands) { |
@@ -314,6 +326,15 @@ static int command_from_LL(isdn_ctrl *cntrl) | |||
314 | return -EINVAL; | 326 | return -EINVAL; |
315 | } | 327 | } |
316 | bcs = cs->bcs + ch; | 328 | bcs = cs->bcs + ch; |
329 | switch (bcs->proto2) { | ||
330 | case L2_HDLC: | ||
331 | bcs->rx_bufsize = SBUFSIZE; | ||
332 | break; | ||
333 | default: /* assume transparent */ | ||
334 | bcs->rx_bufsize = TRANSBUFSIZE; | ||
335 | } | ||
336 | dev_kfree_skb(bcs->rx_skb); | ||
337 | gigaset_new_rx_skb(bcs); | ||
317 | if (!gigaset_add_event(cs, &bcs->at_state, | 338 | if (!gigaset_add_event(cs, &bcs->at_state, |
318 | EV_ACCEPT, NULL, 0, NULL)) | 339 | EV_ACCEPT, NULL, 0, NULL)) |
319 | return -ENOMEM; | 340 | return -ENOMEM; |
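Note: in i4l.c both the ISDN_CMD_DIAL and ISDN_CMD_ACCEPTB paths now pick the receive buffer size from the layer-2 protocol and reallocate the rx skb before the call proceeds. A hedged sketch of that repeated block factored into one helper (the factoring and the helper name are mine, not part of the patch; assumes "gigaset.h"):

/* hypothetical consolidation of the duplicated block above */
static void i4l_setup_rx_buffer(struct bc_state *bcs)
{
	switch (bcs->proto2) {
	case L2_HDLC:
		bcs->rx_bufsize = SBUFSIZE;	/* room for full HDLC frames */
		break;
	default:				/* assume transparent */
		bcs->rx_bufsize = TRANSBUFSIZE;	/* smaller chunks */
	}
	dev_kfree_skb(bcs->rx_skb);		/* NULL-safe: drop stale buffer */
	gigaset_new_rx_skb(bcs);		/* allocate one of the new size */
}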
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c index 16fd3bd48883..2dfd346fc889 100644 --- a/drivers/isdn/gigaset/isocdata.c +++ b/drivers/isdn/gigaset/isocdata.c | |||
@@ -500,19 +500,18 @@ int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len) | |||
500 | */ | 500 | */ |
501 | static inline void hdlc_putbyte(unsigned char c, struct bc_state *bcs) | 501 | static inline void hdlc_putbyte(unsigned char c, struct bc_state *bcs) |
502 | { | 502 | { |
503 | bcs->fcs = crc_ccitt_byte(bcs->fcs, c); | 503 | bcs->rx_fcs = crc_ccitt_byte(bcs->rx_fcs, c); |
504 | if (unlikely(bcs->skb == NULL)) { | 504 | if (bcs->rx_skb == NULL) |
505 | /* skipping */ | 505 | /* skipping */ |
506 | return; | 506 | return; |
507 | } | 507 | if (bcs->rx_skb->len >= bcs->rx_bufsize) { |
508 | if (unlikely(bcs->skb->len == SBUFSIZE)) { | ||
509 | dev_warn(bcs->cs->dev, "received oversized packet discarded\n"); | 508 | dev_warn(bcs->cs->dev, "received oversized packet discarded\n"); |
510 | bcs->hw.bas->giants++; | 509 | bcs->hw.bas->giants++; |
511 | dev_kfree_skb_any(bcs->skb); | 510 | dev_kfree_skb_any(bcs->rx_skb); |
512 | bcs->skb = NULL; | 511 | bcs->rx_skb = NULL; |
513 | return; | 512 | return; |
514 | } | 513 | } |
515 | *__skb_put(bcs->skb, 1) = c; | 514 | *__skb_put(bcs->rx_skb, 1) = c; |
516 | } | 515 | } |
517 | 516 | ||
518 | /* hdlc_flush | 517 | /* hdlc_flush |
@@ -521,18 +520,13 @@ static inline void hdlc_putbyte(unsigned char c, struct bc_state *bcs) | |||
521 | static inline void hdlc_flush(struct bc_state *bcs) | 520 | static inline void hdlc_flush(struct bc_state *bcs) |
522 | { | 521 | { |
523 | /* clear skb or allocate new if not skipping */ | 522 | /* clear skb or allocate new if not skipping */ |
524 | if (likely(bcs->skb != NULL)) | 523 | if (bcs->rx_skb != NULL) |
525 | skb_trim(bcs->skb, 0); | 524 | skb_trim(bcs->rx_skb, 0); |
526 | else if (!bcs->ignore) { | 525 | else |
527 | bcs->skb = dev_alloc_skb(SBUFSIZE + bcs->cs->hw_hdr_len); | 526 | gigaset_new_rx_skb(bcs); |
528 | if (bcs->skb) | ||
529 | skb_reserve(bcs->skb, bcs->cs->hw_hdr_len); | ||
530 | else | ||
531 | dev_err(bcs->cs->dev, "could not allocate skb\n"); | ||
532 | } | ||
533 | 527 | ||
534 | /* reset packet state */ | 528 | /* reset packet state */ |
535 | bcs->fcs = PPP_INITFCS; | 529 | bcs->rx_fcs = PPP_INITFCS; |
536 | } | 530 | } |
537 | 531 | ||
538 | /* hdlc_done | 532 | /* hdlc_done |
@@ -549,7 +543,7 @@ static inline void hdlc_done(struct bc_state *bcs) | |||
549 | hdlc_flush(bcs); | 543 | hdlc_flush(bcs); |
550 | return; | 544 | return; |
551 | } | 545 | } |
552 | procskb = bcs->skb; | 546 | procskb = bcs->rx_skb; |
553 | if (procskb == NULL) { | 547 | if (procskb == NULL) { |
554 | /* previous error */ | 548 | /* previous error */ |
555 | gig_dbg(DEBUG_ISO, "%s: skb=NULL", __func__); | 549 | gig_dbg(DEBUG_ISO, "%s: skb=NULL", __func__); |
@@ -560,8 +554,8 @@ static inline void hdlc_done(struct bc_state *bcs) | |||
560 | bcs->hw.bas->runts++; | 554 | bcs->hw.bas->runts++; |
561 | dev_kfree_skb_any(procskb); | 555 | dev_kfree_skb_any(procskb); |
562 | gigaset_isdn_rcv_err(bcs); | 556 | gigaset_isdn_rcv_err(bcs); |
563 | } else if (bcs->fcs != PPP_GOODFCS) { | 557 | } else if (bcs->rx_fcs != PPP_GOODFCS) { |
564 | dev_notice(cs->dev, "frame check error (0x%04x)\n", bcs->fcs); | 558 | dev_notice(cs->dev, "frame check error\n"); |
565 | bcs->hw.bas->fcserrs++; | 559 | bcs->hw.bas->fcserrs++; |
566 | dev_kfree_skb_any(procskb); | 560 | dev_kfree_skb_any(procskb); |
567 | gigaset_isdn_rcv_err(bcs); | 561 | gigaset_isdn_rcv_err(bcs); |
@@ -574,13 +568,8 @@ static inline void hdlc_done(struct bc_state *bcs) | |||
574 | bcs->hw.bas->goodbytes += len; | 568 | bcs->hw.bas->goodbytes += len; |
575 | gigaset_skb_rcvd(bcs, procskb); | 569 | gigaset_skb_rcvd(bcs, procskb); |
576 | } | 570 | } |
577 | 571 | gigaset_new_rx_skb(bcs); | |
578 | bcs->skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len); | 572 | bcs->rx_fcs = PPP_INITFCS; |
579 | if (bcs->skb) | ||
580 | skb_reserve(bcs->skb, cs->hw_hdr_len); | ||
581 | else | ||
582 | dev_err(cs->dev, "could not allocate skb\n"); | ||
583 | bcs->fcs = PPP_INITFCS; | ||
584 | } | 573 | } |
585 | 574 | ||
586 | /* hdlc_frag | 575 | /* hdlc_frag |
@@ -597,8 +586,8 @@ static inline void hdlc_frag(struct bc_state *bcs, unsigned inbits) | |||
597 | dev_notice(bcs->cs->dev, "received partial byte (%d bits)\n", inbits); | 586 | dev_notice(bcs->cs->dev, "received partial byte (%d bits)\n", inbits); |
598 | bcs->hw.bas->alignerrs++; | 587 | bcs->hw.bas->alignerrs++; |
599 | gigaset_isdn_rcv_err(bcs); | 588 | gigaset_isdn_rcv_err(bcs); |
600 | __skb_trim(bcs->skb, 0); | 589 | __skb_trim(bcs->rx_skb, 0); |
601 | bcs->fcs = PPP_INITFCS; | 590 | bcs->rx_fcs = PPP_INITFCS; |
602 | } | 591 | } |
603 | 592 | ||
604 | /* bit counts lookup table for HDLC bit unstuffing | 593 | /* bit counts lookup table for HDLC bit unstuffing |
@@ -847,7 +836,6 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count, | |||
847 | static inline void trans_receive(unsigned char *src, unsigned count, | 836 | static inline void trans_receive(unsigned char *src, unsigned count, |
848 | struct bc_state *bcs) | 837 | struct bc_state *bcs) |
849 | { | 838 | { |
850 | struct cardstate *cs = bcs->cs; | ||
851 | struct sk_buff *skb; | 839 | struct sk_buff *skb; |
852 | int dobytes; | 840 | int dobytes; |
853 | unsigned char *dst; | 841 | unsigned char *dst; |
@@ -857,17 +845,11 @@ static inline void trans_receive(unsigned char *src, unsigned count, | |||
857 | hdlc_flush(bcs); | 845 | hdlc_flush(bcs); |
858 | return; | 846 | return; |
859 | } | 847 | } |
860 | skb = bcs->skb; | 848 | skb = bcs->rx_skb; |
861 | if (unlikely(skb == NULL)) { | 849 | if (skb == NULL) |
862 | bcs->skb = skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len); | 850 | skb = gigaset_new_rx_skb(bcs); |
863 | if (!skb) { | ||
864 | dev_err(cs->dev, "could not allocate skb\n"); | ||
865 | return; | ||
866 | } | ||
867 | skb_reserve(skb, cs->hw_hdr_len); | ||
868 | } | ||
869 | bcs->hw.bas->goodbytes += skb->len; | 851 | bcs->hw.bas->goodbytes += skb->len; |
870 | dobytes = TRANSBUFSIZE - skb->len; | 852 | dobytes = bcs->rx_bufsize - skb->len; |
871 | while (count > 0) { | 853 | while (count > 0) { |
872 | dst = skb_put(skb, count < dobytes ? count : dobytes); | 854 | dst = skb_put(skb, count < dobytes ? count : dobytes); |
873 | while (count > 0 && dobytes > 0) { | 855 | while (count > 0 && dobytes > 0) { |
@@ -879,14 +861,10 @@ static inline void trans_receive(unsigned char *src, unsigned count, | |||
879 | dump_bytes(DEBUG_STREAM_DUMP, | 861 | dump_bytes(DEBUG_STREAM_DUMP, |
880 | "rcv data", skb->data, skb->len); | 862 | "rcv data", skb->data, skb->len); |
881 | gigaset_skb_rcvd(bcs, skb); | 863 | gigaset_skb_rcvd(bcs, skb); |
882 | bcs->skb = skb = | 864 | skb = gigaset_new_rx_skb(bcs); |
883 | dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len); | 865 | if (skb == NULL) |
884 | if (!skb) { | ||
885 | dev_err(cs->dev, "could not allocate skb\n"); | ||
886 | return; | 866 | return; |
887 | } | 867 | dobytes = bcs->rx_bufsize; |
888 | skb_reserve(skb, cs->hw_hdr_len); | ||
889 | dobytes = TRANSBUFSIZE; | ||
890 | } | 868 | } |
891 | } | 869 | } |
892 | } | 870 | } |
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c index 72eb92647c1b..feec8d89d719 100644 --- a/drivers/isdn/hysdn/hysdn_net.c +++ b/drivers/isdn/hysdn/hysdn_net.c | |||
@@ -187,12 +187,13 @@ void | |||
187 | hysdn_rx_netpkt(hysdn_card * card, unsigned char *buf, unsigned short len) | 187 | hysdn_rx_netpkt(hysdn_card * card, unsigned char *buf, unsigned short len) |
188 | { | 188 | { |
189 | struct net_local *lp = card->netif; | 189 | struct net_local *lp = card->netif; |
190 | struct net_device *dev = lp->dev; | 190 | struct net_device *dev; |
191 | struct sk_buff *skb; | 191 | struct sk_buff *skb; |
192 | 192 | ||
193 | if (!lp) | 193 | if (!lp) |
194 | return; /* non existing device */ | 194 | return; /* non existing device */ |
195 | 195 | ||
196 | dev = lp->dev; | ||
196 | dev->stats.rx_bytes += len; | 197 | dev->stats.rx_bytes += len; |
197 | 198 | ||
198 | skb = dev_alloc_skb(len); | 199 | skb = dev_alloc_skb(len); |
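Note: the hysdn_rx_netpkt() change is an ordering fix: dev was initialized from lp before the !lp check, so the NULL case dereferenced a NULL pointer before bailing out. A small self-contained sketch of the corrected declare, check, then dereference ordering (types and names hypothetical):

#include <stddef.h>

struct item { int value; };
struct holder { struct item *it; };

/* sketch: validate the pointer chain before any member access */
static int read_value(const struct holder *h)
{
	const struct item *it;

	if (h == NULL || h->it == NULL)
		return -1;	/* nothing to read */

	it = h->it;		/* dereference only after the check */
	return it->value;
}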
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 25c14c6236f5..3662d6e446a9 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -333,7 +333,8 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp) | |||
333 | 333 | ||
334 | if ((client_info->assigned) && | 334 | if ((client_info->assigned) && |
335 | (client_info->ip_src == arp->ip_dst) && | 335 | (client_info->ip_src == arp->ip_dst) && |
336 | (client_info->ip_dst == arp->ip_src)) { | 336 | (client_info->ip_dst == arp->ip_src) && |
337 | (compare_ether_addr_64bits(client_info->mac_dst, arp->mac_src))) { | ||
337 | /* update the clients MAC address */ | 338 | /* update the clients MAC address */ |
338 | memcpy(client_info->mac_dst, arp->mac_src, ETH_ALEN); | 339 | memcpy(client_info->mac_dst, arp->mac_src, ETH_ALEN); |
339 | client_info->ntt = 1; | 340 | client_info->ntt = 1; |
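Note: rlb_update_entry_from_arp() now also compares the cached destination MAC with the ARP source MAC; compare_ether_addr_64bits() returns nonzero when they differ, so the entry is rewritten and marked ntt only when something actually changed. A hedged sketch of that update-only-on-change guard, using the plain 6-byte compare_ether_addr() helper (the _64bits variant used above additionally relies on the operands living in structures padded to 8 bytes):

#include <linux/types.h>
#include <linux/string.h>
#include <linux/etherdevice.h>

/* illustrative: refresh a cached MAC only when it really changed, so
 * the "need to transmit" flag is not raised for identical updates */
static bool mac_cache_refresh(u8 cached[ETH_ALEN], const u8 seen[ETH_ALEN])
{
	if (!compare_ether_addr(cached, seen))
		return false;		/* identical: nothing to do */

	memcpy(cached, seen, ETH_ALEN);	/* differs: take the new address */
	return true;			/* caller sets ntt etc. */
}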
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 9bb9bfa225b6..822808810a13 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -176,7 +176,7 @@ static int arp_ip_count; | |||
176 | static int bond_mode = BOND_MODE_ROUNDROBIN; | 176 | static int bond_mode = BOND_MODE_ROUNDROBIN; |
177 | static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; | 177 | static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; |
178 | static int lacp_fast; | 178 | static int lacp_fast; |
179 | 179 | static int disable_netpoll = 1; | |
180 | 180 | ||
181 | const struct bond_parm_tbl bond_lacp_tbl[] = { | 181 | const struct bond_parm_tbl bond_lacp_tbl[] = { |
182 | { "slow", AD_LACP_SLOW}, | 182 | { "slow", AD_LACP_SLOW}, |
@@ -1766,15 +1766,23 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1766 | bond_set_carrier(bond); | 1766 | bond_set_carrier(bond); |
1767 | 1767 | ||
1768 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1768 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1769 | if (slaves_support_netpoll(bond_dev)) { | 1769 | /* |
1770 | bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL; | 1770 | * Netpoll and bonding is broken, make sure it is not initialized |
1771 | if (bond_dev->npinfo) | 1771 | * until it is fixed. |
1772 | slave_dev->npinfo = bond_dev->npinfo; | 1772 | */ |
1773 | } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) { | 1773 | if (disable_netpoll) { |
1774 | bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; | 1774 | bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; |
1775 | pr_info("New slave device %s does not support netpoll\n", | 1775 | } else { |
1776 | slave_dev->name); | 1776 | if (slaves_support_netpoll(bond_dev)) { |
1777 | pr_info("Disabling netpoll support for %s\n", bond_dev->name); | 1777 | bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL; |
1778 | if (bond_dev->npinfo) | ||
1779 | slave_dev->npinfo = bond_dev->npinfo; | ||
1780 | } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) { | ||
1781 | bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; | ||
1782 | pr_info("New slave device %s does not support netpoll\n", | ||
1783 | slave_dev->name); | ||
1784 | pr_info("Disabling netpoll support for %s\n", bond_dev->name); | ||
1785 | } | ||
1778 | } | 1786 | } |
1779 | #endif | 1787 | #endif |
1780 | read_unlock(&bond->lock); | 1788 | read_unlock(&bond->lock); |
@@ -1977,8 +1985,11 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1977 | 1985 | ||
1978 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1986 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1979 | read_lock_bh(&bond->lock); | 1987 | read_lock_bh(&bond->lock); |
1980 | if (slaves_support_netpoll(bond_dev)) | 1988 | |
1981 | bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL; | 1989 | /* Make sure netpoll over stays disabled until fixed. */ |
1990 | if (!disable_netpoll) | ||
1991 | if (slaves_support_netpoll(bond_dev)) | ||
1992 | bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL; | ||
1982 | read_unlock_bh(&bond->lock); | 1993 | read_unlock_bh(&bond->lock); |
1983 | if (slave_dev->netdev_ops->ndo_netpoll_cleanup) | 1994 | if (slave_dev->netdev_ops->ndo_netpoll_cleanup) |
1984 | slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev); | 1995 | slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev); |
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c index 1756d28250d4..38de1a4f825f 100644 --- a/drivers/net/cpmac.c +++ b/drivers/net/cpmac.c | |||
@@ -1181,7 +1181,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev) | |||
1181 | if (netif_msg_drv(priv)) | 1181 | if (netif_msg_drv(priv)) |
1182 | printk(KERN_ERR "%s: Could not attach to PHY\n", | 1182 | printk(KERN_ERR "%s: Could not attach to PHY\n", |
1183 | dev->name); | 1183 | dev->name); |
1184 | return PTR_ERR(priv->phy); | 1184 | rc = PTR_ERR(priv->phy); |
1185 | goto fail; | ||
1185 | } | 1186 | } |
1186 | 1187 | ||
1187 | if ((rc = register_netdev(dev))) { | 1188 | if ((rc = register_netdev(dev))) { |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 55099a50cca4..b235aa16290f 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -3741,10 +3741,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
3741 | /* signal that we are down to the interrupt handler */ | 3741 | /* signal that we are down to the interrupt handler */ |
3742 | set_bit(__IXGBE_DOWN, &adapter->state); | 3742 | set_bit(__IXGBE_DOWN, &adapter->state); |
3743 | 3743 | ||
3744 | /* power down the optics */ | ||
3745 | if (hw->phy.multispeed_fiber) | ||
3746 | hw->mac.ops.disable_tx_laser(hw); | ||
3747 | |||
3748 | /* disable receive for all VFs and wait one second */ | 3744 | /* disable receive for all VFs and wait one second */ |
3749 | if (adapter->num_vfs) { | 3745 | if (adapter->num_vfs) { |
3750 | /* ping all the active vfs to let them know we are going down */ | 3746 | /* ping all the active vfs to let them know we are going down */ |
@@ -3799,6 +3795,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
3799 | (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & | 3795 | (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & |
3800 | ~IXGBE_DMATXCTL_TE)); | 3796 | ~IXGBE_DMATXCTL_TE)); |
3801 | 3797 | ||
3798 | /* power down the optics */ | ||
3799 | if (hw->phy.multispeed_fiber) | ||
3800 | hw->mac.ops.disable_tx_laser(hw); | ||
3801 | |||
3802 | /* clear n-tuple filters that are cached */ | 3802 | /* clear n-tuple filters that are cached */ |
3803 | ethtool_ntuple_flush(netdev); | 3803 | ethtool_ntuple_flush(netdev); |
3804 | 3804 | ||
@@ -4058,7 +4058,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) | |||
4058 | 4058 | ||
4059 | done: | 4059 | done: |
4060 | /* Notify the stack of the (possibly) reduced Tx Queue count. */ | 4060 | /* Notify the stack of the (possibly) reduced Tx Queue count. */ |
4061 | adapter->netdev->real_num_tx_queues = adapter->num_tx_queues; | 4061 | netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); |
4062 | } | 4062 | } |
4063 | 4063 | ||
4064 | static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, | 4064 | static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, |
@@ -5246,7 +5246,6 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) | |||
5246 | ixgbe_free_all_tx_resources(adapter); | 5246 | ixgbe_free_all_tx_resources(adapter); |
5247 | ixgbe_free_all_rx_resources(adapter); | 5247 | ixgbe_free_all_rx_resources(adapter); |
5248 | } | 5248 | } |
5249 | ixgbe_clear_interrupt_scheme(adapter); | ||
5250 | 5249 | ||
5251 | #ifdef CONFIG_PM | 5250 | #ifdef CONFIG_PM |
5252 | retval = pci_save_state(pdev); | 5251 | retval = pci_save_state(pdev); |
@@ -5281,6 +5280,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) | |||
5281 | 5280 | ||
5282 | *enable_wake = !!wufc; | 5281 | *enable_wake = !!wufc; |
5283 | 5282 | ||
5283 | ixgbe_clear_interrupt_scheme(adapter); | ||
5284 | |||
5284 | ixgbe_release_hw_control(adapter); | 5285 | ixgbe_release_hw_control(adapter); |
5285 | 5286 | ||
5286 | pci_disable_device(pdev); | 5287 | pci_disable_device(pdev); |
@@ -6071,7 +6072,6 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, | |||
6071 | static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, | 6072 | static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, |
6072 | int queue, u32 tx_flags) | 6073 | int queue, u32 tx_flags) |
6073 | { | 6074 | { |
6074 | /* Right now, we support IPv4 only */ | ||
6075 | struct ixgbe_atr_input atr_input; | 6075 | struct ixgbe_atr_input atr_input; |
6076 | struct tcphdr *th; | 6076 | struct tcphdr *th; |
6077 | struct iphdr *iph = ip_hdr(skb); | 6077 | struct iphdr *iph = ip_hdr(skb); |
@@ -6080,6 +6080,9 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, | |||
6080 | u32 src_ipv4_addr, dst_ipv4_addr; | 6080 | u32 src_ipv4_addr, dst_ipv4_addr; |
6081 | u8 l4type = 0; | 6081 | u8 l4type = 0; |
6082 | 6082 | ||
6083 | /* Right now, we support IPv4 only */ | ||
6084 | if (skb->protocol != htons(ETH_P_IP)) | ||
6085 | return; | ||
6083 | /* check if we're UDP or TCP */ | 6086 | /* check if we're UDP or TCP */ |
6084 | if (iph->protocol == IPPROTO_TCP) { | 6087 | if (iph->protocol == IPPROTO_TCP) { |
6085 | th = tcp_hdr(skb); | 6088 | th = tcp_hdr(skb); |
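Note: besides reordering the optics power-down to after the TX DMA engine is stopped and switching to the netif_set_real_num_tx_queues() helper, the ixgbe_atr() hunk turns the "IPv4 only" comment into an actual guard, so non-IP frames are rejected before ip_hdr() is interpreted. A minimal sketch of that early-return protocol check (function name is hypothetical, headers are the usual ones):

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>

/* illustrative: only inspect the IPv4/TCP headers when the frame
 * really carries IPv4; anything else is ignored by the filter */
static void classify_ipv4_tcp(struct sk_buff *skb)
{
	struct iphdr *iph;

	if (skb->protocol != htons(ETH_P_IP))
		return;			/* not IPv4: nothing to do */

	iph = ip_hdr(skb);
	if (iph->protocol != IPPROTO_TCP)
		return;			/* only TCP flows are sampled here */

	/* ... fill in the filter input from ip_hdr()/tcp_hdr() ... */
}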
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c index 7b12d0e8f4bd..fa303c881a48 100644 --- a/drivers/net/ll_temac_main.c +++ b/drivers/net/ll_temac_main.c | |||
@@ -985,7 +985,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) | |||
985 | np = of_parse_phandle(op->dev.of_node, "llink-connected", 0); | 985 | np = of_parse_phandle(op->dev.of_node, "llink-connected", 0); |
986 | if (!np) { | 986 | if (!np) { |
987 | dev_err(&op->dev, "could not find DMA node\n"); | 987 | dev_err(&op->dev, "could not find DMA node\n"); |
988 | goto nodev; | 988 | goto err_iounmap; |
989 | } | 989 | } |
990 | 990 | ||
991 | /* Setup the DMA register accesses, could be DCR or memory mapped */ | 991 | /* Setup the DMA register accesses, could be DCR or memory mapped */ |
@@ -999,7 +999,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) | |||
999 | dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs); | 999 | dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs); |
1000 | } else { | 1000 | } else { |
1001 | dev_err(&op->dev, "unable to map DMA registers\n"); | 1001 | dev_err(&op->dev, "unable to map DMA registers\n"); |
1002 | goto nodev; | 1002 | goto err_iounmap; |
1003 | } | 1003 | } |
1004 | } | 1004 | } |
1005 | 1005 | ||
@@ -1008,7 +1008,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) | |||
1008 | if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) { | 1008 | if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) { |
1009 | dev_err(&op->dev, "could not determine irqs\n"); | 1009 | dev_err(&op->dev, "could not determine irqs\n"); |
1010 | rc = -ENOMEM; | 1010 | rc = -ENOMEM; |
1011 | goto nodev; | 1011 | goto err_iounmap_2; |
1012 | } | 1012 | } |
1013 | 1013 | ||
1014 | of_node_put(np); /* Finished with the DMA node; drop the reference */ | 1014 | of_node_put(np); /* Finished with the DMA node; drop the reference */ |
@@ -1018,7 +1018,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) | |||
1018 | if ((!addr) || (size != 6)) { | 1018 | if ((!addr) || (size != 6)) { |
1019 | dev_err(&op->dev, "could not find MAC address\n"); | 1019 | dev_err(&op->dev, "could not find MAC address\n"); |
1020 | rc = -ENODEV; | 1020 | rc = -ENODEV; |
1021 | goto nodev; | 1021 | goto err_iounmap_2; |
1022 | } | 1022 | } |
1023 | temac_set_mac_address(ndev, (void *)addr); | 1023 | temac_set_mac_address(ndev, (void *)addr); |
1024 | 1024 | ||
@@ -1034,7 +1034,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) | |||
1034 | rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group); | 1034 | rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group); |
1035 | if (rc) { | 1035 | if (rc) { |
1036 | dev_err(lp->dev, "Error creating sysfs files\n"); | 1036 | dev_err(lp->dev, "Error creating sysfs files\n"); |
1037 | goto nodev; | 1037 | goto err_iounmap_2; |
1038 | } | 1038 | } |
1039 | 1039 | ||
1040 | rc = register_netdev(lp->ndev); | 1040 | rc = register_netdev(lp->ndev); |
@@ -1047,6 +1047,11 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) | |||
1047 | 1047 | ||
1048 | err_register_ndev: | 1048 | err_register_ndev: |
1049 | sysfs_remove_group(&lp->dev->kobj, &temac_attr_group); | 1049 | sysfs_remove_group(&lp->dev->kobj, &temac_attr_group); |
1050 | err_iounmap_2: | ||
1051 | if (lp->sdma_regs) | ||
1052 | iounmap(lp->sdma_regs); | ||
1053 | err_iounmap: | ||
1054 | iounmap(lp->regs); | ||
1050 | nodev: | 1055 | nodev: |
1051 | free_netdev(ndev); | 1056 | free_netdev(ndev); |
1052 | ndev = NULL; | 1057 | ndev = NULL; |
@@ -1065,6 +1070,9 @@ static int __devexit temac_of_remove(struct of_device *op) | |||
1065 | of_node_put(lp->phy_node); | 1070 | of_node_put(lp->phy_node); |
1066 | lp->phy_node = NULL; | 1071 | lp->phy_node = NULL; |
1067 | dev_set_drvdata(&op->dev, NULL); | 1072 | dev_set_drvdata(&op->dev, NULL); |
1073 | iounmap(lp->regs); | ||
1074 | if (lp->sdma_regs) | ||
1075 | iounmap(lp->sdma_regs); | ||
1068 | free_netdev(ndev); | 1076 | free_netdev(ndev); |
1069 | return 0; | 1077 | return 0; |
1070 | } | 1078 | } |
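Note: temac_of_probe() gains a proper unwind ladder: every failure after an ioremap now jumps to a label that releases exactly what has been mapped so far, and temac_of_remove() unmaps the same regions, closing the leaks behind the old catch-all nodev label. A generic, self-contained sketch of the goto-based unwind pattern (plain allocations stand in for the ioremaps; all names hypothetical):

#include <stdlib.h>

static int fake_register(void)	/* stands in for register_netdev() */
{
	return -1;
}

/* sketch: acquire resources in order, release in reverse on error */
static int setup(void)
{
	char *regs, *dma_regs;
	int rc;

	regs = malloc(64);		/* stands in for the first mapping */
	if (!regs)
		return -1;

	dma_regs = malloc(64);		/* stands in for the DMA mapping */
	if (!dma_regs) {
		rc = -1;
		goto err_unmap_regs;	/* only the first mapping exists */
	}

	rc = fake_register();
	if (rc)
		goto err_unmap_dma;	/* undo both mappings */

	return 0;

err_unmap_dma:
	free(dma_regs);
err_unmap_regs:
	free(regs);
	return rc;
}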
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 82b720f29c75..af075af20e0c 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -289,6 +289,7 @@ struct mv643xx_eth_shared_private { | |||
289 | unsigned int t_clk; | 289 | unsigned int t_clk; |
290 | int extended_rx_coal_limit; | 290 | int extended_rx_coal_limit; |
291 | int tx_bw_control; | 291 | int tx_bw_control; |
292 | int tx_csum_limit; | ||
292 | }; | 293 | }; |
293 | 294 | ||
294 | #define TX_BW_CONTROL_ABSENT 0 | 295 | #define TX_BW_CONTROL_ABSENT 0 |
@@ -776,13 +777,16 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb) | |||
776 | l4i_chk = 0; | 777 | l4i_chk = 0; |
777 | 778 | ||
778 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 779 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
780 | int hdr_len; | ||
779 | int tag_bytes; | 781 | int tag_bytes; |
780 | 782 | ||
781 | BUG_ON(skb->protocol != htons(ETH_P_IP) && | 783 | BUG_ON(skb->protocol != htons(ETH_P_IP) && |
782 | skb->protocol != htons(ETH_P_8021Q)); | 784 | skb->protocol != htons(ETH_P_8021Q)); |
783 | 785 | ||
784 | tag_bytes = (void *)ip_hdr(skb) - (void *)skb->data - ETH_HLEN; | 786 | hdr_len = (void *)ip_hdr(skb) - (void *)skb->data; |
785 | if (unlikely(tag_bytes & ~12)) { | 787 | tag_bytes = hdr_len - ETH_HLEN; |
788 | if (skb->len - hdr_len > mp->shared->tx_csum_limit || | ||
789 | unlikely(tag_bytes & ~12)) { | ||
786 | if (skb_checksum_help(skb) == 0) | 790 | if (skb_checksum_help(skb) == 0) |
787 | goto no_csum; | 791 | goto no_csum; |
788 | kfree_skb(skb); | 792 | kfree_skb(skb); |
@@ -2671,6 +2675,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) | |||
2671 | * Detect hardware parameters. | 2675 | * Detect hardware parameters. |
2672 | */ | 2676 | */ |
2673 | msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000; | 2677 | msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000; |
2678 | msp->tx_csum_limit = pd->tx_csum_limit ? pd->tx_csum_limit : 9 * 1024; | ||
2674 | infer_hw_params(msp); | 2679 | infer_hw_params(msp); |
2675 | 2680 | ||
2676 | platform_set_drvdata(pdev, msp); | 2681 | platform_set_drvdata(pdev, msp); |
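Note: mv643xx_eth_shared_probe() now records a per-controller tx_csum_limit (platform-provided, else 9 KiB), and txq_submit_skb() falls back to skb_checksum_help() whenever the payload past the IP header exceeds that limit, since the hardware checksum engine cannot cover larger frames. A hedged sketch of that offload-or-software-fallback decision (helper name and parameters are mine):

#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* illustrative: decide between hardware and software checksumming for
 * a CHECKSUM_PARTIAL frame; 'limit' plays the role of tx_csum_limit */
static int prepare_tx_csum(struct sk_buff *skb, unsigned int hdr_len,
			   unsigned int limit)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;			/* nothing to offload */

	if (skb->len - hdr_len > limit)
		/* too large for the engine: compute it in software */
		return skb_checksum_help(skb);

	/* small enough: leave it to the hardware descriptors */
	return 0;
}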
diff --git a/drivers/net/ne.c b/drivers/net/ne.c index b8e2923a1d69..1063093b3afc 100644 --- a/drivers/net/ne.c +++ b/drivers/net/ne.c | |||
@@ -806,8 +806,10 @@ static int __init ne_drv_probe(struct platform_device *pdev) | |||
806 | dev->base_addr = res->start; | 806 | dev->base_addr = res->start; |
807 | dev->irq = platform_get_irq(pdev, 0); | 807 | dev->irq = platform_get_irq(pdev, 0); |
808 | } else { | 808 | } else { |
809 | if (this_dev < 0 || this_dev >= MAX_NE_CARDS) | 809 | if (this_dev < 0 || this_dev >= MAX_NE_CARDS) { |
810 | free_netdev(dev); | ||
810 | return -EINVAL; | 811 | return -EINVAL; |
812 | } | ||
811 | dev->base_addr = io[this_dev]; | 813 | dev->base_addr = io[this_dev]; |
812 | dev->irq = irq[this_dev]; | 814 | dev->irq = irq[this_dev]; |
813 | dev->mem_end = bad[this_dev]; | 815 | dev->mem_end = bad[this_dev]; |
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index 64e6a84bbbbe..307cd1721e91 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c | |||
@@ -1505,12 +1505,20 @@ irq_done: | |||
1505 | writeb(cor & ~COR_IREQ_ENA, smc->base + MOT_LAN + CISREG_COR); | 1505 | writeb(cor & ~COR_IREQ_ENA, smc->base + MOT_LAN + CISREG_COR); |
1506 | writeb(cor, smc->base + MOT_LAN + CISREG_COR); | 1506 | writeb(cor, smc->base + MOT_LAN + CISREG_COR); |
1507 | } | 1507 | } |
1508 | #ifdef DOES_NOT_WORK | 1508 | |
1509 | if (smc->base != NULL) { /* Megahertz MFC's */ | 1509 | if ((smc->base != NULL) && /* Megahertz MFC's */ |
1510 | readb(smc->base+MEGAHERTZ_ISR); | 1510 | (smc->manfid == MANFID_MEGAHERTZ) && |
1511 | readb(smc->base+MEGAHERTZ_ISR); | 1511 | (smc->cardid == PRODID_MEGAHERTZ_EM3288)) { |
1512 | |||
1513 | u_char tmp; | ||
1514 | tmp = readb(smc->base+MEGAHERTZ_ISR); | ||
1515 | tmp = readb(smc->base+MEGAHERTZ_ISR); | ||
1516 | |||
1517 | /* Retrigger interrupt if needed */ | ||
1518 | writeb(tmp, smc->base + MEGAHERTZ_ISR); | ||
1519 | writeb(tmp, smc->base + MEGAHERTZ_ISR); | ||
1512 | } | 1520 | } |
1513 | #endif | 1521 | |
1514 | spin_unlock(&smc->lock); | 1522 | spin_unlock(&smc->lock); |
1515 | return IRQ_RETVAL(handled); | 1523 | return IRQ_RETVAL(handled); |
1516 | } | 1524 | } |
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c index dbd003453737..29c39ff85de5 100644 --- a/drivers/net/phy/lxt.c +++ b/drivers/net/phy/lxt.c | |||
@@ -226,6 +226,7 @@ module_exit(lxt_exit); | |||
226 | static struct mdio_device_id lxt_tbl[] = { | 226 | static struct mdio_device_id lxt_tbl[] = { |
227 | { 0x78100000, 0xfffffff0 }, | 227 | { 0x78100000, 0xfffffff0 }, |
228 | { 0x001378e0, 0xfffffff0 }, | 228 | { 0x001378e0, 0xfffffff0 }, |
229 | { 0x00137a10, 0xfffffff0 }, | ||
229 | { } | 230 | { } |
230 | }; | 231 | }; |
231 | 232 | ||
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index dd9e86ca7c5a..8d63f69b27d9 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c | |||
@@ -4642,8 +4642,7 @@ static void ql_timer(unsigned long data) | |||
4642 | return; | 4642 | return; |
4643 | } | 4643 | } |
4644 | 4644 | ||
4645 | qdev->timer.expires = jiffies + (5*HZ); | 4645 | mod_timer(&qdev->timer, jiffies + (5*HZ)); |
4646 | add_timer(&qdev->timer); | ||
4647 | } | 4646 | } |
4648 | 4647 | ||
4649 | static int __devinit qlge_probe(struct pci_dev *pdev, | 4648 | static int __devinit qlge_probe(struct pci_dev *pdev, |
@@ -4744,6 +4743,8 @@ static void ql_eeh_close(struct net_device *ndev) | |||
4744 | netif_stop_queue(ndev); | 4743 | netif_stop_queue(ndev); |
4745 | } | 4744 | } |
4746 | 4745 | ||
4746 | /* Disabling the timer */ | ||
4747 | del_timer_sync(&qdev->timer); | ||
4747 | if (test_bit(QL_ADAPTER_UP, &qdev->flags)) | 4748 | if (test_bit(QL_ADAPTER_UP, &qdev->flags)) |
4748 | cancel_delayed_work_sync(&qdev->asic_reset_work); | 4749 | cancel_delayed_work_sync(&qdev->asic_reset_work); |
4749 | cancel_delayed_work_sync(&qdev->mpi_reset_work); | 4750 | cancel_delayed_work_sync(&qdev->mpi_reset_work); |
@@ -4839,8 +4840,7 @@ static void qlge_io_resume(struct pci_dev *pdev) | |||
4839 | netif_err(qdev, ifup, qdev->ndev, | 4840 | netif_err(qdev, ifup, qdev->ndev, |
4840 | "Device was not running prior to EEH.\n"); | 4841 | "Device was not running prior to EEH.\n"); |
4841 | } | 4842 | } |
4842 | qdev->timer.expires = jiffies + (5*HZ); | 4843 | mod_timer(&qdev->timer, jiffies + (5*HZ)); |
4843 | add_timer(&qdev->timer); | ||
4844 | netif_device_attach(ndev); | 4844 | netif_device_attach(ndev); |
4845 | } | 4845 | } |
4846 | 4846 | ||
@@ -4902,8 +4902,7 @@ static int qlge_resume(struct pci_dev *pdev) | |||
4902 | return err; | 4902 | return err; |
4903 | } | 4903 | } |
4904 | 4904 | ||
4905 | qdev->timer.expires = jiffies + (5*HZ); | 4905 | mod_timer(&qdev->timer, jiffies + (5*HZ)); |
4906 | add_timer(&qdev->timer); | ||
4907 | netif_device_attach(ndev); | 4906 | netif_device_attach(ndev); |
4908 | 4907 | ||
4909 | return 0; | 4908 | return 0; |
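Note: the qlge hunks replace the "set expires, add_timer" pair with mod_timer(), which atomically (re)arms the timer whether or not it is already pending, and add a del_timer_sync() before the EEH teardown so the 5-second poll cannot fire against a half-closed device. A small sketch of that periodic-timer idiom using the same API calls (initial setup of the timer in the probe path is assumed; names are mine):

#include <linux/timer.h>
#include <linux/jiffies.h>

/* illustrative: a self-rearming 5 second poll */
static void poll_fn(unsigned long data)
{
	struct timer_list *t = (struct timer_list *)data;

	/* ... do the periodic work ... */

	mod_timer(t, jiffies + 5 * HZ);	/* rearm; safe even if pending */
}

/* teardown: make sure the handler is not running before freeing state */
static void stop_poll(struct timer_list *t)
{
	del_timer_sync(t);
}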
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 22371f1dca58..d0af924ddd67 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -3129,7 +3129,6 @@ static void tx_intr_handler(struct fifo_info *fifo_data) | |||
3129 | pkt_cnt++; | 3129 | pkt_cnt++; |
3130 | 3130 | ||
3131 | /* Updating the statistics block */ | 3131 | /* Updating the statistics block */ |
3132 | nic->dev->stats.tx_bytes += skb->len; | ||
3133 | swstats->mem_freed += skb->truesize; | 3132 | swstats->mem_freed += skb->truesize; |
3134 | dev_kfree_skb_irq(skb); | 3133 | dev_kfree_skb_irq(skb); |
3135 | 3134 | ||
@@ -4900,48 +4899,81 @@ static void s2io_updt_stats(struct s2io_nic *sp) | |||
4900 | * Return value: | 4899 | * Return value: |
4901 | * pointer to the updated net_device_stats structure. | 4900 | * pointer to the updated net_device_stats structure. |
4902 | */ | 4901 | */ |
4903 | |||
4904 | static struct net_device_stats *s2io_get_stats(struct net_device *dev) | 4902 | static struct net_device_stats *s2io_get_stats(struct net_device *dev) |
4905 | { | 4903 | { |
4906 | struct s2io_nic *sp = netdev_priv(dev); | 4904 | struct s2io_nic *sp = netdev_priv(dev); |
4907 | struct config_param *config = &sp->config; | ||
4908 | struct mac_info *mac_control = &sp->mac_control; | 4905 | struct mac_info *mac_control = &sp->mac_control; |
4909 | struct stat_block *stats = mac_control->stats_info; | 4906 | struct stat_block *stats = mac_control->stats_info; |
4910 | int i; | 4907 | u64 delta; |
4911 | 4908 | ||
4912 | /* Configure Stats for immediate updt */ | 4909 | /* Configure Stats for immediate updt */ |
4913 | s2io_updt_stats(sp); | 4910 | s2io_updt_stats(sp); |
4914 | 4911 | ||
4915 | /* Using sp->stats as a staging area, because reset (due to mtu | 4912 | /* A device reset will cause the on-adapter statistics to be zero'ed. |
4916 | change, for example) will clear some hardware counters */ | 4913 | * This can be done while running by changing the MTU. To prevent the |
4917 | dev->stats.tx_packets += le32_to_cpu(stats->tmac_frms) - | 4914 | * system from having the stats zero'ed, the driver keeps a copy of the |
4918 | sp->stats.tx_packets; | 4915 | * last update to the system (which is also zero'ed on reset). This |
4919 | sp->stats.tx_packets = le32_to_cpu(stats->tmac_frms); | 4916 | * enables the driver to accurately know the delta between the last |
4920 | 4917 | * update and the current update. | |
4921 | dev->stats.tx_errors += le32_to_cpu(stats->tmac_any_err_frms) - | 4918 | */ |
4922 | sp->stats.tx_errors; | 4919 | delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 | |
4923 | sp->stats.tx_errors = le32_to_cpu(stats->tmac_any_err_frms); | 4920 | le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets; |
4924 | 4921 | sp->stats.rx_packets += delta; | |
4925 | dev->stats.rx_errors += le64_to_cpu(stats->rmac_drop_frms) - | 4922 | dev->stats.rx_packets += delta; |
4926 | sp->stats.rx_errors; | 4923 | |
4927 | sp->stats.rx_errors = le64_to_cpu(stats->rmac_drop_frms); | 4924 | delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 | |
4928 | 4925 | le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets; | |
4929 | dev->stats.multicast = le32_to_cpu(stats->rmac_vld_mcst_frms) - | 4926 | sp->stats.tx_packets += delta; |
4930 | sp->stats.multicast; | 4927 | dev->stats.tx_packets += delta; |
4931 | sp->stats.multicast = le32_to_cpu(stats->rmac_vld_mcst_frms); | 4928 | |
4932 | 4929 | delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 | | |
4933 | dev->stats.rx_length_errors = le64_to_cpu(stats->rmac_long_frms) - | 4930 | le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes; |
4934 | sp->stats.rx_length_errors; | 4931 | sp->stats.rx_bytes += delta; |
4935 | sp->stats.rx_length_errors = le64_to_cpu(stats->rmac_long_frms); | 4932 | dev->stats.rx_bytes += delta; |
4933 | |||
4934 | delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 | | ||
4935 | le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes; | ||
4936 | sp->stats.tx_bytes += delta; | ||
4937 | dev->stats.tx_bytes += delta; | ||
4938 | |||
4939 | delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors; | ||
4940 | sp->stats.rx_errors += delta; | ||
4941 | dev->stats.rx_errors += delta; | ||
4942 | |||
4943 | delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 | | ||
4944 | le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors; | ||
4945 | sp->stats.tx_errors += delta; | ||
4946 | dev->stats.tx_errors += delta; | ||
4947 | |||
4948 | delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped; | ||
4949 | sp->stats.rx_dropped += delta; | ||
4950 | dev->stats.rx_dropped += delta; | ||
4951 | |||
4952 | delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped; | ||
4953 | sp->stats.tx_dropped += delta; | ||
4954 | dev->stats.tx_dropped += delta; | ||
4955 | |||
4956 | /* The adapter MAC interprets pause frames as multicast packets, but | ||
4957 | * does not pass them up. This erroneously increases the multicast | ||
4958 | * packet count and needs to be deducted when the multicast frame count | ||
4959 | * is queried. | ||
4960 | */ | ||
4961 | delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 | | ||
4962 | le32_to_cpu(stats->rmac_vld_mcst_frms); | ||
4963 | delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms); | ||
4964 | delta -= sp->stats.multicast; | ||
4965 | sp->stats.multicast += delta; | ||
4966 | dev->stats.multicast += delta; | ||
4936 | 4967 | ||
4937 | /* collect per-ring rx_packets and rx_bytes */ | 4968 | delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 | |
4938 | dev->stats.rx_packets = dev->stats.rx_bytes = 0; | 4969 | le32_to_cpu(stats->rmac_usized_frms)) + |
4939 | for (i = 0; i < config->rx_ring_num; i++) { | 4970 | le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors; |
4940 | struct ring_info *ring = &mac_control->rings[i]; | 4971 | sp->stats.rx_length_errors += delta; |
4972 | dev->stats.rx_length_errors += delta; | ||
4941 | 4973 | ||
4942 | dev->stats.rx_packets += ring->rx_packets; | 4974 | delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors; |
4943 | dev->stats.rx_bytes += ring->rx_bytes; | 4975 | sp->stats.rx_crc_errors += delta; |
4944 | } | 4976 | dev->stats.rx_crc_errors += delta; |
4945 | 4977 | ||
4946 | return &dev->stats; | 4978 | return &dev->stats; |
4947 | } | 4979 | } |
@@ -7494,15 +7526,11 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) | |||
7494 | } | 7526 | } |
7495 | } | 7527 | } |
7496 | 7528 | ||
7497 | /* Updating statistics */ | ||
7498 | ring_data->rx_packets++; | ||
7499 | rxdp->Host_Control = 0; | 7529 | rxdp->Host_Control = 0; |
7500 | if (sp->rxd_mode == RXD_MODE_1) { | 7530 | if (sp->rxd_mode == RXD_MODE_1) { |
7501 | int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2); | 7531 | int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2); |
7502 | 7532 | ||
7503 | ring_data->rx_bytes += len; | ||
7504 | skb_put(skb, len); | 7533 | skb_put(skb, len); |
7505 | |||
7506 | } else if (sp->rxd_mode == RXD_MODE_3B) { | 7534 | } else if (sp->rxd_mode == RXD_MODE_3B) { |
7507 | int get_block = ring_data->rx_curr_get_info.block_index; | 7535 | int get_block = ring_data->rx_curr_get_info.block_index; |
7508 | int get_off = ring_data->rx_curr_get_info.offset; | 7536 | int get_off = ring_data->rx_curr_get_info.offset; |
@@ -7511,7 +7539,6 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) | |||
7511 | unsigned char *buff = skb_push(skb, buf0_len); | 7539 | unsigned char *buff = skb_push(skb, buf0_len); |
7512 | 7540 | ||
7513 | struct buffAdd *ba = &ring_data->ba[get_block][get_off]; | 7541 | struct buffAdd *ba = &ring_data->ba[get_block][get_off]; |
7514 | ring_data->rx_bytes += buf0_len + buf2_len; | ||
7515 | memcpy(buff, ba->ba_0, buf0_len); | 7542 | memcpy(buff, ba->ba_0, buf0_len); |
7516 | skb_put(skb, buf2_len); | 7543 | skb_put(skb, buf2_len); |
7517 | } | 7544 | } |
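Note: the rewritten s2io_get_stats() does two things: it widens each on-adapter counter by combining the 32-bit overflow register with the 32-bit low half, and it adds only the delta since the last read into dev->stats, so an adapter reset (which zeroes the hardware counters and the staging copy alike) no longer makes the system totals jump. A plain C sketch of that widen-and-delta bookkeeping (structure and names hypothetical):

#include <stdint.h>

/* illustrative: hardware exposes a counter as two 32-bit halves */
struct hw_counter {
	uint32_t oflow;		/* upper 32 bits */
	uint32_t lo;		/* lower 32 bits */
};

/* accumulate only the growth since the previous read */
static void update_stat(const struct hw_counter *hw,
			uint64_t *staged, uint64_t *reported)
{
	uint64_t now = ((uint64_t)hw->oflow << 32) | hw->lo;
	uint64_t delta = now - *staged;	/* both are zeroed on a reset,
					 * so the delta stays sane */

	*staged += delta;	/* staging copy tracks the hardware value */
	*reported += delta;	/* system-visible total only ever grows */
}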
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 47c36e0994f5..5e52c75892df 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h | |||
@@ -745,10 +745,6 @@ struct ring_info { | |||
745 | 745 | ||
746 | /* Buffer Address store. */ | 746 | /* Buffer Address store. */ |
747 | struct buffAdd **ba; | 747 | struct buffAdd **ba; |
748 | |||
749 | /* per-Ring statistics */ | ||
750 | unsigned long rx_packets; | ||
751 | unsigned long rx_bytes; | ||
752 | } ____cacheline_aligned; | 748 | } ____cacheline_aligned; |
753 | 749 | ||
754 | /* Fifo specific structure */ | 750 | /* Fifo specific structure */ |
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c index 1f3acc3a5dfd..79eee3062083 100644 --- a/drivers/net/sb1250-mac.c +++ b/drivers/net/sb1250-mac.c | |||
@@ -2671,6 +2671,7 @@ static struct platform_driver sbmac_driver = { | |||
2671 | .remove = __exit_p(sbmac_remove), | 2671 | .remove = __exit_p(sbmac_remove), |
2672 | .driver = { | 2672 | .driver = { |
2673 | .name = sbmac_string, | 2673 | .name = sbmac_string, |
2674 | .owner = THIS_MODULE, | ||
2674 | }, | 2675 | }, |
2675 | }; | 2676 | }; |
2676 | 2677 | ||
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 28d3ee175e7b..dd8a4adf48ca 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c | |||
@@ -104,10 +104,8 @@ static void rndis_msg_indicate(struct usbnet *dev, struct rndis_indicate *msg, | |||
104 | int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen) | 104 | int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen) |
105 | { | 105 | { |
106 | struct cdc_state *info = (void *) &dev->data; | 106 | struct cdc_state *info = (void *) &dev->data; |
107 | struct usb_cdc_notification notification; | ||
108 | int master_ifnum; | 107 | int master_ifnum; |
109 | int retval; | 108 | int retval; |
110 | int partial; | ||
111 | unsigned count; | 109 | unsigned count; |
112 | __le32 rsp; | 110 | __le32 rsp; |
113 | u32 xid = 0, msg_len, request_id; | 111 | u32 xid = 0, msg_len, request_id; |
@@ -135,17 +133,13 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen) | |||
135 | if (unlikely(retval < 0 || xid == 0)) | 133 | if (unlikely(retval < 0 || xid == 0)) |
136 | return retval; | 134 | return retval; |
137 | 135 | ||
138 | /* Some devices don't respond on the control channel until | 136 | // FIXME Seems like some devices discard responses when |
139 | * polled on the status channel, so do that first. */ | 137 | // we time out and cancel our "get response" requests... |
140 | retval = usb_interrupt_msg( | 138 | // so, this is fragile. Probably need to poll for status. |
141 | dev->udev, | ||
142 | usb_rcvintpipe(dev->udev, dev->status->desc.bEndpointAddress), | ||
143 | ¬ification, sizeof(notification), &partial, | ||
144 | RNDIS_CONTROL_TIMEOUT_MS); | ||
145 | if (unlikely(retval < 0)) | ||
146 | return retval; | ||
147 | 139 | ||
148 | /* Poll the control channel; the request probably completed immediately */ | 140 | /* ignore status endpoint, just poll the control channel; |
141 | * the request probably completed immediately | ||
142 | */ | ||
149 | rsp = buf->msg_type | RNDIS_MSG_COMPLETION; | 143 | rsp = buf->msg_type | RNDIS_MSG_COMPLETION; |
150 | for (count = 0; count < 10; count++) { | 144 | for (count = 0; count < 10; count++) { |
151 | memset(buf, 0, CONTROL_BUFFER_SIZE); | 145 | memset(buf, 0, CONTROL_BUFFER_SIZE); |
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 44115eea57f9..7eab4071ea26 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -1293,6 +1293,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) | |||
1293 | goto out; | 1293 | goto out; |
1294 | } | 1294 | } |
1295 | 1295 | ||
1296 | /* netdev_printk() needs this so do it as early as possible */ | ||
1297 | SET_NETDEV_DEV(net, &udev->dev); | ||
1298 | |||
1296 | dev = netdev_priv(net); | 1299 | dev = netdev_priv(net); |
1297 | dev->udev = xdev; | 1300 | dev->udev = xdev; |
1298 | dev->intf = udev; | 1301 | dev->intf = udev; |
@@ -1377,8 +1380,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) | |||
1377 | dev->rx_urb_size = dev->hard_mtu; | 1380 | dev->rx_urb_size = dev->hard_mtu; |
1378 | dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1); | 1381 | dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1); |
1379 | 1382 | ||
1380 | SET_NETDEV_DEV(net, &udev->dev); | ||
1381 | |||
1382 | if ((dev->driver_info->flags & FLAG_WLAN) != 0) | 1383 | if ((dev->driver_info->flags & FLAG_WLAN) != 0) |
1383 | SET_NETDEV_DEVTYPE(net, &wlan_type); | 1384 | SET_NETDEV_DEVTYPE(net, &wlan_type); |
1384 | if ((dev->driver_info->flags & FLAG_WWAN) != 0) | 1385 | if ((dev->driver_info->flags & FLAG_WWAN) != 0) |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 1edb7a61983c..bb6b67f6b0cc 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -415,7 +415,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp) | |||
415 | static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp) | 415 | static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp) |
416 | { | 416 | { |
417 | int err; | 417 | int err; |
418 | bool oom = false; | 418 | bool oom; |
419 | 419 | ||
420 | do { | 420 | do { |
421 | if (vi->mergeable_rx_bufs) | 421 | if (vi->mergeable_rx_bufs) |
@@ -425,10 +425,9 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp) | |||
425 | else | 425 | else |
426 | err = add_recvbuf_small(vi, gfp); | 426 | err = add_recvbuf_small(vi, gfp); |
427 | 427 | ||
428 | if (err < 0) { | 428 | oom = err == -ENOMEM; |
429 | oom = true; | 429 | if (err < 0) |
430 | break; | 430 | break; |
431 | } | ||
432 | ++vi->num; | 431 | ++vi->num; |
433 | } while (err > 0); | 432 | } while (err > 0); |
434 | if (unlikely(vi->num > vi->max)) | 433 | if (unlikely(vi->num > vi->max)) |
@@ -563,7 +562,6 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
563 | struct virtnet_info *vi = netdev_priv(dev); | 562 | struct virtnet_info *vi = netdev_priv(dev); |
564 | int capacity; | 563 | int capacity; |
565 | 564 | ||
566 | again: | ||
567 | /* Free up any pending old buffers before queueing new ones. */ | 565 | /* Free up any pending old buffers before queueing new ones. */ |
568 | free_old_xmit_skbs(vi); | 566 | free_old_xmit_skbs(vi); |
569 | 567 | ||
@@ -572,14 +570,20 @@ again: | |||
572 | 570 | ||
573 | /* This can happen with OOM and indirect buffers. */ | 571 | /* This can happen with OOM and indirect buffers. */ |
574 | if (unlikely(capacity < 0)) { | 572 | if (unlikely(capacity < 0)) { |
575 | netif_stop_queue(dev); | 573 | if (net_ratelimit()) { |
576 | dev_warn(&dev->dev, "Unexpected full queue\n"); | 574 | if (likely(capacity == -ENOMEM)) { |
577 | if (unlikely(!virtqueue_enable_cb(vi->svq))) { | 575 | dev_warn(&dev->dev, |
578 | virtqueue_disable_cb(vi->svq); | 576 | "TX queue failure: out of memory\n"); |
579 | netif_start_queue(dev); | 577 | } else { |
580 | goto again; | 578 | dev->stats.tx_fifo_errors++; |
579 | dev_warn(&dev->dev, | ||
580 | "Unexpected TX queue failure: %d\n", | ||
581 | capacity); | ||
582 | } | ||
581 | } | 583 | } |
582 | return NETDEV_TX_BUSY; | 584 | dev->stats.tx_dropped++; |
585 | kfree_skb(skb); | ||
586 | return NETDEV_TX_OK; | ||
583 | } | 587 | } |
584 | virtqueue_kick(vi->svq); | 588 | virtqueue_kick(vi->svq); |
585 | 589 | ||
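Note: virtio_net's start_xmit() no longer returns NETDEV_TX_BUSY and loops when the virtqueue cannot accept the skb: an out-of-memory or unexpected failure is now logged (ratelimited), counted in tx_dropped, and the skb freed, with NETDEV_TX_OK returned so the stack does not keep re-presenting the same packet. A hedged sketch of that drop-instead-of-requeue tail (helper name and the err parameter are mine):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/net.h>

/* illustrative transmit tail: treat an unrecoverable queueing error as
 * a dropped packet instead of asking the stack to retry forever */
static netdev_tx_t finish_xmit(struct net_device *dev, struct sk_buff *skb,
			       int err)
{
	if (unlikely(err < 0)) {
		if (net_ratelimit())
			dev_warn(&dev->dev, "TX queue failure: %d\n", err);
		dev->stats.tx_dropped++;
		kfree_skb(skb);
	}
	return NETDEV_TX_OK;	/* consumed (sent or dropped), never busy */
}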
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c index 45c5dc225631..ed1786598c9e 100644 --- a/drivers/net/vxge/vxge-main.c +++ b/drivers/net/vxge/vxge-main.c | |||
@@ -2262,7 +2262,8 @@ start: | |||
2262 | vxge_debug_init(VXGE_ERR, | 2262 | vxge_debug_init(VXGE_ERR, |
2263 | "%s: memory allocation failed", | 2263 | "%s: memory allocation failed", |
2264 | VXGE_DRIVER_NAME); | 2264 | VXGE_DRIVER_NAME); |
2265 | return -ENOMEM; | 2265 | ret = -ENOMEM; |
2266 | goto alloc_entries_failed; | ||
2266 | } | 2267 | } |
2267 | 2268 | ||
2268 | vdev->vxge_entries = | 2269 | vdev->vxge_entries = |
@@ -2271,8 +2272,8 @@ start: | |||
2271 | if (!vdev->vxge_entries) { | 2272 | if (!vdev->vxge_entries) { |
2272 | vxge_debug_init(VXGE_ERR, "%s: memory allocation failed", | 2273 | vxge_debug_init(VXGE_ERR, "%s: memory allocation failed", |
2273 | VXGE_DRIVER_NAME); | 2274 | VXGE_DRIVER_NAME); |
2274 | kfree(vdev->entries); | 2275 | ret = -ENOMEM; |
2275 | return -ENOMEM; | 2276 | goto alloc_vxge_entries_failed; |
2276 | } | 2277 | } |
2277 | 2278 | ||
2278 | for (i = 0, j = 0; i < vdev->no_of_vpath; i++) { | 2279 | for (i = 0, j = 0; i < vdev->no_of_vpath; i++) { |
@@ -2303,22 +2304,32 @@ start: | |||
2303 | vxge_debug_init(VXGE_ERR, | 2304 | vxge_debug_init(VXGE_ERR, |
2304 | "%s: MSI-X enable failed for %d vectors, ret: %d", | 2305 | "%s: MSI-X enable failed for %d vectors, ret: %d", |
2305 | VXGE_DRIVER_NAME, vdev->intr_cnt, ret); | 2306 | VXGE_DRIVER_NAME, vdev->intr_cnt, ret); |
2307 | if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) { | ||
2308 | ret = -ENODEV; | ||
2309 | goto enable_msix_failed; | ||
2310 | } | ||
2311 | |||
2306 | kfree(vdev->entries); | 2312 | kfree(vdev->entries); |
2307 | kfree(vdev->vxge_entries); | 2313 | kfree(vdev->vxge_entries); |
2308 | vdev->entries = NULL; | 2314 | vdev->entries = NULL; |
2309 | vdev->vxge_entries = NULL; | 2315 | vdev->vxge_entries = NULL; |
2310 | |||
2311 | if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) | ||
2312 | return -ENODEV; | ||
2313 | /* Try with less no of vector by reducing no of vpaths count */ | 2316 | /* Try with less no of vector by reducing no of vpaths count */ |
2314 | temp = (ret - 1)/2; | 2317 | temp = (ret - 1)/2; |
2315 | vxge_close_vpaths(vdev, temp); | 2318 | vxge_close_vpaths(vdev, temp); |
2316 | vdev->no_of_vpath = temp; | 2319 | vdev->no_of_vpath = temp; |
2317 | goto start; | 2320 | goto start; |
2318 | } else if (ret < 0) | 2321 | } else if (ret < 0) { |
2319 | return -ENODEV; | 2322 | ret = -ENODEV; |
2320 | 2323 | goto enable_msix_failed; | |
2324 | } | ||
2321 | return 0; | 2325 | return 0; |
2326 | |||
2327 | enable_msix_failed: | ||
2328 | kfree(vdev->vxge_entries); | ||
2329 | alloc_vxge_entries_failed: | ||
2330 | kfree(vdev->entries); | ||
2331 | alloc_entries_failed: | ||
2332 | return ret; | ||
2322 | } | 2333 | } |
2323 | 2334 | ||
2324 | static int vxge_enable_msix(struct vxgedev *vdev) | 2335 | static int vxge_enable_msix(struct vxgedev *vdev) |
@@ -4515,9 +4526,9 @@ vxge_starter(void) | |||
4515 | char version[32]; | 4526 | char version[32]; |
4516 | snprintf(version, 32, "%s", DRV_VERSION); | 4527 | snprintf(version, 32, "%s", DRV_VERSION); |
4517 | 4528 | ||
4518 | printk(KERN_CRIT "%s: Copyright(c) 2002-2009 Neterion Inc\n", | 4529 | printk(KERN_INFO "%s: Copyright(c) 2002-2009 Neterion Inc\n", |
4519 | VXGE_DRIVER_NAME); | 4530 | VXGE_DRIVER_NAME); |
4520 | printk(KERN_CRIT "%s: Driver version: %s\n", | 4531 | printk(KERN_INFO "%s: Driver version: %s\n", |
4521 | VXGE_DRIVER_NAME, version); | 4532 | VXGE_DRIVER_NAME, version); |
4522 | 4533 | ||
4523 | verify_bandwidth(); | 4534 | verify_bandwidth(); |
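Note: the vxge MSI-X setup above now has a single exit ladder for its allocation failures and, when pci_enable_msix() reports that fewer vectors are available, frees the tables, reduces the vpath count and retries instead of giving up. A hedged sketch of that retry-with-the-advertised-count loop, relying on the pci_enable_msix() convention of this era (0 on success, a positive count when fewer vectors are available, negative on hard error); everything except the PCI call is hypothetical:

#include <linux/pci.h>

/* illustrative: request 'want' MSI-X vectors, shrinking the request
 * until the host can satisfy it; entries[i].entry is assumed to have
 * been filled in by the caller */
static int request_msix(struct pci_dev *pdev, struct msix_entry *entries,
			int want)
{
	int ret;

	while (want > 0) {
		ret = pci_enable_msix(pdev, entries, want);
		if (ret == 0)
			return want;	/* got everything we asked for */
		if (ret < 0)
			return ret;	/* hard failure */
		want = ret;		/* only 'ret' vectors available: retry */
	}
	return -ENODEV;
}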
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 4c218e910635..107af9e61dc1 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
@@ -98,7 +98,8 @@ static void tx_poll_start(struct vhost_net *net, struct socket *sock) | |||
98 | static void handle_tx(struct vhost_net *net) | 98 | static void handle_tx(struct vhost_net *net) |
99 | { | 99 | { |
100 | struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX]; | 100 | struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX]; |
101 | unsigned head, out, in, s; | 101 | unsigned out, in, s; |
102 | int head; | ||
102 | struct msghdr msg = { | 103 | struct msghdr msg = { |
103 | .msg_name = NULL, | 104 | .msg_name = NULL, |
104 | .msg_namelen = 0, | 105 | .msg_namelen = 0, |
@@ -135,6 +136,9 @@ static void handle_tx(struct vhost_net *net) | |||
135 | ARRAY_SIZE(vq->iov), | 136 | ARRAY_SIZE(vq->iov), |
136 | &out, &in, | 137 | &out, &in, |
137 | NULL, NULL); | 138 | NULL, NULL); |
139 | /* On error, stop handling until the next kick. */ | ||
140 | if (unlikely(head < 0)) | ||
141 | break; | ||
138 | /* Nothing new? Wait for eventfd to tell us they refilled. */ | 142 | /* Nothing new? Wait for eventfd to tell us they refilled. */ |
139 | if (head == vq->num) { | 143 | if (head == vq->num) { |
140 | wmem = atomic_read(&sock->sk->sk_wmem_alloc); | 144 | wmem = atomic_read(&sock->sk->sk_wmem_alloc); |
@@ -192,7 +196,8 @@ static void handle_tx(struct vhost_net *net) | |||
192 | static void handle_rx(struct vhost_net *net) | 196 | static void handle_rx(struct vhost_net *net) |
193 | { | 197 | { |
194 | struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX]; | 198 | struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX]; |
195 | unsigned head, out, in, log, s; | 199 | unsigned out, in, log, s; |
200 | int head; | ||
196 | struct vhost_log *vq_log; | 201 | struct vhost_log *vq_log; |
197 | struct msghdr msg = { | 202 | struct msghdr msg = { |
198 | .msg_name = NULL, | 203 | .msg_name = NULL, |
@@ -228,6 +233,9 @@ static void handle_rx(struct vhost_net *net) | |||
228 | ARRAY_SIZE(vq->iov), | 233 | ARRAY_SIZE(vq->iov), |
229 | &out, &in, | 234 | &out, &in, |
230 | vq_log, &log); | 235 | vq_log, &log); |
236 | /* On error, stop handling until the next kick. */ | ||
237 | if (unlikely(head < 0)) | ||
238 | break; | ||
231 | /* OK, now we need to know about added descriptors. */ | 239 | /* OK, now we need to know about added descriptors. */ |
232 | if (head == vq->num) { | 240 | if (head == vq->num) { |
233 | if (unlikely(vhost_enable_notify(vq))) { | 241 | if (unlikely(vhost_enable_notify(vq))) { |
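Note: with vhost_get_vq_desc() now returning an int (see the vhost.c hunk below), head can carry a negative errno, and both handle_tx() and handle_rx() stop processing until the next kick when that happens; the old unsigned type silently turned errors into huge "valid" indices. A generic sketch of the signed "index or -errno" return convention (all names and values hypothetical):

#include <errno.h>

#define RING_SIZE 256		/* stands in for vq->num */

/* returns a descriptor index, RING_SIZE when the ring is empty,
 * or a negative errno on malformed input */
static int get_descriptor(int malformed, int empty)
{
	if (malformed)
		return -EINVAL;
	if (empty)
		return RING_SIZE;
	return 7;		/* some valid index */
}

static void worker(void)
{
	for (;;) {
		int head = get_descriptor(0, 1);

		if (head < 0)
			break;	/* error: stop until the next kick */
		if (head == RING_SIZE)
			break;	/* nothing queued: wait for notification */

		/* ... process descriptor 'head' ... */
	}
}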
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 04344b711c56..248ed2db0711 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
@@ -736,12 +736,12 @@ static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len, | |||
736 | mem = rcu_dereference(dev->memory); | 736 | mem = rcu_dereference(dev->memory); |
737 | while ((u64)len > s) { | 737 | while ((u64)len > s) { |
738 | u64 size; | 738 | u64 size; |
739 | if (ret >= iov_size) { | 739 | if (unlikely(ret >= iov_size)) { |
740 | ret = -ENOBUFS; | 740 | ret = -ENOBUFS; |
741 | break; | 741 | break; |
742 | } | 742 | } |
743 | reg = find_region(mem, addr, len); | 743 | reg = find_region(mem, addr, len); |
744 | if (!reg) { | 744 | if (unlikely(!reg)) { |
745 | ret = -EFAULT; | 745 | ret = -EFAULT; |
746 | break; | 746 | break; |
747 | } | 747 | } |
@@ -780,18 +780,18 @@ static unsigned next_desc(struct vring_desc *desc) | |||
780 | return next; | 780 | return next; |
781 | } | 781 | } |
782 | 782 | ||
783 | static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq, | 783 | static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq, |
784 | struct iovec iov[], unsigned int iov_size, | 784 | struct iovec iov[], unsigned int iov_size, |
785 | unsigned int *out_num, unsigned int *in_num, | 785 | unsigned int *out_num, unsigned int *in_num, |
786 | struct vhost_log *log, unsigned int *log_num, | 786 | struct vhost_log *log, unsigned int *log_num, |
787 | struct vring_desc *indirect) | 787 | struct vring_desc *indirect) |
788 | { | 788 | { |
789 | struct vring_desc desc; | 789 | struct vring_desc desc; |
790 | unsigned int i = 0, count, found = 0; | 790 | unsigned int i = 0, count, found = 0; |
791 | int ret; | 791 | int ret; |
792 | 792 | ||
793 | /* Sanity check */ | 793 | /* Sanity check */ |
794 | if (indirect->len % sizeof desc) { | 794 | if (unlikely(indirect->len % sizeof desc)) { |
795 | vq_err(vq, "Invalid length in indirect descriptor: " | 795 | vq_err(vq, "Invalid length in indirect descriptor: " |
796 | "len 0x%llx not multiple of 0x%zx\n", | 796 | "len 0x%llx not multiple of 0x%zx\n", |
797 | (unsigned long long)indirect->len, | 797 | (unsigned long long)indirect->len, |
@@ -801,7 +801,7 @@ static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq, | |||
801 | 801 | ||
802 | ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect, | 802 | ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect, |
803 | ARRAY_SIZE(vq->indirect)); | 803 | ARRAY_SIZE(vq->indirect)); |
804 | if (ret < 0) { | 804 | if (unlikely(ret < 0)) { |
805 | vq_err(vq, "Translation failure %d in indirect.\n", ret); | 805 | vq_err(vq, "Translation failure %d in indirect.\n", ret); |
806 | return ret; | 806 | return ret; |
807 | } | 807 | } |
@@ -813,7 +813,7 @@ static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq, | |||
813 | count = indirect->len / sizeof desc; | 813 | count = indirect->len / sizeof desc; |
814 | /* Buffers are chained via a 16 bit next field, so | 814 | /* Buffers are chained via a 16 bit next field, so |
815 | * we can have at most 2^16 of these. */ | 815 | * we can have at most 2^16 of these. */ |
816 | if (count > USHRT_MAX + 1) { | 816 | if (unlikely(count > USHRT_MAX + 1)) { |
817 | vq_err(vq, "Indirect buffer length too big: %d\n", | 817 | vq_err(vq, "Indirect buffer length too big: %d\n", |
818 | indirect->len); | 818 | indirect->len); |
819 | return -E2BIG; | 819 | return -E2BIG; |
@@ -821,19 +821,19 @@ static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq, | |||
821 | 821 | ||
822 | do { | 822 | do { |
823 | unsigned iov_count = *in_num + *out_num; | 823 | unsigned iov_count = *in_num + *out_num; |
824 | if (++found > count) { | 824 | if (unlikely(++found > count)) { |
825 | vq_err(vq, "Loop detected: last one at %u " | 825 | vq_err(vq, "Loop detected: last one at %u " |
826 | "indirect size %u\n", | 826 | "indirect size %u\n", |
827 | i, count); | 827 | i, count); |
828 | return -EINVAL; | 828 | return -EINVAL; |
829 | } | 829 | } |
830 | if (memcpy_fromiovec((unsigned char *)&desc, vq->indirect, | 830 | if (unlikely(memcpy_fromiovec((unsigned char *)&desc, vq->indirect, |
831 | sizeof desc)) { | 831 | sizeof desc))) { |
832 | vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n", | 832 | vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n", |
833 | i, (size_t)indirect->addr + i * sizeof desc); | 833 | i, (size_t)indirect->addr + i * sizeof desc); |
834 | return -EINVAL; | 834 | return -EINVAL; |
835 | } | 835 | } |
836 | if (desc.flags & VRING_DESC_F_INDIRECT) { | 836 | if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) { |
837 | vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n", | 837 | vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n", |
838 | i, (size_t)indirect->addr + i * sizeof desc); | 838 | i, (size_t)indirect->addr + i * sizeof desc); |
839 | return -EINVAL; | 839 | return -EINVAL; |
@@ -841,7 +841,7 @@ static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq, | |||
841 | 841 | ||
842 | ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count, | 842 | ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count, |
843 | iov_size - iov_count); | 843 | iov_size - iov_count); |
844 | if (ret < 0) { | 844 | if (unlikely(ret < 0)) { |
845 | vq_err(vq, "Translation failure %d indirect idx %d\n", | 845 | vq_err(vq, "Translation failure %d indirect idx %d\n", |
846 | ret, i); | 846 | ret, i); |
847 | return ret; | 847 | return ret; |
@@ -857,7 +857,7 @@ static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq, | |||
857 | } else { | 857 | } else { |
858 | /* If it's an output descriptor, they're all supposed | 858 | /* If it's an output descriptor, they're all supposed |
859 | * to come before any input descriptors. */ | 859 | * to come before any input descriptors. */ |
860 | if (*in_num) { | 860 | if (unlikely(*in_num)) { |
861 | vq_err(vq, "Indirect descriptor " | 861 | vq_err(vq, "Indirect descriptor " |
862 | "has out after in: idx %d\n", i); | 862 | "has out after in: idx %d\n", i); |
863 | return -EINVAL; | 863 | return -EINVAL; |
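The "out after in" check here (and its counterpart in vhost_get_vq_desc below) enforces the virtio convention that driver-readable (out) buffers precede device-writable (in) buffers in a chain; that ordering is what allows both kinds to be packed into the single iov[] array and described by just *out_num and *in_num. Purely as an illustration of the resulting layout:

    /*
     * iov[0] .. iov[*out_num - 1]                    out buffers (guest -> host)
     * iov[*out_num] .. iov[*out_num + *in_num - 1]   in  buffers (host -> guest)
     *
     * A chain ordered out, in, out trips the check above and yields -EINVAL.
     */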
@@ -873,12 +873,13 @@ static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq, | |||
873 | * number of output then some number of input descriptors, it's actually two | 873 | * number of output then some number of input descriptors, it's actually two |
874 | * iovecs, but we pack them into one and note how many of each there were. | 874 | * iovecs, but we pack them into one and note how many of each there were. |
875 | * | 875 | * |
876 | * This function returns the descriptor number found, or vq->num (which | 876 | * This function returns the descriptor number found, or vq->num (which is |
877 | * is never a valid descriptor number) if none was found. */ | 877 | * never a valid descriptor number) if none was found. A negative code is |
878 | unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq, | 878 | * returned on error. */ |
879 | struct iovec iov[], unsigned int iov_size, | 879 | int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq, |
880 | unsigned int *out_num, unsigned int *in_num, | 880 | struct iovec iov[], unsigned int iov_size, |
881 | struct vhost_log *log, unsigned int *log_num) | 881 | unsigned int *out_num, unsigned int *in_num, |
882 | struct vhost_log *log, unsigned int *log_num) | ||
882 | { | 883 | { |
883 | struct vring_desc desc; | 884 | struct vring_desc desc; |
884 | unsigned int i, head, found = 0; | 885 | unsigned int i, head, found = 0; |
@@ -887,16 +888,16 @@ unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq, | |||
887 | 888 | ||
888 | /* Check it isn't doing very strange things with descriptor numbers. */ | 889 | /* Check it isn't doing very strange things with descriptor numbers. */ |
889 | last_avail_idx = vq->last_avail_idx; | 890 | last_avail_idx = vq->last_avail_idx; |
890 | if (get_user(vq->avail_idx, &vq->avail->idx)) { | 891 | if (unlikely(get_user(vq->avail_idx, &vq->avail->idx))) { |
891 | vq_err(vq, "Failed to access avail idx at %p\n", | 892 | vq_err(vq, "Failed to access avail idx at %p\n", |
892 | &vq->avail->idx); | 893 | &vq->avail->idx); |
893 | return vq->num; | 894 | return -EFAULT; |
894 | } | 895 | } |
895 | 896 | ||
896 | if ((u16)(vq->avail_idx - last_avail_idx) > vq->num) { | 897 | if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) { |
897 | vq_err(vq, "Guest moved used index from %u to %u", | 898 | vq_err(vq, "Guest moved used index from %u to %u", |
898 | last_avail_idx, vq->avail_idx); | 899 | last_avail_idx, vq->avail_idx); |
899 | return vq->num; | 900 | return -EFAULT; |
900 | } | 901 | } |
901 | 902 | ||
902 | /* If there's nothing new since last we looked, return invalid. */ | 903 | /* If there's nothing new since last we looked, return invalid. */ |
@@ -908,18 +909,19 @@ unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq, | |||
908 | 909 | ||
909 | /* Grab the next descriptor number they're advertising, and increment | 910 | /* Grab the next descriptor number they're advertising, and increment |
910 | * the index we've seen. */ | 911 | * the index we've seen. */ |
911 | if (get_user(head, &vq->avail->ring[last_avail_idx % vq->num])) { | 912 | if (unlikely(get_user(head, |
913 | &vq->avail->ring[last_avail_idx % vq->num]))) { | ||
912 | vq_err(vq, "Failed to read head: idx %d address %p\n", | 914 | vq_err(vq, "Failed to read head: idx %d address %p\n", |
913 | last_avail_idx, | 915 | last_avail_idx, |
914 | &vq->avail->ring[last_avail_idx % vq->num]); | 916 | &vq->avail->ring[last_avail_idx % vq->num]); |
915 | return vq->num; | 917 | return -EFAULT; |
916 | } | 918 | } |
917 | 919 | ||
918 | /* If their number is silly, that's an error. */ | 920 | /* If their number is silly, that's an error. */ |
919 | if (head >= vq->num) { | 921 | if (unlikely(head >= vq->num)) { |
920 | vq_err(vq, "Guest says index %u > %u is available", | 922 | vq_err(vq, "Guest says index %u > %u is available", |
921 | head, vq->num); | 923 | head, vq->num); |
922 | return vq->num; | 924 | return -EINVAL; |
923 | } | 925 | } |
924 | 926 | ||
925 | /* When we start there are none of either input nor output. */ | 927 | /* When we start there are none of either input nor output. */ |
@@ -930,41 +932,41 @@ unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq, | |||
930 | i = head; | 932 | i = head; |
931 | do { | 933 | do { |
932 | unsigned iov_count = *in_num + *out_num; | 934 | unsigned iov_count = *in_num + *out_num; |
933 | if (i >= vq->num) { | 935 | if (unlikely(i >= vq->num)) { |
934 | vq_err(vq, "Desc index is %u > %u, head = %u", | 936 | vq_err(vq, "Desc index is %u > %u, head = %u", |
935 | i, vq->num, head); | 937 | i, vq->num, head); |
936 | return vq->num; | 938 | return -EINVAL; |
937 | } | 939 | } |
938 | if (++found > vq->num) { | 940 | if (unlikely(++found > vq->num)) { |
939 | vq_err(vq, "Loop detected: last one at %u " | 941 | vq_err(vq, "Loop detected: last one at %u " |
940 | "vq size %u head %u\n", | 942 | "vq size %u head %u\n", |
941 | i, vq->num, head); | 943 | i, vq->num, head); |
942 | return vq->num; | 944 | return -EINVAL; |
943 | } | 945 | } |
944 | ret = copy_from_user(&desc, vq->desc + i, sizeof desc); | 946 | ret = copy_from_user(&desc, vq->desc + i, sizeof desc); |
945 | if (ret) { | 947 | if (unlikely(ret)) { |
946 | vq_err(vq, "Failed to get descriptor: idx %d addr %p\n", | 948 | vq_err(vq, "Failed to get descriptor: idx %d addr %p\n", |
947 | i, vq->desc + i); | 949 | i, vq->desc + i); |
948 | return vq->num; | 950 | return -EFAULT; |
949 | } | 951 | } |
950 | if (desc.flags & VRING_DESC_F_INDIRECT) { | 952 | if (desc.flags & VRING_DESC_F_INDIRECT) { |
951 | ret = get_indirect(dev, vq, iov, iov_size, | 953 | ret = get_indirect(dev, vq, iov, iov_size, |
952 | out_num, in_num, | 954 | out_num, in_num, |
953 | log, log_num, &desc); | 955 | log, log_num, &desc); |
954 | if (ret < 0) { | 956 | if (unlikely(ret < 0)) { |
955 | vq_err(vq, "Failure detected " | 957 | vq_err(vq, "Failure detected " |
956 | "in indirect descriptor at idx %d\n", i); | 958 | "in indirect descriptor at idx %d\n", i); |
957 | return vq->num; | 959 | return ret; |
958 | } | 960 | } |
959 | continue; | 961 | continue; |
960 | } | 962 | } |
961 | 963 | ||
962 | ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count, | 964 | ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count, |
963 | iov_size - iov_count); | 965 | iov_size - iov_count); |
964 | if (ret < 0) { | 966 | if (unlikely(ret < 0)) { |
965 | vq_err(vq, "Translation failure %d descriptor idx %d\n", | 967 | vq_err(vq, "Translation failure %d descriptor idx %d\n", |
966 | ret, i); | 968 | ret, i); |
967 | return vq->num; | 969 | return ret; |
968 | } | 970 | } |
969 | if (desc.flags & VRING_DESC_F_WRITE) { | 971 | if (desc.flags & VRING_DESC_F_WRITE) { |
970 | /* If this is an input descriptor, | 972 | /* If this is an input descriptor, |
@@ -978,10 +980,10 @@ unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq, | |||
978 | } else { | 980 | } else { |
979 | /* If it's an output descriptor, they're all supposed | 981 | /* If it's an output descriptor, they're all supposed |
980 | * to come before any input descriptors. */ | 982 | * to come before any input descriptors. */ |
981 | if (*in_num) { | 983 | if (unlikely(*in_num)) { |
982 | vq_err(vq, "Descriptor has out after in: " | 984 | vq_err(vq, "Descriptor has out after in: " |
983 | "idx %d\n", i); | 985 | "idx %d\n", i); |
984 | return vq->num; | 986 | return -EINVAL; |
985 | } | 987 | } |
986 | *out_num += ret; | 988 | *out_num += ret; |
987 | } | 989 | } |
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 44591ba9b07a..11ee13dba0f7 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h | |||
@@ -120,10 +120,10 @@ long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, unsigned long arg); | |||
120 | int vhost_vq_access_ok(struct vhost_virtqueue *vq); | 120 | int vhost_vq_access_ok(struct vhost_virtqueue *vq); |
121 | int vhost_log_access_ok(struct vhost_dev *); | 121 | int vhost_log_access_ok(struct vhost_dev *); |
122 | 122 | ||
123 | unsigned vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *, | 123 | int vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *, |
124 | struct iovec iov[], unsigned int iov_count, | 124 | struct iovec iov[], unsigned int iov_count, |
125 | unsigned int *out_num, unsigned int *in_num, | 125 | unsigned int *out_num, unsigned int *in_num, |
126 | struct vhost_log *log, unsigned int *log_num); | 126 | struct vhost_log *log, unsigned int *log_num); |
127 | void vhost_discard_vq_desc(struct vhost_virtqueue *); | 127 | void vhost_discard_vq_desc(struct vhost_virtqueue *); |
128 | 128 | ||
129 | int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len); | 129 | int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len); |
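With the prototype change above, callers can no longer treat every non-descriptor return as "nothing to do": a negative value now signals an access or validation error, while vq->num still means the available ring is empty. A minimal, hypothetical caller loop (placeholder names, not the actual drivers/vhost/net.c code) would look like:

    unsigned int out, in;
    int head;

    for (;;) {
            head = vhost_get_vq_desc(dev, vq, vq->iov, ARRAY_SIZE(vq->iov),
                                     &out, &in, NULL, NULL);
            if (head < 0)            /* -EFAULT, -EINVAL, ...: give up on this vq */
                    break;
            if (head == vq->num)     /* ring empty: nothing available right now */
                    break;
            /* ... consume the out/in iovecs ... */
            vhost_add_used(vq, head, 0);
    }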