 net/caif/caif_socket.c | 21 ++++++---------------
 1 file changed, 6 insertions(+), 15 deletions(-)
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 77e99568acda..732897d64f41 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -138,7 +138,7 @@ void caif_flow_ctrl(struct sock *sk, int mode)
 {
	struct caifsock *cf_sk;
	cf_sk = container_of(sk, struct caifsock, sk);
-	if (cf_sk->layer.dn)
+	if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd)
		cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
 }
 
@@ -162,9 +162,8 @@ int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
			atomic_read(&cf_sk->sk.sk_rmem_alloc),
			sk_rcvbuf_lowwater(cf_sk));
		set_rx_flow_off(cf_sk);
-		if (cf_sk->layer.dn)
-			cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
-						CAIF_MODEMCMD_FLOW_OFF_REQ);
+		dbfs_atomic_inc(&cnt.num_rx_flow_off);
+		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
	}
 
	err = sk_filter(sk, skb);
@@ -175,9 +174,8 @@ int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
		trace_printk("CAIF: %s():"
			" sending flow OFF due to rmem_schedule\n",
			__func__);
-		if (cf_sk->layer.dn)
-			cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
-						CAIF_MODEMCMD_FLOW_OFF_REQ);
+		dbfs_atomic_inc(&cnt.num_rx_flow_off);
+		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
	}
	skb->dev = NULL;
	skb_set_owner_r(skb, sk);
@@ -285,16 +283,13 @@ static void caif_check_flow_release(struct sock *sk)
 {
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
 
-	if (cf_sk->layer.dn == NULL || cf_sk->layer.dn->modemcmd == NULL)
-		return;
	if (rx_flow_is_on(cf_sk))
		return;
 
	if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
		dbfs_atomic_inc(&cnt.num_rx_flow_on);
		set_rx_flow_on(cf_sk);
-		cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
-					CAIF_MODEMCMD_FLOW_ON_REQ);
+		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
	}
 }
 /*
@@ -1018,10 +1013,6 @@ static unsigned int caif_poll(struct file *file,
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;
 
-	/* Connection-based need to check for termination and startup */
-	if (sk->sk_state == CAIF_DISCONNECTED)
-		mask |= POLLHUP;
-
	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
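
Reviewer's note: the first four hunks consolidate flow control. Three
open-coded modemcmd dispatches are replaced by calls to caif_flow_ctrl(),
which now checks both cf_sk->layer.dn and cf_sk->layer.dn->modemcmd before
the indirect call, so the per-caller NULL checks (and the early return in
caif_check_flow_release()) can go away. This also closes the latent oops
visible in the old rmem paths, which tested layer.dn but then called
through modemcmd without checking it. A minimal user-space sketch of the
pattern follows; struct layer, flow_ctrl() and print_cmd() are simplified
stand-ins for the kernel's cflayer plumbing, not the real definitions.

#include <stdio.h>
#include <stddef.h>

struct layer {
	struct layer *dn;                          /* downstream layer, may be NULL */
	int (*modemcmd)(struct layer *l, int cmd); /* optional op, may also be NULL */
};

enum { FLOW_ON_REQ, FLOW_OFF_REQ };

/* One guarded dispatch point; callers no longer repeat the NULL checks. */
static void flow_ctrl(struct layer *l, int mode)
{
	if (l->dn && l->dn->modemcmd)
		l->dn->modemcmd(l->dn, mode);
}

static int print_cmd(struct layer *l, int cmd)
{
	(void)l;
	printf("modemcmd: %s\n", cmd == FLOW_ON_REQ ? "FLOW_ON" : "FLOW_OFF");
	return 0;
}

int main(void)
{
	struct layer modem = { .dn = NULL, .modemcmd = print_cmd };
	struct layer sock = { .dn = &modem, .modemcmd = NULL };
	struct layer orphan = { .dn = NULL, .modemcmd = NULL };

	flow_ctrl(&sock, FLOW_OFF_REQ);  /* dispatches through modem.modemcmd */
	flow_ctrl(&orphan, FLOW_ON_REQ); /* no downstream: silently a no-op */
	return 0;
}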
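
The final hunk drops the unconditional POLLHUP for CAIF_DISCONNECTED, so a
dead connection with data still queued polls readable rather than hung-up,
letting readers drain the queue. Below is a hedged user-space model of the
surviving mask logic; the fake_sock fields are assumed stand-ins for the
sk_receive_queue and sk_shutdown tests, not kernel API.

#define _XOPEN_SOURCE 700 /* for POLLRDNORM */
#include <poll.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_sock {
	bool rx_queued;    /* stand-in for a non-empty sk_receive_queue */
	bool rcv_shutdown; /* stand-in for sk->sk_shutdown & RCV_SHUTDOWN */
};

static unsigned int model_poll_mask(const struct fake_sock *sk)
{
	unsigned int mask = 0;

	/* readable when data is queued or the receive side was shut down */
	if (sk->rx_queued || sk->rcv_shutdown)
		mask |= POLLIN | POLLRDNORM;

	/* the removed hunk set POLLHUP here whenever the socket state was
	 * CAIF_DISCONNECTED, even with data still pending */
	return mask;
}

int main(void)
{
	struct fake_sock dead_with_data = { .rx_queued = true };

	/* prints POLLIN|POLLRDNORM; no POLLHUP for the dead connection */
	printf("mask=%#x\n", model_poll_mask(&dead_with_data));
	return 0;
}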