Diffstat (limited to 'net/caif/caif_socket.c')

 net/caif/caif_socket.c | 91 ++++++++++++++----------------------
 1 file changed, 35 insertions(+), 56 deletions(-)
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index c3a70c5c893a..3d0e09584fae 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -60,7 +60,7 @@ struct debug_fs_counter {
         atomic_t num_rx_flow_off;
         atomic_t num_rx_flow_on;
 };
-struct debug_fs_counter cnt;
+static struct debug_fs_counter cnt;
 #define dbfs_atomic_inc(v) atomic_inc(v)
 #define dbfs_atomic_dec(v) atomic_dec(v)
 #else
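The counter struct above only needs file scope, hence the new static. The #else branch of this #ifdef (CONFIG_DEBUG_FS in the mainline file) falls outside the hunk; presumably it stubs the helpers out so the counters cost nothing in non-debug builds, along these lines:

    /* Presumed no-op fallbacks when debugfs is compiled out; the
     * actual #else branch is not part of this hunk. */
    #define dbfs_atomic_inc(v)
    #define dbfs_atomic_dec(v)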
@@ -128,17 +128,17 @@ static void caif_read_unlock(struct sock *sk)
         mutex_unlock(&cf_sk->readlock);
 }
 
-int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
+static int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
 {
         /* A quarter of full buffer is used a low water mark */
         return cf_sk->sk.sk_rcvbuf / 4;
 }
 
-void caif_flow_ctrl(struct sock *sk, int mode)
+static void caif_flow_ctrl(struct sock *sk, int mode)
 {
         struct caifsock *cf_sk;
         cf_sk = container_of(sk, struct caifsock, sk);
-        if (cf_sk->layer.dn)
+        if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd)
                 cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
 }
 
@@ -146,7 +146,7 @@ void caif_flow_ctrl(struct sock *sk, int mode)
  * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
  * not dropped, but CAIF is sending flow off instead.
  */
-int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
         int err;
         int skb_len;
@@ -162,9 +162,8 @@ int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                         atomic_read(&cf_sk->sk.sk_rmem_alloc),
                         sk_rcvbuf_lowwater(cf_sk));
                 set_rx_flow_off(cf_sk);
-                if (cf_sk->layer.dn)
-                        cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
-                                        CAIF_MODEMCMD_FLOW_OFF_REQ);
+                dbfs_atomic_inc(&cnt.num_rx_flow_off);
+                caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
         }
 
         err = sk_filter(sk, skb);
@@ -175,9 +174,8 @@ int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                 trace_printk("CAIF: %s():"
                         " sending flow OFF due to rmem_schedule\n",
                         __func__);
-                if (cf_sk->layer.dn)
-                        cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
-                                        CAIF_MODEMCMD_FLOW_OFF_REQ);
+                dbfs_atomic_inc(&cnt.num_rx_flow_off);
+                caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
         }
         skb->dev = NULL;
         skb_set_owner_r(skb, sk);
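Both flow-off sites in caif_queue_rcv_skb() now bump the debugfs counter and go through caif_flow_ctrl() instead of open-coding the modemcmd call. Paired with caif_check_flow_release() in the next hunk, receive-side flow control becomes a simple hysteresis: flow off when receive memory runs high, flow on again once it drains below sk_rcvbuf / 4. A condensed sketch of the two halves, where rcvbuf_nearly_full() is a hypothetical stand-in for the exact threshold tests, which sit partly outside these hunks:

    /* Sketch of the on/off hysteresis; rcvbuf_nearly_full() is a
     * hypothetical predicate, not a function in this file. */
    if (rx_flow_is_on(cf_sk) && rcvbuf_nearly_full(sk)) {
            set_rx_flow_off(cf_sk);
            dbfs_atomic_inc(&cnt.num_rx_flow_off);
            caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
    } else if (!rx_flow_is_on(cf_sk) &&
               atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
            set_rx_flow_on(cf_sk);
            dbfs_atomic_inc(&cnt.num_rx_flow_on);
            caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
    }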
@@ -285,65 +283,51 @@ static void caif_check_flow_release(struct sock *sk)
 {
         struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
 
-        if (cf_sk->layer.dn == NULL || cf_sk->layer.dn->modemcmd == NULL)
-                return;
         if (rx_flow_is_on(cf_sk))
                 return;
 
         if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
                 dbfs_atomic_inc(&cnt.num_rx_flow_on);
                 set_rx_flow_on(cf_sk);
-                cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
-                                        CAIF_MODEMCMD_FLOW_ON_REQ);
+                caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
         }
 }
+
 /*
- * Copied from sock.c:sock_queue_rcv_skb(), and added check that user buffer
- * has sufficient size.
+ * Copied from unix_dgram_recvmsg, but removed credit checks,
+ * changed locking, address handling and added MSG_TRUNC.
  */
-
 static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
-                               struct msghdr *m, size_t buf_len, int flags)
+                               struct msghdr *m, size_t len, int flags)
 
 {
         struct sock *sk = sock->sk;
         struct sk_buff *skb;
-        int ret = 0;
-        int len;
+        int ret;
+        int copylen;
 
-        if (unlikely(!buf_len))
-                return -EINVAL;
+        ret = -EOPNOTSUPP;
+        if (m->msg_flags&MSG_OOB)
+                goto read_error;
 
         skb = skb_recv_datagram(sk, flags, 0 , &ret);
         if (!skb)
                 goto read_error;
-
-        len = skb->len;
-
-        if (skb && skb->len > buf_len && !(flags & MSG_PEEK)) {
-                len = buf_len;
-                /*
-                 * Push skb back on receive queue if buffer too small.
-                 * This has a built-in race where multi-threaded receive
-                 * may get packet in wrong order, but multiple read does
-                 * not really guarantee ordered delivery anyway.
-                 * Let's optimize for speed without taking locks.
-                 */
-
-                skb_queue_head(&sk->sk_receive_queue, skb);
-                ret = -EMSGSIZE;
-                goto read_error;
+        copylen = skb->len;
+        if (len < copylen) {
+                m->msg_flags |= MSG_TRUNC;
+                copylen = len;
         }
 
-        ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len);
+        ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
         if (ret)
-                goto read_error;
+                goto out_free;
 
+        ret = (flags & MSG_TRUNC) ? skb->len : copylen;
+out_free:
         skb_free_datagram(sk, skb);
-
         caif_check_flow_release(sk);
-
-        return len;
+        return ret;
 
 read_error:
         return ret;
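The rewritten caif_seqpkt_recvmsg() adopts the standard datagram convention: an oversized packet is truncated into the user buffer and MSG_TRUNC is raised in m->msg_flags, while passing MSG_TRUNC in the input flags makes the call return the full packet length instead of the copied length. This replaces the old behavior of requeueing the skb and failing with -EMSGSIZE. A minimal userspace sketch of the new contract, with socket setup elided and fd assumed to be a connected CAIF seqpacket socket:

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/socket.h>

    static void read_one(int fd)
    {
            char buf[256];
            /* With MSG_TRUNC in flags, recv() reports the real datagram
             * length even when it exceeds the buffer. */
            ssize_t n = recv(fd, buf, sizeof(buf), MSG_TRUNC);

            if (n < 0)
                    perror("recv");
            else if ((size_t)n > sizeof(buf))
                    fprintf(stderr, "truncated: kept %zu of %zd bytes\n",
                            sizeof(buf), n);
    }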
@@ -920,17 +904,17 @@ wait_connect:
         timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
 
         release_sock(sk);
-        err = wait_event_interruptible_timeout(*sk_sleep(sk),
+        err = -ERESTARTSYS;
+        timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
                         sk->sk_state != CAIF_CONNECTING,
                         timeo);
         lock_sock(sk);
-        if (err < 0)
+        if (timeo < 0)
                 goto out; /* -ERESTARTSYS */
-        if (err == 0 && sk->sk_state != CAIF_CONNECTED) {
-                err = -ETIMEDOUT;
-                goto out;
-        }
 
+        err = -ETIMEDOUT;
+        if (timeo == 0 && sk->sk_state != CAIF_CONNECTED)
+                goto out;
         if (sk->sk_state != CAIF_CONNECTED) {
                 sock->state = SS_UNCONNECTED;
                 err = sock_error(sk);
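The connect path previously stored the wait_event_interruptible_timeout() result in err, conflating the wait status with the error code. The macro returns a negative value if the sleep was interrupted by a signal, 0 if the timeout elapsed, and otherwise the remaining jiffies when the condition became true, so the rewrite keeps that tri-state in timeo and loads err with the matching errno up front. An annotated restatement of the new logic:

    err = -ERESTARTSYS;
    timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
                    sk->sk_state != CAIF_CONNECTING, timeo);
    if (timeo < 0)
            goto out;       /* signal: err is still -ERESTARTSYS */
    err = -ETIMEDOUT;
    if (timeo == 0 && sk->sk_state != CAIF_CONNECTED)
            goto out;       /* timer ran out before the connect completed */
    /* timeo > 0: condition met with 'timeo' jiffies left on the clock */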
@@ -945,7 +929,6 @@ out:
         return err;
 }
 
-
 /*
  * caif_release() - Disconnect a CAIF Socket
  * Copied and modified af_irda.c:irda_release().
@@ -1019,10 +1002,6 @@ static unsigned int caif_poll(struct file *file,
             (sk->sk_shutdown & RCV_SHUTDOWN))
                 mask |= POLLIN | POLLRDNORM;
 
-        /* Connection-based need to check for termination and startup */
-        if (sk->sk_state == CAIF_DISCONNECTED)
-                mask |= POLLHUP;
-
         /*
          * we set writable also when the other side has shut down the
          * connection. This prevents stuck sockets.
@@ -1194,7 +1173,7 @@ static struct net_proto_family caif_family_ops = {
         .owner = THIS_MODULE,
 };
 
-int af_caif_init(void)
+static int af_caif_init(void)
 {
         int err = sock_register(&caif_family_ops);
         if (!err)
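af_caif_init() can become static because nothing outside this file references it; the protocol family is brought up from the module init hook. The hunk is cut off here, so the wiring below is a presumed sketch rather than the file's actual tail:

    /* Presumed module wiring (not shown in this diff); the init
     * function name is an assumption. */
    static int __init caif_sktinit_module(void)
    {
            return af_caif_init();
    }
    module_init(caif_sktinit_module);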