author	Sjur Braendeland <sjur.brandeland@stericsson.com>	2010-03-30 09:56:26 -0400
committer	David S. Miller <davem@davemloft.net>	2010-03-30 22:08:48 -0400
commit	e6f95ec8db312491235b4f06343fbd991a82ce20 (patch)
tree	83bb84da9709b54eb9a7a6c8cf7e5345fe1678ba
parent	c72dfae2f77620e5b3fcee1beeee7e536a42b2ad (diff)
net-caif: add CAIF socket implementation
Implementation of CAIF sockets for the protocol and address family
PF_CAIF and AF_CAIF.
CAIF sockets are connection oriented, implementing the SOCK_SEQPACKET
and SOCK_STREAM interfaces and supporting both blocking and non-blocking modes.
Signed-off-by: Sjur Braendeland <sjur.brandeland@stericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	net/caif/caif_socket.c	1391
1 file changed, 1391 insertions(+), 0 deletions(-)
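For context, a minimal userspace sketch of the socket API this patch adds (not part of the patch itself). It assumes the AF_CAIF constant, the struct sockaddr_caif layout, and the CAIFPROTO_AT / CAIF_ATTYPE_PLAIN values exported by the accompanying linux/caif/caif_socket.h header; on a blocking socket, connect() returns once link setup completes in caif_connect() below.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/caif/caif_socket.h>

int main(void)
{
	struct sockaddr_caif addr;
	char buf[256];
	int fd, n;

	/* Connection-oriented, record-preserving CAIF socket in blocking mode. */
	fd = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_AT);
	if (fd < 0)
		return 1;

	memset(&addr, 0, sizeof(addr));
	addr.family = AF_CAIF;
	addr.u.at.type = CAIF_ATTYPE_PLAIN;

	/* Blocks until the link setup response arrives (see caif_connect()). */
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return 1;
	}

	/* On SOCK_SEQPACKET each write() becomes one CAIF packet. */
	write(fd, "AT\r", 3);
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		printf("received %d bytes\n", n);

	close(fd);
	return 0;
}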
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
new file mode 100644
index 000000000000..cdf62b9fefac
--- /dev/null
+++ b/net/caif/caif_socket.c
@@ -0,0 +1,1391 @@
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland sjur.brandeland@stericsson.com | ||
4 | * Per Sigmond per.sigmond@stericsson.com | ||
5 | * License terms: GNU General Public License (GPL) version 2 | ||
6 | */ | ||
7 | |||
8 | #include <linux/fs.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/spinlock.h> | ||
13 | #include <linux/mutex.h> | ||
14 | #include <linux/list.h> | ||
15 | #include <linux/wait.h> | ||
16 | #include <linux/poll.h> | ||
17 | #include <linux/tcp.h> | ||
18 | #include <linux/uaccess.h> | ||
19 | #include <asm/atomic.h> | ||
20 | |||
21 | #include <linux/caif/caif_socket.h> | ||
22 | #include <net/caif/caif_layer.h> | ||
23 | #include <net/caif/caif_dev.h> | ||
24 | #include <net/caif/cfpkt.h> | ||
25 | |||
26 | MODULE_LICENSE("GPL"); | ||
27 | |||
28 | #define CHNL_SKT_READ_QUEUE_HIGH 200 | ||
29 | #define CHNL_SKT_READ_QUEUE_LOW 100 | ||
30 | |||
31 | static int caif_sockbuf_size = 40000; | ||
32 | static atomic_t caif_nr_socks = ATOMIC_INIT(0); | ||
33 | |||
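/*
 * Bits in caifsock->conn_state track the connection life cycle (open,
 * pending, pending destroy, remote shutdown); bits in caifsock->flow_state
 * track CAIF flow control separately for the TX and RX directions.
 */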
34 | #define CONN_STATE_OPEN_BIT 1 | ||
35 | #define CONN_STATE_PENDING_BIT 2 | ||
36 | #define CONN_STATE_PEND_DESTROY_BIT 3 | ||
37 | #define CONN_REMOTE_SHUTDOWN_BIT 4 | ||
38 | |||
39 | #define TX_FLOW_ON_BIT 1 | ||
40 | #define RX_FLOW_ON_BIT 2 | ||
41 | |||
42 | #define STATE_IS_OPEN(cf_sk) test_bit(CONN_STATE_OPEN_BIT,\ | ||
43 | (void *) &(cf_sk)->conn_state) | ||
44 | #define STATE_IS_REMOTE_SHUTDOWN(cf_sk) test_bit(CONN_REMOTE_SHUTDOWN_BIT,\ | ||
45 | (void *) &(cf_sk)->conn_state) | ||
46 | #define STATE_IS_PENDING(cf_sk) test_bit(CONN_STATE_PENDING_BIT,\ | ||
47 | (void *) &(cf_sk)->conn_state) | ||
48 | #define STATE_IS_PENDING_DESTROY(cf_sk) test_bit(CONN_STATE_PEND_DESTROY_BIT,\ | ||
49 | (void *) &(cf_sk)->conn_state) | ||
50 | |||
51 | #define SET_STATE_PENDING_DESTROY(cf_sk) set_bit(CONN_STATE_PEND_DESTROY_BIT,\ | ||
52 | (void *) &(cf_sk)->conn_state) | ||
53 | #define SET_STATE_OPEN(cf_sk) set_bit(CONN_STATE_OPEN_BIT,\ | ||
54 | (void *) &(cf_sk)->conn_state) | ||
55 | #define SET_STATE_CLOSED(cf_sk) clear_bit(CONN_STATE_OPEN_BIT,\ | ||
56 | (void *) &(cf_sk)->conn_state) | ||
57 | #define SET_PENDING_ON(cf_sk) set_bit(CONN_STATE_PENDING_BIT,\ | ||
58 | (void *) &(cf_sk)->conn_state) | ||
59 | #define SET_PENDING_OFF(cf_sk) clear_bit(CONN_STATE_PENDING_BIT,\ | ||
60 | (void *) &(cf_sk)->conn_state) | ||
61 | #define SET_REMOTE_SHUTDOWN(cf_sk) set_bit(CONN_REMOTE_SHUTDOWN_BIT,\ | ||
62 | (void *) &(cf_sk)->conn_state) | ||
63 | |||
64 | #define SET_REMOTE_SHUTDOWN_OFF(dev) clear_bit(CONN_REMOTE_SHUTDOWN_BIT,\ | ||
65 | (void *) &(dev)->conn_state) | ||
66 | #define RX_FLOW_IS_ON(cf_sk) test_bit(RX_FLOW_ON_BIT,\ | ||
67 | (void *) &(cf_sk)->flow_state) | ||
68 | #define TX_FLOW_IS_ON(cf_sk) test_bit(TX_FLOW_ON_BIT,\ | ||
69 | (void *) &(cf_sk)->flow_state) | ||
70 | |||
71 | #define SET_RX_FLOW_OFF(cf_sk) clear_bit(RX_FLOW_ON_BIT,\ | ||
72 | (void *) &(cf_sk)->flow_state) | ||
73 | #define SET_RX_FLOW_ON(cf_sk) set_bit(RX_FLOW_ON_BIT,\ | ||
74 | (void *) &(cf_sk)->flow_state) | ||
75 | #define SET_TX_FLOW_OFF(cf_sk) clear_bit(TX_FLOW_ON_BIT,\ | ||
76 | (void *) &(cf_sk)->flow_state) | ||
77 | #define SET_TX_FLOW_ON(cf_sk) set_bit(TX_FLOW_ON_BIT,\ | ||
78 | (void *) &(cf_sk)->flow_state) | ||
79 | |||
80 | #define SKT_READ_FLAG 0x01 | ||
81 | #define SKT_WRITE_FLAG 0x02 | ||
82 | static struct dentry *debugfsdir; | ||
83 | #include <linux/debugfs.h> | ||
84 | |||
85 | #ifdef CONFIG_DEBUG_FS | ||
86 | struct debug_fs_counter { | ||
87 | atomic_t num_open; | ||
88 | atomic_t num_close; | ||
89 | atomic_t num_init; | ||
90 | atomic_t num_init_resp; | ||
91 | atomic_t num_init_fail_resp; | ||
92 | atomic_t num_deinit; | ||
93 | atomic_t num_deinit_resp; | ||
94 | atomic_t num_remote_shutdown_ind; | ||
95 | atomic_t num_tx_flow_off_ind; | ||
96 | atomic_t num_tx_flow_on_ind; | ||
97 | atomic_t num_rx_flow_off; | ||
98 | atomic_t num_rx_flow_on; | ||
99 | atomic_t skb_in_use; | ||
100 | atomic_t skb_alloc; | ||
101 | atomic_t skb_free; | ||
102 | }; | ||
103 | static struct debug_fs_counter cnt; | ||
104 | #define dbfs_atomic_inc(v) atomic_inc(v) | ||
105 | #define dbfs_atomic_dec(v) atomic_dec(v) | ||
106 | #else | ||
107 | #define dbfs_atomic_inc(v) | ||
108 | #define dbfs_atomic_dec(v) | ||
109 | #endif | ||
110 | |||
111 | /* The AF_CAIF socket */ | ||
112 | struct caifsock { | ||
113 | /* NOTE: sk has to be the first member */ | ||
114 | struct sock sk; | ||
115 | struct cflayer layer; | ||
116 | char name[CAIF_LAYER_NAME_SZ]; | ||
117 | u32 conn_state; | ||
118 | u32 flow_state; | ||
119 | struct cfpktq *pktq; | ||
120 | int file_mode; | ||
121 | struct caif_connect_request conn_req; | ||
122 | int read_queue_len; | ||
123 | /* protect updates of read_queue_len */ | ||
124 | spinlock_t read_queue_len_lock; | ||
125 | struct dentry *debugfs_socket_dir; | ||
126 | }; | ||
127 | |||
128 | static void drain_queue(struct caifsock *cf_sk); | ||
129 | |||
130 | /* Packet Receive Callback function called from CAIF Stack */ | ||
131 | static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt) | ||
132 | { | ||
133 | struct caifsock *cf_sk; | ||
134 | int read_queue_high; | ||
135 | cf_sk = container_of(layr, struct caifsock, layer); | ||
136 | |||
137 | if (!STATE_IS_OPEN(cf_sk)) { | ||
138 | /*FIXME: This should be allowed finally!*/ | ||
139 | pr_debug("CAIF: %s(): called after close request\n", __func__); | ||
140 | cfpkt_destroy(pkt); | ||
141 | return 0; | ||
142 | } | ||
143 | /* NOTE: This function may be called in Tasklet context! */ | ||
144 | |||
145 | /* The queue has its own lock */ | ||
146 | cfpkt_queue(cf_sk->pktq, pkt, 0); | ||
147 | |||
148 | spin_lock(&cf_sk->read_queue_len_lock); | ||
149 | cf_sk->read_queue_len++; | ||
150 | |||
151 | read_queue_high = (cf_sk->read_queue_len > CHNL_SKT_READ_QUEUE_HIGH); | ||
152 | spin_unlock(&cf_sk->read_queue_len_lock); | ||
153 | |||
154 | if (RX_FLOW_IS_ON(cf_sk) && read_queue_high) { | ||
155 | dbfs_atomic_inc(&cnt.num_rx_flow_off); | ||
156 | SET_RX_FLOW_OFF(cf_sk); | ||
157 | |||
158 | /* Send flow off (NOTE: must not sleep) */ | ||
159 | pr_debug("CAIF: %s():" | ||
160 | " sending flow OFF (queue len = %d)\n", | ||
161 | __func__, | ||
162 | cf_sk->read_queue_len); | ||
163 | caif_assert(cf_sk->layer.dn); | ||
164 | caif_assert(cf_sk->layer.dn->ctrlcmd); | ||
165 | |||
166 | (void) cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, | ||
167 | CAIF_MODEMCMD_FLOW_OFF_REQ); | ||
168 | } | ||
169 | |||
170 | /* Signal reader that data is available. */ | ||
171 | |||
172 | wake_up_interruptible(cf_sk->sk.sk_sleep); | ||
173 | |||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | /* Packet Flow Control Callback function called from CAIF */ | ||
178 | static void caif_sktflowctrl_cb(struct cflayer *layr, | ||
179 | enum caif_ctrlcmd flow, | ||
180 | int phyid) | ||
181 | { | ||
182 | struct caifsock *cf_sk; | ||
183 | |||
184 | /* NOTE: This function may be called in Tasklet context! */ | ||
185 | pr_debug("CAIF: %s(): flowctrl func called: %s.\n", | ||
186 | __func__, | ||
187 | flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" : | ||
188 | flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" : | ||
189 | flow == CAIF_CTRLCMD_INIT_RSP ? "INIT_RSP" : | ||
190 | flow == CAIF_CTRLCMD_DEINIT_RSP ? "DEINIT_RSP" : | ||
191 | flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "INIT_FAIL_RSP" : | ||
192 | flow == | ||
193 | CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ? "REMOTE_SHUTDOWN" : | ||
194 | "UNKNOWN CTRL COMMAND"); | ||
195 | |||
196 | if (layr == NULL) | ||
197 | return; | ||
198 | |||
199 | cf_sk = container_of(layr, struct caifsock, layer); | ||
200 | |||
201 | switch (flow) { | ||
202 | case CAIF_CTRLCMD_FLOW_ON_IND: | ||
203 | dbfs_atomic_inc(&cnt.num_tx_flow_on_ind); | ||
204 | /* Signal reader that data is available. */ | ||
205 | SET_TX_FLOW_ON(cf_sk); | ||
206 | wake_up_interruptible(cf_sk->sk.sk_sleep); | ||
207 | break; | ||
208 | |||
209 | case CAIF_CTRLCMD_FLOW_OFF_IND: | ||
210 | dbfs_atomic_inc(&cnt.num_tx_flow_off_ind); | ||
211 | SET_TX_FLOW_OFF(cf_sk); | ||
212 | break; | ||
213 | |||
214 | case CAIF_CTRLCMD_INIT_RSP: | ||
215 | dbfs_atomic_inc(&cnt.num_init_resp); | ||
216 | /* Signal reader that data is available. */ | ||
217 | caif_assert(STATE_IS_OPEN(cf_sk)); | ||
218 | SET_PENDING_OFF(cf_sk); | ||
219 | SET_TX_FLOW_ON(cf_sk); | ||
220 | wake_up_interruptible(cf_sk->sk.sk_sleep); | ||
221 | break; | ||
222 | |||
223 | case CAIF_CTRLCMD_DEINIT_RSP: | ||
224 | dbfs_atomic_inc(&cnt.num_deinit_resp); | ||
225 | caif_assert(!STATE_IS_OPEN(cf_sk)); | ||
226 | SET_PENDING_OFF(cf_sk); | ||
227 | if (!STATE_IS_PENDING_DESTROY(cf_sk)) { | ||
228 | if (cf_sk->sk.sk_sleep != NULL) | ||
229 | wake_up_interruptible(cf_sk->sk.sk_sleep); | ||
230 | } | ||
231 | dbfs_atomic_inc(&cnt.num_deinit); | ||
232 | sock_put(&cf_sk->sk); | ||
233 | break; | ||
234 | |||
235 | case CAIF_CTRLCMD_INIT_FAIL_RSP: | ||
236 | dbfs_atomic_inc(&cnt.num_init_fail_resp); | ||
237 | caif_assert(STATE_IS_OPEN(cf_sk)); | ||
238 | SET_STATE_CLOSED(cf_sk); | ||
239 | SET_PENDING_OFF(cf_sk); | ||
240 | SET_TX_FLOW_OFF(cf_sk); | ||
241 | wake_up_interruptible(cf_sk->sk.sk_sleep); | ||
242 | break; | ||
243 | |||
244 | case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND: | ||
245 | dbfs_atomic_inc(&cnt.num_remote_shutdown_ind); | ||
246 | SET_REMOTE_SHUTDOWN(cf_sk); | ||
247 | /* Use sk_shutdown to indicate remote shutdown indication */ | ||
248 | cf_sk->sk.sk_shutdown |= RCV_SHUTDOWN; | ||
249 | cf_sk->file_mode = 0; | ||
250 | wake_up_interruptible(cf_sk->sk.sk_sleep); | ||
251 | break; | ||
252 | |||
253 | default: | ||
254 | pr_debug("CAIF: %s(): Unexpected flow command %d\n", | ||
255 | __func__, flow); | ||
256 | } | ||
257 | } | ||
258 | |||
259 | static void skb_destructor(struct sk_buff *skb) | ||
260 | { | ||
261 | dbfs_atomic_inc(&cnt.skb_free); | ||
262 | dbfs_atomic_dec(&cnt.skb_in_use); | ||
263 | } | ||
264 | |||
265 | |||
266 | static int caif_recvmsg(struct kiocb *iocb, struct socket *sock, | ||
267 | struct msghdr *m, size_t buf_len, int flags) | ||
268 | |||
269 | { | ||
270 | struct sock *sk = sock->sk; | ||
271 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); | ||
272 | struct cfpkt *pkt = NULL; | ||
273 | size_t len; | ||
274 | int result; | ||
275 | struct sk_buff *skb; | ||
276 | ssize_t ret = -EIO; | ||
277 | int read_queue_low; | ||
278 | |||
279 | if (cf_sk == NULL) { | ||
280 | pr_debug("CAIF: %s(): private_data not set!\n", | ||
281 | __func__); | ||
282 | ret = -EBADFD; | ||
283 | goto read_error; | ||
284 | } | ||
285 | |||
286 | /* Don't do multiple iovec entries yet */ | ||
287 | if (m->msg_iovlen != 1) | ||
288 | return -EOPNOTSUPP; | ||
289 | |||
290 | if (unlikely(!buf_len)) | ||
291 | return -EINVAL; | ||
292 | |||
293 | lock_sock(&(cf_sk->sk)); | ||
294 | |||
295 | caif_assert(cf_sk->pktq); | ||
296 | |||
297 | if (!STATE_IS_OPEN(cf_sk)) { | ||
298 | /* Socket is closed or closing. */ | ||
299 | if (!STATE_IS_PENDING(cf_sk)) { | ||
300 | pr_debug("CAIF: %s(): socket is closed (by remote)\n", | ||
301 | __func__); | ||
302 | ret = -EPIPE; | ||
303 | } else { | ||
304 | pr_debug("CAIF: %s(): socket is closing..\n", __func__); | ||
305 | ret = -EBADF; | ||
306 | } | ||
307 | goto read_error; | ||
308 | } | ||
309 | /* Socket is open or opening. */ | ||
310 | if (STATE_IS_PENDING(cf_sk)) { | ||
311 | pr_debug("CAIF: %s(): socket is opening...\n", __func__); | ||
312 | |||
313 | if (flags & MSG_DONTWAIT) { | ||
314 | /* We can't block. */ | ||
315 | pr_debug("CAIF: %s():state pending and MSG_DONTWAIT\n", | ||
316 | __func__); | ||
317 | ret = -EAGAIN; | ||
318 | goto read_error; | ||
319 | } | ||
320 | |||
321 | /* | ||
322 | * Blocking mode; state is pending and we need to wait | ||
323 | * for its conclusion. | ||
324 | */ | ||
325 | release_sock(&cf_sk->sk); | ||
326 | |||
327 | result = | ||
328 | wait_event_interruptible(*cf_sk->sk.sk_sleep, | ||
329 | !STATE_IS_PENDING(cf_sk)); | ||
330 | |||
331 | lock_sock(&(cf_sk->sk)); | ||
332 | |||
333 | if (result == -ERESTARTSYS) { | ||
334 | pr_debug("CAIF: %s(): wait_event_interruptible" | ||
335 | " woken by a signal (1)", __func__); | ||
336 | ret = -ERESTARTSYS; | ||
337 | goto read_error; | ||
338 | } | ||
339 | } | ||
340 | |||
341 | if (STATE_IS_REMOTE_SHUTDOWN(cf_sk) || | ||
342 | !STATE_IS_OPEN(cf_sk) || | ||
343 | STATE_IS_PENDING(cf_sk)) { | ||
344 | |||
345 | pr_debug("CAIF: %s(): socket closed\n", | ||
346 | __func__); | ||
347 | ret = -ESHUTDOWN; | ||
348 | goto read_error; | ||
349 | } | ||
350 | |||
351 | /* | ||
352 | * Block if we don't have any received buffers. | ||
353 | * The queue has its own lock. | ||
354 | */ | ||
355 | while ((pkt = cfpkt_qpeek(cf_sk->pktq)) == NULL) { | ||
356 | |||
357 | if (flags & MSG_DONTWAIT) { | ||
358 | pr_debug("CAIF: %s(): MSG_DONTWAIT\n", __func__); | ||
359 | ret = -EAGAIN; | ||
360 | goto read_error; | ||
361 | } | ||
362 | trace_printk("CAIF: %s() wait_event\n", __func__); | ||
363 | |||
364 | /* Let writers in. */ | ||
365 | release_sock(&cf_sk->sk); | ||
366 | |||
367 | /* Block reader until data arrives or socket is closed. */ | ||
368 | if (wait_event_interruptible(*cf_sk->sk.sk_sleep, | ||
369 | cfpkt_qpeek(cf_sk->pktq) | ||
370 | || STATE_IS_REMOTE_SHUTDOWN(cf_sk) | ||
371 | || !STATE_IS_OPEN(cf_sk)) == | ||
372 | -ERESTARTSYS) { | ||
373 | pr_debug("CAIF: %s():" | ||
374 | " wait_event_interruptible woken by " | ||
375 | "a signal, signal_pending(current) = %d\n", | ||
376 | __func__, | ||
377 | signal_pending(current)); | ||
378 | return -ERESTARTSYS; | ||
379 | } | ||
380 | |||
381 | trace_printk("CAIF: %s() awake\n", __func__); | ||
382 | if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) { | ||
383 | pr_debug("CAIF: %s(): " | ||
384 | "received remote_shutdown indication\n", | ||
385 | __func__); | ||
386 | ret = -ESHUTDOWN; | ||
387 | goto read_error_no_unlock; | ||
388 | } | ||
389 | |||
390 | /* I want to be alone on cf_sk (except status and queue). */ | ||
391 | lock_sock(&(cf_sk->sk)); | ||
392 | |||
393 | if (!STATE_IS_OPEN(cf_sk)) { | ||
394 | /* Someone closed the link, report error. */ | ||
395 | pr_debug("CAIF: %s(): remote end shutdown!\n", | ||
396 | __func__); | ||
397 | ret = -EPIPE; | ||
398 | goto read_error; | ||
399 | } | ||
400 | } | ||
401 | |||
402 | /* The queue has its own lock. */ | ||
403 | len = cfpkt_getlen(pkt); | ||
404 | |||
405 | /* Check max length that can be copied. */ | ||
406 | if (len <= buf_len) | ||
407 | pkt = cfpkt_dequeue(cf_sk->pktq); | ||
408 | else { | ||
409 | pr_debug("CAIF: %s(): user buffer too small (%ld,%ld)\n", | ||
410 | __func__, (long) len, (long) buf_len); | ||
411 | if (sock->type == SOCK_SEQPACKET) { | ||
412 | ret = -EMSGSIZE; | ||
413 | goto read_error; | ||
414 | } | ||
415 | len = buf_len; | ||
416 | } | ||
417 | |||
418 | |||
419 | spin_lock(&cf_sk->read_queue_len_lock); | ||
420 | cf_sk->read_queue_len--; | ||
421 | read_queue_low = (cf_sk->read_queue_len < CHNL_SKT_READ_QUEUE_LOW); | ||
422 | spin_unlock(&cf_sk->read_queue_len_lock); | ||
423 | |||
424 | if (!RX_FLOW_IS_ON(cf_sk) && read_queue_low) { | ||
425 | dbfs_atomic_inc(&cnt.num_rx_flow_on); | ||
426 | SET_RX_FLOW_ON(cf_sk); | ||
427 | |||
428 | /* Send flow on. */ | ||
429 | pr_debug("CAIF: %s(): sending flow ON (queue len = %d)\n", | ||
430 | __func__, cf_sk->read_queue_len); | ||
431 | caif_assert(cf_sk->layer.dn); | ||
432 | caif_assert(cf_sk->layer.dn->ctrlcmd); | ||
433 | (void) cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, | ||
434 | CAIF_MODEMCMD_FLOW_ON_REQ); | ||
435 | |||
436 | caif_assert(cf_sk->read_queue_len >= 0); | ||
437 | } | ||
438 | |||
439 | skb = cfpkt_tonative(pkt); | ||
440 | result = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len); | ||
441 | skb_pull(skb, len); | ||
442 | |||
443 | if (result) { | ||
444 | pr_debug("CAIF: %s(): copy to_iovec failed\n", __func__); | ||
445 | cfpkt_destroy(pkt); | ||
446 | ret = -EFAULT; | ||
447 | goto read_error; | ||
448 | } | ||
449 | |||
450 | /* Free packet and remove from queue */ | ||
451 | if (skb->len == 0) | ||
452 | skb_free_datagram(sk, skb); | ||
453 | |||
454 | /* Let the others in. */ | ||
455 | release_sock(&cf_sk->sk); | ||
456 | return len; | ||
457 | |||
458 | read_error: | ||
459 | release_sock(&cf_sk->sk); | ||
460 | read_error_no_unlock: | ||
461 | return ret; | ||
462 | } | ||
463 | |||
464 | /* Send a message as a consequence of sendmsg, sendto or write. */ | ||
465 | static int caif_sendmsg(struct kiocb *kiocb, struct socket *sock, | ||
466 | struct msghdr *msg, size_t len) | ||
467 | { | ||
468 | |||
469 | struct sock *sk = sock->sk; | ||
470 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); | ||
471 | size_t payload_size = msg->msg_iov->iov_len; | ||
472 | struct cfpkt *pkt = NULL; | ||
473 | struct caif_payload_info info; | ||
474 | unsigned char *txbuf; | ||
475 | ssize_t ret = -EIO; | ||
476 | int result; | ||
477 | struct sk_buff *skb; | ||
478 | caif_assert(msg->msg_iovlen == 1); | ||
479 | |||
480 | if (cf_sk == NULL) { | ||
481 | pr_debug("CAIF: %s(): private_data not set!\n", | ||
482 | __func__); | ||
483 | ret = -EBADFD; | ||
484 | goto write_error_no_unlock; | ||
485 | } | ||
486 | |||
487 | if (unlikely(msg->msg_iov->iov_base == NULL)) { | ||
488 | pr_warning("CAIF: %s(): Buffer is NULL.\n", __func__); | ||
489 | ret = -EINVAL; | ||
490 | goto write_error_no_unlock; | ||
491 | } | ||
492 | |||
493 | if (payload_size > CAIF_MAX_PAYLOAD_SIZE) { | ||
494 | pr_debug("CAIF: %s(): buffer too long\n", __func__); | ||
495 | if (sock->type == SOCK_SEQPACKET) { | ||
496 | ret = -EINVAL; | ||
497 | goto write_error_no_unlock; | ||
498 | } | ||
499 | payload_size = CAIF_MAX_PAYLOAD_SIZE; | ||
500 | } | ||
501 | |||
502 | /* I want to be alone on cf_sk (except status and queue) */ | ||
503 | lock_sock(&(cf_sk->sk)); | ||
504 | |||
505 | caif_assert(cf_sk->pktq); | ||
506 | |||
507 | if (!STATE_IS_OPEN(cf_sk)) { | ||
508 | /* Socket is closed or closing */ | ||
509 | if (!STATE_IS_PENDING(cf_sk)) { | ||
510 | pr_debug("CAIF: %s(): socket is closed (by remote)\n", | ||
511 | __func__); | ||
512 | ret = -EPIPE; | ||
513 | } else { | ||
514 | pr_debug("CAIF: %s(): socket is closing...\n", | ||
515 | __func__); | ||
516 | ret = -EBADF; | ||
517 | } | ||
518 | goto write_error; | ||
519 | } | ||
520 | |||
521 | /* Socket is open or opening */ | ||
522 | if (STATE_IS_PENDING(cf_sk)) { | ||
523 | pr_debug("CAIF: %s(): socket is opening...\n", __func__); | ||
524 | |||
525 | if (msg->msg_flags & MSG_DONTWAIT) { | ||
526 | /* We can't block */ | ||
527 | trace_printk("CAIF: %s():state pending:" | ||
528 | "state=MSG_DONTWAIT\n", __func__); | ||
529 | ret = -EAGAIN; | ||
530 | goto write_error; | ||
531 | } | ||
532 | /* Let readers in */ | ||
533 | release_sock(&cf_sk->sk); | ||
534 | |||
535 | /* | ||
536 | * Blocking mode; state is pending and we need to wait | ||
537 | * for its conclusion. | ||
538 | */ | ||
539 | result = | ||
540 | wait_event_interruptible(*cf_sk->sk.sk_sleep, | ||
541 | !STATE_IS_PENDING(cf_sk)); | ||
542 | /* I want to be alone on cf_sk (except status and queue) */ | ||
543 | lock_sock(&(cf_sk->sk)); | ||
544 | |||
545 | if (result == -ERESTARTSYS) { | ||
546 | pr_debug("CAIF: %s(): wait_event_interruptible" | ||
547 | " woken by a signal (1)", __func__); | ||
548 | ret = -ERESTARTSYS; | ||
549 | goto write_error; | ||
550 | } | ||
551 | } | ||
552 | if (STATE_IS_REMOTE_SHUTDOWN(cf_sk) || | ||
553 | !STATE_IS_OPEN(cf_sk) || | ||
554 | STATE_IS_PENDING(cf_sk)) { | ||
555 | |||
556 | pr_debug("CAIF: %s(): socket closed\n", | ||
557 | __func__); | ||
558 | ret = -ESHUTDOWN; | ||
559 | goto write_error; | ||
560 | } | ||
561 | |||
562 | if (!TX_FLOW_IS_ON(cf_sk)) { | ||
563 | |||
564 | /* Flow is off. Check non-block flag */ | ||
565 | if (msg->msg_flags & MSG_DONTWAIT) { | ||
566 | trace_printk("CAIF: %s(): MSG_DONTWAIT and tx flow off", | ||
567 | __func__); | ||
568 | ret = -EAGAIN; | ||
569 | goto write_error; | ||
570 | } | ||
571 | |||
572 | /* release lock before waiting */ | ||
573 | release_sock(&cf_sk->sk); | ||
574 | |||
575 | /* Wait until flow is on or socket is closed */ | ||
576 | if (wait_event_interruptible(*cf_sk->sk.sk_sleep, | ||
577 | TX_FLOW_IS_ON(cf_sk) | ||
578 | || !STATE_IS_OPEN(cf_sk) | ||
579 | || STATE_IS_REMOTE_SHUTDOWN(cf_sk) | ||
580 | ) == -ERESTARTSYS) { | ||
581 | pr_debug("CAIF: %s():" | ||
582 | " wait_event_interruptible woken by a signal", | ||
583 | __func__); | ||
584 | ret = -ERESTARTSYS; | ||
585 | goto write_error_no_unlock; | ||
586 | } | ||
587 | |||
588 | /* I want to be alone on cf_sk (except status and queue) */ | ||
589 | lock_sock(&(cf_sk->sk)); | ||
590 | |||
591 | if (!STATE_IS_OPEN(cf_sk)) { | ||
592 | /* someone closed the link, report error */ | ||
593 | pr_debug("CAIF: %s(): remote end shutdown!\n", | ||
594 | __func__); | ||
595 | ret = -EPIPE; | ||
596 | goto write_error; | ||
597 | } | ||
598 | |||
599 | if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) { | ||
600 | pr_debug("CAIF: %s(): " | ||
601 | "received remote_shutdown indication\n", | ||
602 | __func__); | ||
603 | ret = -ESHUTDOWN; | ||
604 | goto write_error; | ||
605 | } | ||
606 | } | ||
607 | |||
608 | pkt = cfpkt_create(payload_size); | ||
609 | skb = (struct sk_buff *)pkt; | ||
610 | skb->destructor = skb_destructor; | ||
611 | skb->sk = sk; | ||
612 | dbfs_atomic_inc(&cnt.skb_alloc); | ||
613 | dbfs_atomic_inc(&cnt.skb_in_use); | ||
614 | if (cfpkt_raw_append(pkt, (void **) &txbuf, payload_size) < 0) { | ||
615 | pr_debug("CAIF: %s(): cfpkt_raw_append failed\n", __func__); | ||
616 | cfpkt_destroy(pkt); | ||
617 | ret = -EINVAL; | ||
618 | goto write_error; | ||
619 | } | ||
620 | |||
621 | /* Copy data into buffer. */ | ||
622 | if (copy_from_user(txbuf, msg->msg_iov->iov_base, payload_size)) { | ||
623 | pr_debug("CAIF: %s(): copy_from_user returned non zero.\n", | ||
624 | __func__); | ||
625 | cfpkt_destroy(pkt); | ||
626 | ret = -EINVAL; | ||
627 | goto write_error; | ||
628 | } | ||
629 | memset(&info, 0, sizeof(info)); | ||
630 | |||
631 | /* Send the packet down the stack. */ | ||
632 | caif_assert(cf_sk->layer.dn); | ||
633 | caif_assert(cf_sk->layer.dn->transmit); | ||
634 | |||
635 | do { | ||
636 | ret = cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt); | ||
637 | |||
638 | if (likely((ret >= 0) || (ret != -EAGAIN))) | ||
639 | break; | ||
640 | |||
641 | /* EAGAIN - retry */ | ||
642 | if (msg->msg_flags & MSG_DONTWAIT) { | ||
643 | pr_debug("CAIF: %s(): NONBLOCK and transmit failed," | ||
644 | " error = %ld\n", __func__, (long) ret); | ||
645 | ret = -EAGAIN; | ||
646 | goto write_error; | ||
647 | } | ||
648 | |||
649 | /* Let readers in */ | ||
650 | release_sock(&cf_sk->sk); | ||
651 | |||
652 | /* Wait until flow is on or socket is closed */ | ||
653 | if (wait_event_interruptible(*cf_sk->sk.sk_sleep, | ||
654 | TX_FLOW_IS_ON(cf_sk) | ||
655 | || !STATE_IS_OPEN(cf_sk) | ||
656 | || STATE_IS_REMOTE_SHUTDOWN(cf_sk) | ||
657 | ) == -ERESTARTSYS) { | ||
658 | pr_debug("CAIF: %s(): wait_event_interruptible" | ||
659 | " woken by a signal", __func__); | ||
660 | ret = -ERESTARTSYS; | ||
661 | goto write_error_no_unlock; | ||
662 | } | ||
663 | |||
664 | /* I want to be alone on cf_sk (except status and queue) */ | ||
665 | lock_sock(&(cf_sk->sk)); | ||
666 | |||
667 | } while (ret == -EAGAIN); | ||
668 | |||
669 | if (ret < 0) { | ||
670 | cfpkt_destroy(pkt); | ||
671 | pr_debug("CAIF: %s(): transmit failed, error = %ld\n", | ||
672 | __func__, (long) ret); | ||
673 | |||
674 | goto write_error; | ||
675 | } | ||
676 | |||
677 | release_sock(&cf_sk->sk); | ||
678 | return payload_size; | ||
679 | |||
680 | write_error: | ||
681 | release_sock(&cf_sk->sk); | ||
682 | write_error_no_unlock: | ||
683 | return ret; | ||
684 | } | ||
685 | |||
686 | static unsigned int caif_poll(struct file *file, struct socket *sock, | ||
687 | poll_table *wait) | ||
688 | { | ||
689 | struct sock *sk = sock->sk; | ||
690 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); | ||
691 | u32 mask = 0; | ||
692 | poll_wait(file, sk->sk_sleep, wait); | ||
693 | lock_sock(&(cf_sk->sk)); | ||
694 | if (!STATE_IS_OPEN(cf_sk)) { | ||
695 | if (!STATE_IS_PENDING(cf_sk)) | ||
696 | mask |= POLLHUP; | ||
697 | } else { | ||
698 | if (cfpkt_qpeek(cf_sk->pktq) != NULL) | ||
699 | mask |= (POLLIN | POLLRDNORM); | ||
700 | if (TX_FLOW_IS_ON(cf_sk)) | ||
701 | mask |= (POLLOUT | POLLWRNORM); | ||
702 | } | ||
703 | release_sock(&cf_sk->sk); | ||
704 | trace_printk("CAIF: %s(): poll mask=0x%04x\n", | ||
705 | __func__, mask); | ||
706 | return mask; | ||
707 | } | ||
708 | |||
709 | static void drain_queue(struct caifsock *cf_sk) | ||
710 | { | ||
711 | struct cfpkt *pkt = NULL; | ||
712 | |||
713 | /* Empty the queue */ | ||
714 | do { | ||
715 | /* The queue has its own lock */ | ||
716 | if (!cf_sk->pktq) | ||
717 | break; | ||
718 | |||
719 | pkt = cfpkt_dequeue(cf_sk->pktq); | ||
720 | if (!pkt) | ||
721 | break; | ||
722 | pr_debug("CAIF: %s(): freeing packet from read queue\n", | ||
723 | __func__); | ||
724 | cfpkt_destroy(pkt); | ||
725 | |||
726 | } while (1); | ||
727 | |||
728 | cf_sk->read_queue_len = 0; | ||
729 | } | ||
730 | |||
731 | static int setsockopt(struct socket *sock, | ||
732 | int lvl, int opt, char __user *ov, unsigned int ol) | ||
733 | { | ||
734 | struct sock *sk = sock->sk; | ||
735 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); | ||
736 | int prio, linksel; | ||
737 | struct ifreq ifreq; | ||
738 | |||
739 | if (STATE_IS_OPEN(cf_sk)) { | ||
740 | pr_debug("CAIF: %s(): setsockopt " | ||
741 | "cannot be done on a connected socket\n", | ||
742 | __func__); | ||
743 | return -ENOPROTOOPT; | ||
744 | } | ||
745 | switch (opt) { | ||
746 | case CAIFSO_LINK_SELECT: | ||
747 | if (ol < sizeof(int)) { | ||
748 | pr_debug("CAIF: %s(): setsockopt" | ||
749 | " CAIFSO_CHANNEL_CONFIG bad size\n", __func__); | ||
750 | return -EINVAL; | ||
751 | } | ||
752 | if (lvl != SOL_CAIF) | ||
753 | goto bad_sol; | ||
754 | if (copy_from_user(&linksel, ov, sizeof(int))) | ||
755 | return -EINVAL; | ||
756 | lock_sock(&(cf_sk->sk)); | ||
757 | cf_sk->conn_req.link_selector = linksel; | ||
758 | release_sock(&cf_sk->sk); | ||
759 | return 0; | ||
760 | |||
761 | case SO_PRIORITY: | ||
762 | if (lvl != SOL_SOCKET) | ||
763 | goto bad_sol; | ||
764 | if (ol < sizeof(int)) { | ||
765 | pr_debug("CAIF: %s(): setsockopt" | ||
766 | " SO_PRIORITY bad size\n", __func__); | ||
767 | return -EINVAL; | ||
768 | } | ||
769 | if (copy_from_user(&prio, ov, sizeof(int))) | ||
770 | return -EINVAL; | ||
771 | lock_sock(&(cf_sk->sk)); | ||
772 | cf_sk->conn_req.priority = prio; | ||
773 | pr_debug("CAIF: %s(): Setting sockopt priority=%d\n", __func__, | ||
774 | cf_sk->conn_req.priority); | ||
775 | release_sock(&cf_sk->sk); | ||
776 | return 0; | ||
777 | |||
778 | case SO_BINDTODEVICE: | ||
779 | if (lvl != SOL_SOCKET) | ||
780 | goto bad_sol; | ||
781 | if (ol < sizeof(struct ifreq)) { | ||
782 | pr_debug("CAIF: %s(): setsockopt" | ||
783 | " SO_PRIORITY bad size\n", __func__); | ||
784 | return -EINVAL; | ||
785 | } | ||
786 | if (copy_from_user(&ifreq, ov, sizeof(ifreq))) | ||
787 | return -EFAULT; | ||
788 | lock_sock(&(cf_sk->sk)); | ||
789 | strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name, | ||
790 | sizeof(cf_sk->conn_req.link_name)); | ||
791 | cf_sk->conn_req.link_name | ||
792 | [sizeof(cf_sk->conn_req.link_name)-1] = 0; | ||
793 | release_sock(&cf_sk->sk); | ||
794 | return 0; | ||
795 | |||
796 | case CAIFSO_REQ_PARAM: | ||
797 | if (lvl != SOL_CAIF) | ||
798 | goto bad_sol; | ||
799 | if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL) | ||
800 | return -ENOPROTOOPT; | ||
801 | if (ol > sizeof(cf_sk->conn_req.param.data)) | ||
802 | goto req_param_bad_size; | ||
803 | |||
804 | lock_sock(&(cf_sk->sk)); | ||
805 | cf_sk->conn_req.param.size = ol; | ||
806 | if (copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) { | ||
807 | release_sock(&cf_sk->sk); | ||
808 | req_param_bad_size: | ||
809 | pr_debug("CAIF: %s(): setsockopt" | ||
810 | " CAIFSO_CHANNEL_CONFIG bad size\n", __func__); | ||
811 | return -EINVAL; | ||
812 | } | ||
813 | |||
814 | release_sock(&cf_sk->sk); | ||
815 | return 0; | ||
816 | |||
817 | default: | ||
818 | pr_debug("CAIF: %s(): unhandled option %d\n", __func__, opt); | ||
819 | return -EINVAL; | ||
820 | } | ||
821 | |||
822 | return 0; | ||
823 | bad_sol: | ||
824 | pr_debug("CAIF: %s(): setsockopt bad level\n", __func__); | ||
825 | return -ENOPROTOOPT; | ||
826 | |||
827 | } | ||
828 | |||
829 | static int caif_connect(struct socket *sock, struct sockaddr *uservaddr, | ||
830 | int sockaddr_len, int flags) | ||
831 | { | ||
832 | struct caifsock *cf_sk = NULL; | ||
833 | int result = -1; | ||
834 | int mode = 0; | ||
835 | int ret = -EIO; | ||
836 | struct sock *sk = sock->sk; | ||
837 | BUG_ON(sk == NULL); | ||
838 | |||
839 | cf_sk = container_of(sk, struct caifsock, sk); | ||
840 | |||
841 | trace_printk("CAIF: %s(): cf_sk=%p OPEN=%d, TX_FLOW=%d, RX_FLOW=%d\n", | ||
842 | __func__, cf_sk, | ||
843 | STATE_IS_OPEN(cf_sk), | ||
844 | TX_FLOW_IS_ON(cf_sk), RX_FLOW_IS_ON(cf_sk)); | ||
845 | |||
846 | |||
847 | if (sock->type == SOCK_SEQPACKET || sock->type == SOCK_STREAM) | ||
848 | sock->state = SS_CONNECTING; | ||
849 | else | ||
850 | goto out; | ||
851 | |||
852 | /* I want to be alone on cf_sk (except status and queue) */ | ||
853 | lock_sock(&(cf_sk->sk)); | ||
854 | |||
855 | if (sockaddr_len != sizeof(struct sockaddr_caif)) { | ||
856 | pr_debug("CAIF: %s(): Bad address len (%ld,%lu)\n", | ||
857 | __func__, (long) sockaddr_len, | ||
858 | (long unsigned) sizeof(struct sockaddr_caif)); | ||
859 | ret = -EINVAL; | ||
860 | goto open_error; | ||
861 | } | ||
862 | |||
863 | if (uservaddr->sa_family != AF_CAIF) { | ||
864 | pr_debug("CAIF: %s(): Bad address family (%d)\n", | ||
865 | __func__, uservaddr->sa_family); | ||
866 | ret = -EAFNOSUPPORT; | ||
867 | goto open_error; | ||
868 | } | ||
869 | |||
870 | memcpy(&cf_sk->conn_req.sockaddr, uservaddr, | ||
871 | sizeof(struct sockaddr_caif)); | ||
872 | |||
873 | dbfs_atomic_inc(&cnt.num_open); | ||
874 | mode = SKT_READ_FLAG | SKT_WRITE_FLAG; | ||
875 | |||
876 | /* If socket is not open, make sure socket is in fully closed state */ | ||
877 | if (!STATE_IS_OPEN(cf_sk)) { | ||
878 | /* Has link close response been received (if we ever sent it)?*/ | ||
879 | if (STATE_IS_PENDING(cf_sk)) { | ||
880 | /* | ||
881 | * Still waiting for close response from remote. | ||
882 | * If opened non-blocking, report "would block" | ||
883 | */ | ||
884 | if (flags & O_NONBLOCK) { | ||
885 | pr_debug("CAIF: %s(): O_NONBLOCK" | ||
886 | " && close pending\n", __func__); | ||
887 | ret = -EAGAIN; | ||
888 | goto open_error; | ||
889 | } | ||
890 | |||
891 | pr_debug("CAIF: %s(): Wait for close response" | ||
892 | " from remote...\n", __func__); | ||
893 | |||
894 | release_sock(&cf_sk->sk); | ||
895 | |||
896 | /* | ||
897 | * Blocking mode; close is pending and we need to wait | ||
898 | * for its conclusion. | ||
899 | */ | ||
900 | result = | ||
901 | wait_event_interruptible(*cf_sk->sk.sk_sleep, | ||
902 | !STATE_IS_PENDING(cf_sk)); | ||
903 | |||
904 | lock_sock(&(cf_sk->sk)); | ||
905 | if (result == -ERESTARTSYS) { | ||
906 | pr_debug("CAIF: %s(): wait_event_interruptible" | ||
907 | " woken by a signal (1)", __func__); | ||
908 | ret = -ERESTARTSYS; | ||
909 | goto open_error; | ||
910 | } | ||
911 | } | ||
912 | } | ||
913 | |||
914 | /* socket is now either closed, pending open or open */ | ||
915 | if (STATE_IS_OPEN(cf_sk) && !STATE_IS_PENDING(cf_sk)) { | ||
916 | /* Open */ | ||
917 | pr_debug("CAIF: %s(): Socket is already opened (cf_sk=%p)" | ||
918 | " check access f_flags = 0x%x file_mode = 0x%x\n", | ||
919 | __func__, cf_sk, mode, cf_sk->file_mode); | ||
920 | |||
921 | } else { | ||
922 | /* We are closed or pending open. | ||
923 | * If closed: send link setup | ||
924 | * If pending open: link setup already sent (we could have been | ||
925 | * interrupted by a signal last time) | ||
926 | */ | ||
927 | if (!STATE_IS_OPEN(cf_sk)) { | ||
928 | /* First opening of file; connect lower layers: */ | ||
929 | /* Drain queue (very unlikely) */ | ||
930 | drain_queue(cf_sk); | ||
931 | |||
932 | cf_sk->layer.receive = caif_sktrecv_cb; | ||
933 | SET_STATE_OPEN(cf_sk); | ||
934 | SET_PENDING_ON(cf_sk); | ||
935 | |||
936 | /* Register this channel. */ | ||
937 | result = | ||
938 | caif_connect_client(&cf_sk->conn_req, | ||
939 | &cf_sk->layer); | ||
940 | if (result < 0) { | ||
941 | pr_debug("CAIF: %s(): can't register channel\n", | ||
942 | __func__); | ||
943 | ret = -EIO; | ||
944 | SET_STATE_CLOSED(cf_sk); | ||
945 | SET_PENDING_OFF(cf_sk); | ||
946 | goto open_error; | ||
947 | } | ||
948 | dbfs_atomic_inc(&cnt.num_init); | ||
949 | } | ||
950 | |||
951 | /* If opened non-blocking, report "success". | ||
952 | */ | ||
953 | if (flags & O_NONBLOCK) { | ||
954 | pr_debug("CAIF: %s(): O_NONBLOCK success\n", | ||
955 | __func__); | ||
956 | ret = -EINPROGRESS; | ||
957 | cf_sk->sk.sk_err = -EINPROGRESS; | ||
958 | goto open_error; | ||
959 | } | ||
960 | |||
961 | trace_printk("CAIF: %s(): Wait for connect response\n", | ||
962 | __func__); | ||
963 | |||
964 | /* release lock before waiting */ | ||
965 | release_sock(&cf_sk->sk); | ||
966 | |||
967 | result = | ||
968 | wait_event_interruptible(*cf_sk->sk.sk_sleep, | ||
969 | !STATE_IS_PENDING(cf_sk)); | ||
970 | |||
971 | lock_sock(&(cf_sk->sk)); | ||
972 | |||
973 | if (result == -ERESTARTSYS) { | ||
974 | pr_debug("CAIF: %s(): wait_event_interruptible" | ||
975 | " woken by a signal (2)", __func__); | ||
976 | ret = -ERESTARTSYS; | ||
977 | goto open_error; | ||
978 | } | ||
979 | |||
980 | if (!STATE_IS_OPEN(cf_sk)) { | ||
981 | /* Lower layers said "no" */ | ||
982 | pr_debug("CAIF: %s(): Closed received\n", __func__); | ||
983 | ret = -EPIPE; | ||
984 | goto open_error; | ||
985 | } | ||
986 | |||
987 | trace_printk("CAIF: %s(): Connect received\n", __func__); | ||
988 | } | ||
989 | /* Open is ok */ | ||
990 | cf_sk->file_mode |= mode; | ||
991 | |||
992 | trace_printk("CAIF: %s(): Connected - file mode = %x\n", | ||
993 | __func__, cf_sk->file_mode); | ||
994 | |||
995 | release_sock(&cf_sk->sk); | ||
996 | return 0; | ||
997 | open_error: | ||
998 | sock->state = SS_UNCONNECTED; | ||
999 | release_sock(&cf_sk->sk); | ||
1000 | out: | ||
1001 | return ret; | ||
1002 | } | ||
1003 | |||
1004 | static int caif_shutdown(struct socket *sock, int how) | ||
1005 | { | ||
1006 | struct caifsock *cf_sk = NULL; | ||
1007 | int result = 0; | ||
1008 | int tx_flow_state_was_on; | ||
1009 | struct sock *sk = sock->sk; | ||
1010 | |||
1011 | trace_printk("CAIF: %s(): enter\n", __func__); | ||
1012 | pr_debug("f_flags=%x\n", sock->file->f_flags); | ||
1013 | |||
1014 | if (how != SHUT_RDWR) | ||
1015 | return -EOPNOTSUPP; | ||
1016 | |||
1017 | cf_sk = container_of(sk, struct caifsock, sk); | ||
1018 | if (cf_sk == NULL) { | ||
1019 | pr_debug("CAIF: %s(): COULD NOT FIND SOCKET\n", __func__); | ||
1020 | return -EBADF; | ||
1021 | } | ||
1022 | |||
1023 | /* I want to be alone on cf_sk (except status queue) */ | ||
1024 | lock_sock(&(cf_sk->sk)); | ||
1025 | sock_hold(&cf_sk->sk); | ||
1026 | |||
1027 | /* IS_CLOSED has a double meaning: | ||
1028 | * 1) Spontaneous Remote Shutdown Request. | ||
1029 | * 2) Ack on a channel teardown (disconnect). | ||
1030 | * Must clear the bit in case we previously received | ||
1031 | * a remote shutdown request. | ||
1032 | */ | ||
1033 | if (STATE_IS_OPEN(cf_sk) && !STATE_IS_PENDING(cf_sk)) { | ||
1034 | SET_STATE_CLOSED(cf_sk); | ||
1035 | SET_PENDING_ON(cf_sk); | ||
1036 | tx_flow_state_was_on = TX_FLOW_IS_ON(cf_sk); | ||
1037 | SET_TX_FLOW_OFF(cf_sk); | ||
1038 | |||
1039 | /* Hold the socket until DEINIT_RSP is received */ | ||
1040 | sock_hold(&cf_sk->sk); | ||
1041 | result = caif_disconnect_client(&cf_sk->layer); | ||
1042 | |||
1043 | if (result < 0) { | ||
1044 | pr_debug("CAIF: %s(): " | ||
1045 | "caif_disconnect_client() failed\n", | ||
1046 | __func__); | ||
1047 | SET_STATE_CLOSED(cf_sk); | ||
1048 | SET_PENDING_OFF(cf_sk); | ||
1049 | SET_TX_FLOW_OFF(cf_sk); | ||
1050 | release_sock(&cf_sk->sk); | ||
1051 | sock_put(&cf_sk->sk); | ||
1052 | return -EIO; | ||
1053 | } | ||
1054 | |||
1055 | } | ||
1056 | if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) { | ||
1057 | SET_PENDING_OFF(cf_sk); | ||
1058 | SET_REMOTE_SHUTDOWN_OFF(cf_sk); | ||
1059 | } | ||
1060 | |||
1061 | /* | ||
1062 | * Socket is no longer in state pending close, | ||
1063 | * and we can release the reference. | ||
1064 | */ | ||
1065 | |||
1066 | dbfs_atomic_inc(&cnt.num_close); | ||
1067 | drain_queue(cf_sk); | ||
1068 | SET_RX_FLOW_ON(cf_sk); | ||
1069 | cf_sk->file_mode = 0; | ||
1070 | sock_put(&cf_sk->sk); | ||
1071 | release_sock(&cf_sk->sk); | ||
1072 | if (!result && (sock->file->f_flags & O_NONBLOCK)) { | ||
1073 | pr_debug("nonblocking shutdown returning -EAGAIN\n"); | ||
1074 | return -EAGAIN; | ||
1075 | } else | ||
1076 | return result; | ||
1077 | } | ||
1078 | |||
1079 | static ssize_t caif_sock_no_sendpage(struct socket *sock, | ||
1080 | struct page *page, | ||
1081 | int offset, size_t size, int flags) | ||
1082 | { | ||
1083 | return -EOPNOTSUPP; | ||
1084 | } | ||
1085 | |||
1086 | /* This function is called as part of close. */ | ||
1087 | static int caif_release(struct socket *sock) | ||
1088 | { | ||
1089 | struct sock *sk = sock->sk; | ||
1090 | struct caifsock *cf_sk = NULL; | ||
1091 | int res; | ||
1092 | caif_assert(sk != NULL); | ||
1093 | cf_sk = container_of(sk, struct caifsock, sk); | ||
1094 | |||
1095 | if (cf_sk->debugfs_socket_dir != NULL) | ||
1096 | debugfs_remove_recursive(cf_sk->debugfs_socket_dir); | ||
1097 | |||
1098 | res = caif_shutdown(sock, SHUT_RDWR); | ||
1099 | if (res && res != -EINPROGRESS) | ||
1100 | return res; | ||
1101 | |||
1102 | /* | ||
1103 | * FIXME: Shutdown should probably be possible to do async | ||
1104 | * without flushing queues, allowing reception of frames while | ||
1105 | * waiting for DEINIT_IND. | ||
1106 | * Release should always block, to allow secure decoupling of | ||
1107 | * CAIF stack. | ||
1108 | */ | ||
1109 | if (!(sock->file->f_flags & O_NONBLOCK)) { | ||
1110 | res = wait_event_interruptible(*cf_sk->sk.sk_sleep, | ||
1111 | !STATE_IS_PENDING(cf_sk)); | ||
1112 | |||
1113 | if (res == -ERESTARTSYS) { | ||
1114 | pr_debug("CAIF: %s(): wait_event_interruptible" | ||
1115 | " woken by a signal (1)", __func__); | ||
1116 | } | ||
1117 | } | ||
1118 | lock_sock(&(cf_sk->sk)); | ||
1119 | |||
1120 | sock->sk = NULL; | ||
1121 | |||
1122 | /* Detach the socket from its process context by making it orphan. */ | ||
1123 | sock_orphan(sk); | ||
1124 | |||
1125 | /* | ||
1126 | * Setting SHUTDOWN_MASK means that both send and receive are shutdown | ||
1127 | * for the socket. | ||
1128 | */ | ||
1129 | sk->sk_shutdown = SHUTDOWN_MASK; | ||
1130 | |||
1131 | /* | ||
1132 | * Set the socket state to closed, the TCP_CLOSE macro is used when | ||
1133 | * closing any socket. | ||
1134 | */ | ||
1135 | |||
1136 | /* Flush out this sockets receive queue. */ | ||
1137 | drain_queue(cf_sk); | ||
1138 | |||
1139 | /* Finally release the socket. */ | ||
1140 | SET_STATE_PENDING_DESTROY(cf_sk); | ||
1141 | |||
1142 | release_sock(&cf_sk->sk); | ||
1143 | |||
1144 | sock_put(sk); | ||
1145 | |||
1146 | /* | ||
1147 | * The rest of the cleanup will be handled from the | ||
1148 | * caif_sock_destructor | ||
1149 | */ | ||
1150 | return res; | ||
1151 | } | ||
1152 | |||
1153 | static const struct proto_ops caif_ops = { | ||
1154 | .family = PF_CAIF, | ||
1155 | .owner = THIS_MODULE, | ||
1156 | .release = caif_release, | ||
1157 | .bind = sock_no_bind, | ||
1158 | .connect = caif_connect, | ||
1159 | .socketpair = sock_no_socketpair, | ||
1160 | .accept = sock_no_accept, | ||
1161 | .getname = sock_no_getname, | ||
1162 | .poll = caif_poll, | ||
1163 | .ioctl = sock_no_ioctl, | ||
1164 | .listen = sock_no_listen, | ||
1165 | .shutdown = caif_shutdown, | ||
1166 | .setsockopt = setsockopt, | ||
1167 | .getsockopt = sock_no_getsockopt, | ||
1168 | .sendmsg = caif_sendmsg, | ||
1169 | .recvmsg = caif_recvmsg, | ||
1170 | .mmap = sock_no_mmap, | ||
1171 | .sendpage = caif_sock_no_sendpage, | ||
1172 | }; | ||
1173 | |||
1174 | /* This function is called when a socket is finally destroyed. */ | ||
1175 | static void caif_sock_destructor(struct sock *sk) | ||
1176 | { | ||
1177 | struct caifsock *cf_sk = NULL; | ||
1178 | cf_sk = container_of(sk, struct caifsock, sk); | ||
1179 | /* Error checks. */ | ||
1180 | caif_assert(!atomic_read(&sk->sk_wmem_alloc)); | ||
1181 | caif_assert(sk_unhashed(sk)); | ||
1182 | caif_assert(!sk->sk_socket); | ||
1183 | if (!sock_flag(sk, SOCK_DEAD)) { | ||
1184 | pr_debug("CAIF: %s(): 0x%p", __func__, sk); | ||
1185 | return; | ||
1186 | } | ||
1187 | |||
1188 | if (STATE_IS_OPEN(cf_sk)) { | ||
1189 | pr_debug("CAIF: %s(): socket is opened (cf_sk=%p)" | ||
1190 | " file_mode = 0x%x\n", __func__, | ||
1191 | cf_sk, cf_sk->file_mode); | ||
1192 | return; | ||
1193 | } | ||
1194 | drain_queue(cf_sk); | ||
1195 | kfree(cf_sk->pktq); | ||
1196 | |||
1197 | trace_printk("CAIF: %s(): caif_sock_destructor: Removing socket %s\n", | ||
1198 | __func__, cf_sk->name); | ||
1199 | atomic_dec(&caif_nr_socks); | ||
1200 | } | ||
1201 | |||
1202 | static int caif_create(struct net *net, struct socket *sock, int protocol, | ||
1203 | int kern) | ||
1204 | { | ||
1205 | struct sock *sk = NULL; | ||
1206 | struct caifsock *cf_sk = NULL; | ||
1207 | int result = 0; | ||
1208 | static struct proto prot = {.name = "PF_CAIF", | ||
1209 | .owner = THIS_MODULE, | ||
1210 | .obj_size = sizeof(struct caifsock), | ||
1211 | }; | ||
1212 | |||
1213 | /* | ||
1214 | * The sock->type specifies the socket type to use. | ||
1215 | * In SEQPACKET mode, packet boundaries are enforced. | ||
1216 | */ | ||
1217 | if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM) | ||
1218 | return -ESOCKTNOSUPPORT; | ||
1219 | |||
1220 | if (net != &init_net) | ||
1221 | return -EAFNOSUPPORT; | ||
1222 | |||
1223 | if (protocol < 0 || protocol >= CAIFPROTO_MAX) | ||
1224 | return -EPROTONOSUPPORT; | ||
1225 | /* | ||
1226 | * Set the socket state to unconnected. The socket state is really | ||
1227 | * not used at all in the net/core or socket.c but the | ||
1228 | * initialization makes sure that sock->state is not uninitialized. | ||
1229 | */ | ||
1230 | sock->state = SS_UNCONNECTED; | ||
1231 | |||
1232 | sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot); | ||
1233 | if (!sk) | ||
1234 | return -ENOMEM; | ||
1235 | |||
1236 | cf_sk = container_of(sk, struct caifsock, sk); | ||
1237 | |||
1238 | /* Store the protocol */ | ||
1239 | sk->sk_protocol = (unsigned char) protocol; | ||
1240 | |||
1241 | spin_lock_init(&cf_sk->read_queue_len_lock); | ||
1242 | |||
1243 | /* Fill in some information concerning the misc socket. */ | ||
1244 | snprintf(cf_sk->name, sizeof(cf_sk->name), "cf_sk%d", | ||
1245 | atomic_read(&caif_nr_socks)); | ||
1246 | |||
1247 | /* | ||
1248 | * Lock in order to try to stop someone from opening the socket | ||
1249 | * too early. | ||
1250 | */ | ||
1251 | lock_sock(&(cf_sk->sk)); | ||
1252 | |||
1253 | /* Initialize the nonzero default sock structure data. */ | ||
1254 | sock_init_data(sock, sk); | ||
1255 | sock->ops = &caif_ops; | ||
1256 | sk->sk_destruct = caif_sock_destructor; | ||
1257 | sk->sk_sndbuf = caif_sockbuf_size; | ||
1258 | sk->sk_rcvbuf = caif_sockbuf_size; | ||
1259 | |||
1260 | cf_sk->pktq = cfpktq_create(); | ||
1261 | |||
1262 | if (!cf_sk->pktq) { | ||
1263 | pr_err("CAIF: %s(): queue create failed.\n", __func__); | ||
1264 | result = -ENOMEM; | ||
1265 | release_sock(&cf_sk->sk); | ||
1266 | goto err_failed; | ||
1267 | } | ||
1268 | cf_sk->layer.ctrlcmd = caif_sktflowctrl_cb; | ||
1269 | SET_STATE_CLOSED(cf_sk); | ||
1270 | SET_PENDING_OFF(cf_sk); | ||
1271 | SET_TX_FLOW_OFF(cf_sk); | ||
1272 | SET_RX_FLOW_ON(cf_sk); | ||
1273 | |||
1274 | /* Set default options on configuration */ | ||
1275 | cf_sk->conn_req.priority = CAIF_PRIO_NORMAL; | ||
1276 | cf_sk->conn_req.link_selector = CAIF_LINK_HIGH_BANDW; | ||
1277 | cf_sk->conn_req.protocol = protocol; | ||
1278 | /* Increase the number of sockets created. */ | ||
1279 | atomic_inc(&caif_nr_socks); | ||
1280 | if (!IS_ERR(debugfsdir)) { | ||
1281 | cf_sk->debugfs_socket_dir = | ||
1282 | debugfs_create_dir(cf_sk->name, debugfsdir); | ||
1283 | debugfs_create_u32("conn_state", S_IRUSR | S_IWUSR, | ||
1284 | cf_sk->debugfs_socket_dir, &cf_sk->conn_state); | ||
1285 | debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR, | ||
1286 | cf_sk->debugfs_socket_dir, &cf_sk->flow_state); | ||
1287 | debugfs_create_u32("read_queue_len", S_IRUSR | S_IWUSR, | ||
1288 | cf_sk->debugfs_socket_dir, | ||
1289 | (u32 *) &cf_sk->read_queue_len); | ||
1290 | debugfs_create_u32("identity", S_IRUSR | S_IWUSR, | ||
1291 | cf_sk->debugfs_socket_dir, | ||
1292 | (u32 *) &cf_sk->layer.id); | ||
1293 | } | ||
1294 | release_sock(&cf_sk->sk); | ||
1295 | return 0; | ||
1296 | err_failed: | ||
1297 | sk_free(sk); | ||
1298 | return result; | ||
1299 | } | ||
1300 | |||
1301 | static struct net_proto_family caif_family_ops = { | ||
1302 | .family = PF_CAIF, | ||
1303 | .create = caif_create, | ||
1304 | .owner = THIS_MODULE, | ||
1305 | }; | ||
1306 | |||
1307 | static int af_caif_init(void) | ||
1308 | { | ||
1309 | int err; | ||
1310 | err = sock_register(&caif_family_ops); | ||
1311 | |||
1312 | if (err) | ||
1313 | return err; | ||
1314 | |||
1315 | return 0; | ||
1316 | } | ||
1317 | |||
1318 | static int __init caif_sktinit_module(void) | ||
1319 | { | ||
1320 | int stat; | ||
1321 | #ifdef CONFIG_DEBUG_FS | ||
1322 | debugfsdir = debugfs_create_dir("chnl_skt", NULL); | ||
1323 | if (!IS_ERR(debugfsdir)) { | ||
1324 | debugfs_create_u32("skb_inuse", S_IRUSR | S_IWUSR, | ||
1325 | debugfsdir, | ||
1326 | (u32 *) &cnt.skb_in_use); | ||
1327 | debugfs_create_u32("skb_alloc", S_IRUSR | S_IWUSR, | ||
1328 | debugfsdir, | ||
1329 | (u32 *) &cnt.skb_alloc); | ||
1330 | debugfs_create_u32("skb_free", S_IRUSR | S_IWUSR, | ||
1331 | debugfsdir, | ||
1332 | (u32 *) &cnt.skb_free); | ||
1333 | debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR, | ||
1334 | debugfsdir, | ||
1335 | (u32 *) &caif_nr_socks); | ||
1336 | debugfs_create_u32("num_open", S_IRUSR | S_IWUSR, | ||
1337 | debugfsdir, | ||
1338 | (u32 *) &cnt.num_open); | ||
1339 | debugfs_create_u32("num_close", S_IRUSR | S_IWUSR, | ||
1340 | debugfsdir, | ||
1341 | (u32 *) &cnt.num_close); | ||
1342 | debugfs_create_u32("num_init", S_IRUSR | S_IWUSR, | ||
1343 | debugfsdir, | ||
1344 | (u32 *) &cnt.num_init); | ||
1345 | debugfs_create_u32("num_init_resp", S_IRUSR | S_IWUSR, | ||
1346 | debugfsdir, | ||
1347 | (u32 *) &cnt.num_init_resp); | ||
1348 | debugfs_create_u32("num_init_fail_resp", S_IRUSR | S_IWUSR, | ||
1349 | debugfsdir, | ||
1350 | (u32 *) &cnt.num_init_fail_resp); | ||
1351 | debugfs_create_u32("num_deinit", S_IRUSR | S_IWUSR, | ||
1352 | debugfsdir, | ||
1353 | (u32 *) &cnt.num_deinit); | ||
1354 | debugfs_create_u32("num_deinit_resp", S_IRUSR | S_IWUSR, | ||
1355 | debugfsdir, | ||
1356 | (u32 *) &cnt.num_deinit_resp); | ||
1357 | debugfs_create_u32("num_remote_shutdown_ind", | ||
1358 | S_IRUSR | S_IWUSR, debugfsdir, | ||
1359 | (u32 *) &cnt.num_remote_shutdown_ind); | ||
1360 | debugfs_create_u32("num_tx_flow_off_ind", S_IRUSR | S_IWUSR, | ||
1361 | debugfsdir, | ||
1362 | (u32 *) &cnt.num_tx_flow_off_ind); | ||
1363 | debugfs_create_u32("num_tx_flow_on_ind", S_IRUSR | S_IWUSR, | ||
1364 | debugfsdir, | ||
1365 | (u32 *) &cnt.num_tx_flow_on_ind); | ||
1366 | debugfs_create_u32("num_rx_flow_off", S_IRUSR | S_IWUSR, | ||
1367 | debugfsdir, | ||
1368 | (u32 *) &cnt.num_rx_flow_off); | ||
1369 | debugfs_create_u32("num_rx_flow_on", S_IRUSR | S_IWUSR, | ||
1370 | debugfsdir, | ||
1371 | (u32 *) &cnt.num_rx_flow_on); | ||
1372 | } | ||
1373 | #endif | ||
1374 | stat = af_caif_init(); | ||
1375 | if (stat) { | ||
1376 | pr_err("CAIF: %s(): Failed to initialize CAIF socket layer.", | ||
1377 | __func__); | ||
1378 | return stat; | ||
1379 | } | ||
1380 | return 0; | ||
1381 | } | ||
1382 | |||
1383 | static void __exit caif_sktexit_module(void) | ||
1384 | { | ||
1385 | sock_unregister(PF_CAIF); | ||
1386 | if (debugfsdir != NULL) | ||
1387 | debugfs_remove_recursive(debugfsdir); | ||
1388 | } | ||
1389 | |||
1390 | module_init(caif_sktinit_module); | ||
1391 | module_exit(caif_sktexit_module); | ||
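A companion sketch (again not part of the patch) of the non-blocking connect path: caif_connect() reports EINPROGRESS while link setup is pending, and caif_poll() flags POLLOUT | POLLWRNORM once the TX flow is turned on, so userspace can wait with poll(). The sockaddr_caif layout and CAIF constants are assumed from linux/caif/caif_socket.h as above; the returned descriptor can then be used with write()/read() as in the first sketch.

#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/caif/caif_socket.h>

/* Connect a CAIF socket without blocking, then wait until it is writable. */
static int caif_connect_nonblock(void)
{
	struct sockaddr_caif addr;
	struct pollfd pfd;
	int fd;

	fd = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_AT);
	if (fd < 0)
		return -1;
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);

	memset(&addr, 0, sizeof(addr));
	addr.family = AF_CAIF;
	addr.u.at.type = CAIF_ATTYPE_PLAIN;

	/* caif_connect() returns EINPROGRESS while link setup is pending. */
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 &&
	    errno != EINPROGRESS) {
		close(fd);
		return -1;
	}

	/* caif_poll() sets POLLOUT once the TX flow is on (channel open). */
	pfd.fd = fd;
	pfd.events = POLLOUT;
	if (poll(&pfd, 1, 5000) <= 0 || !(pfd.revents & POLLOUT)) {
		close(fd);
		return -1;
	}
	return fd;
}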