diff options
Diffstat (limited to 'net/iucv')
-rw-r--r-- | net/iucv/Kconfig | 15 | ||||
-rw-r--r-- | net/iucv/Makefile | 6 | ||||
-rw-r--r-- | net/iucv/af_iucv.c | 1077 | ||||
-rw-r--r-- | net/iucv/iucv.c | 1619 |
4 files changed, 2717 insertions, 0 deletions
diff --git a/net/iucv/Kconfig b/net/iucv/Kconfig new file mode 100644 index 000000000000..f8fcc3d10327 --- /dev/null +++ b/net/iucv/Kconfig | |||
@@ -0,0 +1,15 @@ | |||
1 | config IUCV | ||
2 | tristate "IUCV support (VM only)" | ||
3 | depends on S390 | ||
4 | help | ||
5 | Select this option if you want to use inter-user communication under | ||
6 | VM or VIF sockets. If you run on z/VM, say "Y" to enable a fast | ||
7 | communication link between VM guests. | ||
8 | |||
9 | config AFIUCV | ||
10 | tristate "AF_IUCV support (VM only)" | ||
11 | depends on IUCV | ||
12 | help | ||
13 | Select this option if you want to use inter-user communication under | ||
14 | VM or VIF sockets. If you run on z/VM, say "Y" to enable a fast | ||
15 | communication link between VM guests. | ||
diff --git a/net/iucv/Makefile b/net/iucv/Makefile new file mode 100644 index 000000000000..7bfdc8532675 --- /dev/null +++ b/net/iucv/Makefile | |||
@@ -0,0 +1,6 @@ | |||
1 | # | ||
2 | # Makefile for IUCV | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_IUCV) += iucv.o | ||
6 | obj-$(CONFIG_AFIUCV) += af_iucv.o | ||
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c new file mode 100644 index 000000000000..acc94214bde6 --- /dev/null +++ b/net/iucv/af_iucv.c | |||
@@ -0,0 +1,1077 @@ | |||
1 | /* | ||
2 | * linux/net/iucv/af_iucv.c | ||
3 | * | ||
4 | * IUCV protocol stack for Linux on zSeries | ||
5 | * | ||
6 | * Copyright 2006 IBM Corporation | ||
7 | * | ||
8 | * Author(s): Jennifer Hunt <jenhunt@us.ibm.com> | ||
9 | */ | ||
10 | |||
11 | #include <linux/module.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/list.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/skbuff.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/poll.h> | ||
21 | #include <net/sock.h> | ||
22 | #include <asm/ebcdic.h> | ||
23 | #include <asm/cpcmd.h> | ||
24 | #include <linux/kmod.h> | ||
25 | |||
26 | #include <net/iucv/iucv.h> | ||
27 | #include <net/iucv/af_iucv.h> | ||
28 | |||
29 | #define CONFIG_IUCV_SOCK_DEBUG 1 | ||
30 | |||
31 | #define IPRMDATA 0x80 | ||
32 | #define VERSION "1.0" | ||
33 | |||
34 | static char iucv_userid[80]; | ||
35 | |||
36 | static struct proto_ops iucv_sock_ops; | ||
37 | |||
38 | static struct proto iucv_proto = { | ||
39 | .name = "AF_IUCV", | ||
40 | .owner = THIS_MODULE, | ||
41 | .obj_size = sizeof(struct iucv_sock), | ||
42 | }; | ||
43 | |||
44 | /* Call Back functions */ | ||
45 | static void iucv_callback_rx(struct iucv_path *, struct iucv_message *); | ||
46 | static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *); | ||
47 | static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]); | ||
48 | static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]); | ||
49 | static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]); | ||
50 | |||
51 | static struct iucv_sock_list iucv_sk_list = { | ||
52 | .lock = RW_LOCK_UNLOCKED, | ||
53 | .autobind_name = ATOMIC_INIT(0) | ||
54 | }; | ||
55 | |||
56 | static struct iucv_handler af_iucv_handler = { | ||
57 | .path_pending = iucv_callback_connreq, | ||
58 | .path_complete = iucv_callback_connack, | ||
59 | .path_severed = iucv_callback_connrej, | ||
60 | .message_pending = iucv_callback_rx, | ||
61 | .message_complete = iucv_callback_txdone | ||
62 | }; | ||
63 | |||
64 | static inline void high_nmcpy(unsigned char *dst, char *src) | ||
65 | { | ||
66 | memcpy(dst, src, 8); | ||
67 | } | ||
68 | |||
69 | static inline void low_nmcpy(unsigned char *dst, char *src) | ||
70 | { | ||
71 | memcpy(&dst[8], src, 8); | ||
72 | } | ||
73 | |||
74 | /* Timers */ | ||
75 | static void iucv_sock_timeout(unsigned long arg) | ||
76 | { | ||
77 | struct sock *sk = (struct sock *)arg; | ||
78 | |||
79 | bh_lock_sock(sk); | ||
80 | sk->sk_err = ETIMEDOUT; | ||
81 | sk->sk_state_change(sk); | ||
82 | bh_unlock_sock(sk); | ||
83 | |||
84 | iucv_sock_kill(sk); | ||
85 | sock_put(sk); | ||
86 | } | ||
87 | |||
88 | static void iucv_sock_clear_timer(struct sock *sk) | ||
89 | { | ||
90 | sk_stop_timer(sk, &sk->sk_timer); | ||
91 | } | ||
92 | |||
93 | static void iucv_sock_init_timer(struct sock *sk) | ||
94 | { | ||
95 | init_timer(&sk->sk_timer); | ||
96 | sk->sk_timer.function = iucv_sock_timeout; | ||
97 | sk->sk_timer.data = (unsigned long)sk; | ||
98 | } | ||
99 | |||
100 | static struct sock *__iucv_get_sock_by_name(char *nm) | ||
101 | { | ||
102 | struct sock *sk; | ||
103 | struct hlist_node *node; | ||
104 | |||
105 | sk_for_each(sk, node, &iucv_sk_list.head) | ||
106 | if (!memcmp(&iucv_sk(sk)->src_name, nm, 8)) | ||
107 | return sk; | ||
108 | |||
109 | return NULL; | ||
110 | } | ||
111 | |||
112 | static void iucv_sock_destruct(struct sock *sk) | ||
113 | { | ||
114 | skb_queue_purge(&sk->sk_receive_queue); | ||
115 | skb_queue_purge(&sk->sk_write_queue); | ||
116 | } | ||
117 | |||
118 | /* Cleanup Listen */ | ||
119 | static void iucv_sock_cleanup_listen(struct sock *parent) | ||
120 | { | ||
121 | struct sock *sk; | ||
122 | |||
123 | /* Close non-accepted connections */ | ||
124 | while ((sk = iucv_accept_dequeue(parent, NULL))) { | ||
125 | iucv_sock_close(sk); | ||
126 | iucv_sock_kill(sk); | ||
127 | } | ||
128 | |||
129 | parent->sk_state = IUCV_CLOSED; | ||
130 | sock_set_flag(parent, SOCK_ZAPPED); | ||
131 | } | ||
132 | |||
133 | /* Kill socket */ | ||
134 | static void iucv_sock_kill(struct sock *sk) | ||
135 | { | ||
136 | if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) | ||
137 | return; | ||
138 | |||
139 | iucv_sock_unlink(&iucv_sk_list, sk); | ||
140 | sock_set_flag(sk, SOCK_DEAD); | ||
141 | sock_put(sk); | ||
142 | } | ||
143 | |||
144 | /* Close an IUCV socket */ | ||
145 | static void iucv_sock_close(struct sock *sk) | ||
146 | { | ||
147 | unsigned char user_data[16]; | ||
148 | struct iucv_sock *iucv = iucv_sk(sk); | ||
149 | int err; | ||
150 | |||
151 | iucv_sock_clear_timer(sk); | ||
152 | lock_sock(sk); | ||
153 | |||
154 | switch(sk->sk_state) { | ||
155 | case IUCV_LISTEN: | ||
156 | iucv_sock_cleanup_listen(sk); | ||
157 | break; | ||
158 | |||
159 | case IUCV_CONNECTED: | ||
160 | case IUCV_DISCONN: | ||
161 | err = 0; | ||
162 | if (iucv->path) { | ||
163 | low_nmcpy(user_data, iucv->src_name); | ||
164 | high_nmcpy(user_data, iucv->dst_name); | ||
165 | ASCEBC(user_data, sizeof(user_data)); | ||
166 | err = iucv_path_sever(iucv->path, user_data); | ||
167 | iucv_path_free(iucv->path); | ||
168 | iucv->path = NULL; | ||
169 | } | ||
170 | |||
171 | sk->sk_state = IUCV_CLOSED; | ||
172 | sk->sk_state_change(sk); | ||
173 | sk->sk_err = ECONNRESET; | ||
174 | sk->sk_state_change(sk); | ||
175 | |||
176 | skb_queue_purge(&iucv->send_skb_q); | ||
177 | |||
178 | sock_set_flag(sk, SOCK_ZAPPED); | ||
179 | break; | ||
180 | |||
181 | default: | ||
182 | sock_set_flag(sk, SOCK_ZAPPED); | ||
183 | break; | ||
184 | }; | ||
185 | |||
186 | release_sock(sk); | ||
187 | iucv_sock_kill(sk); | ||
188 | } | ||
189 | |||
190 | static void iucv_sock_init(struct sock *sk, struct sock *parent) | ||
191 | { | ||
192 | if (parent) | ||
193 | sk->sk_type = parent->sk_type; | ||
194 | } | ||
195 | |||
196 | static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio) | ||
197 | { | ||
198 | struct sock *sk; | ||
199 | |||
200 | sk = sk_alloc(PF_IUCV, prio, &iucv_proto, 1); | ||
201 | if (!sk) | ||
202 | return NULL; | ||
203 | |||
204 | sock_init_data(sock, sk); | ||
205 | INIT_LIST_HEAD(&iucv_sk(sk)->accept_q); | ||
206 | skb_queue_head_init(&iucv_sk(sk)->send_skb_q); | ||
207 | iucv_sk(sk)->send_tag = 0; | ||
208 | |||
209 | sk->sk_destruct = iucv_sock_destruct; | ||
210 | sk->sk_sndtimeo = IUCV_CONN_TIMEOUT; | ||
211 | sk->sk_allocation = GFP_DMA; | ||
212 | |||
213 | sock_reset_flag(sk, SOCK_ZAPPED); | ||
214 | |||
215 | sk->sk_protocol = proto; | ||
216 | sk->sk_state = IUCV_OPEN; | ||
217 | |||
218 | iucv_sock_init_timer(sk); | ||
219 | |||
220 | iucv_sock_link(&iucv_sk_list, sk); | ||
221 | return sk; | ||
222 | } | ||
223 | |||
224 | /* Create an IUCV socket */ | ||
225 | static int iucv_sock_create(struct socket *sock, int protocol) | ||
226 | { | ||
227 | struct sock *sk; | ||
228 | |||
229 | if (sock->type != SOCK_STREAM) | ||
230 | return -ESOCKTNOSUPPORT; | ||
231 | |||
232 | sock->state = SS_UNCONNECTED; | ||
233 | sock->ops = &iucv_sock_ops; | ||
234 | |||
235 | sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL); | ||
236 | if (!sk) | ||
237 | return -ENOMEM; | ||
238 | |||
239 | iucv_sock_init(sk, NULL); | ||
240 | |||
241 | return 0; | ||
242 | } | ||
243 | |||
244 | void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk) | ||
245 | { | ||
246 | write_lock_bh(&l->lock); | ||
247 | sk_add_node(sk, &l->head); | ||
248 | write_unlock_bh(&l->lock); | ||
249 | } | ||
250 | |||
251 | void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk) | ||
252 | { | ||
253 | write_lock_bh(&l->lock); | ||
254 | sk_del_node_init(sk); | ||
255 | write_unlock_bh(&l->lock); | ||
256 | } | ||
257 | |||
258 | void iucv_accept_enqueue(struct sock *parent, struct sock *sk) | ||
259 | { | ||
260 | sock_hold(sk); | ||
261 | list_add_tail(&iucv_sk(sk)->accept_q, &iucv_sk(parent)->accept_q); | ||
262 | iucv_sk(sk)->parent = parent; | ||
263 | parent->sk_ack_backlog++; | ||
264 | } | ||
265 | |||
266 | void iucv_accept_unlink(struct sock *sk) | ||
267 | { | ||
268 | list_del_init(&iucv_sk(sk)->accept_q); | ||
269 | iucv_sk(sk)->parent->sk_ack_backlog--; | ||
270 | iucv_sk(sk)->parent = NULL; | ||
271 | sock_put(sk); | ||
272 | } | ||
273 | |||
274 | struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock) | ||
275 | { | ||
276 | struct iucv_sock *isk, *n; | ||
277 | struct sock *sk; | ||
278 | |||
279 | list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q){ | ||
280 | sk = (struct sock *) isk; | ||
281 | lock_sock(sk); | ||
282 | |||
283 | if (sk->sk_state == IUCV_CLOSED) { | ||
284 | release_sock(sk); | ||
285 | iucv_accept_unlink(sk); | ||
286 | continue; | ||
287 | } | ||
288 | |||
289 | if (sk->sk_state == IUCV_CONNECTED || | ||
290 | sk->sk_state == IUCV_SEVERED || | ||
291 | !newsock) { | ||
292 | iucv_accept_unlink(sk); | ||
293 | if (newsock) | ||
294 | sock_graft(sk, newsock); | ||
295 | |||
296 | if (sk->sk_state == IUCV_SEVERED) | ||
297 | sk->sk_state = IUCV_DISCONN; | ||
298 | |||
299 | release_sock(sk); | ||
300 | return sk; | ||
301 | } | ||
302 | |||
303 | release_sock(sk); | ||
304 | } | ||
305 | return NULL; | ||
306 | } | ||
307 | |||
308 | int iucv_sock_wait_state(struct sock *sk, int state, int state2, | ||
309 | unsigned long timeo) | ||
310 | { | ||
311 | DECLARE_WAITQUEUE(wait, current); | ||
312 | int err = 0; | ||
313 | |||
314 | add_wait_queue(sk->sk_sleep, &wait); | ||
315 | while (sk->sk_state != state && sk->sk_state != state2) { | ||
316 | set_current_state(TASK_INTERRUPTIBLE); | ||
317 | |||
318 | if (!timeo) { | ||
319 | err = -EAGAIN; | ||
320 | break; | ||
321 | } | ||
322 | |||
323 | if (signal_pending(current)) { | ||
324 | err = sock_intr_errno(timeo); | ||
325 | break; | ||
326 | } | ||
327 | |||
328 | release_sock(sk); | ||
329 | timeo = schedule_timeout(timeo); | ||
330 | lock_sock(sk); | ||
331 | |||
332 | err = sock_error(sk); | ||
333 | if (err) | ||
334 | break; | ||
335 | } | ||
336 | set_current_state(TASK_RUNNING); | ||
337 | remove_wait_queue(sk->sk_sleep, &wait); | ||
338 | return err; | ||
339 | } | ||
340 | |||
341 | /* Bind an unbound socket */ | ||
342 | static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr, | ||
343 | int addr_len) | ||
344 | { | ||
345 | struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; | ||
346 | struct sock *sk = sock->sk; | ||
347 | struct iucv_sock *iucv; | ||
348 | int err; | ||
349 | |||
350 | /* Verify the input sockaddr */ | ||
351 | if (!addr || addr->sa_family != AF_IUCV) | ||
352 | return -EINVAL; | ||
353 | |||
354 | lock_sock(sk); | ||
355 | if (sk->sk_state != IUCV_OPEN) { | ||
356 | err = -EBADFD; | ||
357 | goto done; | ||
358 | } | ||
359 | |||
360 | write_lock_bh(&iucv_sk_list.lock); | ||
361 | |||
362 | iucv = iucv_sk(sk); | ||
363 | if (__iucv_get_sock_by_name(sa->siucv_name)) { | ||
364 | err = -EADDRINUSE; | ||
365 | goto done_unlock; | ||
366 | } | ||
367 | if (iucv->path) { | ||
368 | err = 0; | ||
369 | goto done_unlock; | ||
370 | } | ||
371 | |||
372 | /* Bind the socket */ | ||
373 | memcpy(iucv->src_name, sa->siucv_name, 8); | ||
374 | |||
375 | /* Copy the user id */ | ||
376 | memcpy(iucv->src_user_id, iucv_userid, 8); | ||
377 | sk->sk_state = IUCV_BOUND; | ||
378 | err = 0; | ||
379 | |||
380 | done_unlock: | ||
381 | /* Release the socket list lock */ | ||
382 | write_unlock_bh(&iucv_sk_list.lock); | ||
383 | done: | ||
384 | release_sock(sk); | ||
385 | return err; | ||
386 | } | ||
387 | |||
388 | /* Automatically bind an unbound socket */ | ||
389 | static int iucv_sock_autobind(struct sock *sk) | ||
390 | { | ||
391 | struct iucv_sock *iucv = iucv_sk(sk); | ||
392 | char query_buffer[80]; | ||
393 | char name[12]; | ||
394 | int err = 0; | ||
395 | |||
396 | /* Set the userid and name */ | ||
397 | cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err); | ||
398 | if (unlikely(err)) | ||
399 | return -EPROTO; | ||
400 | |||
401 | memcpy(iucv->src_user_id, query_buffer, 8); | ||
402 | |||
403 | write_lock_bh(&iucv_sk_list.lock); | ||
404 | |||
405 | sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name)); | ||
406 | while (__iucv_get_sock_by_name(name)) { | ||
407 | sprintf(name, "%08x", | ||
408 | atomic_inc_return(&iucv_sk_list.autobind_name)); | ||
409 | } | ||
410 | |||
411 | write_unlock_bh(&iucv_sk_list.lock); | ||
412 | |||
413 | memcpy(&iucv->src_name, name, 8); | ||
414 | |||
415 | return err; | ||
416 | } | ||
417 | |||
418 | /* Connect an unconnected socket */ | ||
419 | static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr, | ||
420 | int alen, int flags) | ||
421 | { | ||
422 | struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; | ||
423 | struct sock *sk = sock->sk; | ||
424 | struct iucv_sock *iucv; | ||
425 | unsigned char user_data[16]; | ||
426 | int err; | ||
427 | |||
428 | if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv)) | ||
429 | return -EINVAL; | ||
430 | |||
431 | if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND) | ||
432 | return -EBADFD; | ||
433 | |||
434 | if (sk->sk_type != SOCK_STREAM) | ||
435 | return -EINVAL; | ||
436 | |||
437 | iucv = iucv_sk(sk); | ||
438 | |||
439 | if (sk->sk_state == IUCV_OPEN) { | ||
440 | err = iucv_sock_autobind(sk); | ||
441 | if (unlikely(err)) | ||
442 | return err; | ||
443 | } | ||
444 | |||
445 | lock_sock(sk); | ||
446 | |||
447 | /* Set the destination information */ | ||
448 | memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8); | ||
449 | memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8); | ||
450 | |||
451 | high_nmcpy(user_data, sa->siucv_name); | ||
452 | low_nmcpy(user_data, iucv_sk(sk)->src_name); | ||
453 | ASCEBC(user_data, sizeof(user_data)); | ||
454 | |||
455 | iucv = iucv_sk(sk); | ||
456 | /* Create path. */ | ||
457 | iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT, | ||
458 | IPRMDATA, GFP_KERNEL); | ||
459 | err = iucv_path_connect(iucv->path, &af_iucv_handler, | ||
460 | sa->siucv_user_id, NULL, user_data, sk); | ||
461 | if (err) { | ||
462 | iucv_path_free(iucv->path); | ||
463 | iucv->path = NULL; | ||
464 | err = -ECONNREFUSED; | ||
465 | goto done; | ||
466 | } | ||
467 | |||
468 | if (sk->sk_state != IUCV_CONNECTED) { | ||
469 | err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN, | ||
470 | sock_sndtimeo(sk, flags & O_NONBLOCK)); | ||
471 | } | ||
472 | |||
473 | if (sk->sk_state == IUCV_DISCONN) { | ||
474 | release_sock(sk); | ||
475 | return -ECONNREFUSED; | ||
476 | } | ||
477 | done: | ||
478 | release_sock(sk); | ||
479 | return err; | ||
480 | } | ||
481 | |||
482 | /* Move a socket into listening state. */ | ||
483 | static int iucv_sock_listen(struct socket *sock, int backlog) | ||
484 | { | ||
485 | struct sock *sk = sock->sk; | ||
486 | int err; | ||
487 | |||
488 | lock_sock(sk); | ||
489 | |||
490 | err = -EINVAL; | ||
491 | if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM) | ||
492 | goto done; | ||
493 | |||
494 | sk->sk_max_ack_backlog = backlog; | ||
495 | sk->sk_ack_backlog = 0; | ||
496 | sk->sk_state = IUCV_LISTEN; | ||
497 | err = 0; | ||
498 | |||
499 | done: | ||
500 | release_sock(sk); | ||
501 | return err; | ||
502 | } | ||
503 | |||
504 | /* Accept a pending connection */ | ||
505 | static int iucv_sock_accept(struct socket *sock, struct socket *newsock, | ||
506 | int flags) | ||
507 | { | ||
508 | DECLARE_WAITQUEUE(wait, current); | ||
509 | struct sock *sk = sock->sk, *nsk; | ||
510 | long timeo; | ||
511 | int err = 0; | ||
512 | |||
513 | lock_sock(sk); | ||
514 | |||
515 | if (sk->sk_state != IUCV_LISTEN) { | ||
516 | err = -EBADFD; | ||
517 | goto done; | ||
518 | } | ||
519 | |||
520 | timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); | ||
521 | |||
522 | /* Wait for an incoming connection */ | ||
523 | add_wait_queue_exclusive(sk->sk_sleep, &wait); | ||
524 | while (!(nsk = iucv_accept_dequeue(sk, newsock))){ | ||
525 | set_current_state(TASK_INTERRUPTIBLE); | ||
526 | if (!timeo) { | ||
527 | err = -EAGAIN; | ||
528 | break; | ||
529 | } | ||
530 | |||
531 | release_sock(sk); | ||
532 | timeo = schedule_timeout(timeo); | ||
533 | lock_sock(sk); | ||
534 | |||
535 | if (sk->sk_state != IUCV_LISTEN) { | ||
536 | err = -EBADFD; | ||
537 | break; | ||
538 | } | ||
539 | |||
540 | if (signal_pending(current)) { | ||
541 | err = sock_intr_errno(timeo); | ||
542 | break; | ||
543 | } | ||
544 | } | ||
545 | |||
546 | set_current_state(TASK_RUNNING); | ||
547 | remove_wait_queue(sk->sk_sleep, &wait); | ||
548 | |||
549 | if (err) | ||
550 | goto done; | ||
551 | |||
552 | newsock->state = SS_CONNECTED; | ||
553 | |||
554 | done: | ||
555 | release_sock(sk); | ||
556 | return err; | ||
557 | } | ||
558 | |||
559 | static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr, | ||
560 | int *len, int peer) | ||
561 | { | ||
562 | struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr; | ||
563 | struct sock *sk = sock->sk; | ||
564 | |||
565 | addr->sa_family = AF_IUCV; | ||
566 | *len = sizeof(struct sockaddr_iucv); | ||
567 | |||
568 | if (peer) { | ||
569 | memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8); | ||
570 | memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8); | ||
571 | } else { | ||
572 | memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8); | ||
573 | memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8); | ||
574 | } | ||
575 | memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port)); | ||
576 | memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr)); | ||
577 | memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid)); | ||
578 | |||
579 | return 0; | ||
580 | } | ||
581 | |||
582 | static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | ||
583 | struct msghdr *msg, size_t len) | ||
584 | { | ||
585 | struct sock *sk = sock->sk; | ||
586 | struct iucv_sock *iucv = iucv_sk(sk); | ||
587 | struct sk_buff *skb; | ||
588 | struct iucv_message txmsg; | ||
589 | int err; | ||
590 | |||
591 | err = sock_error(sk); | ||
592 | if (err) | ||
593 | return err; | ||
594 | |||
595 | if (msg->msg_flags & MSG_OOB) | ||
596 | return -EOPNOTSUPP; | ||
597 | |||
598 | lock_sock(sk); | ||
599 | |||
600 | if (sk->sk_shutdown & SEND_SHUTDOWN) { | ||
601 | err = -EPIPE; | ||
602 | goto out; | ||
603 | } | ||
604 | |||
605 | if (sk->sk_state == IUCV_CONNECTED){ | ||
606 | if(!(skb = sock_alloc_send_skb(sk, len, | ||
607 | msg->msg_flags & MSG_DONTWAIT, | ||
608 | &err))) | ||
609 | return err; | ||
610 | |||
611 | if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)){ | ||
612 | err = -EFAULT; | ||
613 | goto fail; | ||
614 | } | ||
615 | |||
616 | txmsg.class = 0; | ||
617 | txmsg.tag = iucv->send_tag++; | ||
618 | memcpy(skb->cb, &txmsg.tag, 4); | ||
619 | skb_queue_tail(&iucv->send_skb_q, skb); | ||
620 | err = iucv_message_send(iucv->path, &txmsg, 0, 0, | ||
621 | (void *) skb->data, skb->len); | ||
622 | if (err) { | ||
623 | if (err == 3) | ||
624 | printk(KERN_ERR "AF_IUCV msg limit exceeded\n"); | ||
625 | skb_unlink(skb, &iucv->send_skb_q); | ||
626 | err = -EPIPE; | ||
627 | goto fail; | ||
628 | } | ||
629 | |||
630 | } else { | ||
631 | err = -ENOTCONN; | ||
632 | goto out; | ||
633 | } | ||
634 | |||
635 | release_sock(sk); | ||
636 | return len; | ||
637 | |||
638 | fail: | ||
639 | kfree_skb(skb); | ||
640 | out: | ||
641 | release_sock(sk); | ||
642 | return err; | ||
643 | } | ||
644 | |||
645 | static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | ||
646 | struct msghdr *msg, size_t len, int flags) | ||
647 | { | ||
648 | int noblock = flags & MSG_DONTWAIT; | ||
649 | struct sock *sk = sock->sk; | ||
650 | int target, copied = 0; | ||
651 | struct sk_buff *skb; | ||
652 | int err = 0; | ||
653 | |||
654 | if (flags & (MSG_OOB)) | ||
655 | return -EOPNOTSUPP; | ||
656 | |||
657 | target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); | ||
658 | |||
659 | skb = skb_recv_datagram(sk, flags, noblock, &err); | ||
660 | if (!skb) { | ||
661 | if (sk->sk_shutdown & RCV_SHUTDOWN) | ||
662 | return 0; | ||
663 | return err; | ||
664 | } | ||
665 | |||
666 | copied = min_t(unsigned int, skb->len, len); | ||
667 | |||
668 | if (memcpy_toiovec(msg->msg_iov, skb->data, copied)) { | ||
669 | skb_queue_head(&sk->sk_receive_queue, skb); | ||
670 | if (copied == 0) | ||
671 | return -EFAULT; | ||
672 | } | ||
673 | |||
674 | len -= copied; | ||
675 | |||
676 | /* Mark read part of skb as used */ | ||
677 | if (!(flags & MSG_PEEK)) { | ||
678 | skb_pull(skb, copied); | ||
679 | |||
680 | if (skb->len) { | ||
681 | skb_queue_head(&sk->sk_receive_queue, skb); | ||
682 | goto done; | ||
683 | } | ||
684 | |||
685 | kfree_skb(skb); | ||
686 | } else | ||
687 | skb_queue_head(&sk->sk_receive_queue, skb); | ||
688 | |||
689 | done: | ||
690 | return err ? : copied; | ||
691 | } | ||
692 | |||
693 | static inline unsigned int iucv_accept_poll(struct sock *parent) | ||
694 | { | ||
695 | struct iucv_sock *isk, *n; | ||
696 | struct sock *sk; | ||
697 | |||
698 | list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q){ | ||
699 | sk = (struct sock *) isk; | ||
700 | |||
701 | if (sk->sk_state == IUCV_CONNECTED) | ||
702 | return POLLIN | POLLRDNORM; | ||
703 | } | ||
704 | |||
705 | return 0; | ||
706 | } | ||
707 | |||
708 | unsigned int iucv_sock_poll(struct file *file, struct socket *sock, | ||
709 | poll_table *wait) | ||
710 | { | ||
711 | struct sock *sk = sock->sk; | ||
712 | unsigned int mask = 0; | ||
713 | |||
714 | poll_wait(file, sk->sk_sleep, wait); | ||
715 | |||
716 | if (sk->sk_state == IUCV_LISTEN) | ||
717 | return iucv_accept_poll(sk); | ||
718 | |||
719 | if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) | ||
720 | mask |= POLLERR; | ||
721 | |||
722 | if (sk->sk_shutdown & RCV_SHUTDOWN) | ||
723 | mask |= POLLRDHUP; | ||
724 | |||
725 | if (sk->sk_shutdown == SHUTDOWN_MASK) | ||
726 | mask |= POLLHUP; | ||
727 | |||
728 | if (!skb_queue_empty(&sk->sk_receive_queue) || | ||
729 | (sk->sk_shutdown & RCV_SHUTDOWN)) | ||
730 | mask |= POLLIN | POLLRDNORM; | ||
731 | |||
732 | if (sk->sk_state == IUCV_CLOSED) | ||
733 | mask |= POLLHUP; | ||
734 | |||
735 | if (sock_writeable(sk)) | ||
736 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; | ||
737 | else | ||
738 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | ||
739 | |||
740 | return mask; | ||
741 | } | ||
742 | |||
743 | static int iucv_sock_shutdown(struct socket *sock, int how) | ||
744 | { | ||
745 | struct sock *sk = sock->sk; | ||
746 | struct iucv_sock *iucv = iucv_sk(sk); | ||
747 | struct iucv_message txmsg; | ||
748 | int err = 0; | ||
749 | u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}; | ||
750 | |||
751 | how++; | ||
752 | |||
753 | if ((how & ~SHUTDOWN_MASK) || !how) | ||
754 | return -EINVAL; | ||
755 | |||
756 | lock_sock(sk); | ||
757 | switch(sk->sk_state) { | ||
758 | case IUCV_CLOSED: | ||
759 | err = -ENOTCONN; | ||
760 | goto fail; | ||
761 | |||
762 | default: | ||
763 | sk->sk_shutdown |= how; | ||
764 | break; | ||
765 | } | ||
766 | |||
767 | if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) { | ||
768 | txmsg.class = 0; | ||
769 | txmsg.tag = 0; | ||
770 | err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0, | ||
771 | (void *) prmmsg, 8); | ||
772 | if (err) { | ||
773 | switch(err) { | ||
774 | case 1: | ||
775 | err = -ENOTCONN; | ||
776 | break; | ||
777 | case 2: | ||
778 | err = -ECONNRESET; | ||
779 | break; | ||
780 | default: | ||
781 | err = -ENOTCONN; | ||
782 | break; | ||
783 | } | ||
784 | } | ||
785 | } | ||
786 | |||
787 | if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) { | ||
788 | err = iucv_path_quiesce(iucv_sk(sk)->path, NULL); | ||
789 | if (err) | ||
790 | err = -ENOTCONN; | ||
791 | |||
792 | skb_queue_purge(&sk->sk_receive_queue); | ||
793 | } | ||
794 | |||
795 | /* Wake up anyone sleeping in poll */ | ||
796 | sk->sk_state_change(sk); | ||
797 | |||
798 | fail: | ||
799 | release_sock(sk); | ||
800 | return err; | ||
801 | } | ||
802 | |||
803 | static int iucv_sock_release(struct socket *sock) | ||
804 | { | ||
805 | struct sock *sk = sock->sk; | ||
806 | int err = 0; | ||
807 | |||
808 | if (!sk) | ||
809 | return 0; | ||
810 | |||
811 | iucv_sock_close(sk); | ||
812 | |||
813 | /* Unregister with IUCV base support */ | ||
814 | if (iucv_sk(sk)->path) { | ||
815 | iucv_path_sever(iucv_sk(sk)->path, NULL); | ||
816 | iucv_path_free(iucv_sk(sk)->path); | ||
817 | iucv_sk(sk)->path = NULL; | ||
818 | } | ||
819 | |||
820 | if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime){ | ||
821 | lock_sock(sk); | ||
822 | err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, | ||
823 | sk->sk_lingertime); | ||
824 | release_sock(sk); | ||
825 | } | ||
826 | |||
827 | sock_orphan(sk); | ||
828 | iucv_sock_kill(sk); | ||
829 | return err; | ||
830 | } | ||
831 | |||
832 | /* Callback wrappers - called from iucv base support */ | ||
833 | static int iucv_callback_connreq(struct iucv_path *path, | ||
834 | u8 ipvmid[8], u8 ipuser[16]) | ||
835 | { | ||
836 | unsigned char user_data[16]; | ||
837 | unsigned char nuser_data[16]; | ||
838 | unsigned char src_name[8]; | ||
839 | struct hlist_node *node; | ||
840 | struct sock *sk, *nsk; | ||
841 | struct iucv_sock *iucv, *niucv; | ||
842 | int err; | ||
843 | |||
844 | memcpy(src_name, ipuser, 8); | ||
845 | EBCASC(src_name, 8); | ||
846 | /* Find out if this path belongs to af_iucv. */ | ||
847 | read_lock(&iucv_sk_list.lock); | ||
848 | iucv = NULL; | ||
849 | sk_for_each(sk, node, &iucv_sk_list.head) | ||
850 | if (sk->sk_state == IUCV_LISTEN && | ||
851 | !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) { | ||
852 | /* | ||
853 | * Found a listening socket with | ||
854 | * src_name == ipuser[0-7]. | ||
855 | */ | ||
856 | iucv = iucv_sk(sk); | ||
857 | break; | ||
858 | } | ||
859 | read_unlock(&iucv_sk_list.lock); | ||
860 | if (!iucv) | ||
861 | /* No socket found, not one of our paths. */ | ||
862 | return -EINVAL; | ||
863 | |||
864 | bh_lock_sock(sk); | ||
865 | |||
866 | /* Check if parent socket is listening */ | ||
867 | low_nmcpy(user_data, iucv->src_name); | ||
868 | high_nmcpy(user_data, iucv->dst_name); | ||
869 | ASCEBC(user_data, sizeof(user_data)); | ||
870 | if (sk->sk_state != IUCV_LISTEN) { | ||
871 | err = iucv_path_sever(path, user_data); | ||
872 | goto fail; | ||
873 | } | ||
874 | |||
875 | /* Check for backlog size */ | ||
876 | if (sk_acceptq_is_full(sk)) { | ||
877 | err = iucv_path_sever(path, user_data); | ||
878 | goto fail; | ||
879 | } | ||
880 | |||
881 | /* Create the new socket */ | ||
882 | nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC); | ||
883 | if (!nsk){ | ||
884 | err = iucv_path_sever(path, user_data); | ||
885 | goto fail; | ||
886 | } | ||
887 | |||
888 | niucv = iucv_sk(nsk); | ||
889 | iucv_sock_init(nsk, sk); | ||
890 | |||
891 | /* Set the new iucv_sock */ | ||
892 | memcpy(niucv->dst_name, ipuser + 8, 8); | ||
893 | EBCASC(niucv->dst_name, 8); | ||
894 | memcpy(niucv->dst_user_id, ipvmid, 8); | ||
895 | memcpy(niucv->src_name, iucv->src_name, 8); | ||
896 | memcpy(niucv->src_user_id, iucv->src_user_id, 8); | ||
897 | niucv->path = path; | ||
898 | |||
899 | /* Call iucv_accept */ | ||
900 | high_nmcpy(nuser_data, ipuser + 8); | ||
901 | memcpy(nuser_data + 8, niucv->src_name, 8); | ||
902 | ASCEBC(nuser_data + 8, 8); | ||
903 | |||
904 | path->msglim = IUCV_QUEUELEN_DEFAULT; | ||
905 | err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk); | ||
906 | if (err){ | ||
907 | err = iucv_path_sever(path, user_data); | ||
908 | goto fail; | ||
909 | } | ||
910 | |||
911 | iucv_accept_enqueue(sk, nsk); | ||
912 | |||
913 | /* Wake up accept */ | ||
914 | nsk->sk_state = IUCV_CONNECTED; | ||
915 | sk->sk_data_ready(sk, 1); | ||
916 | err = 0; | ||
917 | fail: | ||
918 | bh_unlock_sock(sk); | ||
919 | return 0; | ||
920 | } | ||
921 | |||
922 | static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16]) | ||
923 | { | ||
924 | struct sock *sk = path->private; | ||
925 | |||
926 | sk->sk_state = IUCV_CONNECTED; | ||
927 | sk->sk_state_change(sk); | ||
928 | } | ||
929 | |||
930 | static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) | ||
931 | { | ||
932 | struct sock *sk = path->private; | ||
933 | struct sk_buff *skb; | ||
934 | int rc; | ||
935 | |||
936 | if (sk->sk_shutdown & RCV_SHUTDOWN) | ||
937 | return; | ||
938 | |||
939 | skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA); | ||
940 | if (!skb) { | ||
941 | iucv_message_reject(path, msg); | ||
942 | return; | ||
943 | } | ||
944 | |||
945 | if (msg->flags & IPRMDATA) { | ||
946 | skb->data = NULL; | ||
947 | skb->len = 0; | ||
948 | } else { | ||
949 | rc = iucv_message_receive(path, msg, 0, skb->data, | ||
950 | msg->length, NULL); | ||
951 | if (rc) { | ||
952 | kfree_skb(skb); | ||
953 | return; | ||
954 | } | ||
955 | |||
956 | skb->h.raw = skb->data; | ||
957 | skb->nh.raw = skb->data; | ||
958 | skb->len = msg->length; | ||
959 | } | ||
960 | |||
961 | if (sock_queue_rcv_skb(sk, skb)) | ||
962 | kfree_skb(skb); | ||
963 | } | ||
964 | |||
965 | static void iucv_callback_txdone(struct iucv_path *path, | ||
966 | struct iucv_message *msg) | ||
967 | { | ||
968 | struct sock *sk = path->private; | ||
969 | struct sk_buff *this; | ||
970 | struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q; | ||
971 | struct sk_buff *list_skb = list->next; | ||
972 | unsigned long flags; | ||
973 | |||
974 | spin_lock_irqsave(&list->lock, flags); | ||
975 | |||
976 | do { | ||
977 | this = list_skb; | ||
978 | list_skb = list_skb->next; | ||
979 | } while (memcmp(&msg->tag, this->cb, 4)); | ||
980 | |||
981 | spin_unlock_irqrestore(&list->lock, flags); | ||
982 | |||
983 | skb_unlink(this, &iucv_sk(sk)->send_skb_q); | ||
984 | kfree_skb(this); | ||
985 | } | ||
986 | |||
987 | static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16]) | ||
988 | { | ||
989 | struct sock *sk = path->private; | ||
990 | |||
991 | if (!list_empty(&iucv_sk(sk)->accept_q)) | ||
992 | sk->sk_state = IUCV_SEVERED; | ||
993 | else | ||
994 | sk->sk_state = IUCV_DISCONN; | ||
995 | |||
996 | sk->sk_state_change(sk); | ||
997 | } | ||
998 | |||
999 | static struct proto_ops iucv_sock_ops = { | ||
1000 | .family = PF_IUCV, | ||
1001 | .owner = THIS_MODULE, | ||
1002 | .release = iucv_sock_release, | ||
1003 | .bind = iucv_sock_bind, | ||
1004 | .connect = iucv_sock_connect, | ||
1005 | .listen = iucv_sock_listen, | ||
1006 | .accept = iucv_sock_accept, | ||
1007 | .getname = iucv_sock_getname, | ||
1008 | .sendmsg = iucv_sock_sendmsg, | ||
1009 | .recvmsg = iucv_sock_recvmsg, | ||
1010 | .poll = iucv_sock_poll, | ||
1011 | .ioctl = sock_no_ioctl, | ||
1012 | .mmap = sock_no_mmap, | ||
1013 | .socketpair = sock_no_socketpair, | ||
1014 | .shutdown = iucv_sock_shutdown, | ||
1015 | .setsockopt = sock_no_setsockopt, | ||
1016 | .getsockopt = sock_no_getsockopt | ||
1017 | }; | ||
1018 | |||
1019 | static struct net_proto_family iucv_sock_family_ops = { | ||
1020 | .family = AF_IUCV, | ||
1021 | .owner = THIS_MODULE, | ||
1022 | .create = iucv_sock_create, | ||
1023 | }; | ||
1024 | |||
1025 | static int afiucv_init(void) | ||
1026 | { | ||
1027 | int err; | ||
1028 | |||
1029 | if (!MACHINE_IS_VM) { | ||
1030 | printk(KERN_ERR "AF_IUCV connection needs VM as base\n"); | ||
1031 | err = -EPROTONOSUPPORT; | ||
1032 | goto out; | ||
1033 | } | ||
1034 | cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err); | ||
1035 | if (unlikely(err)) { | ||
1036 | printk(KERN_ERR "AF_IUCV needs the VM userid\n"); | ||
1037 | err = -EPROTONOSUPPORT; | ||
1038 | goto out; | ||
1039 | } | ||
1040 | |||
1041 | err = iucv_register(&af_iucv_handler, 0); | ||
1042 | if (err) | ||
1043 | goto out; | ||
1044 | err = proto_register(&iucv_proto, 0); | ||
1045 | if (err) | ||
1046 | goto out_iucv; | ||
1047 | err = sock_register(&iucv_sock_family_ops); | ||
1048 | if (err) | ||
1049 | goto out_proto; | ||
1050 | printk(KERN_INFO "AF_IUCV lowlevel driver initialized\n"); | ||
1051 | return 0; | ||
1052 | |||
1053 | out_proto: | ||
1054 | proto_unregister(&iucv_proto); | ||
1055 | out_iucv: | ||
1056 | iucv_unregister(&af_iucv_handler, 0); | ||
1057 | out: | ||
1058 | return err; | ||
1059 | } | ||
1060 | |||
1061 | static void __exit afiucv_exit(void) | ||
1062 | { | ||
1063 | sock_unregister(PF_IUCV); | ||
1064 | proto_unregister(&iucv_proto); | ||
1065 | iucv_unregister(&af_iucv_handler, 0); | ||
1066 | |||
1067 | printk(KERN_INFO "AF_IUCV lowlevel driver unloaded\n"); | ||
1068 | } | ||
1069 | |||
1070 | module_init(afiucv_init); | ||
1071 | module_exit(afiucv_exit); | ||
1072 | |||
1073 | MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>"); | ||
1074 | MODULE_DESCRIPTION("IUCV Sockets ver " VERSION); | ||
1075 | MODULE_VERSION(VERSION); | ||
1076 | MODULE_LICENSE("GPL"); | ||
1077 | MODULE_ALIAS_NETPROTO(PF_IUCV); | ||
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c new file mode 100644 index 000000000000..1b10d576f222 --- /dev/null +++ b/net/iucv/iucv.c | |||
@@ -0,0 +1,1619 @@ | |||
1 | /* | ||
2 | * IUCV base infrastructure. | ||
3 | * | ||
4 | * Copyright 2001, 2006 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
5 | * Author(s): | ||
6 | * Original source: | ||
7 | * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000 | ||
8 | * Xenia Tkatschow (xenia@us.ibm.com) | ||
9 | * 2Gb awareness and general cleanup: | ||
10 | * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) | ||
11 | * Rewritten for af_iucv: | ||
12 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
13 | * | ||
14 | * Documentation used: | ||
15 | * The original source | ||
16 | * CP Programming Service, IBM document # SC24-5760 | ||
17 | * | ||
18 | * This program is free software; you can redistribute it and/or modify | ||
19 | * it under the terms of the GNU General Public License as published by | ||
20 | * the Free Software Foundation; either version 2, or (at your option) | ||
21 | * any later version. | ||
22 | * | ||
23 | * This program is distributed in the hope that it will be useful, | ||
24 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
25 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
26 | * GNU General Public License for more details. | ||
27 | * | ||
28 | * You should have received a copy of the GNU General Public License | ||
29 | * along with this program; if not, write to the Free Software | ||
30 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
31 | */ | ||
32 | |||
33 | #include <linux/module.h> | ||
34 | #include <linux/moduleparam.h> | ||
35 | |||
36 | #include <linux/spinlock.h> | ||
37 | #include <linux/kernel.h> | ||
38 | #include <linux/slab.h> | ||
39 | #include <linux/init.h> | ||
40 | #include <linux/interrupt.h> | ||
41 | #include <linux/list.h> | ||
42 | #include <linux/errno.h> | ||
43 | #include <linux/err.h> | ||
44 | #include <linux/device.h> | ||
45 | #include <linux/cpu.h> | ||
46 | #include <net/iucv/iucv.h> | ||
47 | #include <asm/atomic.h> | ||
48 | #include <asm/ebcdic.h> | ||
49 | #include <asm/io.h> | ||
50 | #include <asm/s390_ext.h> | ||
51 | #include <asm/s390_rdev.h> | ||
52 | #include <asm/smp.h> | ||
53 | |||
/*
 * FLAGS:
 * All flags are defined in the field IPFLAGS1 of each function
 * and can be found in CP Programming Services.
 * IPSRCCLS - Indicates you have specified a source class.
 * IPTRGCLS - Indicates you have specified a target class.
 * IPFGPID  - Indicates you have specified a pathid.
 * IPFGMID  - Indicates you have specified a message ID.
 * IPNORPY  - Indicates a one-way message. No reply expected.
 * IPALL    - Indicates that all paths are affected.
 *
 * NOTE(review): IPSRCCLS and IPTRGCLS share the value 0x01; they
 * appear to be used with different IUCV functions, so the overlap
 * looks intentional -- confirm against CP Programming Services.
 */
#define IUCV_IPSRCCLS	0x01
#define IUCV_IPTRGCLS	0x01
#define IUCV_IPFGPID	0x02
#define IUCV_IPFGMID	0x04
#define IUCV_IPNORPY	0x10
#define IUCV_IPALL	0x80
71 | |||
/*
 * The iucv bus carries no probeable devices: no driver ever matches,
 * so the match callback unconditionally reports "no match".
 */
static int iucv_bus_match(struct device *dev, struct device_driver *drv)
{
	return 0;
}
76 | |||
/* Bus that IUCV devices hang off; match always fails (see iucv_bus_match). */
struct bus_type iucv_bus = {
	.name = "iucv",
	.match = iucv_bus_match,
};

/* Root device acting as parent for all devices on the iucv bus. */
struct device *iucv_root;
/* Non-zero once IUCV is usable -- presumably set during base init,
 * which is outside this chunk; confirm against the init code. */
static int iucv_available;
84 | |||
/* General IUCV interrupt structure */
struct iucv_irq_data {
	u16 ippathid;	/* path the interrupt belongs to */
	u8  ipflags1;	/* interrupt flags */
	u8  iptype;	/* interrupt type (0x01 == connection pending) */
	u32 res2[8];	/* type-specific remainder of the interrupt data */
};

/* Work item queued for deferred processing of one external interrupt. */
struct iucv_work {
	struct list_head list;
	struct iucv_irq_data data;
};

/* Queue of pending interrupt work items and the lock protecting it. */
static LIST_HEAD(iucv_work_queue);
static DEFINE_SPINLOCK(iucv_work_lock);

/* Per-cpu interrupt buffers declared to CP (see iucv_declare_cpu). */
static struct iucv_irq_data *iucv_irq_data;
/* Cpus that currently have an interrupt buffer declared to CP. */
static cpumask_t iucv_buffer_cpumask = CPU_MASK_NONE;
/* Cpus that currently have IUCV interrupts enabled. */
static cpumask_t iucv_irq_cpumask = CPU_MASK_NONE;

/* Tasklet draining iucv_work_queue (handler body outside this chunk). */
static void iucv_tasklet_handler(unsigned long);
static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_handler, 0);

/* IUCV function codes passed to CP in general register 0 (see
 * iucv_call_b2f0). */
enum iucv_command_codes {
	IUCV_QUERY = 0,
	IUCV_RETRIEVE_BUFFER = 2,
	IUCV_SEND = 4,
	IUCV_RECEIVE = 5,
	IUCV_REPLY = 6,
	IUCV_REJECT = 8,
	IUCV_PURGE = 9,
	IUCV_ACCEPT = 10,
	IUCV_CONNECT = 11,
	IUCV_DECLARE_BUFFER = 12,
	IUCV_QUIESCE = 13,
	IUCV_RESUME = 14,
	IUCV_SEVER = 15,
	IUCV_SETMASK = 16,
};
124 | |||
/*
 * Error messages that are used with the iucv_sever function. They get
 * converted to EBCDIC (presumably at init time -- the conversion is not
 * visible in this chunk; confirm).
 */
static char iucv_error_no_listener[16] = "NO LISTENER";
static char iucv_error_no_memory[16] = "NO MEMORY";
static char iucv_error_pathid[16] = "INVALID PATHID";

/*
 * iucv_handler_list: List of registered handlers.
 */
static LIST_HEAD(iucv_handler_list);

/*
 * iucv_path_table: array of pointers to iucv_path structures,
 * indexed by pathid; iucv_max_pathid entries.
 */
static struct iucv_path **iucv_path_table;
static unsigned long iucv_max_pathid;

/*
 * iucv_table_lock: spinlock protecting iucv_handler_list and
 * iucv_path_table.
 */
static DEFINE_SPINLOCK(iucv_table_lock);

/*
 * iucv_tasklet_cpu: contains the number of the cpu executing the tasklet.
 * Needed for iucv_path_sever called from tasklet.
 */
static int iucv_tasklet_cpu = -1;

/*
 * Mutex serializing iucv_register/iucv_unregister.
 */
static DEFINE_MUTEX(iucv_register_mutex);

/*
 * Counter for number of non-smp capable handlers; while it is non-zero
 * interrupts are restricted to a single cpu (see iucv_setmask_up).
 */
static int iucv_nonsmp_handler;
164 | |||
/*
 * IUCV control data structure. Used by iucv_path_accept, iucv_path_connect,
 * iucv_path_quiesce and iucv_path_sever. Layout is mandated by CP;
 * hence packed and 8-byte aligned.
 */
struct iucv_cmd_control {
	u16 ippathid;		/* path identification */
	u8  ipflags1;		/* flags, see IUCV_IPxxx defines */
	u8  iprcode;		/* return code set by CP */
	u16 ipmsglim;		/* message limit of the path */
	u16 res1;
	u8  ipvmid[8];		/* partner user id (EBCDIC, upper case) */
	u8  ipuser[16];		/* user data reflected to the partner */
	u8  iptarget[8];	/* target system identification */
} __attribute__ ((packed,aligned(8)));

/*
 * Data in parameter list iucv structure. Used by iucv_message_send,
 * iucv_message_send2way and iucv_message_reply.
 */
struct iucv_cmd_dpl {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	u8  iprmmsg[8];		/* up to 8 bytes of data in the parm list */
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Data in buffer iucv structure. Used by iucv_message_receive,
 * iucv_message_reject, iucv_message_send, iucv_message_send2way
 * and iucv_declare_cpu.
 */
struct iucv_cmd_db {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	u32 ipbfadr1;		/* address of first data buffer */
	u32 ipbfln1f;
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 ipbfadr2;		/* address of second (answer) buffer */
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Purge message iucv structure. Used by iucv_message_purge.
 */
struct iucv_cmd_purge {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u8  ipaudit[3];		/* audit trail of the purged message */
	u8  res1[5];
	u32 res2;
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 res3[3];
} __attribute__ ((packed,aligned(8)));

/*
 * Set mask iucv structure. Used by iucv_enable_cpu.
 */
struct iucv_cmd_set_mask {
	u8  ipmask;		/* interrupt classes to enable, see iucv_allow_cpu */
	u8  res1[2];
	u8  iprcode;
	u32 res2[9];
} __attribute__ ((packed,aligned(8)));

union iucv_param {
	struct iucv_cmd_control ctrl;
	struct iucv_cmd_dpl dpl;
	struct iucv_cmd_db db;
	struct iucv_cmd_purge purge;
	struct iucv_cmd_set_mask set_mask;
};

/*
 * Anchor for the per-cpu IUCV command parameter block. Callers must
 * prevent migration/concurrent use (bottom halves off or locks held)
 * while the block is in use.
 */
static union iucv_param *iucv_param;
256 | |||
/**
 * iucv_call_b2f0
 * @command: identifier of IUCV call to CP.
 * @parm: pointer to a struct iucv_parm block
 *
 * Calls CP to execute IUCV commands: the command code goes into
 * general register 0, the real address of the parameter block into
 * general register 1, then the 0xb2f0 instruction traps to CP.
 *
 * Returns the result of the CP IUCV call.
 */
static inline int iucv_call_b2f0(int command, union iucv_param *parm)
{
	register unsigned long reg0 asm ("0");
	register unsigned long reg1 asm ("1");
	int ccode;

	reg0 = command;
	reg1 = virt_to_phys(parm);	/* CP needs the real address */
	asm volatile(
		"	.long 0xb2f01000\n"	/* the IUCV instruction */
		"	ipm	%0\n"		/* extract condition code */
		"	srl	%0,28\n"
		: "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1)
		:  "m" (*parm) : "cc");
	/* cc 1 means CP stored a return code in the parameter block. */
	return (ccode == 1) ? parm->ctrl.iprcode : ccode;
}
282 | |||
283 | /** | ||
284 | * iucv_query_maxconn | ||
285 | * | ||
286 | * Determines the maximum number of connections that may be established. | ||
287 | * | ||
288 | * Returns the maximum number of connections or -EPERM is IUCV is not | ||
289 | * available. | ||
290 | */ | ||
291 | static int iucv_query_maxconn(void) | ||
292 | { | ||
293 | register unsigned long reg0 asm ("0"); | ||
294 | register unsigned long reg1 asm ("1"); | ||
295 | void *param; | ||
296 | int ccode; | ||
297 | |||
298 | param = kzalloc(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA); | ||
299 | if (!param) | ||
300 | return -ENOMEM; | ||
301 | reg0 = IUCV_QUERY; | ||
302 | reg1 = (unsigned long) param; | ||
303 | asm volatile ( | ||
304 | " .long 0xb2f01000\n" | ||
305 | " ipm %0\n" | ||
306 | " srl %0,28\n" | ||
307 | : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc"); | ||
308 | if (ccode == 0) | ||
309 | iucv_max_pathid = reg0; | ||
310 | kfree(param); | ||
311 | return ccode ? -EPERM : 0; | ||
312 | } | ||
313 | |||
314 | /** | ||
315 | * iucv_allow_cpu | ||
316 | * @data: unused | ||
317 | * | ||
318 | * Allow iucv interrupts on this cpu. | ||
319 | */ | ||
320 | static void iucv_allow_cpu(void *data) | ||
321 | { | ||
322 | int cpu = smp_processor_id(); | ||
323 | union iucv_param *parm; | ||
324 | |||
325 | /* | ||
326 | * Enable all iucv interrupts. | ||
327 | * ipmask contains bits for the different interrupts | ||
328 | * 0x80 - Flag to allow nonpriority message pending interrupts | ||
329 | * 0x40 - Flag to allow priority message pending interrupts | ||
330 | * 0x20 - Flag to allow nonpriority message completion interrupts | ||
331 | * 0x10 - Flag to allow priority message completion interrupts | ||
332 | * 0x08 - Flag to allow IUCV control interrupts | ||
333 | */ | ||
334 | parm = percpu_ptr(iucv_param, smp_processor_id()); | ||
335 | memset(parm, 0, sizeof(union iucv_param)); | ||
336 | parm->set_mask.ipmask = 0xf8; | ||
337 | iucv_call_b2f0(IUCV_SETMASK, parm); | ||
338 | |||
339 | /* Set indication that iucv interrupts are allowed for this cpu. */ | ||
340 | cpu_set(cpu, iucv_irq_cpumask); | ||
341 | } | ||
342 | |||
343 | /** | ||
344 | * iucv_block_cpu | ||
345 | * @data: unused | ||
346 | * | ||
347 | * Block iucv interrupts on this cpu. | ||
348 | */ | ||
349 | static void iucv_block_cpu(void *data) | ||
350 | { | ||
351 | int cpu = smp_processor_id(); | ||
352 | union iucv_param *parm; | ||
353 | |||
354 | /* Disable all iucv interrupts. */ | ||
355 | parm = percpu_ptr(iucv_param, smp_processor_id()); | ||
356 | memset(parm, 0, sizeof(union iucv_param)); | ||
357 | iucv_call_b2f0(IUCV_SETMASK, parm); | ||
358 | |||
359 | /* Clear indication that iucv interrupts are allowed for this cpu. */ | ||
360 | cpu_clear(cpu, iucv_irq_cpumask); | ||
361 | } | ||
362 | |||
363 | /** | ||
364 | * iucv_declare_cpu | ||
365 | * @data: unused | ||
366 | * | ||
367 | * Declare a interupt buffer on this cpu. | ||
368 | */ | ||
369 | static void iucv_declare_cpu(void *data) | ||
370 | { | ||
371 | int cpu = smp_processor_id(); | ||
372 | union iucv_param *parm; | ||
373 | int rc; | ||
374 | |||
375 | if (cpu_isset(cpu, iucv_buffer_cpumask)) | ||
376 | return; | ||
377 | |||
378 | /* Declare interrupt buffer. */ | ||
379 | parm = percpu_ptr(iucv_param, cpu); | ||
380 | memset(parm, 0, sizeof(union iucv_param)); | ||
381 | parm->db.ipbfadr1 = virt_to_phys(percpu_ptr(iucv_irq_data, cpu)); | ||
382 | rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm); | ||
383 | if (rc) { | ||
384 | char *err = "Unknown"; | ||
385 | switch(rc) { | ||
386 | case 0x03: | ||
387 | err = "Directory error"; | ||
388 | break; | ||
389 | case 0x0a: | ||
390 | err = "Invalid length"; | ||
391 | break; | ||
392 | case 0x13: | ||
393 | err = "Buffer already exists"; | ||
394 | break; | ||
395 | case 0x3e: | ||
396 | err = "Buffer overlap"; | ||
397 | break; | ||
398 | case 0x5c: | ||
399 | err = "Paging or storage error"; | ||
400 | break; | ||
401 | } | ||
402 | printk(KERN_WARNING "iucv_register: iucv_declare_buffer " | ||
403 | "on cpu %i returned error 0x%02x (%s)\n", cpu, rc, err); | ||
404 | return; | ||
405 | } | ||
406 | |||
407 | /* Set indication that an iucv buffer exists for this cpu. */ | ||
408 | cpu_set(cpu, iucv_buffer_cpumask); | ||
409 | |||
410 | if (iucv_nonsmp_handler == 0 || cpus_empty(iucv_irq_cpumask)) | ||
411 | /* Enable iucv interrupts on this cpu. */ | ||
412 | iucv_allow_cpu(NULL); | ||
413 | else | ||
414 | /* Disable iucv interrupts on this cpu. */ | ||
415 | iucv_block_cpu(NULL); | ||
416 | } | ||
417 | |||
/**
 * iucv_retrieve_cpu
 * @data: unused
 *
 * Retrieve the interrupt buffer on this cpu: interrupts are blocked
 * first so CP cannot deliver into a buffer that is being withdrawn.
 */
static void iucv_retrieve_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/* Nothing to do if no buffer was declared for this cpu. */
	if (!cpu_isset(cpu, iucv_buffer_cpumask))
		return;

	/* Block iucv interrupts. */
	iucv_block_cpu(NULL);

	/* Retrieve interrupt buffer. */
	parm = percpu_ptr(iucv_param, cpu);
	iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);

	/* Clear indication that an iucv buffer exists for this cpu. */
	cpu_clear(cpu, iucv_buffer_cpumask);
}
442 | |||
/**
 * iucv_setmask_mp
 *
 * Allow iucv interrupts on all cpus.
 * (Kernel-doc previously named this "iucv_setmask_smp" -- fixed.)
 */
static void iucv_setmask_mp(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		/* Enable all cpus with a declared buffer. */
		if (cpu_isset(cpu, iucv_buffer_cpumask) &&
		    !cpu_isset(cpu, iucv_irq_cpumask))
			smp_call_function_on(iucv_allow_cpu, NULL, 0, 1, cpu);
}
458 | |||
459 | /** | ||
460 | * iucv_setmask_up | ||
461 | * | ||
462 | * Allow iucv interrupts on a single cpus. | ||
463 | */ | ||
464 | static void iucv_setmask_up(void) | ||
465 | { | ||
466 | cpumask_t cpumask; | ||
467 | int cpu; | ||
468 | |||
469 | /* Disable all cpu but the first in cpu_irq_cpumask. */ | ||
470 | cpumask = iucv_irq_cpumask; | ||
471 | cpu_clear(first_cpu(iucv_irq_cpumask), cpumask); | ||
472 | for_each_cpu_mask(cpu, cpumask) | ||
473 | smp_call_function_on(iucv_block_cpu, NULL, 0, 1, cpu); | ||
474 | } | ||
475 | |||
476 | /** | ||
477 | * iucv_enable | ||
478 | * | ||
479 | * This function makes iucv ready for use. It allocates the pathid | ||
480 | * table, declares an iucv interrupt buffer and enables the iucv | ||
481 | * interrupts. Called when the first user has registered an iucv | ||
482 | * handler. | ||
483 | */ | ||
484 | static int iucv_enable(void) | ||
485 | { | ||
486 | size_t alloc_size; | ||
487 | int cpu, rc; | ||
488 | |||
489 | rc = -ENOMEM; | ||
490 | alloc_size = iucv_max_pathid * sizeof(struct iucv_path); | ||
491 | iucv_path_table = kzalloc(alloc_size, GFP_KERNEL); | ||
492 | if (!iucv_path_table) | ||
493 | goto out; | ||
494 | /* Declare per cpu buffers. */ | ||
495 | rc = -EIO; | ||
496 | for_each_online_cpu(cpu) | ||
497 | smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu); | ||
498 | if (cpus_empty(iucv_buffer_cpumask)) | ||
499 | /* No cpu could declare an iucv buffer. */ | ||
500 | goto out_path; | ||
501 | return 0; | ||
502 | |||
503 | out_path: | ||
504 | kfree(iucv_path_table); | ||
505 | out: | ||
506 | return rc; | ||
507 | } | ||
508 | |||
509 | /** | ||
510 | * iucv_disable | ||
511 | * | ||
512 | * This function shuts down iucv. It disables iucv interrupts, retrieves | ||
513 | * the iucv interrupt buffer and frees the pathid table. Called after the | ||
514 | * last user unregister its iucv handler. | ||
515 | */ | ||
516 | static void iucv_disable(void) | ||
517 | { | ||
518 | on_each_cpu(iucv_retrieve_cpu, NULL, 0, 1); | ||
519 | kfree(iucv_path_table); | ||
520 | } | ||
521 | |||
#ifdef CONFIG_HOTPLUG_CPU
/*
 * CPU hotplug callback: keeps the per-cpu interrupt buffers and
 * parameter blocks in step with the set of online cpus, and makes
 * sure at least one cpu keeps IUCV interrupts enabled.
 */
static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
				     unsigned long action, void *hcpu)
{
	cpumask_t cpumask;
	long cpu = (long) hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		/* Allocate per-cpu data before the cpu starts running. */
		if (!percpu_populate(iucv_irq_data,
				     sizeof(struct iucv_irq_data),
				     GFP_KERNEL|GFP_DMA, cpu))
			return NOTIFY_BAD;
		if (!percpu_populate(iucv_param, sizeof(union iucv_param),
				     GFP_KERNEL|GFP_DMA, cpu)) {
			/* Roll back the first allocation on failure. */
			percpu_depopulate(iucv_irq_data, cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		/* Release per-cpu data of a cpu that never came up or died. */
		percpu_depopulate(iucv_param, cpu);
		percpu_depopulate(iucv_irq_data, cpu);
		break;
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		/* Declare an interrupt buffer on the (again) online cpu. */
		smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
		break;
	case CPU_DOWN_PREPARE:
		cpumask = iucv_buffer_cpumask;
		cpu_clear(cpu, cpumask);
		if (cpus_empty(cpumask))
			/* Can't offline last IUCV enabled cpu. */
			return NOTIFY_BAD;
		smp_call_function_on(iucv_retrieve_cpu, NULL, 0, 1, cpu);
		/* If no cpu has interrupts enabled anymore, re-enable them
		 * on the first cpu that still has a declared buffer. */
		if (cpus_empty(iucv_irq_cpumask))
			smp_call_function_on(iucv_allow_cpu, NULL, 0, 1,
					     first_cpu(iucv_buffer_cpumask));
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block iucv_cpu_notifier = {
	.notifier_call = iucv_cpu_notify,
};
#endif
569 | |||
570 | /** | ||
571 | * iucv_sever_pathid | ||
572 | * @pathid: path identification number. | ||
573 | * @userdata: 16-bytes of user data. | ||
574 | * | ||
575 | * Sever an iucv path to free up the pathid. Used internally. | ||
576 | */ | ||
577 | static int iucv_sever_pathid(u16 pathid, u8 userdata[16]) | ||
578 | { | ||
579 | union iucv_param *parm; | ||
580 | |||
581 | parm = percpu_ptr(iucv_param, smp_processor_id()); | ||
582 | memset(parm, 0, sizeof(union iucv_param)); | ||
583 | if (userdata) | ||
584 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); | ||
585 | parm->ctrl.ippathid = pathid; | ||
586 | return iucv_call_b2f0(IUCV_SEVER, parm); | ||
587 | } | ||
588 | |||
/**
 * __iucv_cleanup_pathid
 * @dummy: unused dummy argument
 *
 * Nop function called via smp_call_function to force work items from
 * pending external iucv interrupts to the work queue. The call itself
 * is the synchronization point; the body is intentionally empty.
 */
static void __iucv_cleanup_pathid(void *dummy)
{
}
599 | |||
/**
 * iucv_cleanup_pathid
 * @pathid: 16 bit pathid
 *
 * Function called after a path has been severed to find all remaining
 * work items for the now stale pathid. The caller needs to hold the
 * iucv_table_lock.
 */
static void iucv_cleanup_pathid(u16 pathid)
{
	struct iucv_work *p, *n;

	/*
	 * Path is severed, the pathid can be reused immediatly on
	 * a iucv connect or a connection pending interrupt.
	 * iucv_path_connect and connection pending interrupt will
	 * wait until the iucv_table_lock is released before the
	 * recycled pathid enters the system.
	 * Force remaining interrupts to the work queue, then
	 * scan the work queue for items of this path.
	 */
	smp_call_function(__iucv_cleanup_pathid, NULL, 0, 1);
	spin_lock_irq(&iucv_work_lock);
	list_for_each_entry_safe(p, n, &iucv_work_queue, list) {
		/* Remove work items for pathid except connection pending
		 * (iptype 0x01), which belongs to the recycled path. */
		if (p->data.ippathid == pathid && p->data.iptype != 0x01) {
			list_del(&p->list);
			kfree(p);
		}
	}
	spin_unlock_irq(&iucv_work_lock);
}
632 | |||
633 | /** | ||
634 | * iucv_register: | ||
635 | * @handler: address of iucv handler structure | ||
636 | * @smp: != 0 indicates that the handler can deal with out of order messages | ||
637 | * | ||
638 | * Registers a driver with IUCV. | ||
639 | * | ||
640 | * Returns 0 on success, -ENOMEM if the memory allocation for the pathid | ||
641 | * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus. | ||
642 | */ | ||
643 | int iucv_register(struct iucv_handler *handler, int smp) | ||
644 | { | ||
645 | int rc; | ||
646 | |||
647 | if (!iucv_available) | ||
648 | return -ENOSYS; | ||
649 | mutex_lock(&iucv_register_mutex); | ||
650 | if (!smp) | ||
651 | iucv_nonsmp_handler++; | ||
652 | if (list_empty(&iucv_handler_list)) { | ||
653 | rc = iucv_enable(); | ||
654 | if (rc) | ||
655 | goto out_mutex; | ||
656 | } else if (!smp && iucv_nonsmp_handler == 1) | ||
657 | iucv_setmask_up(); | ||
658 | INIT_LIST_HEAD(&handler->paths); | ||
659 | |||
660 | spin_lock_irq(&iucv_table_lock); | ||
661 | list_add_tail(&handler->list, &iucv_handler_list); | ||
662 | spin_unlock_irq(&iucv_table_lock); | ||
663 | rc = 0; | ||
664 | out_mutex: | ||
665 | mutex_unlock(&iucv_register_mutex); | ||
666 | return rc; | ||
667 | } | ||
668 | |||
/**
 * iucv_unregister
 * @handler: address of iucv handler structure
 * @smp: != 0 indicates that the handler can deal with out of order messages
 *
 * Unregister driver from IUCV: severs every path still owned by the
 * handler and shuts iucv down if this was the last handler.
 */
void iucv_unregister(struct iucv_handler *handler, int smp)
{
	struct iucv_path *p, *n;

	mutex_lock(&iucv_register_mutex);
	spin_lock_bh(&iucv_table_lock);
	/* Remove handler from the iucv_handler_list. */
	list_del_init(&handler->list);
	/* Sever all pathids still refering to the handler. */
	list_for_each_entry_safe(p, n, &handler->paths, list) {
		iucv_sever_pathid(p->pathid, NULL);
		iucv_path_table[p->pathid] = NULL;
		list_del(&p->list);
		/* Drop stale work items queued for the severed path. */
		iucv_cleanup_pathid(p->pathid);
		iucv_path_free(p);
	}
	spin_unlock_bh(&iucv_table_lock);
	if (!smp)
		iucv_nonsmp_handler--;
	if (list_empty(&iucv_handler_list))
		/* Last handler gone: shut iucv down completely. */
		iucv_disable();
	else if (!smp && iucv_nonsmp_handler == 0)
		/* Last non-smp handler gone: all cpus may take interrupts. */
		iucv_setmask_mp();
	mutex_unlock(&iucv_register_mutex);
}
701 | |||
702 | /** | ||
703 | * iucv_path_accept | ||
704 | * @path: address of iucv path structure | ||
705 | * @handler: address of iucv handler structure | ||
706 | * @userdata: 16 bytes of data reflected to the communication partner | ||
707 | * @private: private data passed to interrupt handlers for this path | ||
708 | * | ||
709 | * This function is issued after the user received a connection pending | ||
710 | * external interrupt and now wishes to complete the IUCV communication path. | ||
711 | * | ||
712 | * Returns the result of the CP IUCV call. | ||
713 | */ | ||
714 | int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler, | ||
715 | u8 userdata[16], void *private) | ||
716 | { | ||
717 | union iucv_param *parm; | ||
718 | int rc; | ||
719 | |||
720 | local_bh_disable(); | ||
721 | /* Prepare parameter block. */ | ||
722 | parm = percpu_ptr(iucv_param, smp_processor_id()); | ||
723 | memset(parm, 0, sizeof(union iucv_param)); | ||
724 | parm->ctrl.ippathid = path->pathid; | ||
725 | parm->ctrl.ipmsglim = path->msglim; | ||
726 | if (userdata) | ||
727 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); | ||
728 | parm->ctrl.ipflags1 = path->flags; | ||
729 | |||
730 | rc = iucv_call_b2f0(IUCV_ACCEPT, parm); | ||
731 | if (!rc) { | ||
732 | path->private = private; | ||
733 | path->msglim = parm->ctrl.ipmsglim; | ||
734 | path->flags = parm->ctrl.ipflags1; | ||
735 | } | ||
736 | local_bh_enable(); | ||
737 | return rc; | ||
738 | } | ||
739 | |||
/**
 * iucv_path_connect
 * @path: address of iucv path structure
 * @handler: address of iucv handler structure
 * @userid: 8-byte user identification
 * @system: 8-byte target system identification
 * @userdata: 16 bytes of data reflected to the communication partner
 * @private: private data passed to interrupt handlers for this path
 *
 * This function establishes an IUCV path. Although the connect may complete
 * successfully, you are not able to use the path until you receive an IUCV
 * Connection Complete external interrupt.
 *
 * Returns the result of the CP IUCV call.
 */
int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
		      u8 userid[8], u8 system[8], u8 userdata[16],
		      void *private)
{
	union iucv_param *parm;
	int rc;

	preempt_disable();
	/*
	 * iucv_table_lock protects iucv_path_table. Skip taking it when
	 * running on the tasklet cpu -- NOTE(review): presumably the
	 * tasklet already holds it there (see iucv_tasklet_cpu); confirm.
	 */
	if (iucv_tasklet_cpu != smp_processor_id())
		spin_lock_bh(&iucv_table_lock);
	parm = percpu_ptr(iucv_param, smp_processor_id());
	memset(parm, 0, sizeof(union iucv_param));
	parm->ctrl.ipmsglim = path->msglim;
	parm->ctrl.ipflags1 = path->flags;
	if (userid) {
		/* CP expects the vm ids in upper case EBCDIC. */
		memcpy(parm->ctrl.ipvmid, userid, sizeof(parm->ctrl.ipvmid));
		ASCEBC(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
		EBC_TOUPPER(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
	}
	if (system) {
		memcpy(parm->ctrl.iptarget, system,
		       sizeof(parm->ctrl.iptarget));
		ASCEBC(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
		EBC_TOUPPER(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
	}
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));

	rc = iucv_call_b2f0(IUCV_CONNECT, parm);
	if (!rc) {
		if (parm->ctrl.ippathid < iucv_max_pathid) {
			/* Take over the values CP returned for the path. */
			path->pathid = parm->ctrl.ippathid;
			path->msglim = parm->ctrl.ipmsglim;
			path->flags = parm->ctrl.ipflags1;
			path->handler = handler;
			path->private = private;
			list_add_tail(&path->list, &handler->paths);
			iucv_path_table[path->pathid] = path;
		} else {
			/* Pathid out of table range: refuse and sever. */
			iucv_sever_pathid(parm->ctrl.ippathid,
					  iucv_error_pathid);
			rc = -EIO;
		}
	}
	if (iucv_tasklet_cpu != smp_processor_id())
		spin_unlock_bh(&iucv_table_lock);
	preempt_enable();
	return rc;
}
804 | |||
805 | /** | ||
806 | * iucv_path_quiesce: | ||
807 | * @path: address of iucv path structure | ||
808 | * @userdata: 16 bytes of data reflected to the communication partner | ||
809 | * | ||
810 | * This function temporarily suspends incoming messages on an IUCV path. | ||
811 | * You can later reactivate the path by invoking the iucv_resume function. | ||
812 | * | ||
813 | * Returns the result from the CP IUCV call. | ||
814 | */ | ||
815 | int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16]) | ||
816 | { | ||
817 | union iucv_param *parm; | ||
818 | int rc; | ||
819 | |||
820 | local_bh_disable(); | ||
821 | parm = percpu_ptr(iucv_param, smp_processor_id()); | ||
822 | memset(parm, 0, sizeof(union iucv_param)); | ||
823 | if (userdata) | ||
824 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); | ||
825 | parm->ctrl.ippathid = path->pathid; | ||
826 | rc = iucv_call_b2f0(IUCV_QUIESCE, parm); | ||
827 | local_bh_enable(); | ||
828 | return rc; | ||
829 | } | ||
830 | |||
831 | /** | ||
832 | * iucv_path_resume: | ||
833 | * @path: address of iucv path structure | ||
834 | * @userdata: 16 bytes of data reflected to the communication partner | ||
835 | * | ||
836 | * This function resumes incoming messages on an IUCV path that has | ||
837 | * been stopped with iucv_path_quiesce. | ||
838 | * | ||
839 | * Returns the result from the CP IUCV call. | ||
840 | */ | ||
841 | int iucv_path_resume(struct iucv_path *path, u8 userdata[16]) | ||
842 | { | ||
843 | union iucv_param *parm; | ||
844 | int rc; | ||
845 | |||
846 | local_bh_disable(); | ||
847 | parm = percpu_ptr(iucv_param, smp_processor_id()); | ||
848 | memset(parm, 0, sizeof(union iucv_param)); | ||
849 | if (userdata) | ||
850 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); | ||
851 | parm->ctrl.ippathid = path->pathid; | ||
852 | rc = iucv_call_b2f0(IUCV_RESUME, parm); | ||
853 | local_bh_enable(); | ||
854 | return rc; | ||
855 | } | ||
856 | |||
/**
 * iucv_path_sever
 * @path: address of iucv path structure
 * @userdata: 16 bytes of data reflected to the communication partner
 *
 * This function terminates an IUCV path.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
{
	int rc;

	preempt_disable();
	/*
	 * Skip iucv_table_lock when running on the tasklet cpu --
	 * NOTE(review): presumably the tasklet already holds it there
	 * (see iucv_tasklet_cpu); confirm.
	 */
	if (iucv_tasklet_cpu != smp_processor_id())
		spin_lock_bh(&iucv_table_lock);
	rc = iucv_sever_pathid(path->pathid, userdata);
	if (!rc) {
		/* Free the pathid slot and flush stale work items. */
		iucv_path_table[path->pathid] = NULL;
		list_del_init(&path->list);
		iucv_cleanup_pathid(path->pathid);
	}
	if (iucv_tasklet_cpu != smp_processor_id())
		spin_unlock_bh(&iucv_table_lock);
	preempt_enable();
	return rc;
}
885 | |||
886 | /** | ||
887 | * iucv_message_purge | ||
888 | * @path: address of iucv path structure | ||
889 | * @msg: address of iucv msg structure | ||
890 | * @srccls: source class of message | ||
891 | * | ||
892 | * Cancels a message you have sent. | ||
893 | * | ||
894 | * Returns the result from the CP IUCV call. | ||
895 | */ | ||
896 | int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg, | ||
897 | u32 srccls) | ||
898 | { | ||
899 | union iucv_param *parm; | ||
900 | int rc; | ||
901 | |||
902 | local_bh_disable(); | ||
903 | parm = percpu_ptr(iucv_param, smp_processor_id()); | ||
904 | memset(parm, 0, sizeof(union iucv_param)); | ||
905 | parm->purge.ippathid = path->pathid; | ||
906 | parm->purge.ipmsgid = msg->id; | ||
907 | parm->purge.ipsrccls = srccls; | ||
908 | parm->purge.ipflags1 = IUCV_IPSRCCLS | IUCV_IPFGMID | IUCV_IPFGPID; | ||
909 | rc = iucv_call_b2f0(IUCV_PURGE, parm); | ||
910 | if (!rc) { | ||
911 | msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8; | ||
912 | msg->tag = parm->purge.ipmsgtag; | ||
913 | } | ||
914 | local_bh_enable(); | ||
915 | return rc; | ||
916 | } | ||
917 | |||
918 | /** | ||
919 | * iucv_message_receive | ||
920 | * @path: address of iucv path structure | ||
921 | * @msg: address of iucv msg structure | ||
922 | * @flags: how the message is received (IUCV_IPBUFLST) | ||
923 | * @buffer: address of data buffer or address of struct iucv_array | ||
924 | * @size: length of data buffer | ||
925 | * @residual: | ||
926 | * | ||
927 | * This function receives messages that are being sent to you over | ||
928 | * established paths. This function will deal with RMDATA messages | ||
929 | * embedded in struct iucv_message as well. | ||
930 | * | ||
931 | * Returns the result from the CP IUCV call. | ||
932 | */ | ||
933 | int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, | ||
934 | u8 flags, void *buffer, size_t size, size_t *residual) | ||
935 | { | ||
936 | union iucv_param *parm; | ||
937 | struct iucv_array *array; | ||
938 | u8 *rmmsg; | ||
939 | size_t copy; | ||
940 | int rc; | ||
941 | |||
942 | if (msg->flags & IUCV_IPRMDATA) { | ||
943 | /* | ||
944 | * Message is 8 bytes long and has been stored to the | ||
945 | * message descriptor itself. | ||
946 | */ | ||
947 | rc = (size < 8) ? 5 : 0; | ||
948 | if (residual) | ||
949 | *residual = abs(size - 8); | ||
950 | rmmsg = msg->rmmsg; | ||
951 | if (flags & IUCV_IPBUFLST) { | ||
952 | /* Copy to struct iucv_array. */ | ||
953 | size = (size < 8) ? size : 8; | ||
954 | for (array = buffer; size > 0; array++) { | ||
955 | copy = min_t(size_t, size, array->length); | ||
956 | memcpy((u8 *)(addr_t) array->address, | ||
957 | rmmsg, copy); | ||
958 | rmmsg += copy; | ||
959 | size -= copy; | ||
960 | } | ||
961 | } else { | ||
962 | /* Copy to direct buffer. */ | ||
963 | memcpy(buffer, rmmsg, min_t(size_t, size, 8)); | ||
964 | } | ||
965 | return 0; | ||
966 | } | ||
967 | |||
968 | local_bh_disable(); | ||
969 | parm = percpu_ptr(iucv_param, smp_processor_id()); | ||
970 | memset(parm, 0, sizeof(union iucv_param)); | ||
971 | parm->db.ipbfadr1 = (u32)(addr_t) buffer; | ||
972 | parm->db.ipbfln1f = (u32) size; | ||
973 | parm->db.ipmsgid = msg->id; | ||
974 | parm->db.ippathid = path->pathid; | ||
975 | parm->db.iptrgcls = msg->class; | ||
976 | parm->db.ipflags1 = (flags | IUCV_IPFGPID | | ||
977 | IUCV_IPFGMID | IUCV_IPTRGCLS); | ||
978 | rc = iucv_call_b2f0(IUCV_RECEIVE, parm); | ||
979 | if (!rc || rc == 5) { | ||
980 | msg->flags = parm->db.ipflags1; | ||
981 | if (residual) | ||
982 | *residual = parm->db.ipbfln1f; | ||
983 | } | ||
984 | local_bh_enable(); | ||
985 | return rc; | ||
986 | } | ||
987 | |||
988 | /** | ||
989 | * iucv_message_reject | ||
990 | * @path: address of iucv path structure | ||
991 | * @msg: address of iucv msg structure | ||
992 | * | ||
993 | * The reject function refuses a specified message. Between the time you | ||
994 | * are notified of a message and the time that you complete the message, | ||
995 | * the message may be rejected. | ||
996 | * | ||
997 | * Returns the result from the CP IUCV call. | ||
998 | */ | ||
999 | int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg) | ||
1000 | { | ||
1001 | union iucv_param *parm; | ||
1002 | int rc; | ||
1003 | |||
1004 | local_bh_disable(); | ||
1005 | parm = percpu_ptr(iucv_param, smp_processor_id()); | ||
1006 | memset(parm, 0, sizeof(union iucv_param)); | ||
1007 | parm->db.ippathid = path->pathid; | ||
1008 | parm->db.ipmsgid = msg->id; | ||
1009 | parm->db.iptrgcls = msg->class; | ||
1010 | parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID); | ||
1011 | rc = iucv_call_b2f0(IUCV_REJECT, parm); | ||
1012 | local_bh_enable(); | ||
1013 | return rc; | ||
1014 | } | ||
1015 | |||
1016 | /** | ||
1017 | * iucv_message_reply | ||
1018 | * @path: address of iucv path structure | ||
1019 | * @msg: address of iucv msg structure | ||
1020 | * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) | ||
1021 | * @reply: address of reply data buffer or address of struct iucv_array | ||
1022 | * @size: length of reply data buffer | ||
1023 | * | ||
1024 | * This function responds to the two-way messages that you receive. You | ||
1025 | * must identify completely the message to which you wish to reply. ie, | ||
1026 | * pathid, msgid, and trgcls. Prmmsg signifies the data is moved into | ||
1027 | * the parameter list. | ||
1028 | * | ||
1029 | * Returns the result from the CP IUCV call. | ||
1030 | */ | ||
1031 | int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg, | ||
1032 | u8 flags, void *reply, size_t size) | ||
1033 | { | ||
1034 | union iucv_param *parm; | ||
1035 | int rc; | ||
1036 | |||
1037 | local_bh_disable(); | ||
1038 | parm = percpu_ptr(iucv_param, smp_processor_id()); | ||
1039 | memset(parm, 0, sizeof(union iucv_param)); | ||
1040 | if (flags & IUCV_IPRMDATA) { | ||
1041 | parm->dpl.ippathid = path->pathid; | ||
1042 | parm->dpl.ipflags1 = flags; | ||
1043 | parm->dpl.ipmsgid = msg->id; | ||
1044 | parm->dpl.iptrgcls = msg->class; | ||
1045 | memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8)); | ||
1046 | } else { | ||
1047 | parm->db.ipbfadr1 = (u32)(addr_t) reply; | ||
1048 | parm->db.ipbfln1f = (u32) size; | ||
1049 | parm->db.ippathid = path->pathid; | ||
1050 | parm->db.ipflags1 = flags; | ||
1051 | parm->db.ipmsgid = msg->id; | ||
1052 | parm->db.iptrgcls = msg->class; | ||
1053 | } | ||
1054 | rc = iucv_call_b2f0(IUCV_REPLY, parm); | ||
1055 | local_bh_enable(); | ||
1056 | return rc; | ||
1057 | } | ||
1058 | |||
1059 | /** | ||
1060 | * iucv_message_send | ||
1061 | * @path: address of iucv path structure | ||
1062 | * @msg: address of iucv msg structure | ||
1063 | * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) | ||
1064 | * @srccls: source class of message | ||
1065 | * @buffer: address of send buffer or address of struct iucv_array | ||
1066 | * @size: length of send buffer | ||
1067 | * | ||
1068 | * This function transmits data to another application. Data to be | ||
1069 | * transmitted is in a buffer and this is a one-way message and the | ||
1070 | * receiver will not reply to the message. | ||
1071 | * | ||
1072 | * Returns the result from the CP IUCV call. | ||
1073 | */ | ||
1074 | int iucv_message_send(struct iucv_path *path, struct iucv_message *msg, | ||
1075 | u8 flags, u32 srccls, void *buffer, size_t size) | ||
1076 | { | ||
1077 | union iucv_param *parm; | ||
1078 | int rc; | ||
1079 | |||
1080 | local_bh_disable(); | ||
1081 | parm = percpu_ptr(iucv_param, smp_processor_id()); | ||
1082 | memset(parm, 0, sizeof(union iucv_param)); | ||
1083 | if (flags & IUCV_IPRMDATA) { | ||
1084 | /* Message of 8 bytes can be placed into the parameter list. */ | ||
1085 | parm->dpl.ippathid = path->pathid; | ||
1086 | parm->dpl.ipflags1 = flags | IUCV_IPNORPY; | ||
1087 | parm->dpl.iptrgcls = msg->class; | ||
1088 | parm->dpl.ipsrccls = srccls; | ||
1089 | parm->dpl.ipmsgtag = msg->tag; | ||
1090 | memcpy(parm->dpl.iprmmsg, buffer, 8); | ||
1091 | } else { | ||
1092 | parm->db.ipbfadr1 = (u32)(addr_t) buffer; | ||
1093 | parm->db.ipbfln1f = (u32) size; | ||
1094 | parm->db.ippathid = path->pathid; | ||
1095 | parm->db.ipflags1 = flags | IUCV_IPNORPY; | ||
1096 | parm->db.iptrgcls = msg->class; | ||
1097 | parm->db.ipsrccls = srccls; | ||
1098 | parm->db.ipmsgtag = msg->tag; | ||
1099 | } | ||
1100 | rc = iucv_call_b2f0(IUCV_SEND, parm); | ||
1101 | if (!rc) | ||
1102 | msg->id = parm->db.ipmsgid; | ||
1103 | local_bh_enable(); | ||
1104 | return rc; | ||
1105 | } | ||
1106 | |||
1107 | /** | ||
1108 | * iucv_message_send2way | ||
1109 | * @path: address of iucv path structure | ||
1110 | * @msg: address of iucv msg structure | ||
1111 | * @flags: how the message is sent and the reply is received | ||
1112 | * (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_ANSLST) | ||
1113 | * @srccls: source class of message | ||
1114 | * @buffer: address of send buffer or address of struct iucv_array | ||
1115 | * @size: length of send buffer | ||
1116 | * @ansbuf: address of answer buffer or address of struct iucv_array | ||
1117 | * @asize: size of reply buffer | ||
1118 | * | ||
1119 | * This function transmits data to another application. Data to be | ||
1120 | * transmitted is in a buffer. The receiver of the send is expected to | ||
1121 | * reply to the message and a buffer is provided into which IUCV moves | ||
1122 | * the reply to this message. | ||
1123 | * | ||
1124 | * Returns the result from the CP IUCV call. | ||
1125 | */ | ||
1126 | int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg, | ||
1127 | u8 flags, u32 srccls, void *buffer, size_t size, | ||
1128 | void *answer, size_t asize, size_t *residual) | ||
1129 | { | ||
1130 | union iucv_param *parm; | ||
1131 | int rc; | ||
1132 | |||
1133 | local_bh_disable(); | ||
1134 | parm = percpu_ptr(iucv_param, smp_processor_id()); | ||
1135 | memset(parm, 0, sizeof(union iucv_param)); | ||
1136 | if (flags & IUCV_IPRMDATA) { | ||
1137 | parm->dpl.ippathid = path->pathid; | ||
1138 | parm->dpl.ipflags1 = path->flags; /* priority message */ | ||
1139 | parm->dpl.iptrgcls = msg->class; | ||
1140 | parm->dpl.ipsrccls = srccls; | ||
1141 | parm->dpl.ipmsgtag = msg->tag; | ||
1142 | parm->dpl.ipbfadr2 = (u32)(addr_t) answer; | ||
1143 | parm->dpl.ipbfln2f = (u32) asize; | ||
1144 | memcpy(parm->dpl.iprmmsg, buffer, 8); | ||
1145 | } else { | ||
1146 | parm->db.ippathid = path->pathid; | ||
1147 | parm->db.ipflags1 = path->flags; /* priority message */ | ||
1148 | parm->db.iptrgcls = msg->class; | ||
1149 | parm->db.ipsrccls = srccls; | ||
1150 | parm->db.ipmsgtag = msg->tag; | ||
1151 | parm->db.ipbfadr1 = (u32)(addr_t) buffer; | ||
1152 | parm->db.ipbfln1f = (u32) size; | ||
1153 | parm->db.ipbfadr2 = (u32)(addr_t) answer; | ||
1154 | parm->db.ipbfln2f = (u32) asize; | ||
1155 | } | ||
1156 | rc = iucv_call_b2f0(IUCV_SEND, parm); | ||
1157 | if (!rc) | ||
1158 | msg->id = parm->db.ipmsgid; | ||
1159 | local_bh_enable(); | ||
1160 | return rc; | ||
1161 | } | ||
1162 | |||
/**
 * iucv_path_pending
 * @data: Pointer to external interrupt buffer
 *
 * Process connection pending work item. Called from tasklet while holding
 * iucv_table_lock.
 */
/* Layout of the connection-pending (iptype 0x01) interrupt data. */
struct iucv_path_pending {
	u16 ippathid;		/* pathid CP assigned to the new connection */
	u8 ipflags1;		/* connection flags */
	u8 iptype;		/* interrupt type (0x01) */
	u16 ipmsglim;		/* message limit for the new path */
	u16 res1;		/* reserved */
	u8 ipvmid[8];		/* peer's vm user id, in EBCDIC */
	u8 ipuser[16];		/* 16 bytes of user data from the peer */
	u32 res3;		/* reserved */
	u8 ippollfg;
	u8 res4[3];		/* reserved */
} __attribute__ ((packed));

static void iucv_path_pending(struct iucv_irq_data *data)
{
	struct iucv_path_pending *ipp = (void *) data;
	struct iucv_handler *handler;
	struct iucv_path *path;
	char *error;

	/* CP assigns a fresh pathid, so this table slot must be empty. */
	BUG_ON(iucv_path_table[ipp->ippathid]);
	/* New pathid, handler found. Create a new path struct. */
	error = iucv_error_no_memory;
	path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC);
	if (!path)
		goto out_sever;
	path->pathid = ipp->ippathid;
	iucv_path_table[path->pathid] = path;
	/* Convert the peer's vm id to ASCII before handing it to handlers. */
	EBCASC(ipp->ipvmid, 8);

	/* Call registered handler until one is found that wants the path. */
	list_for_each_entry(handler, &iucv_handler_list, list) {
		if (!handler->path_pending)
			continue;
		/*
		 * Add path to handler to allow a call to iucv_path_sever
		 * inside the path_pending function. If the handler returns
		 * an error remove the path from the handler again.
		 */
		list_add(&path->list, &handler->paths);
		path->handler = handler;
		if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser))
			return;
		list_del(&path->list);
		path->handler = NULL;
	}
	/* No handler wanted the path. */
	iucv_path_table[path->pathid] = NULL;
	iucv_path_free(path);
	error = iucv_error_no_listener;
out_sever:
	/* Refuse the connection; the error string is reflected to the peer. */
	iucv_sever_pathid(ipp->ippathid, error);
}
1223 | |||
/**
 * iucv_path_complete
 * @data: Pointer to external interrupt buffer
 *
 * Process connection complete work item. Called from tasklet while holding
 * iucv_table_lock.
 */
/* Layout of the connection-complete (iptype 0x02) interrupt data. */
struct iucv_path_complete {
	u16 ippathid;		/* pathid the interrupt is for */
	u8 ipflags1;		/* connection flags */
	u8 iptype;		/* interrupt type (0x02) */
	u16 ipmsglim;		/* negotiated message limit */
	u16 res1;		/* reserved */
	u8 res2[8];		/* reserved */
	u8 ipuser[16];		/* 16 bytes of user data from the peer */
	u32 res3;		/* reserved */
	u8 ippollfg;
	u8 res4[3];		/* reserved */
} __attribute__ ((packed));

static void iucv_path_complete(struct iucv_irq_data *data)
{
	struct iucv_path_complete *ipc = (void *) data;
	/* The path must already be registered in iucv_path_table with a
	 * handler attached, otherwise this interrupt is a logic error. */
	struct iucv_path *path = iucv_path_table[ipc->ippathid];

	BUG_ON(!path || !path->handler);
	/* The path_complete callback is optional. */
	if (path->handler->path_complete)
		path->handler->path_complete(path, ipc->ipuser);
}
1253 | |||
/**
 * iucv_path_severed
 * @data: Pointer to external interrupt buffer
 *
 * Process connection severed work item. Called from tasklet while holding
 * iucv_table_lock.
 */
/* Layout of the connection-severed (iptype 0x03) interrupt data. */
struct iucv_path_severed {
	u16 ippathid;		/* pathid the interrupt is for */
	u8 res1;		/* reserved */
	u8 iptype;		/* interrupt type (0x03) */
	u32 res2;		/* reserved */
	u8 res3[8];		/* reserved */
	u8 ipuser[16];		/* 16 bytes of user data from the peer */
	u32 res4;		/* reserved */
	u8 ippollfg;
	u8 res5[3];		/* reserved */
} __attribute__ ((packed));

static void iucv_path_severed(struct iucv_irq_data *data)
{
	struct iucv_path_severed *ips = (void *) data;
	struct iucv_path *path = iucv_path_table[ips->ippathid];

	BUG_ON(!path || !path->handler);
	if (path->handler->path_severed)
		/* Let the handler decide what to do with the severed path. */
		path->handler->path_severed(path, ips->ipuser);
	else {
		/* No callback registered: sever our end as well and free
		 * all path state (table slot, handler list, pathid,
		 * memory) here. */
		iucv_sever_pathid(path->pathid, NULL);
		iucv_path_table[path->pathid] = NULL;
		list_del_init(&path->list);
		iucv_cleanup_pathid(path->pathid);
		iucv_path_free(path);
	}
}
1289 | |||
/**
 * iucv_path_quiesced
 * @data: Pointer to external interrupt buffer
 *
 * Process connection quiesced work item. Called from tasklet while holding
 * iucv_table_lock.
 */
/* Layout of the connection-quiesced (iptype 0x04) interrupt data. */
struct iucv_path_quiesced {
	u16 ippathid;		/* pathid the interrupt is for */
	u8 res1;		/* reserved */
	u8 iptype;		/* interrupt type (0x04) */
	u32 res2;		/* reserved */
	u8 res3[8];		/* reserved */
	u8 ipuser[16];		/* 16 bytes of user data from the peer */
	u32 res4;		/* reserved */
	u8 ippollfg;
	u8 res5[3];		/* reserved */
} __attribute__ ((packed));

static void iucv_path_quiesced(struct iucv_irq_data *data)
{
	struct iucv_path_quiesced *ipq = (void *) data;
	struct iucv_path *path = iucv_path_table[ipq->ippathid];

	BUG_ON(!path || !path->handler);
	/* The path_quiesced callback is optional. */
	if (path->handler->path_quiesced)
		path->handler->path_quiesced(path, ipq->ipuser);
}
1318 | |||
/**
 * iucv_path_resumed
 * @data: Pointer to external interrupt buffer
 *
 * Process connection resumed work item. Called from tasklet while holding
 * iucv_table_lock.
 */
/* Layout of the connection-resumed (iptype 0x05) interrupt data. */
struct iucv_path_resumed {
	u16 ippathid;		/* pathid the interrupt is for */
	u8 res1;		/* reserved */
	u8 iptype;		/* interrupt type (0x05) */
	u32 res2;		/* reserved */
	u8 res3[8];		/* reserved */
	u8 ipuser[16];		/* 16 bytes of user data from the peer */
	u32 res4;		/* reserved */
	u8 ippollfg;
	u8 res5[3];		/* reserved */
} __attribute__ ((packed));

static void iucv_path_resumed(struct iucv_irq_data *data)
{
	struct iucv_path_resumed *ipr = (void *) data;
	struct iucv_path *path = iucv_path_table[ipr->ippathid];

	BUG_ON(!path || !path->handler);
	/* The path_resumed callback is optional. */
	if (path->handler->path_resumed)
		path->handler->path_resumed(path, ipr->ipuser);
}
1347 | |||
/**
 * iucv_message_complete
 * @data: Pointer to external interrupt buffer
 *
 * Process message complete work item. Called from tasklet while holding
 * iucv_table_lock.
 */
/* Layout of the message-complete (iptype 0x06/0x07) interrupt data. */
struct iucv_message_complete {
	u16 ippathid;		/* pathid the interrupt is for */
	u8 ipflags1;		/* message flags */
	u8 iptype;		/* interrupt type (0x06 or 0x07) */
	u32 ipmsgid;		/* id of the completed message */
	u32 ipaudit;		/* audit information */
	u8 iprmmsg[8];		/* in-parameter-list (RMDATA) message bytes */
	u32 ipsrccls;		/* source class of the message */
	u32 ipmsgtag;		/* tag the sender attached to the message */
	u32 res;		/* reserved */
	u32 ipbfln2f;		/* length of the answer */
	u8 ippollfg;
	u8 res2[3];		/* reserved */
} __attribute__ ((packed));

static void iucv_message_complete(struct iucv_irq_data *data)
{
	struct iucv_message_complete *imc = (void *) data;
	struct iucv_path *path = iucv_path_table[imc->ippathid];
	struct iucv_message msg;

	BUG_ON(!path || !path->handler);
	if (path->handler->message_complete) {
		/* Repackage the interrupt data into a struct iucv_message
		 * for the handler callback. */
		msg.flags = imc->ipflags1;
		msg.id = imc->ipmsgid;
		msg.audit = imc->ipaudit;
		memcpy(msg.rmmsg, imc->iprmmsg, 8);
		msg.class = imc->ipsrccls;
		msg.tag = imc->ipmsgtag;
		msg.length = imc->ipbfln2f;
		path->handler->message_complete(path, &msg);
	}
}
1388 | |||
/**
 * iucv_message_pending
 * @data: Pointer to external interrupt buffer
 *
 * Process message pending work item. Called from tasklet while holding
 * iucv_table_lock.
 */
/* Layout of the message-pending (iptype 0x08/0x09) interrupt data. */
struct iucv_message_pending {
	u16 ippathid;		/* pathid the interrupt is for */
	u8 ipflags1;		/* message flags (IUCV_IPRMDATA, ...) */
	u8 iptype;		/* interrupt type (0x08 or 0x09) */
	u32 ipmsgid;		/* id of the pending message */
	u32 iptrgcls;		/* target class of the message */
	union {
		u32 iprmmsg1_u32;
		u8 iprmmsg1[4];	/* first half of an RMDATA message */
	} ln1msg1;
	union {
		u32 ipbfln1f;	/* message length for buffer messages */
		u8 iprmmsg2[4];	/* second half of an RMDATA message */
	} ln1msg2;
	u32 res1[3];		/* reserved */
	u32 ipbfln2f;		/* length of the expected reply */
	u8 ippollfg;
	u8 res2[3];		/* reserved */
} __attribute__ ((packed));

static void iucv_message_pending(struct iucv_irq_data *data)
{
	struct iucv_message_pending *imp = (void *) data;
	struct iucv_path *path = iucv_path_table[imp->ippathid];
	struct iucv_message msg;

	BUG_ON(!path || !path->handler);
	if (path->handler->message_pending) {
		msg.flags = imp->ipflags1;
		msg.id = imp->ipmsgid;
		msg.class = imp->iptrgcls;
		if (imp->ipflags1 & IUCV_IPRMDATA) {
			/* The 8 message bytes live in the two adjacent
			 * 4-byte unions; the struct is packed, so one
			 * 8-byte copy picks up both halves. */
			memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8);
			msg.length = 8;
		} else
			msg.length = imp->ln1msg2.ipbfln1f;
		msg.reply_size = imp->ipbfln2f;
		path->handler->message_pending(path, &msg);
	}
}
1436 | |||
/**
 * iucv_tasklet_handler:
 *
 * This tasklet loops over the queue of irq buffers created by
 * iucv_external_interrupt, calls the appropriate action handler
 * and then frees the buffer.
 */
static void iucv_tasklet_handler(unsigned long ignored)
{
	typedef void iucv_irq_fn(struct iucv_irq_data *);
	/* Dispatch table indexed by the interrupt's iptype (0x01..0x09);
	 * iucv_external_interrupt rejects all other iptype values. */
	static iucv_irq_fn *irq_fn[] = {
		[0x01] = iucv_path_pending,
		[0x02] = iucv_path_complete,
		[0x03] = iucv_path_severed,
		[0x04] = iucv_path_quiesced,
		[0x05] = iucv_path_resumed,
		[0x06] = iucv_message_complete,
		[0x07] = iucv_message_complete,
		[0x08] = iucv_message_pending,
		[0x09] = iucv_message_pending,
	};
	struct iucv_work *p;

	/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
	spin_lock(&iucv_table_lock);
	/* Record our cpu so iucv_path_sever, when invoked from a handler
	 * callback below, can see that iucv_table_lock is already held
	 * and must not be retaken. */
	iucv_tasklet_cpu = smp_processor_id();

	spin_lock_irq(&iucv_work_lock);
	while (!list_empty(&iucv_work_queue)) {
		p = list_entry(iucv_work_queue.next, struct iucv_work, list);
		list_del_init(&p->list);
		/* Drop the irq-disabling work lock while the handler runs
		 * so new interrupts can keep queueing work items. */
		spin_unlock_irq(&iucv_work_lock);
		irq_fn[p->data.iptype](&p->data);
		kfree(p);
		spin_lock_irq(&iucv_work_lock);
	}
	spin_unlock_irq(&iucv_work_lock);

	iucv_tasklet_cpu = -1;
	spin_unlock(&iucv_table_lock);
}
1478 | |||
1479 | /** | ||
1480 | * iucv_external_interrupt | ||
1481 | * @code: irq code | ||
1482 | * | ||
1483 | * Handles external interrupts coming in from CP. | ||
1484 | * Places the interrupt buffer on a queue and schedules iucv_tasklet_handler(). | ||
1485 | */ | ||
1486 | static void iucv_external_interrupt(u16 code) | ||
1487 | { | ||
1488 | struct iucv_irq_data *p; | ||
1489 | struct iucv_work *work; | ||
1490 | |||
1491 | p = percpu_ptr(iucv_irq_data, smp_processor_id()); | ||
1492 | if (p->ippathid >= iucv_max_pathid) { | ||
1493 | printk(KERN_WARNING "iucv_do_int: Got interrupt with " | ||
1494 | "pathid %d > max_connections (%ld)\n", | ||
1495 | p->ippathid, iucv_max_pathid - 1); | ||
1496 | iucv_sever_pathid(p->ippathid, iucv_error_no_listener); | ||
1497 | return; | ||
1498 | } | ||
1499 | if (p->iptype < 0x01 || p->iptype > 0x09) { | ||
1500 | printk(KERN_ERR "iucv_do_int: unknown iucv interrupt\n"); | ||
1501 | return; | ||
1502 | } | ||
1503 | work = kmalloc(sizeof(struct iucv_work), GFP_ATOMIC); | ||
1504 | if (!work) { | ||
1505 | printk(KERN_WARNING "iucv_external_interrupt: out of memory\n"); | ||
1506 | return; | ||
1507 | } | ||
1508 | memcpy(&work->data, p, sizeof(work->data)); | ||
1509 | spin_lock(&iucv_work_lock); | ||
1510 | list_add_tail(&work->list, &iucv_work_queue); | ||
1511 | spin_unlock(&iucv_work_lock); | ||
1512 | tasklet_schedule(&iucv_tasklet); | ||
1513 | } | ||
1514 | |||
/**
 * iucv_init
 *
 * Allocates and initializes various data structures.
 *
 * Returns 0 on success, -EPROTONOSUPPORT when not running under z/VM,
 * -ENOMEM or the failing sub-call's error code otherwise.
 */
static int iucv_init(void)
{
	int rc;

	/* IUCV is a z/VM service; refuse to load elsewhere. */
	if (!MACHINE_IS_VM) {
		rc = -EPROTONOSUPPORT;
		goto out;
	}
	rc = iucv_query_maxconn();
	if (rc)
		goto out;
	rc = register_external_interrupt (0x4000, iucv_external_interrupt);
	if (rc)
		goto out;
	rc = bus_register(&iucv_bus);
	if (rc)
		goto out_int;
	iucv_root = s390_root_dev_register("iucv");
	if (IS_ERR(iucv_root)) {
		rc = PTR_ERR(iucv_root);
		goto out_bus;
	}
	/* Note: GFP_DMA is used to get memory below 2G */
	iucv_irq_data = percpu_alloc(sizeof(struct iucv_irq_data),
				     GFP_KERNEL|GFP_DMA);
	if (!iucv_irq_data) {
		rc = -ENOMEM;
		goto out_root;
	}
	/* Allocate parameter blocks. */
	iucv_param = percpu_alloc(sizeof(union iucv_param),
				  GFP_KERNEL|GFP_DMA);
	if (!iucv_param) {
		rc = -ENOMEM;
		goto out_extint;
	}
	register_hotcpu_notifier(&iucv_cpu_notifier);
	/* Convert the error strings once; they are passed to CP as the
	 * 16-byte user data of iucv_sever_pathid and must be EBCDIC. */
	ASCEBC(iucv_error_no_listener, 16);
	ASCEBC(iucv_error_no_memory, 16);
	ASCEBC(iucv_error_pathid, 16);
	iucv_available = 1;
	return 0;

	/* Error unwinding: each label undoes the steps that succeeded
	 * before the failure, in reverse order of setup. */
out_extint:
	percpu_free(iucv_irq_data);
out_root:
	s390_root_dev_unregister(iucv_root);
out_bus:
	bus_unregister(&iucv_bus);
out_int:
	unregister_external_interrupt(0x4000, iucv_external_interrupt);
out:
	return rc;
}
1574 | |||
/**
 * iucv_exit
 *
 * Frees everything allocated from iucv_init.
 */
static void iucv_exit(void)
{
	struct iucv_work *p, *n;

	/* Discard interrupt work that was queued but never processed. */
	spin_lock_irq(&iucv_work_lock);
	list_for_each_entry_safe(p, n, &iucv_work_queue, list)
		kfree(p);
	spin_unlock_irq(&iucv_work_lock);
	/* Tear down in reverse order of iucv_init. */
	unregister_hotcpu_notifier(&iucv_cpu_notifier);
	percpu_free(iucv_param);
	percpu_free(iucv_irq_data);
	s390_root_dev_unregister(iucv_root);
	bus_unregister(&iucv_bus);
	unregister_external_interrupt(0x4000, iucv_external_interrupt);
}
1595 | |||
subsys_initcall(iucv_init);
module_exit(iucv_exit);

/* Export all public stuff. (Plain comment: kernel-doc style is reserved
 * for function/struct documentation.) */
EXPORT_SYMBOL (iucv_bus);
EXPORT_SYMBOL (iucv_root);
EXPORT_SYMBOL (iucv_register);
EXPORT_SYMBOL (iucv_unregister);
EXPORT_SYMBOL (iucv_path_accept);
EXPORT_SYMBOL (iucv_path_connect);
EXPORT_SYMBOL (iucv_path_quiesce);
EXPORT_SYMBOL (iucv_path_sever);
EXPORT_SYMBOL (iucv_message_purge);
EXPORT_SYMBOL (iucv_message_receive);
EXPORT_SYMBOL (iucv_message_reject);
EXPORT_SYMBOL (iucv_message_reply);
EXPORT_SYMBOL (iucv_message_send);
EXPORT_SYMBOL (iucv_message_send2way);

MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
MODULE_LICENSE("GPL");