author     Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-02-09 11:01:37 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-02-09 11:01:37 -0500
commit     f049274b012fd3b50113f194bfbbcbc3143d0da3 (patch)
tree       15ef947c1959da3196d8dbc524b435972f6d37f7
parent     b37df85960a34dd96d0a4695c650f7972ef56c30 (diff)
parent     1539b98b561754252dd520b98fa03a688a4f81b5 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (79 commits)
  [IPX]: Fix NULL pointer dereference on ipx unload
  [ATM]: atmarp.h needs to always include linux/types.h
  [NET]: Fix net/socket.c warnings.
  [NET]: cleanup sock_from_file()
  [NET]: change layout of ehash table
  [S390]: Add AF_IUCV socket support
  [S390]: Adapt special message interface to new IUCV API
  [S390]: Adapt netiucv driver to new IUCV API
  [S390]: Adapt vmlogrdr driver to new IUCV API
  [S390]: Adapt monreader driver to new IUCV API
  [S390]: Rewrite of the IUCV base code, part 2
  [S390]: Rewrite of the IUCV base code, part 1
  [X.25]: Adds /proc/net/x25/forward to view active forwarded calls.
  [X.25]: Adds /proc/sys/net/x25/x25_forward to control forwarding.
  [X.25]: Add call forwarding
  [XFRM]: xfrm_migrate() needs exporting to modules.
  [PFKEYV2]: CONFIG_NET_KEY_MIGRATE option
  [PFKEYV2]: Extension for dynamic update of endpoint address(es)
  [XFRM]: CONFIG_XFRM_MIGRATE option
  [XFRM]: User interface for handling XFRM_MSG_MIGRATE
  ...
-rw-r--r--  Documentation/crypto/api-intro.txt | 4
-rw-r--r--  arch/s390/defconfig | 3
-rw-r--r--  crypto/Kconfig | 31
-rw-r--r--  crypto/Makefile | 3
-rw-r--r--  crypto/algapi.c | 15
-rw-r--r--  crypto/api.c | 80
-rw-r--r--  crypto/blkcipher.c | 9
-rw-r--r--  crypto/camellia.c | 1801
-rw-r--r--  crypto/cbc.c | 9
-rw-r--r--  crypto/cipher.c | 447
-rw-r--r--  crypto/compress.c | 5
-rw-r--r--  crypto/digest.c | 24
-rw-r--r--  crypto/ecb.c | 9
-rw-r--r--  crypto/fcrypt.c | 423
-rw-r--r--  crypto/hash.c | 5
-rw-r--r--  crypto/hmac.c | 9
-rw-r--r--  crypto/internal.h | 27
-rw-r--r--  crypto/lrw.c | 11
-rw-r--r--  crypto/pcbc.c | 349
-rw-r--r--  crypto/tcrypt.c | 73
-rw-r--r--  crypto/tcrypt.h | 538
-rw-r--r--  crypto/xcbc.c | 60
-rw-r--r--  drivers/crypto/geode-aes.c | 2
-rw-r--r--  drivers/net/bonding/bond_alb.c | 4
-rw-r--r--  drivers/net/bonding/bond_main.c | 4
-rw-r--r--  drivers/net/slip.c | 5
-rw-r--r--  drivers/net/tg3.c | 2
-rw-r--r--  drivers/s390/char/monreader.c | 218
-rw-r--r--  drivers/s390/char/vmlogrdr.c | 279
-rw-r--r--  drivers/s390/net/Kconfig | 7
-rw-r--r--  drivers/s390/net/Makefile | 1
-rw-r--r--  drivers/s390/net/iucv.c | 2540
-rw-r--r--  drivers/s390/net/iucv.h | 849
-rw-r--r--  drivers/s390/net/netiucv.c | 1314
-rw-r--r--  drivers/s390/net/smsgiucv.c | 147
-rw-r--r--  fs/ecryptfs/crypto.c | 4
-rw-r--r--  fs/ecryptfs/ecryptfs_kernel.h | 1
-rw-r--r--  include/crypto/algapi.h | 24
-rw-r--r--  include/linux/atmarp.h | 2
-rw-r--r--  include/linux/crypto.h | 148
-rw-r--r--  include/linux/if_packet.h | 10
-rw-r--r--  include/linux/net.h | 2
-rw-r--r--  include/linux/netdevice.h | 2
-rw-r--r--  include/linux/netfilter/Kbuild | 1
-rw-r--r--  include/linux/netfilter/nf_conntrack_sane.h | 21
-rw-r--r--  include/linux/netfilter/nf_conntrack_tcp.h | 4
-rw-r--r--  include/linux/netfilter/xt_TCPMSS.h | 10
-rw-r--r--  include/linux/netfilter_ipv4/ip_nat.h | 1
-rw-r--r--  include/linux/netfilter_ipv4/ip_tables.h | 22
-rw-r--r--  include/linux/netfilter_ipv4/ipt_TCPMSS.h | 7
-rw-r--r--  include/linux/netfilter_ipv6/ip6_tables.h | 35
-rw-r--r--  include/linux/netfilter_ipv6/ip6t_mh.h | 15
-rw-r--r--  include/linux/pfkeyv2.h | 4
-rw-r--r--  include/linux/socket.h | 4
-rw-r--r--  include/linux/sysctl.h | 3
-rw-r--r--  include/linux/tcp.h | 2
-rw-r--r--  include/linux/wanrouter.h | 8
-rw-r--r--  include/linux/xfrm.h | 19
-rw-r--r--  include/net/inet_hashtables.h | 10
-rw-r--r--  include/net/iucv/af_iucv.h | 106
-rw-r--r--  include/net/iucv/iucv.h | 415
-rw-r--r--  include/net/netfilter/nf_conntrack.h | 2
-rw-r--r--  include/net/netfilter/nf_nat.h | 1
-rw-r--r--  include/net/route.h | 5
-rw-r--r--  include/net/tcp.h | 5
-rw-r--r--  include/net/x25.h | 18
-rw-r--r--  include/net/xfrm.h | 47
-rw-r--r--  net/Kconfig | 1
-rw-r--r--  net/Makefile | 1
-rw-r--r--  net/atm/common.c | 3
-rw-r--r--  net/bridge/br_netfilter.c | 29
-rw-r--r--  net/bridge/br_netlink.c | 14
-rw-r--r--  net/bridge/netfilter/ebt_ip.c | 1
-rw-r--r--  net/bridge/netfilter/ebt_log.c | 1
-rw-r--r--  net/core/dev.c | 13
-rw-r--r--  net/core/dst.c | 9
-rw-r--r--  net/core/fib_rules.c | 14
-rw-r--r--  net/core/neighbour.c | 29
-rw-r--r--  net/core/rtnetlink.c | 23
-rw-r--r--  net/dccp/ccids/ccid3.c | 5
-rw-r--r--  net/dccp/ipv4.c | 2
-rw-r--r--  net/dccp/ipv6.c | 2
-rw-r--r--  net/dccp/proto.c | 4
-rw-r--r--  net/decnet/dn_dev.c | 14
-rw-r--r--  net/decnet/dn_table.c | 11
-rw-r--r--  net/ipv4/af_inet.c | 2
-rw-r--r--  net/ipv4/datagram.c | 2
-rw-r--r--  net/ipv4/devinet.c | 14
-rw-r--r--  net/ipv4/fib_semantics.c | 14
-rw-r--r--  net/ipv4/igmp.c | 2
-rw-r--r--  net/ipv4/inet_diag.c | 19
-rw-r--r--  net/ipv4/inet_hashtables.c | 2
-rw-r--r--  net/ipv4/inet_timewait_sock.c | 4
-rw-r--r--  net/ipv4/ip_gre.c | 3
-rw-r--r--  net/ipv4/ipip.c | 3
-rw-r--r--  net/ipv4/netfilter/Kconfig | 26
-rw-r--r--  net/ipv4/netfilter/Makefile | 1
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_proto_tcp.c | 40
-rw-r--r--  net/ipv4/netfilter/ip_nat_core.c | 12
-rw-r--r--  net/ipv4/netfilter/ip_nat_helper.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_nat_proto_tcp.c | 5
-rw-r--r--  net/ipv4/netfilter/ip_nat_proto_udp.c | 5
-rw-r--r--  net/ipv4/netfilter/ip_nat_rule.c | 32
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 40
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 15
-rw-r--r--  net/ipv4/netfilter/ipt_ECN.c | 13
-rw-r--r--  net/ipv4/netfilter/ipt_LOG.c | 16
-rw-r--r--  net/ipv4/netfilter/ipt_MASQUERADE.c | 9
-rw-r--r--  net/ipv4/netfilter/ipt_NETMAP.c | 8
-rw-r--r--  net/ipv4/netfilter/ipt_REDIRECT.c | 8
-rw-r--r--  net/ipv4/netfilter/ipt_REJECT.c | 12
-rw-r--r--  net/ipv4/netfilter/ipt_SAME.c | 8
-rw-r--r--  net/ipv4/netfilter/ipt_TCPMSS.c | 207
-rw-r--r--  net/ipv4/netfilter/ipt_TOS.c | 11
-rw-r--r--  net/ipv4/netfilter/ipt_TTL.c | 11
-rw-r--r--  net/ipv4/netfilter/ipt_ULOG.c | 18
-rw-r--r--  net/ipv4/netfilter/ipt_addrtype.c | 9
-rw-r--r--  net/ipv4/netfilter/ipt_ah.c | 10
-rw-r--r--  net/ipv4/netfilter/ipt_ecn.c | 10
-rw-r--r--  net/ipv4/netfilter/ipt_iprange.c | 10
-rw-r--r--  net/ipv4/netfilter/ipt_owner.c | 9
-rw-r--r--  net/ipv4/netfilter/ipt_recent.c | 12
-rw-r--r--  net/ipv4/netfilter/ipt_tos.c | 10
-rw-r--r--  net/ipv4/netfilter/ipt_ttl.c | 11
-rw-r--r--  net/ipv4/netfilter/iptable_filter.c | 2
-rw-r--r--  net/ipv4/netfilter/iptable_mangle.c | 2
-rw-r--r--  net/ipv4/netfilter/iptable_raw.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c | 12
-rw-r--r--  net/ipv4/netfilter/nf_nat_helper.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_tcp.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_udp.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_rule.c | 8
-rw-r--r--  net/ipv4/netfilter/nf_nat_standalone.c | 6
-rw-r--r--  net/ipv4/raw.c | 2
-rw-r--r--  net/ipv4/route.c | 5
-rw-r--r--  net/ipv4/tcp.c | 7
-rw-r--r--  net/ipv4/tcp_input.c | 105
-rw-r--r--  net/ipv4/tcp_ipv4.c | 18
-rw-r--r--  net/ipv4/tcp_output.c | 3
-rw-r--r--  net/ipv4/udp.c | 2
-rw-r--r--  net/ipv4/xfrm4_mode_tunnel.c | 57
-rw-r--r--  net/ipv4/xfrm4_policy.c | 51
-rw-r--r--  net/ipv4/xfrm4_state.c | 1
-rw-r--r--  net/ipv6/addrconf.c | 70
-rw-r--r--  net/ipv6/datagram.c | 2
-rw-r--r--  net/ipv6/inet6_hashtables.c | 4
-rw-r--r--  net/ipv6/ip6_tunnel.c | 3
-rw-r--r--  net/ipv6/mcast.c | 2
-rw-r--r--  net/ipv6/mip6.c | 26
-rw-r--r--  net/ipv6/netfilter/Kconfig | 8
-rw-r--r--  net/ipv6/netfilter/Makefile | 1
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 12
-rw-r--r--  net/ipv6/netfilter/ip6t_HL.c | 17
-rw-r--r--  net/ipv6/netfilter/ip6t_LOG.c | 15
-rw-r--r--  net/ipv6/netfilter/ip6t_REJECT.c | 10
-rw-r--r--  net/ipv6/netfilter/ip6t_ah.c | 8
-rw-r--r--  net/ipv6/netfilter/ip6t_eui64.c | 8
-rw-r--r--  net/ipv6/netfilter/ip6t_frag.c | 8
-rw-r--r--  net/ipv6/netfilter/ip6t_hbh.c | 1
-rw-r--r--  net/ipv6/netfilter/ip6t_hl.c | 11
-rw-r--r--  net/ipv6/netfilter/ip6t_ipv6header.c | 8
-rw-r--r--  net/ipv6/netfilter/ip6t_mh.c | 108
-rw-r--r--  net/ipv6/netfilter/ip6t_owner.c | 8
-rw-r--r--  net/ipv6/netfilter/ip6t_rt.c | 8
-rw-r--r--  net/ipv6/netfilter/ip6table_filter.c | 21
-rw-r--r--  net/ipv6/netfilter/ip6table_mangle.c | 21
-rw-r--r--  net/ipv6/netfilter/ip6table_raw.c | 19
-rw-r--r--  net/ipv6/raw.c | 15
-rw-r--r--  net/ipv6/route.c | 33
-rw-r--r--  net/ipv6/sit.c | 3
-rw-r--r--  net/ipv6/tcp_ipv6.c | 2
-rw-r--r--  net/ipv6/udp.c | 2
-rw-r--r--  net/ipv6/xfrm6_mode_tunnel.c | 42
-rw-r--r--  net/ipv6/xfrm6_policy.c | 46
-rw-r--r--  net/ipv6/xfrm6_state.c | 1
-rw-r--r--  net/ipx/af_ipx.c | 24
-rw-r--r--  net/irda/irias_object.c | 40
-rw-r--r--  net/irda/irlan/irlan_common.c | 23
-rw-r--r--  net/iucv/Kconfig | 15
-rw-r--r--  net/iucv/Makefile | 6
-rw-r--r--  net/iucv/af_iucv.c | 1077
-rw-r--r--  net/iucv/iucv.c | 1619
-rw-r--r--  net/key/af_key.c | 422
-rw-r--r--  net/netfilter/Kconfig | 39
-rw-r--r--  net/netfilter/Makefile | 2
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 40
-rw-r--r--  net/netfilter/nf_conntrack_sane.c | 242
-rw-r--r--  net/netfilter/xt_CLASSIFY.c | 4
-rw-r--r--  net/netfilter/xt_CONNMARK.c | 5
-rw-r--r--  net/netfilter/xt_CONNSECMARK.c | 6
-rw-r--r--  net/netfilter/xt_MARK.c | 8
-rw-r--r--  net/netfilter/xt_SECMARK.c | 4
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 296
-rw-r--r--  net/netfilter/xt_hashlimit.c | 1
-rw-r--r--  net/packet/af_packet.c | 79
-rw-r--r--  net/sched/act_ipt.c | 2
-rw-r--r--  net/sched/sch_generic.c | 2
-rw-r--r--  net/sched/sch_prio.c | 15
-rw-r--r--  net/sched/sch_sfq.c | 2
-rw-r--r--  net/socket.c | 29
-rw-r--r--  net/wanrouter/wanmain.c | 17
-rw-r--r--  net/x25/Makefile | 2
-rw-r--r--  net/x25/af_x25.c | 32
-rw-r--r--  net/x25/sysctl_net_x25.c | 8
-rw-r--r--  net/x25/x25_dev.c | 13
-rw-r--r--  net/x25/x25_forward.c | 163
-rw-r--r--  net/x25/x25_proc.c | 98
-rw-r--r--  net/x25/x25_route.c | 3
-rw-r--r--  net/xfrm/Kconfig | 26
-rw-r--r--  net/xfrm/xfrm_algo.c | 17
-rw-r--r--  net/xfrm/xfrm_policy.c | 231
-rw-r--r--  net/xfrm/xfrm_state.c | 184
-rw-r--r--  net/xfrm/xfrm_user.c | 173
213 files changed, 10783 insertions, 6108 deletions
diff --git a/Documentation/crypto/api-intro.txt b/Documentation/crypto/api-intro.txt
index 5a03a2801d67..e41a79aa71ce 100644
--- a/Documentation/crypto/api-intro.txt
+++ b/Documentation/crypto/api-intro.txt
@@ -193,6 +193,7 @@ Original developers of the crypto algorithms:
  Kartikey Mahendra Bhatt (CAST6)
  Jon Oberheide (ARC4)
  Jouni Malinen (Michael MIC)
+ NTT(Nippon Telegraph and Telephone Corporation) (Camellia)
 
 SHA1 algorithm contributors:
  Jean-Francois Dive
@@ -246,6 +247,9 @@ Tiger algorithm contributors:
 VIA PadLock contributors:
  Michal Ludvig
 
+Camellia algorithm contributors:
+ NTT(Nippon Telegraph and Telephone Corporation) (Camellia)
+
 Generic scatterwalk code by Adam J. Richter <adam@yggdrasil.com>
 
 Please send any credits updates or corrections to:
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 7c621b8ef683..1406400bf3ea 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -179,6 +179,8 @@ CONFIG_XFRM=y
 # CONFIG_XFRM_USER is not set
 # CONFIG_XFRM_SUB_POLICY is not set
 CONFIG_NET_KEY=y
+CONFIG_IUCV=m
+CONFIG_AFIUCV=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 # CONFIG_IP_ADVANCED_ROUTER is not set
@@ -508,7 +510,6 @@ CONFIG_NET_ETHERNET=y
 #
 CONFIG_LCS=m
 CONFIG_CTC=m
-CONFIG_IUCV=m
 # CONFIG_NETIUCV is not set
 # CONFIG_SMSGIUCV is not set
 # CONFIG_CLAW is not set
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 918b4d845f93..086fcec44720 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -149,6 +149,15 @@ config CRYPTO_CBC
       CBC: Cipher Block Chaining mode
       This block cipher algorithm is required for IPSec.
 
+config CRYPTO_PCBC
+    tristate "PCBC support"
+    select CRYPTO_BLKCIPHER
+    select CRYPTO_MANAGER
+    default m
+    help
+      PCBC: Propagating Cipher Block Chaining mode
+      This block cipher algorithm is required for RxRPC.
+
 config CRYPTO_LRW
     tristate "LRW support (EXPERIMENTAL)"
     depends on EXPERIMENTAL
@@ -168,6 +177,13 @@ config CRYPTO_DES
     help
       DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
 
+config CRYPTO_FCRYPT
+    tristate "FCrypt cipher algorithm"
+    select CRYPTO_ALGAPI
+    select CRYPTO_BLKCIPHER
+    help
+      FCrypt algorithm used by RxRPC.
+
 config CRYPTO_BLOWFISH
     tristate "Blowfish cipher algorithm"
     select CRYPTO_ALGAPI
@@ -409,6 +425,21 @@ config CRYPTO_CRC32C
       See Castagnoli93. This implementation uses lib/libcrc32c.
       Module will be crc32c.
 
+config CRYPTO_CAMELLIA
+    tristate "Camellia cipher algorithms"
+    depends on CRYPTO
+    select CRYPTO_ALGAPI
+    help
+      Camellia cipher algorithms module.
+
+      Camellia is a symmetric key block cipher developed jointly
+      at NTT and Mitsubishi Electric Corporation.
+
+      The Camellia specifies three key sizes: 128, 192 and 256 bits.
+
+      See also:
+      <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html>
+
 config CRYPTO_TEST
     tristate "Testing module"
     depends on m
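
As a quick orientation for the new CRYPTO_CAMELLIA option above, the sketch below shows how a kernel caller could exercise the cipher through the generic single-block interface in include/linux/crypto.h. It is illustrative only and not part of this merge; it assumes the algorithm registers under the usual name "camellia" and uses the 128-bit key size.

#include <linux/crypto.h>
#include <linux/err.h>

/* Encrypt one 16-byte block with a 128-bit key; returns 0 or a -errno. */
static int camellia_demo(const u8 key[16], const u8 in[16], u8 out[16])
{
    struct crypto_cipher *tfm;
    int err;

    /* "camellia" is an assumption here; the cra_name is defined in crypto/camellia.c. */
    tfm = crypto_alloc_cipher("camellia", 0, CRYPTO_ALG_ASYNC);
    if (IS_ERR(tfm))
        return PTR_ERR(tfm);

    err = crypto_cipher_setkey(tfm, key, 16);
    if (!err)
        crypto_cipher_encrypt_one(tfm, out, in);

    crypto_free_cipher(tfm);
    return err;
}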
diff --git a/crypto/Makefile b/crypto/Makefile
index 60e3d24f61f5..12f93f578171 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -27,13 +27,16 @@ obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
 obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
 obj-$(CONFIG_CRYPTO_ECB) += ecb.o
 obj-$(CONFIG_CRYPTO_CBC) += cbc.o
+obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o
 obj-$(CONFIG_CRYPTO_LRW) += lrw.o
 obj-$(CONFIG_CRYPTO_DES) += des.o
+obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
 obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o
 obj-$(CONFIG_CRYPTO_TWOFISH) += twofish.o
 obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
 obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o
 obj-$(CONFIG_CRYPTO_AES) += aes.o
+obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia.o
 obj-$(CONFIG_CRYPTO_CAST5) += cast5.o
 obj-$(CONFIG_CRYPTO_CAST6) += cast6.o
 obj-$(CONFIG_CRYPTO_ARC4) += arc4.o
diff --git a/crypto/algapi.c b/crypto/algapi.c
index c91530021e9c..f7d2185b2c8f 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -377,7 +377,8 @@ void crypto_drop_spawn(struct crypto_spawn *spawn)
 }
 EXPORT_SYMBOL_GPL(crypto_drop_spawn);
 
-struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn)
+struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
+                                    u32 mask)
 {
     struct crypto_alg *alg;
     struct crypto_alg *alg2;
@@ -396,10 +397,18 @@ struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn)
         return ERR_PTR(-EAGAIN);
     }
 
-    tfm = __crypto_alloc_tfm(alg, 0);
+    tfm = ERR_PTR(-EINVAL);
+    if (unlikely((alg->cra_flags ^ type) & mask))
+        goto out_put_alg;
+
+    tfm = __crypto_alloc_tfm(alg, type, mask);
     if (IS_ERR(tfm))
-        crypto_mod_put(alg);
+        goto out_put_alg;
+
+    return tfm;
 
+out_put_alg:
+    crypto_mod_put(alg);
     return tfm;
 }
 EXPORT_SYMBOL_GPL(crypto_spawn_tfm);
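
A note on the type/mask pair threaded through crypto_spawn_tfm() above: the test (alg->cra_flags ^ type) & mask is a masked compare, i.e. the algorithm is accepted only when its flags agree with the requested type on every bit selected by mask. A small stand-alone illustration of that rule follows; the flag values are invented for the demonstration and are not the kernel's real CRYPTO_ALG_* constants.

#include <stdio.h>

/* Same predicate as the kernel check, inverted: 1 means an acceptable match. */
static int flags_match(unsigned int cra_flags, unsigned int type,
                       unsigned int mask)
{
    return ((cra_flags ^ type) & mask) == 0;
}

int main(void)
{
    unsigned int TYPE_MASK = 0xf;       /* invented values for the demo */
    unsigned int TYPE_CIPHER = 0x1;
    unsigned int TYPE_BLKCIPHER = 0x4;

    printf("%d\n", flags_match(TYPE_CIPHER, TYPE_CIPHER, TYPE_MASK));    /* 1 */
    printf("%d\n", flags_match(TYPE_BLKCIPHER, TYPE_CIPHER, TYPE_MASK)); /* 0 */
    printf("%d\n", flags_match(TYPE_BLKCIPHER, TYPE_CIPHER, 0));         /* 1: empty mask matches anything */
    return 0;
}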
diff --git a/crypto/api.c b/crypto/api.c
index 8c446871cd5b..55af8bb0f050 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -212,31 +212,12 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
 }
 EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup);
 
-static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags)
+static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
 {
-    tfm->crt_flags = flags & CRYPTO_TFM_REQ_MASK;
-    flags &= ~CRYPTO_TFM_REQ_MASK;
-
-    switch (crypto_tfm_alg_type(tfm)) {
-    case CRYPTO_ALG_TYPE_CIPHER:
-        return crypto_init_cipher_flags(tfm, flags);
-
-    case CRYPTO_ALG_TYPE_DIGEST:
-        return crypto_init_digest_flags(tfm, flags);
-
-    case CRYPTO_ALG_TYPE_COMPRESS:
-        return crypto_init_compress_flags(tfm, flags);
-    }
-
-    return 0;
-}
+    const struct crypto_type *type_obj = tfm->__crt_alg->cra_type;
 
-static int crypto_init_ops(struct crypto_tfm *tfm)
-{
-    const struct crypto_type *type = tfm->__crt_alg->cra_type;
-
-    if (type)
-        return type->init(tfm);
+    if (type_obj)
+        return type_obj->init(tfm, type, mask);
 
     switch (crypto_tfm_alg_type(tfm)) {
     case CRYPTO_ALG_TYPE_CIPHER:
@@ -285,29 +266,29 @@ static void crypto_exit_ops(struct crypto_tfm *tfm)
     }
 }
 
-static unsigned int crypto_ctxsize(struct crypto_alg *alg, int flags)
+static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
 {
-    const struct crypto_type *type = alg->cra_type;
+    const struct crypto_type *type_obj = alg->cra_type;
     unsigned int len;
 
     len = alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1);
-    if (type)
-        return len + type->ctxsize(alg);
+    if (type_obj)
+        return len + type_obj->ctxsize(alg, type, mask);
 
     switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
     default:
         BUG();
 
     case CRYPTO_ALG_TYPE_CIPHER:
-        len += crypto_cipher_ctxsize(alg, flags);
+        len += crypto_cipher_ctxsize(alg);
         break;
 
     case CRYPTO_ALG_TYPE_DIGEST:
-        len += crypto_digest_ctxsize(alg, flags);
+        len += crypto_digest_ctxsize(alg);
         break;
 
     case CRYPTO_ALG_TYPE_COMPRESS:
-        len += crypto_compress_ctxsize(alg, flags);
+        len += crypto_compress_ctxsize(alg);
         break;
     }
 
@@ -322,24 +303,21 @@ void crypto_shoot_alg(struct crypto_alg *alg)
 }
 EXPORT_SYMBOL_GPL(crypto_shoot_alg);
 
-struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 flags)
+struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
+                                      u32 mask)
 {
     struct crypto_tfm *tfm = NULL;
     unsigned int tfm_size;
     int err = -ENOMEM;
 
-    tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, flags);
+    tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask);
     tfm = kzalloc(tfm_size, GFP_KERNEL);
     if (tfm == NULL)
         goto out_err;
 
     tfm->__crt_alg = alg;
 
-    err = crypto_init_flags(tfm, flags);
-    if (err)
-        goto out_free_tfm;
-
-    err = crypto_init_ops(tfm);
+    err = crypto_init_ops(tfm, type, mask);
     if (err)
         goto out_free_tfm;
 
@@ -362,31 +340,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(__crypto_alloc_tfm);
 
-struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
-{
-    struct crypto_tfm *tfm = NULL;
-    int err;
-
-    do {
-        struct crypto_alg *alg;
-
-        alg = crypto_alg_mod_lookup(name, 0, CRYPTO_ALG_ASYNC);
-        err = PTR_ERR(alg);
-        if (IS_ERR(alg))
-            continue;
-
-        tfm = __crypto_alloc_tfm(alg, flags);
-        err = 0;
-        if (IS_ERR(tfm)) {
-            crypto_mod_put(alg);
-            err = PTR_ERR(tfm);
-            tfm = NULL;
-        }
-    } while (err == -EAGAIN && !signal_pending(current));
-
-    return tfm;
-}
-
 /*
  * crypto_alloc_base - Locate algorithm and allocate transform
  * @alg_name: Name of algorithm
@@ -420,7 +373,7 @@ struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
         goto err;
     }
 
-    tfm = __crypto_alloc_tfm(alg, 0);
+    tfm = __crypto_alloc_tfm(alg, type, mask);
     if (!IS_ERR(tfm))
         return tfm;
 
@@ -466,7 +419,6 @@ void crypto_free_tfm(struct crypto_tfm *tfm)
     kfree(tfm);
 }
 
-EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
 EXPORT_SYMBOL_GPL(crypto_free_tfm);
 
 int crypto_has_alg(const char *name, u32 type, u32 mask)
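
The net effect of the crypto/api.c changes above for callers: crypto_alloc_tfm(name, flags) is gone, and allocation goes through crypto_alloc_base(name, type, mask) or the typed wrappers built on it, with the type/mask pair passed down to __crypto_alloc_tfm(). A rough caller-side sketch, not taken from this merge; the CRYPTO_ALG_* constants come from include/linux/crypto.h.

#include <linux/crypto.h>
#include <linux/err.h>

/* Old style, removed by this patch:
 *     tfm = crypto_alloc_tfm("aes", 0);
 * New style: request a plain cipher explicitly via the type/mask pair.
 * Callers still check the result with IS_ERR() and release it with
 * crypto_free_tfm().
 */
static struct crypto_tfm *example_alloc_cipher(const char *name)
{
    return crypto_alloc_base(name, CRYPTO_ALG_TYPE_CIPHER,
                             CRYPTO_ALG_TYPE_MASK);
}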
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 6e93004f2181..b5befe8c3a96 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -16,6 +16,7 @@
 
 #include <linux/crypto.h>
 #include <linux/errno.h>
+#include <linux/hardirq.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/scatterlist.h>
@@ -313,6 +314,9 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,
     struct crypto_blkcipher *tfm = desc->tfm;
     unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
 
+    if (WARN_ON_ONCE(in_irq()))
+        return -EDEADLK;
+
     walk->nbytes = walk->total;
     if (unlikely(!walk->total))
         return 0;
@@ -345,7 +349,8 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key,
     return cipher->setkey(tfm, key, keylen);
 }
 
-static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg)
+static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
+                                             u32 mask)
 {
     struct blkcipher_alg *cipher = &alg->cra_blkcipher;
     unsigned int len = alg->cra_ctxsize;
@@ -358,7 +363,7 @@ static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg)
     return len;
 }
 
-static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm)
+static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
 {
     struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
     struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
diff --git a/crypto/camellia.c b/crypto/camellia.c
new file mode 100644
index 000000000000..6877ecfd90bb
--- /dev/null
+++ b/crypto/camellia.c
@@ -0,0 +1,1801 @@
1/*
2 * Copyright (C) 2006
3 * NTT (Nippon Telegraph and Telephone Corporation).
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20/*
21 * Algorithm Specification
22 * http://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html
23 */
24
25/*
26 *
27 * NOTE --- NOTE --- NOTE --- NOTE
28 * This implementation assumes that all memory addresses passed
29 * as parameters are four-byte aligned.
30 *
31 */
32
33#include <linux/crypto.h>
34#include <linux/errno.h>
35#include <linux/init.h>
36#include <linux/kernel.h>
37#include <linux/module.h>
38
39
40#define CAMELLIA_MIN_KEY_SIZE 16
41#define CAMELLIA_MAX_KEY_SIZE 32
42#define CAMELLIA_BLOCK_SIZE 16
43#define CAMELLIA_TABLE_BYTE_LEN 272
44#define CAMELLIA_TABLE_WORD_LEN (CAMELLIA_TABLE_BYTE_LEN / 4)
45
46typedef u32 KEY_TABLE_TYPE[CAMELLIA_TABLE_WORD_LEN];
47
48
49/* key constants */
50
51#define CAMELLIA_SIGMA1L (0xA09E667FL)
52#define CAMELLIA_SIGMA1R (0x3BCC908BL)
53#define CAMELLIA_SIGMA2L (0xB67AE858L)
54#define CAMELLIA_SIGMA2R (0x4CAA73B2L)
55#define CAMELLIA_SIGMA3L (0xC6EF372FL)
56#define CAMELLIA_SIGMA3R (0xE94F82BEL)
57#define CAMELLIA_SIGMA4L (0x54FF53A5L)
58#define CAMELLIA_SIGMA4R (0xF1D36F1CL)
59#define CAMELLIA_SIGMA5L (0x10E527FAL)
60#define CAMELLIA_SIGMA5R (0xDE682D1DL)
61#define CAMELLIA_SIGMA6L (0xB05688C2L)
62#define CAMELLIA_SIGMA6R (0xB3E6C1FDL)
63
64struct camellia_ctx {
65 int key_length;
66 KEY_TABLE_TYPE key_table;
67};
68
69
70/*
71 * macros
72 */
73
74
75# define GETU32(pt) (((u32)(pt)[0] << 24) \
76 ^ ((u32)(pt)[1] << 16) \
77 ^ ((u32)(pt)[2] << 8) \
78 ^ ((u32)(pt)[3]))
79
80#define COPY4WORD(dst, src) \
81 do { \
82 (dst)[0]=(src)[0]; \
83 (dst)[1]=(src)[1]; \
84 (dst)[2]=(src)[2]; \
85 (dst)[3]=(src)[3]; \
86 }while(0)
87
88#define SWAP4WORD(word) \
89 do { \
90 CAMELLIA_SWAP4((word)[0]); \
91 CAMELLIA_SWAP4((word)[1]); \
92 CAMELLIA_SWAP4((word)[2]); \
93 CAMELLIA_SWAP4((word)[3]); \
94 }while(0)
95
96#define XOR4WORD(a, b)/* a = a ^ b */ \
97 do { \
98 (a)[0]^=(b)[0]; \
99 (a)[1]^=(b)[1]; \
100 (a)[2]^=(b)[2]; \
101 (a)[3]^=(b)[3]; \
102 }while(0)
103
104#define XOR4WORD2(a, b, c)/* a = b ^ c */ \
105 do { \
106 (a)[0]=(b)[0]^(c)[0]; \
107 (a)[1]=(b)[1]^(c)[1]; \
108 (a)[2]=(b)[2]^(c)[2]; \
109 (a)[3]=(b)[3]^(c)[3]; \
110 }while(0)
111
112#define CAMELLIA_SUBKEY_L(INDEX) (subkey[(INDEX)*2])
113#define CAMELLIA_SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1])
114
115/* rotation right shift 1byte */
116#define CAMELLIA_RR8(x) (((x) >> 8) + ((x) << 24))
117/* rotation left shift 1bit */
118#define CAMELLIA_RL1(x) (((x) << 1) + ((x) >> 31))
119/* rotation left shift 1byte */
120#define CAMELLIA_RL8(x) (((x) << 8) + ((x) >> 24))
121
122#define CAMELLIA_ROLDQ(ll, lr, rl, rr, w0, w1, bits) \
123 do { \
124 w0 = ll; \
125 ll = (ll << bits) + (lr >> (32 - bits)); \
126 lr = (lr << bits) + (rl >> (32 - bits)); \
127 rl = (rl << bits) + (rr >> (32 - bits)); \
128 rr = (rr << bits) + (w0 >> (32 - bits)); \
129 } while(0)
130
131#define CAMELLIA_ROLDQo32(ll, lr, rl, rr, w0, w1, bits) \
132 do { \
133 w0 = ll; \
134 w1 = lr; \
135 ll = (lr << (bits - 32)) + (rl >> (64 - bits)); \
136 lr = (rl << (bits - 32)) + (rr >> (64 - bits)); \
137 rl = (rr << (bits - 32)) + (w0 >> (64 - bits)); \
138 rr = (w0 << (bits - 32)) + (w1 >> (64 - bits)); \
139 } while(0)
140
141#define CAMELLIA_SP1110(INDEX) (camellia_sp1110[(INDEX)])
142#define CAMELLIA_SP0222(INDEX) (camellia_sp0222[(INDEX)])
143#define CAMELLIA_SP3033(INDEX) (camellia_sp3033[(INDEX)])
144#define CAMELLIA_SP4404(INDEX) (camellia_sp4404[(INDEX)])
145
146#define CAMELLIA_F(xl, xr, kl, kr, yl, yr, il, ir, t0, t1) \
147 do { \
148 il = xl ^ kl; \
149 ir = xr ^ kr; \
150 t0 = il >> 16; \
151 t1 = ir >> 16; \
152 yl = CAMELLIA_SP1110(ir & 0xff) \
153 ^ CAMELLIA_SP0222((t1 >> 8) & 0xff) \
154 ^ CAMELLIA_SP3033(t1 & 0xff) \
155 ^ CAMELLIA_SP4404((ir >> 8) & 0xff); \
156 yr = CAMELLIA_SP1110((t0 >> 8) & 0xff) \
157 ^ CAMELLIA_SP0222(t0 & 0xff) \
158 ^ CAMELLIA_SP3033((il >> 8) & 0xff) \
159 ^ CAMELLIA_SP4404(il & 0xff); \
160 yl ^= yr; \
161 yr = CAMELLIA_RR8(yr); \
162 yr ^= yl; \
163 } while(0)
164
165
166/*
167 * for speed up
168 *
169 */
170#define CAMELLIA_FLS(ll, lr, rl, rr, kll, klr, krl, krr, t0, t1, t2, t3) \
171 do { \
172 t0 = kll; \
173 t2 = krr; \
174 t0 &= ll; \
175 t2 |= rr; \
176 rl ^= t2; \
177 lr ^= CAMELLIA_RL1(t0); \
178 t3 = krl; \
179 t1 = klr; \
180 t3 &= rl; \
181 t1 |= lr; \
182 ll ^= t1; \
183 rr ^= CAMELLIA_RL1(t3); \
184 } while(0)
185
186#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir, t0, t1) \
187 do { \
188 ir = CAMELLIA_SP1110(xr & 0xff); \
189 il = CAMELLIA_SP1110((xl>>24) & 0xff); \
190 ir ^= CAMELLIA_SP0222((xr>>24) & 0xff); \
191 il ^= CAMELLIA_SP0222((xl>>16) & 0xff); \
192 ir ^= CAMELLIA_SP3033((xr>>16) & 0xff); \
193 il ^= CAMELLIA_SP3033((xl>>8) & 0xff); \
194 ir ^= CAMELLIA_SP4404((xr>>8) & 0xff); \
195 il ^= CAMELLIA_SP4404(xl & 0xff); \
196 il ^= kl; \
197 ir ^= il ^ kr; \
198 yl ^= ir; \
199 yr ^= CAMELLIA_RR8(il) ^ ir; \
200 } while(0)
201
202/**
203 * Stuff related to the Camellia key schedule
204 */
205#define SUBL(x) subL[(x)]
206#define SUBR(x) subR[(x)]
207
208
209static const u32 camellia_sp1110[256] = {
210 0x70707000,0x82828200,0x2c2c2c00,0xececec00,
211 0xb3b3b300,0x27272700,0xc0c0c000,0xe5e5e500,
212 0xe4e4e400,0x85858500,0x57575700,0x35353500,
213 0xeaeaea00,0x0c0c0c00,0xaeaeae00,0x41414100,
214 0x23232300,0xefefef00,0x6b6b6b00,0x93939300,
215 0x45454500,0x19191900,0xa5a5a500,0x21212100,
216 0xededed00,0x0e0e0e00,0x4f4f4f00,0x4e4e4e00,
217 0x1d1d1d00,0x65656500,0x92929200,0xbdbdbd00,
218 0x86868600,0xb8b8b800,0xafafaf00,0x8f8f8f00,
219 0x7c7c7c00,0xebebeb00,0x1f1f1f00,0xcecece00,
220 0x3e3e3e00,0x30303000,0xdcdcdc00,0x5f5f5f00,
221 0x5e5e5e00,0xc5c5c500,0x0b0b0b00,0x1a1a1a00,
222 0xa6a6a600,0xe1e1e100,0x39393900,0xcacaca00,
223 0xd5d5d500,0x47474700,0x5d5d5d00,0x3d3d3d00,
224 0xd9d9d900,0x01010100,0x5a5a5a00,0xd6d6d600,
225 0x51515100,0x56565600,0x6c6c6c00,0x4d4d4d00,
226 0x8b8b8b00,0x0d0d0d00,0x9a9a9a00,0x66666600,
227 0xfbfbfb00,0xcccccc00,0xb0b0b000,0x2d2d2d00,
228 0x74747400,0x12121200,0x2b2b2b00,0x20202000,
229 0xf0f0f000,0xb1b1b100,0x84848400,0x99999900,
230 0xdfdfdf00,0x4c4c4c00,0xcbcbcb00,0xc2c2c200,
231 0x34343400,0x7e7e7e00,0x76767600,0x05050500,
232 0x6d6d6d00,0xb7b7b700,0xa9a9a900,0x31313100,
233 0xd1d1d100,0x17171700,0x04040400,0xd7d7d700,
234 0x14141400,0x58585800,0x3a3a3a00,0x61616100,
235 0xdedede00,0x1b1b1b00,0x11111100,0x1c1c1c00,
236 0x32323200,0x0f0f0f00,0x9c9c9c00,0x16161600,
237 0x53535300,0x18181800,0xf2f2f200,0x22222200,
238 0xfefefe00,0x44444400,0xcfcfcf00,0xb2b2b200,
239 0xc3c3c300,0xb5b5b500,0x7a7a7a00,0x91919100,
240 0x24242400,0x08080800,0xe8e8e800,0xa8a8a800,
241 0x60606000,0xfcfcfc00,0x69696900,0x50505000,
242 0xaaaaaa00,0xd0d0d000,0xa0a0a000,0x7d7d7d00,
243 0xa1a1a100,0x89898900,0x62626200,0x97979700,
244 0x54545400,0x5b5b5b00,0x1e1e1e00,0x95959500,
245 0xe0e0e000,0xffffff00,0x64646400,0xd2d2d200,
246 0x10101000,0xc4c4c400,0x00000000,0x48484800,
247 0xa3a3a300,0xf7f7f700,0x75757500,0xdbdbdb00,
248 0x8a8a8a00,0x03030300,0xe6e6e600,0xdadada00,
249 0x09090900,0x3f3f3f00,0xdddddd00,0x94949400,
250 0x87878700,0x5c5c5c00,0x83838300,0x02020200,
251 0xcdcdcd00,0x4a4a4a00,0x90909000,0x33333300,
252 0x73737300,0x67676700,0xf6f6f600,0xf3f3f300,
253 0x9d9d9d00,0x7f7f7f00,0xbfbfbf00,0xe2e2e200,
254 0x52525200,0x9b9b9b00,0xd8d8d800,0x26262600,
255 0xc8c8c800,0x37373700,0xc6c6c600,0x3b3b3b00,
256 0x81818100,0x96969600,0x6f6f6f00,0x4b4b4b00,
257 0x13131300,0xbebebe00,0x63636300,0x2e2e2e00,
258 0xe9e9e900,0x79797900,0xa7a7a700,0x8c8c8c00,
259 0x9f9f9f00,0x6e6e6e00,0xbcbcbc00,0x8e8e8e00,
260 0x29292900,0xf5f5f500,0xf9f9f900,0xb6b6b600,
261 0x2f2f2f00,0xfdfdfd00,0xb4b4b400,0x59595900,
262 0x78787800,0x98989800,0x06060600,0x6a6a6a00,
263 0xe7e7e700,0x46464600,0x71717100,0xbababa00,
264 0xd4d4d400,0x25252500,0xababab00,0x42424200,
265 0x88888800,0xa2a2a200,0x8d8d8d00,0xfafafa00,
266 0x72727200,0x07070700,0xb9b9b900,0x55555500,
267 0xf8f8f800,0xeeeeee00,0xacacac00,0x0a0a0a00,
268 0x36363600,0x49494900,0x2a2a2a00,0x68686800,
269 0x3c3c3c00,0x38383800,0xf1f1f100,0xa4a4a400,
270 0x40404000,0x28282800,0xd3d3d300,0x7b7b7b00,
271 0xbbbbbb00,0xc9c9c900,0x43434300,0xc1c1c100,
272 0x15151500,0xe3e3e300,0xadadad00,0xf4f4f400,
273 0x77777700,0xc7c7c700,0x80808000,0x9e9e9e00,
274};
275
276static const u32 camellia_sp0222[256] = {
277 0x00e0e0e0,0x00050505,0x00585858,0x00d9d9d9,
278 0x00676767,0x004e4e4e,0x00818181,0x00cbcbcb,
279 0x00c9c9c9,0x000b0b0b,0x00aeaeae,0x006a6a6a,
280 0x00d5d5d5,0x00181818,0x005d5d5d,0x00828282,
281 0x00464646,0x00dfdfdf,0x00d6d6d6,0x00272727,
282 0x008a8a8a,0x00323232,0x004b4b4b,0x00424242,
283 0x00dbdbdb,0x001c1c1c,0x009e9e9e,0x009c9c9c,
284 0x003a3a3a,0x00cacaca,0x00252525,0x007b7b7b,
285 0x000d0d0d,0x00717171,0x005f5f5f,0x001f1f1f,
286 0x00f8f8f8,0x00d7d7d7,0x003e3e3e,0x009d9d9d,
287 0x007c7c7c,0x00606060,0x00b9b9b9,0x00bebebe,
288 0x00bcbcbc,0x008b8b8b,0x00161616,0x00343434,
289 0x004d4d4d,0x00c3c3c3,0x00727272,0x00959595,
290 0x00ababab,0x008e8e8e,0x00bababa,0x007a7a7a,
291 0x00b3b3b3,0x00020202,0x00b4b4b4,0x00adadad,
292 0x00a2a2a2,0x00acacac,0x00d8d8d8,0x009a9a9a,
293 0x00171717,0x001a1a1a,0x00353535,0x00cccccc,
294 0x00f7f7f7,0x00999999,0x00616161,0x005a5a5a,
295 0x00e8e8e8,0x00242424,0x00565656,0x00404040,
296 0x00e1e1e1,0x00636363,0x00090909,0x00333333,
297 0x00bfbfbf,0x00989898,0x00979797,0x00858585,
298 0x00686868,0x00fcfcfc,0x00ececec,0x000a0a0a,
299 0x00dadada,0x006f6f6f,0x00535353,0x00626262,
300 0x00a3a3a3,0x002e2e2e,0x00080808,0x00afafaf,
301 0x00282828,0x00b0b0b0,0x00747474,0x00c2c2c2,
302 0x00bdbdbd,0x00363636,0x00222222,0x00383838,
303 0x00646464,0x001e1e1e,0x00393939,0x002c2c2c,
304 0x00a6a6a6,0x00303030,0x00e5e5e5,0x00444444,
305 0x00fdfdfd,0x00888888,0x009f9f9f,0x00656565,
306 0x00878787,0x006b6b6b,0x00f4f4f4,0x00232323,
307 0x00484848,0x00101010,0x00d1d1d1,0x00515151,
308 0x00c0c0c0,0x00f9f9f9,0x00d2d2d2,0x00a0a0a0,
309 0x00555555,0x00a1a1a1,0x00414141,0x00fafafa,
310 0x00434343,0x00131313,0x00c4c4c4,0x002f2f2f,
311 0x00a8a8a8,0x00b6b6b6,0x003c3c3c,0x002b2b2b,
312 0x00c1c1c1,0x00ffffff,0x00c8c8c8,0x00a5a5a5,
313 0x00202020,0x00898989,0x00000000,0x00909090,
314 0x00474747,0x00efefef,0x00eaeaea,0x00b7b7b7,
315 0x00151515,0x00060606,0x00cdcdcd,0x00b5b5b5,
316 0x00121212,0x007e7e7e,0x00bbbbbb,0x00292929,
317 0x000f0f0f,0x00b8b8b8,0x00070707,0x00040404,
318 0x009b9b9b,0x00949494,0x00212121,0x00666666,
319 0x00e6e6e6,0x00cecece,0x00ededed,0x00e7e7e7,
320 0x003b3b3b,0x00fefefe,0x007f7f7f,0x00c5c5c5,
321 0x00a4a4a4,0x00373737,0x00b1b1b1,0x004c4c4c,
322 0x00919191,0x006e6e6e,0x008d8d8d,0x00767676,
323 0x00030303,0x002d2d2d,0x00dedede,0x00969696,
324 0x00262626,0x007d7d7d,0x00c6c6c6,0x005c5c5c,
325 0x00d3d3d3,0x00f2f2f2,0x004f4f4f,0x00191919,
326 0x003f3f3f,0x00dcdcdc,0x00797979,0x001d1d1d,
327 0x00525252,0x00ebebeb,0x00f3f3f3,0x006d6d6d,
328 0x005e5e5e,0x00fbfbfb,0x00696969,0x00b2b2b2,
329 0x00f0f0f0,0x00313131,0x000c0c0c,0x00d4d4d4,
330 0x00cfcfcf,0x008c8c8c,0x00e2e2e2,0x00757575,
331 0x00a9a9a9,0x004a4a4a,0x00575757,0x00848484,
332 0x00111111,0x00454545,0x001b1b1b,0x00f5f5f5,
333 0x00e4e4e4,0x000e0e0e,0x00737373,0x00aaaaaa,
334 0x00f1f1f1,0x00dddddd,0x00595959,0x00141414,
335 0x006c6c6c,0x00929292,0x00545454,0x00d0d0d0,
336 0x00787878,0x00707070,0x00e3e3e3,0x00494949,
337 0x00808080,0x00505050,0x00a7a7a7,0x00f6f6f6,
338 0x00777777,0x00939393,0x00868686,0x00838383,
339 0x002a2a2a,0x00c7c7c7,0x005b5b5b,0x00e9e9e9,
340 0x00eeeeee,0x008f8f8f,0x00010101,0x003d3d3d,
341};
342
343static const u32 camellia_sp3033[256] = {
344 0x38003838,0x41004141,0x16001616,0x76007676,
345 0xd900d9d9,0x93009393,0x60006060,0xf200f2f2,
346 0x72007272,0xc200c2c2,0xab00abab,0x9a009a9a,
347 0x75007575,0x06000606,0x57005757,0xa000a0a0,
348 0x91009191,0xf700f7f7,0xb500b5b5,0xc900c9c9,
349 0xa200a2a2,0x8c008c8c,0xd200d2d2,0x90009090,
350 0xf600f6f6,0x07000707,0xa700a7a7,0x27002727,
351 0x8e008e8e,0xb200b2b2,0x49004949,0xde00dede,
352 0x43004343,0x5c005c5c,0xd700d7d7,0xc700c7c7,
353 0x3e003e3e,0xf500f5f5,0x8f008f8f,0x67006767,
354 0x1f001f1f,0x18001818,0x6e006e6e,0xaf00afaf,
355 0x2f002f2f,0xe200e2e2,0x85008585,0x0d000d0d,
356 0x53005353,0xf000f0f0,0x9c009c9c,0x65006565,
357 0xea00eaea,0xa300a3a3,0xae00aeae,0x9e009e9e,
358 0xec00ecec,0x80008080,0x2d002d2d,0x6b006b6b,
359 0xa800a8a8,0x2b002b2b,0x36003636,0xa600a6a6,
360 0xc500c5c5,0x86008686,0x4d004d4d,0x33003333,
361 0xfd00fdfd,0x66006666,0x58005858,0x96009696,
362 0x3a003a3a,0x09000909,0x95009595,0x10001010,
363 0x78007878,0xd800d8d8,0x42004242,0xcc00cccc,
364 0xef00efef,0x26002626,0xe500e5e5,0x61006161,
365 0x1a001a1a,0x3f003f3f,0x3b003b3b,0x82008282,
366 0xb600b6b6,0xdb00dbdb,0xd400d4d4,0x98009898,
367 0xe800e8e8,0x8b008b8b,0x02000202,0xeb00ebeb,
368 0x0a000a0a,0x2c002c2c,0x1d001d1d,0xb000b0b0,
369 0x6f006f6f,0x8d008d8d,0x88008888,0x0e000e0e,
370 0x19001919,0x87008787,0x4e004e4e,0x0b000b0b,
371 0xa900a9a9,0x0c000c0c,0x79007979,0x11001111,
372 0x7f007f7f,0x22002222,0xe700e7e7,0x59005959,
373 0xe100e1e1,0xda00dada,0x3d003d3d,0xc800c8c8,
374 0x12001212,0x04000404,0x74007474,0x54005454,
375 0x30003030,0x7e007e7e,0xb400b4b4,0x28002828,
376 0x55005555,0x68006868,0x50005050,0xbe00bebe,
377 0xd000d0d0,0xc400c4c4,0x31003131,0xcb00cbcb,
378 0x2a002a2a,0xad00adad,0x0f000f0f,0xca00caca,
379 0x70007070,0xff00ffff,0x32003232,0x69006969,
380 0x08000808,0x62006262,0x00000000,0x24002424,
381 0xd100d1d1,0xfb00fbfb,0xba00baba,0xed00eded,
382 0x45004545,0x81008181,0x73007373,0x6d006d6d,
383 0x84008484,0x9f009f9f,0xee00eeee,0x4a004a4a,
384 0xc300c3c3,0x2e002e2e,0xc100c1c1,0x01000101,
385 0xe600e6e6,0x25002525,0x48004848,0x99009999,
386 0xb900b9b9,0xb300b3b3,0x7b007b7b,0xf900f9f9,
387 0xce00cece,0xbf00bfbf,0xdf00dfdf,0x71007171,
388 0x29002929,0xcd00cdcd,0x6c006c6c,0x13001313,
389 0x64006464,0x9b009b9b,0x63006363,0x9d009d9d,
390 0xc000c0c0,0x4b004b4b,0xb700b7b7,0xa500a5a5,
391 0x89008989,0x5f005f5f,0xb100b1b1,0x17001717,
392 0xf400f4f4,0xbc00bcbc,0xd300d3d3,0x46004646,
393 0xcf00cfcf,0x37003737,0x5e005e5e,0x47004747,
394 0x94009494,0xfa00fafa,0xfc00fcfc,0x5b005b5b,
395 0x97009797,0xfe00fefe,0x5a005a5a,0xac00acac,
396 0x3c003c3c,0x4c004c4c,0x03000303,0x35003535,
397 0xf300f3f3,0x23002323,0xb800b8b8,0x5d005d5d,
398 0x6a006a6a,0x92009292,0xd500d5d5,0x21002121,
399 0x44004444,0x51005151,0xc600c6c6,0x7d007d7d,
400 0x39003939,0x83008383,0xdc00dcdc,0xaa00aaaa,
401 0x7c007c7c,0x77007777,0x56005656,0x05000505,
402 0x1b001b1b,0xa400a4a4,0x15001515,0x34003434,
403 0x1e001e1e,0x1c001c1c,0xf800f8f8,0x52005252,
404 0x20002020,0x14001414,0xe900e9e9,0xbd00bdbd,
405 0xdd00dddd,0xe400e4e4,0xa100a1a1,0xe000e0e0,
406 0x8a008a8a,0xf100f1f1,0xd600d6d6,0x7a007a7a,
407 0xbb00bbbb,0xe300e3e3,0x40004040,0x4f004f4f,
408};
409
410static const u32 camellia_sp4404[256] = {
411 0x70700070,0x2c2c002c,0xb3b300b3,0xc0c000c0,
412 0xe4e400e4,0x57570057,0xeaea00ea,0xaeae00ae,
413 0x23230023,0x6b6b006b,0x45450045,0xa5a500a5,
414 0xeded00ed,0x4f4f004f,0x1d1d001d,0x92920092,
415 0x86860086,0xafaf00af,0x7c7c007c,0x1f1f001f,
416 0x3e3e003e,0xdcdc00dc,0x5e5e005e,0x0b0b000b,
417 0xa6a600a6,0x39390039,0xd5d500d5,0x5d5d005d,
418 0xd9d900d9,0x5a5a005a,0x51510051,0x6c6c006c,
419 0x8b8b008b,0x9a9a009a,0xfbfb00fb,0xb0b000b0,
420 0x74740074,0x2b2b002b,0xf0f000f0,0x84840084,
421 0xdfdf00df,0xcbcb00cb,0x34340034,0x76760076,
422 0x6d6d006d,0xa9a900a9,0xd1d100d1,0x04040004,
423 0x14140014,0x3a3a003a,0xdede00de,0x11110011,
424 0x32320032,0x9c9c009c,0x53530053,0xf2f200f2,
425 0xfefe00fe,0xcfcf00cf,0xc3c300c3,0x7a7a007a,
426 0x24240024,0xe8e800e8,0x60600060,0x69690069,
427 0xaaaa00aa,0xa0a000a0,0xa1a100a1,0x62620062,
428 0x54540054,0x1e1e001e,0xe0e000e0,0x64640064,
429 0x10100010,0x00000000,0xa3a300a3,0x75750075,
430 0x8a8a008a,0xe6e600e6,0x09090009,0xdddd00dd,
431 0x87870087,0x83830083,0xcdcd00cd,0x90900090,
432 0x73730073,0xf6f600f6,0x9d9d009d,0xbfbf00bf,
433 0x52520052,0xd8d800d8,0xc8c800c8,0xc6c600c6,
434 0x81810081,0x6f6f006f,0x13130013,0x63630063,
435 0xe9e900e9,0xa7a700a7,0x9f9f009f,0xbcbc00bc,
436 0x29290029,0xf9f900f9,0x2f2f002f,0xb4b400b4,
437 0x78780078,0x06060006,0xe7e700e7,0x71710071,
438 0xd4d400d4,0xabab00ab,0x88880088,0x8d8d008d,
439 0x72720072,0xb9b900b9,0xf8f800f8,0xacac00ac,
440 0x36360036,0x2a2a002a,0x3c3c003c,0xf1f100f1,
441 0x40400040,0xd3d300d3,0xbbbb00bb,0x43430043,
442 0x15150015,0xadad00ad,0x77770077,0x80800080,
443 0x82820082,0xecec00ec,0x27270027,0xe5e500e5,
444 0x85850085,0x35350035,0x0c0c000c,0x41410041,
445 0xefef00ef,0x93930093,0x19190019,0x21210021,
446 0x0e0e000e,0x4e4e004e,0x65650065,0xbdbd00bd,
447 0xb8b800b8,0x8f8f008f,0xebeb00eb,0xcece00ce,
448 0x30300030,0x5f5f005f,0xc5c500c5,0x1a1a001a,
449 0xe1e100e1,0xcaca00ca,0x47470047,0x3d3d003d,
450 0x01010001,0xd6d600d6,0x56560056,0x4d4d004d,
451 0x0d0d000d,0x66660066,0xcccc00cc,0x2d2d002d,
452 0x12120012,0x20200020,0xb1b100b1,0x99990099,
453 0x4c4c004c,0xc2c200c2,0x7e7e007e,0x05050005,
454 0xb7b700b7,0x31310031,0x17170017,0xd7d700d7,
455 0x58580058,0x61610061,0x1b1b001b,0x1c1c001c,
456 0x0f0f000f,0x16160016,0x18180018,0x22220022,
457 0x44440044,0xb2b200b2,0xb5b500b5,0x91910091,
458 0x08080008,0xa8a800a8,0xfcfc00fc,0x50500050,
459 0xd0d000d0,0x7d7d007d,0x89890089,0x97970097,
460 0x5b5b005b,0x95950095,0xffff00ff,0xd2d200d2,
461 0xc4c400c4,0x48480048,0xf7f700f7,0xdbdb00db,
462 0x03030003,0xdada00da,0x3f3f003f,0x94940094,
463 0x5c5c005c,0x02020002,0x4a4a004a,0x33330033,
464 0x67670067,0xf3f300f3,0x7f7f007f,0xe2e200e2,
465 0x9b9b009b,0x26260026,0x37370037,0x3b3b003b,
466 0x96960096,0x4b4b004b,0xbebe00be,0x2e2e002e,
467 0x79790079,0x8c8c008c,0x6e6e006e,0x8e8e008e,
468 0xf5f500f5,0xb6b600b6,0xfdfd00fd,0x59590059,
469 0x98980098,0x6a6a006a,0x46460046,0xbaba00ba,
470 0x25250025,0x42420042,0xa2a200a2,0xfafa00fa,
471 0x07070007,0x55550055,0xeeee00ee,0x0a0a000a,
472 0x49490049,0x68680068,0x38380038,0xa4a400a4,
473 0x28280028,0x7b7b007b,0xc9c900c9,0xc1c100c1,
474 0xe3e300e3,0xf4f400f4,0xc7c700c7,0x9e9e009e,
475};
476
477
478
479static void camellia_setup128(const unsigned char *key, u32 *subkey)
480{
481 u32 kll, klr, krl, krr;
482 u32 il, ir, t0, t1, w0, w1;
483 u32 kw4l, kw4r, dw, tl, tr;
484 u32 subL[26];
485 u32 subR[26];
486
487 /**
488 * k == kll || klr || krl || krr (|| is concatination)
489 */
490 kll = GETU32(key );
491 klr = GETU32(key + 4);
492 krl = GETU32(key + 8);
493 krr = GETU32(key + 12);
494 /**
495 * generate KL dependent subkeys
496 */
497 /* kw1 */
498 SUBL(0) = kll; SUBR(0) = klr;
499 /* kw2 */
500 SUBL(1) = krl; SUBR(1) = krr;
501 /* rotation left shift 15bit */
502 CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
503 /* k3 */
504 SUBL(4) = kll; SUBR(4) = klr;
505 /* k4 */
506 SUBL(5) = krl; SUBR(5) = krr;
507 /* rotation left shift 15+30bit */
508 CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 30);
509 /* k7 */
510 SUBL(10) = kll; SUBR(10) = klr;
511 /* k8 */
512 SUBL(11) = krl; SUBR(11) = krr;
513 /* rotation left shift 15+30+15bit */
514 CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
515 /* k10 */
516 SUBL(13) = krl; SUBR(13) = krr;
517 /* rotation left shift 15+30+15+17 bit */
518 CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
519 /* kl3 */
520 SUBL(16) = kll; SUBR(16) = klr;
521 /* kl4 */
522 SUBL(17) = krl; SUBR(17) = krr;
523 /* rotation left shift 15+30+15+17+17 bit */
524 CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
525 /* k13 */
526 SUBL(18) = kll; SUBR(18) = klr;
527 /* k14 */
528 SUBL(19) = krl; SUBR(19) = krr;
529 /* rotation left shift 15+30+15+17+17+17 bit */
530 CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
531 /* k17 */
532 SUBL(22) = kll; SUBR(22) = klr;
533 /* k18 */
534 SUBL(23) = krl; SUBR(23) = krr;
535
536 /* generate KA */
537 kll = SUBL(0); klr = SUBR(0);
538 krl = SUBL(1); krr = SUBR(1);
539 CAMELLIA_F(kll, klr,
540 CAMELLIA_SIGMA1L, CAMELLIA_SIGMA1R,
541 w0, w1, il, ir, t0, t1);
542 krl ^= w0; krr ^= w1;
543 CAMELLIA_F(krl, krr,
544 CAMELLIA_SIGMA2L, CAMELLIA_SIGMA2R,
545 kll, klr, il, ir, t0, t1);
546 /* current status == (kll, klr, w0, w1) */
547 CAMELLIA_F(kll, klr,
548 CAMELLIA_SIGMA3L, CAMELLIA_SIGMA3R,
549 krl, krr, il, ir, t0, t1);
550 krl ^= w0; krr ^= w1;
551 CAMELLIA_F(krl, krr,
552 CAMELLIA_SIGMA4L, CAMELLIA_SIGMA4R,
553 w0, w1, il, ir, t0, t1);
554 kll ^= w0; klr ^= w1;
555
556 /* generate KA dependent subkeys */
557 /* k1, k2 */
558 SUBL(2) = kll; SUBR(2) = klr;
559 SUBL(3) = krl; SUBR(3) = krr;
560 CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
561 /* k5,k6 */
562 SUBL(6) = kll; SUBR(6) = klr;
563 SUBL(7) = krl; SUBR(7) = krr;
564 CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
565 /* kl1, kl2 */
566 SUBL(8) = kll; SUBR(8) = klr;
567 SUBL(9) = krl; SUBR(9) = krr;
568 CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
569 /* k9 */
570 SUBL(12) = kll; SUBR(12) = klr;
571 CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
572 /* k11, k12 */
573 SUBL(14) = kll; SUBR(14) = klr;
574 SUBL(15) = krl; SUBR(15) = krr;
575 CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 34);
576 /* k15, k16 */
577 SUBL(20) = kll; SUBR(20) = klr;
578 SUBL(21) = krl; SUBR(21) = krr;
579 CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
580 /* kw3, kw4 */
581 SUBL(24) = kll; SUBR(24) = klr;
582 SUBL(25) = krl; SUBR(25) = krr;
583
584
585 /* absorb kw2 to other subkeys */
586 /* round 2 */
587 SUBL(3) ^= SUBL(1); SUBR(3) ^= SUBR(1);
588 /* round 4 */
589 SUBL(5) ^= SUBL(1); SUBR(5) ^= SUBR(1);
590 /* round 6 */
591 SUBL(7) ^= SUBL(1); SUBR(7) ^= SUBR(1);
592 SUBL(1) ^= SUBR(1) & ~SUBR(9);
593 dw = SUBL(1) & SUBL(9),
594 SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl2) */
595 /* round 8 */
596 SUBL(11) ^= SUBL(1); SUBR(11) ^= SUBR(1);
597 /* round 10 */
598 SUBL(13) ^= SUBL(1); SUBR(13) ^= SUBR(1);
599 /* round 12 */
600 SUBL(15) ^= SUBL(1); SUBR(15) ^= SUBR(1);
601 SUBL(1) ^= SUBR(1) & ~SUBR(17);
602 dw = SUBL(1) & SUBL(17),
603 SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl4) */
604 /* round 14 */
605 SUBL(19) ^= SUBL(1); SUBR(19) ^= SUBR(1);
606 /* round 16 */
607 SUBL(21) ^= SUBL(1); SUBR(21) ^= SUBR(1);
608 /* round 18 */
609 SUBL(23) ^= SUBL(1); SUBR(23) ^= SUBR(1);
610 /* kw3 */
611 SUBL(24) ^= SUBL(1); SUBR(24) ^= SUBR(1);
612
613 /* absorb kw4 to other subkeys */
614 kw4l = SUBL(25); kw4r = SUBR(25);
615 /* round 17 */
616 SUBL(22) ^= kw4l; SUBR(22) ^= kw4r;
617 /* round 15 */
618 SUBL(20) ^= kw4l; SUBR(20) ^= kw4r;
619 /* round 13 */
620 SUBL(18) ^= kw4l; SUBR(18) ^= kw4r;
621 kw4l ^= kw4r & ~SUBR(16);
622 dw = kw4l & SUBL(16),
623 kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl3) */
624 /* round 11 */
625 SUBL(14) ^= kw4l; SUBR(14) ^= kw4r;
626 /* round 9 */
627 SUBL(12) ^= kw4l; SUBR(12) ^= kw4r;
628 /* round 7 */
629 SUBL(10) ^= kw4l; SUBR(10) ^= kw4r;
630 kw4l ^= kw4r & ~SUBR(8);
631 dw = kw4l & SUBL(8),
632 kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl1) */
633 /* round 5 */
634 SUBL(6) ^= kw4l; SUBR(6) ^= kw4r;
635 /* round 3 */
636 SUBL(4) ^= kw4l; SUBR(4) ^= kw4r;
637 /* round 1 */
638 SUBL(2) ^= kw4l; SUBR(2) ^= kw4r;
639 /* kw1 */
640 SUBL(0) ^= kw4l; SUBR(0) ^= kw4r;
641
642
643 /* key XOR is end of F-function */
644 CAMELLIA_SUBKEY_L(0) = SUBL(0) ^ SUBL(2);/* kw1 */
645 CAMELLIA_SUBKEY_R(0) = SUBR(0) ^ SUBR(2);
646 CAMELLIA_SUBKEY_L(2) = SUBL(3); /* round 1 */
647 CAMELLIA_SUBKEY_R(2) = SUBR(3);
648 CAMELLIA_SUBKEY_L(3) = SUBL(2) ^ SUBL(4); /* round 2 */
649 CAMELLIA_SUBKEY_R(3) = SUBR(2) ^ SUBR(4);
650 CAMELLIA_SUBKEY_L(4) = SUBL(3) ^ SUBL(5); /* round 3 */
651 CAMELLIA_SUBKEY_R(4) = SUBR(3) ^ SUBR(5);
652 CAMELLIA_SUBKEY_L(5) = SUBL(4) ^ SUBL(6); /* round 4 */
653 CAMELLIA_SUBKEY_R(5) = SUBR(4) ^ SUBR(6);
654 CAMELLIA_SUBKEY_L(6) = SUBL(5) ^ SUBL(7); /* round 5 */
655 CAMELLIA_SUBKEY_R(6) = SUBR(5) ^ SUBR(7);
656 tl = SUBL(10) ^ (SUBR(10) & ~SUBR(8));
657 dw = tl & SUBL(8), /* FL(kl1) */
658 tr = SUBR(10) ^ CAMELLIA_RL1(dw);
659 CAMELLIA_SUBKEY_L(7) = SUBL(6) ^ tl; /* round 6 */
660 CAMELLIA_SUBKEY_R(7) = SUBR(6) ^ tr;
661 CAMELLIA_SUBKEY_L(8) = SUBL(8); /* FL(kl1) */
662 CAMELLIA_SUBKEY_R(8) = SUBR(8);
663 CAMELLIA_SUBKEY_L(9) = SUBL(9); /* FLinv(kl2) */
664 CAMELLIA_SUBKEY_R(9) = SUBR(9);
665 tl = SUBL(7) ^ (SUBR(7) & ~SUBR(9));
666 dw = tl & SUBL(9), /* FLinv(kl2) */
667 tr = SUBR(7) ^ CAMELLIA_RL1(dw);
668 CAMELLIA_SUBKEY_L(10) = tl ^ SUBL(11); /* round 7 */
669 CAMELLIA_SUBKEY_R(10) = tr ^ SUBR(11);
670 CAMELLIA_SUBKEY_L(11) = SUBL(10) ^ SUBL(12); /* round 8 */
671 CAMELLIA_SUBKEY_R(11) = SUBR(10) ^ SUBR(12);
672 CAMELLIA_SUBKEY_L(12) = SUBL(11) ^ SUBL(13); /* round 9 */
673 CAMELLIA_SUBKEY_R(12) = SUBR(11) ^ SUBR(13);
674 CAMELLIA_SUBKEY_L(13) = SUBL(12) ^ SUBL(14); /* round 10 */
675 CAMELLIA_SUBKEY_R(13) = SUBR(12) ^ SUBR(14);
676 CAMELLIA_SUBKEY_L(14) = SUBL(13) ^ SUBL(15); /* round 11 */
677 CAMELLIA_SUBKEY_R(14) = SUBR(13) ^ SUBR(15);
678 tl = SUBL(18) ^ (SUBR(18) & ~SUBR(16));
679 dw = tl & SUBL(16), /* FL(kl3) */
680 tr = SUBR(18) ^ CAMELLIA_RL1(dw);
681 CAMELLIA_SUBKEY_L(15) = SUBL(14) ^ tl; /* round 12 */
682 CAMELLIA_SUBKEY_R(15) = SUBR(14) ^ tr;
683 CAMELLIA_SUBKEY_L(16) = SUBL(16); /* FL(kl3) */
684 CAMELLIA_SUBKEY_R(16) = SUBR(16);
685 CAMELLIA_SUBKEY_L(17) = SUBL(17); /* FLinv(kl4) */
686 CAMELLIA_SUBKEY_R(17) = SUBR(17);
687 tl = SUBL(15) ^ (SUBR(15) & ~SUBR(17));
688 dw = tl & SUBL(17), /* FLinv(kl4) */
689 tr = SUBR(15) ^ CAMELLIA_RL1(dw);
690 CAMELLIA_SUBKEY_L(18) = tl ^ SUBL(19); /* round 13 */
691 CAMELLIA_SUBKEY_R(18) = tr ^ SUBR(19);
692 CAMELLIA_SUBKEY_L(19) = SUBL(18) ^ SUBL(20); /* round 14 */
693 CAMELLIA_SUBKEY_R(19) = SUBR(18) ^ SUBR(20);
694 CAMELLIA_SUBKEY_L(20) = SUBL(19) ^ SUBL(21); /* round 15 */
695 CAMELLIA_SUBKEY_R(20) = SUBR(19) ^ SUBR(21);
696 CAMELLIA_SUBKEY_L(21) = SUBL(20) ^ SUBL(22); /* round 16 */
697 CAMELLIA_SUBKEY_R(21) = SUBR(20) ^ SUBR(22);
698 CAMELLIA_SUBKEY_L(22) = SUBL(21) ^ SUBL(23); /* round 17 */
699 CAMELLIA_SUBKEY_R(22) = SUBR(21) ^ SUBR(23);
700 CAMELLIA_SUBKEY_L(23) = SUBL(22); /* round 18 */
701 CAMELLIA_SUBKEY_R(23) = SUBR(22);
702 CAMELLIA_SUBKEY_L(24) = SUBL(24) ^ SUBL(23); /* kw3 */
703 CAMELLIA_SUBKEY_R(24) = SUBR(24) ^ SUBR(23);
704
705 /* apply the inverse of the last half of P-function */
706 dw = CAMELLIA_SUBKEY_L(2) ^ CAMELLIA_SUBKEY_R(2),
707 dw = CAMELLIA_RL8(dw);/* round 1 */
708 CAMELLIA_SUBKEY_R(2) = CAMELLIA_SUBKEY_L(2) ^ dw,
709 CAMELLIA_SUBKEY_L(2) = dw;
710 dw = CAMELLIA_SUBKEY_L(3) ^ CAMELLIA_SUBKEY_R(3),
711 dw = CAMELLIA_RL8(dw);/* round 2 */
712 CAMELLIA_SUBKEY_R(3) = CAMELLIA_SUBKEY_L(3) ^ dw,
713 CAMELLIA_SUBKEY_L(3) = dw;
714 dw = CAMELLIA_SUBKEY_L(4) ^ CAMELLIA_SUBKEY_R(4),
715 dw = CAMELLIA_RL8(dw);/* round 3 */
716 CAMELLIA_SUBKEY_R(4) = CAMELLIA_SUBKEY_L(4) ^ dw,
717 CAMELLIA_SUBKEY_L(4) = dw;
718 dw = CAMELLIA_SUBKEY_L(5) ^ CAMELLIA_SUBKEY_R(5),
719 dw = CAMELLIA_RL8(dw);/* round 4 */
720 CAMELLIA_SUBKEY_R(5) = CAMELLIA_SUBKEY_L(5) ^ dw,
721 CAMELLIA_SUBKEY_L(5) = dw;
722 dw = CAMELLIA_SUBKEY_L(6) ^ CAMELLIA_SUBKEY_R(6),
723 dw = CAMELLIA_RL8(dw);/* round 5 */
724 CAMELLIA_SUBKEY_R(6) = CAMELLIA_SUBKEY_L(6) ^ dw,
725 CAMELLIA_SUBKEY_L(6) = dw;
726 dw = CAMELLIA_SUBKEY_L(7) ^ CAMELLIA_SUBKEY_R(7),
727 dw = CAMELLIA_RL8(dw);/* round 6 */
728 CAMELLIA_SUBKEY_R(7) = CAMELLIA_SUBKEY_L(7) ^ dw,
729 CAMELLIA_SUBKEY_L(7) = dw;
730 dw = CAMELLIA_SUBKEY_L(10) ^ CAMELLIA_SUBKEY_R(10),
731 dw = CAMELLIA_RL8(dw);/* round 7 */
732 CAMELLIA_SUBKEY_R(10) = CAMELLIA_SUBKEY_L(10) ^ dw,
733 CAMELLIA_SUBKEY_L(10) = dw;
734 dw = CAMELLIA_SUBKEY_L(11) ^ CAMELLIA_SUBKEY_R(11),
735 dw = CAMELLIA_RL8(dw);/* round 8 */
736 CAMELLIA_SUBKEY_R(11) = CAMELLIA_SUBKEY_L(11) ^ dw,
737 CAMELLIA_SUBKEY_L(11) = dw;
738 dw = CAMELLIA_SUBKEY_L(12) ^ CAMELLIA_SUBKEY_R(12),
739 dw = CAMELLIA_RL8(dw);/* round 9 */
740 CAMELLIA_SUBKEY_R(12) = CAMELLIA_SUBKEY_L(12) ^ dw,
741 CAMELLIA_SUBKEY_L(12) = dw;
742 dw = CAMELLIA_SUBKEY_L(13) ^ CAMELLIA_SUBKEY_R(13),
743 dw = CAMELLIA_RL8(dw);/* round 10 */
744 CAMELLIA_SUBKEY_R(13) = CAMELLIA_SUBKEY_L(13) ^ dw,
745 CAMELLIA_SUBKEY_L(13) = dw;
746 dw = CAMELLIA_SUBKEY_L(14) ^ CAMELLIA_SUBKEY_R(14),
747 dw = CAMELLIA_RL8(dw);/* round 11 */
748 CAMELLIA_SUBKEY_R(14) = CAMELLIA_SUBKEY_L(14) ^ dw,
749 CAMELLIA_SUBKEY_L(14) = dw;
750 dw = CAMELLIA_SUBKEY_L(15) ^ CAMELLIA_SUBKEY_R(15),
751 dw = CAMELLIA_RL8(dw);/* round 12 */
752 CAMELLIA_SUBKEY_R(15) = CAMELLIA_SUBKEY_L(15) ^ dw,
753 CAMELLIA_SUBKEY_L(15) = dw;
754 dw = CAMELLIA_SUBKEY_L(18) ^ CAMELLIA_SUBKEY_R(18),
755 dw = CAMELLIA_RL8(dw);/* round 13 */
756 CAMELLIA_SUBKEY_R(18) = CAMELLIA_SUBKEY_L(18) ^ dw,
757 CAMELLIA_SUBKEY_L(18) = dw;
758 dw = CAMELLIA_SUBKEY_L(19) ^ CAMELLIA_SUBKEY_R(19),
759 dw = CAMELLIA_RL8(dw);/* round 14 */
760 CAMELLIA_SUBKEY_R(19) = CAMELLIA_SUBKEY_L(19) ^ dw,
761 CAMELLIA_SUBKEY_L(19) = dw;
762 dw = CAMELLIA_SUBKEY_L(20) ^ CAMELLIA_SUBKEY_R(20),
763 dw = CAMELLIA_RL8(dw);/* round 15 */
764 CAMELLIA_SUBKEY_R(20) = CAMELLIA_SUBKEY_L(20) ^ dw,
765 CAMELLIA_SUBKEY_L(20) = dw;
766 dw = CAMELLIA_SUBKEY_L(21) ^ CAMELLIA_SUBKEY_R(21),
767 dw = CAMELLIA_RL8(dw);/* round 16 */
768 CAMELLIA_SUBKEY_R(21) = CAMELLIA_SUBKEY_L(21) ^ dw,
769 CAMELLIA_SUBKEY_L(21) = dw;
770 dw = CAMELLIA_SUBKEY_L(22) ^ CAMELLIA_SUBKEY_R(22),
771 dw = CAMELLIA_RL8(dw);/* round 17 */
772 CAMELLIA_SUBKEY_R(22) = CAMELLIA_SUBKEY_L(22) ^ dw,
773 CAMELLIA_SUBKEY_L(22) = dw;
774 dw = CAMELLIA_SUBKEY_L(23) ^ CAMELLIA_SUBKEY_R(23),
775 dw = CAMELLIA_RL8(dw);/* round 18 */
776 CAMELLIA_SUBKEY_R(23) = CAMELLIA_SUBKEY_L(23) ^ dw,
777 CAMELLIA_SUBKEY_L(23) = dw;
778
779 return;
780}
781
782
783static void camellia_setup256(const unsigned char *key, u32 *subkey)
784{
785 u32 kll,klr,krl,krr; /* left half of key */
786 u32 krll,krlr,krrl,krrr; /* right half of key */
787 u32 il, ir, t0, t1, w0, w1; /* temporary variables */
788 u32 kw4l, kw4r, dw, tl, tr;
789 u32 subL[34];
790 u32 subR[34];
791
792 /**
793 * key = (kll || klr || krl || krr || krll || krlr || krrl || krrr)
794 * (|| is concatination)
795 */
796
797 kll = GETU32(key );
798 klr = GETU32(key + 4);
799 krl = GETU32(key + 8);
800 krr = GETU32(key + 12);
801 krll = GETU32(key + 16);
802 krlr = GETU32(key + 20);
803 krrl = GETU32(key + 24);
804 krrr = GETU32(key + 28);
805
806 /* generate KL dependent subkeys */
807 /* kw1 */
808 SUBL(0) = kll; SUBR(0) = klr;
809 /* kw2 */
810 SUBL(1) = krl; SUBR(1) = krr;
811 CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 45);
812 /* k9 */
813 SUBL(12) = kll; SUBR(12) = klr;
814 /* k10 */
815 SUBL(13) = krl; SUBR(13) = krr;
816 CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
817 /* kl3 */
818 SUBL(16) = kll; SUBR(16) = klr;
819 /* kl4 */
820 SUBL(17) = krl; SUBR(17) = krr;
821 CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
822 /* k17 */
823 SUBL(22) = kll; SUBR(22) = klr;
824 /* k18 */
825 SUBL(23) = krl; SUBR(23) = krr;
826 CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 34);
827 /* k23 */
828 SUBL(30) = kll; SUBR(30) = klr;
829 /* k24 */
830 SUBL(31) = krl; SUBR(31) = krr;
831
832 /* generate KR dependent subkeys */
833 CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15);
834 /* k3 */
835 SUBL(4) = krll; SUBR(4) = krlr;
836 /* k4 */
837 SUBL(5) = krrl; SUBR(5) = krrr;
838 CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15);
839 /* kl1 */
840 SUBL(8) = krll; SUBR(8) = krlr;
841 /* kl2 */
842 SUBL(9) = krrl; SUBR(9) = krrr;
843 CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
844 /* k13 */
845 SUBL(18) = krll; SUBR(18) = krlr;
846 /* k14 */
847 SUBL(19) = krrl; SUBR(19) = krrr;
848 CAMELLIA_ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34);
849 /* k19 */
850 SUBL(26) = krll; SUBR(26) = krlr;
851 /* k20 */
852 SUBL(27) = krrl; SUBR(27) = krrr;
853 CAMELLIA_ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34);
854
855 /* generate KA */
856 kll = SUBL(0) ^ krll; klr = SUBR(0) ^ krlr;
857 krl = SUBL(1) ^ krrl; krr = SUBR(1) ^ krrr;
858 CAMELLIA_F(kll, klr,
859 CAMELLIA_SIGMA1L, CAMELLIA_SIGMA1R,
860 w0, w1, il, ir, t0, t1);
861 krl ^= w0; krr ^= w1;
862 CAMELLIA_F(krl, krr,
863 CAMELLIA_SIGMA2L, CAMELLIA_SIGMA2R,
864 kll, klr, il, ir, t0, t1);
865 kll ^= krll; klr ^= krlr;
866 CAMELLIA_F(kll, klr,
867 CAMELLIA_SIGMA3L, CAMELLIA_SIGMA3R,
868 krl, krr, il, ir, t0, t1);
869 krl ^= w0 ^ krrl; krr ^= w1 ^ krrr;
870 CAMELLIA_F(krl, krr,
871 CAMELLIA_SIGMA4L, CAMELLIA_SIGMA4R,
872 w0, w1, il, ir, t0, t1);
873 kll ^= w0; klr ^= w1;
874
875 /* generate KB */
876 krll ^= kll; krlr ^= klr;
877 krrl ^= krl; krrr ^= krr;
878 CAMELLIA_F(krll, krlr,
879 CAMELLIA_SIGMA5L, CAMELLIA_SIGMA5R,
880 w0, w1, il, ir, t0, t1);
881 krrl ^= w0; krrr ^= w1;
882 CAMELLIA_F(krrl, krrr,
883 CAMELLIA_SIGMA6L, CAMELLIA_SIGMA6R,
884 w0, w1, il, ir, t0, t1);
885 krll ^= w0; krlr ^= w1;
886
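	/*
	 * CAMELLIA_SIGMA1..6 (split into L/R halves) used above are assumed
	 * to be the key-schedule constants from the Camellia specification,
	 * defined near the top of this file.
	 */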
887 /* generate KA dependent subkeys */
888 CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
889 /* k5 */
890 SUBL(6) = kll; SUBR(6) = klr;
891 /* k6 */
892 SUBL(7) = krl; SUBR(7) = krr;
893 CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 30);
894 /* k11 */
895 SUBL(14) = kll; SUBR(14) = klr;
896 /* k12 */
897 SUBL(15) = krl; SUBR(15) = krr;
898 /* rotate left by 32 bits */
899 /* kl5 */
900 SUBL(24) = klr; SUBR(24) = krl;
901 /* kl6 */
902 SUBL(25) = krr; SUBR(25) = kll;
903 /* rotate left by 49 bits: k11,k12 -> k21,k22 */
904 CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 49);
905 /* k21 */
906 SUBL(28) = kll; SUBR(28) = klr;
907 /* k22 */
908 SUBL(29) = krl; SUBR(29) = krr;
909
910 /* generate KB dependent subkeys */
911 /* k1 */
912 SUBL(2) = krll; SUBR(2) = krlr;
913 /* k2 */
914 SUBL(3) = krrl; SUBR(3) = krrr;
915 CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
916 /* k7 */
917 SUBL(10) = krll; SUBR(10) = krlr;
918 /* k8 */
919 SUBL(11) = krrl; SUBR(11) = krrr;
920 CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
921 /* k15 */
922 SUBL(20) = krll; SUBR(20) = krlr;
923 /* k16 */
924 SUBL(21) = krrl; SUBR(21) = krrr;
925 CAMELLIA_ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 51);
926 /* kw3 */
927 SUBL(32) = krll; SUBR(32) = krlr;
928 /* kw4 */
929 SUBL(33) = krrl; SUBR(33) = krrr;
930
931 /* absorb kw2 to other subkeys */
932 /* round 2 */
933 SUBL(3) ^= SUBL(1); SUBR(3) ^= SUBR(1);
934 /* round 4 */
935 SUBL(5) ^= SUBL(1); SUBR(5) ^= SUBR(1);
936 /* round 6 */
937 SUBL(7) ^= SUBL(1); SUBR(7) ^= SUBR(1);
938 SUBL(1) ^= SUBR(1) & ~SUBR(9);
939 dw = SUBL(1) & SUBL(9),
940 SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl2) */
941 /* round 8 */
942 SUBL(11) ^= SUBL(1); SUBR(11) ^= SUBR(1);
943 /* round 10 */
944 SUBL(13) ^= SUBL(1); SUBR(13) ^= SUBR(1);
945 /* round 12 */
946 SUBL(15) ^= SUBL(1); SUBR(15) ^= SUBR(1);
947 SUBL(1) ^= SUBR(1) & ~SUBR(17);
948 dw = SUBL(1) & SUBL(17),
949 SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl4) */
950 /* round 14 */
951 SUBL(19) ^= SUBL(1); SUBR(19) ^= SUBR(1);
952 /* round 16 */
953 SUBL(21) ^= SUBL(1); SUBR(21) ^= SUBR(1);
954 /* round 18 */
955 SUBL(23) ^= SUBL(1); SUBR(23) ^= SUBR(1);
956 SUBL(1) ^= SUBR(1) & ~SUBR(25);
957 dw = SUBL(1) & SUBL(25),
958 SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl6) */
959 /* round 20 */
960 SUBL(27) ^= SUBL(1); SUBR(27) ^= SUBR(1);
961 /* round 22 */
962 SUBL(29) ^= SUBL(1); SUBR(29) ^= SUBR(1);
963 /* round 24 */
964 SUBL(31) ^= SUBL(1); SUBR(31) ^= SUBR(1);
965 /* kw3 */
966 SUBL(32) ^= SUBL(1); SUBR(32) ^= SUBR(1);
967
968
969 /* absorb kw4 to other subkeys */
970 kw4l = SUBL(33); kw4r = SUBR(33);
971 /* round 23 */
972 SUBL(30) ^= kw4l; SUBR(30) ^= kw4r;
973 /* round 21 */
974 SUBL(28) ^= kw4l; SUBR(28) ^= kw4r;
975 /* round 19 */
976 SUBL(26) ^= kw4l; SUBR(26) ^= kw4r;
977 kw4l ^= kw4r & ~SUBR(24);
978 dw = kw4l & SUBL(24),
979 kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl5) */
980 /* round 17 */
981 SUBL(22) ^= kw4l; SUBR(22) ^= kw4r;
982 /* round 15 */
983 SUBL(20) ^= kw4l; SUBR(20) ^= kw4r;
984 /* round 13 */
985 SUBL(18) ^= kw4l; SUBR(18) ^= kw4r;
986 kw4l ^= kw4r & ~SUBR(16);
987 dw = kw4l & SUBL(16),
988 kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl3) */
989 /* round 11 */
990 SUBL(14) ^= kw4l; SUBR(14) ^= kw4r;
991 /* round 9 */
992 SUBL(12) ^= kw4l; SUBR(12) ^= kw4r;
993 /* round 7 */
994 SUBL(10) ^= kw4l; SUBR(10) ^= kw4r;
995 kw4l ^= kw4r & ~SUBR(8);
996 dw = kw4l & SUBL(8),
997 kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl1) */
998 /* round 5 */
999 SUBL(6) ^= kw4l; SUBR(6) ^= kw4r;
1000 /* round 3 */
1001 SUBL(4) ^= kw4l; SUBR(4) ^= kw4r;
1002 /* round 1 */
1003 SUBL(2) ^= kw4l; SUBR(2) ^= kw4r;
1004 /* kw1 */
1005 SUBL(0) ^= kw4l; SUBR(0) ^= kw4r;
1006
1007 /* key XOR is end of F-function */
1008 CAMELLIA_SUBKEY_L(0) = SUBL(0) ^ SUBL(2);/* kw1 */
1009 CAMELLIA_SUBKEY_R(0) = SUBR(0) ^ SUBR(2);
1010 CAMELLIA_SUBKEY_L(2) = SUBL(3); /* round 1 */
1011 CAMELLIA_SUBKEY_R(2) = SUBR(3);
1012 CAMELLIA_SUBKEY_L(3) = SUBL(2) ^ SUBL(4); /* round 2 */
1013 CAMELLIA_SUBKEY_R(3) = SUBR(2) ^ SUBR(4);
1014 CAMELLIA_SUBKEY_L(4) = SUBL(3) ^ SUBL(5); /* round 3 */
1015 CAMELLIA_SUBKEY_R(4) = SUBR(3) ^ SUBR(5);
1016 CAMELLIA_SUBKEY_L(5) = SUBL(4) ^ SUBL(6); /* round 4 */
1017 CAMELLIA_SUBKEY_R(5) = SUBR(4) ^ SUBR(6);
1018 CAMELLIA_SUBKEY_L(6) = SUBL(5) ^ SUBL(7); /* round 5 */
1019 CAMELLIA_SUBKEY_R(6) = SUBR(5) ^ SUBR(7);
1020 tl = SUBL(10) ^ (SUBR(10) & ~SUBR(8));
1021 dw = tl & SUBL(8), /* FL(kl1) */
1022 tr = SUBR(10) ^ CAMELLIA_RL1(dw);
1023 CAMELLIA_SUBKEY_L(7) = SUBL(6) ^ tl; /* round 6 */
1024 CAMELLIA_SUBKEY_R(7) = SUBR(6) ^ tr;
1025 CAMELLIA_SUBKEY_L(8) = SUBL(8); /* FL(kl1) */
1026 CAMELLIA_SUBKEY_R(8) = SUBR(8);
1027 CAMELLIA_SUBKEY_L(9) = SUBL(9); /* FLinv(kl2) */
1028 CAMELLIA_SUBKEY_R(9) = SUBR(9);
1029 tl = SUBL(7) ^ (SUBR(7) & ~SUBR(9));
1030 dw = tl & SUBL(9), /* FLinv(kl2) */
1031 tr = SUBR(7) ^ CAMELLIA_RL1(dw);
1032 CAMELLIA_SUBKEY_L(10) = tl ^ SUBL(11); /* round 7 */
1033 CAMELLIA_SUBKEY_R(10) = tr ^ SUBR(11);
1034 CAMELLIA_SUBKEY_L(11) = SUBL(10) ^ SUBL(12); /* round 8 */
1035 CAMELLIA_SUBKEY_R(11) = SUBR(10) ^ SUBR(12);
1036 CAMELLIA_SUBKEY_L(12) = SUBL(11) ^ SUBL(13); /* round 9 */
1037 CAMELLIA_SUBKEY_R(12) = SUBR(11) ^ SUBR(13);
1038 CAMELLIA_SUBKEY_L(13) = SUBL(12) ^ SUBL(14); /* round 10 */
1039 CAMELLIA_SUBKEY_R(13) = SUBR(12) ^ SUBR(14);
1040 CAMELLIA_SUBKEY_L(14) = SUBL(13) ^ SUBL(15); /* round 11 */
1041 CAMELLIA_SUBKEY_R(14) = SUBR(13) ^ SUBR(15);
1042 tl = SUBL(18) ^ (SUBR(18) & ~SUBR(16));
1043 dw = tl & SUBL(16), /* FL(kl3) */
1044 tr = SUBR(18) ^ CAMELLIA_RL1(dw);
1045 CAMELLIA_SUBKEY_L(15) = SUBL(14) ^ tl; /* round 12 */
1046 CAMELLIA_SUBKEY_R(15) = SUBR(14) ^ tr;
1047 CAMELLIA_SUBKEY_L(16) = SUBL(16); /* FL(kl3) */
1048 CAMELLIA_SUBKEY_R(16) = SUBR(16);
1049 CAMELLIA_SUBKEY_L(17) = SUBL(17); /* FLinv(kl4) */
1050 CAMELLIA_SUBKEY_R(17) = SUBR(17);
1051 tl = SUBL(15) ^ (SUBR(15) & ~SUBR(17));
1052 dw = tl & SUBL(17), /* FLinv(kl4) */
1053 tr = SUBR(15) ^ CAMELLIA_RL1(dw);
1054 CAMELLIA_SUBKEY_L(18) = tl ^ SUBL(19); /* round 13 */
1055 CAMELLIA_SUBKEY_R(18) = tr ^ SUBR(19);
1056 CAMELLIA_SUBKEY_L(19) = SUBL(18) ^ SUBL(20); /* round 14 */
1057 CAMELLIA_SUBKEY_R(19) = SUBR(18) ^ SUBR(20);
1058 CAMELLIA_SUBKEY_L(20) = SUBL(19) ^ SUBL(21); /* round 15 */
1059 CAMELLIA_SUBKEY_R(20) = SUBR(19) ^ SUBR(21);
1060 CAMELLIA_SUBKEY_L(21) = SUBL(20) ^ SUBL(22); /* round 16 */
1061 CAMELLIA_SUBKEY_R(21) = SUBR(20) ^ SUBR(22);
1062 CAMELLIA_SUBKEY_L(22) = SUBL(21) ^ SUBL(23); /* round 17 */
1063 CAMELLIA_SUBKEY_R(22) = SUBR(21) ^ SUBR(23);
1064 tl = SUBL(26) ^ (SUBR(26)
1065 & ~SUBR(24));
1066 dw = tl & SUBL(24), /* FL(kl5) */
1067 tr = SUBR(26) ^ CAMELLIA_RL1(dw);
1068 CAMELLIA_SUBKEY_L(23) = SUBL(22) ^ tl; /* round 18 */
1069 CAMELLIA_SUBKEY_R(23) = SUBR(22) ^ tr;
1070 CAMELLIA_SUBKEY_L(24) = SUBL(24); /* FL(kl5) */
1071 CAMELLIA_SUBKEY_R(24) = SUBR(24);
1072 CAMELLIA_SUBKEY_L(25) = SUBL(25); /* FLinv(kl6) */
1073 CAMELLIA_SUBKEY_R(25) = SUBR(25);
1074 tl = SUBL(23) ^ (SUBR(23) &
1075 ~SUBR(25));
1076 dw = tl & SUBL(25), /* FLinv(kl6) */
1077 tr = SUBR(23) ^ CAMELLIA_RL1(dw);
1078 CAMELLIA_SUBKEY_L(26) = tl ^ SUBL(27); /* round 19 */
1079 CAMELLIA_SUBKEY_R(26) = tr ^ SUBR(27);
1080 CAMELLIA_SUBKEY_L(27) = SUBL(26) ^ SUBL(28); /* round 20 */
1081 CAMELLIA_SUBKEY_R(27) = SUBR(26) ^ SUBR(28);
1082 CAMELLIA_SUBKEY_L(28) = SUBL(27) ^ SUBL(29); /* round 21 */
1083 CAMELLIA_SUBKEY_R(28) = SUBR(27) ^ SUBR(29);
1084 CAMELLIA_SUBKEY_L(29) = SUBL(28) ^ SUBL(30); /* round 22 */
1085 CAMELLIA_SUBKEY_R(29) = SUBR(28) ^ SUBR(30);
1086 CAMELLIA_SUBKEY_L(30) = SUBL(29) ^ SUBL(31); /* round 23 */
1087 CAMELLIA_SUBKEY_R(30) = SUBR(29) ^ SUBR(31);
1088 CAMELLIA_SUBKEY_L(31) = SUBL(30); /* round 24 */
1089 CAMELLIA_SUBKEY_R(31) = SUBR(30);
1090 CAMELLIA_SUBKEY_L(32) = SUBL(32) ^ SUBL(31); /* kw3 */
1091 CAMELLIA_SUBKEY_R(32) = SUBR(32) ^ SUBR(31);
1092
1093 /* apply the inverse of the last half of P-function */
1094 dw = CAMELLIA_SUBKEY_L(2) ^ CAMELLIA_SUBKEY_R(2),
1095 dw = CAMELLIA_RL8(dw);/* round 1 */
1096 CAMELLIA_SUBKEY_R(2) = CAMELLIA_SUBKEY_L(2) ^ dw,
1097 CAMELLIA_SUBKEY_L(2) = dw;
1098 dw = CAMELLIA_SUBKEY_L(3) ^ CAMELLIA_SUBKEY_R(3),
1099 dw = CAMELLIA_RL8(dw);/* round 2 */
1100 CAMELLIA_SUBKEY_R(3) = CAMELLIA_SUBKEY_L(3) ^ dw,
1101 CAMELLIA_SUBKEY_L(3) = dw;
1102 dw = CAMELLIA_SUBKEY_L(4) ^ CAMELLIA_SUBKEY_R(4),
1103 dw = CAMELLIA_RL8(dw);/* round 3 */
1104 CAMELLIA_SUBKEY_R(4) = CAMELLIA_SUBKEY_L(4) ^ dw,
1105 CAMELLIA_SUBKEY_L(4) = dw;
1106 dw = CAMELLIA_SUBKEY_L(5) ^ CAMELLIA_SUBKEY_R(5),
1107 dw = CAMELLIA_RL8(dw);/* round 4 */
1108 CAMELLIA_SUBKEY_R(5) = CAMELLIA_SUBKEY_L(5) ^ dw,
1109 CAMELLIA_SUBKEY_L(5) = dw;
1110 dw = CAMELLIA_SUBKEY_L(6) ^ CAMELLIA_SUBKEY_R(6),
1111 dw = CAMELLIA_RL8(dw);/* round 5 */
1112 CAMELLIA_SUBKEY_R(6) = CAMELLIA_SUBKEY_L(6) ^ dw,
1113 CAMELLIA_SUBKEY_L(6) = dw;
1114 dw = CAMELLIA_SUBKEY_L(7) ^ CAMELLIA_SUBKEY_R(7),
1115 dw = CAMELLIA_RL8(dw);/* round 6 */
1116 CAMELLIA_SUBKEY_R(7) = CAMELLIA_SUBKEY_L(7) ^ dw,
1117 CAMELLIA_SUBKEY_L(7) = dw;
1118 dw = CAMELLIA_SUBKEY_L(10) ^ CAMELLIA_SUBKEY_R(10),
1119 dw = CAMELLIA_RL8(dw);/* round 7 */
1120 CAMELLIA_SUBKEY_R(10) = CAMELLIA_SUBKEY_L(10) ^ dw,
1121 CAMELLIA_SUBKEY_L(10) = dw;
1122 dw = CAMELLIA_SUBKEY_L(11) ^ CAMELLIA_SUBKEY_R(11),
1123 dw = CAMELLIA_RL8(dw);/* round 8 */
1124 CAMELLIA_SUBKEY_R(11) = CAMELLIA_SUBKEY_L(11) ^ dw,
1125 CAMELLIA_SUBKEY_L(11) = dw;
1126 dw = CAMELLIA_SUBKEY_L(12) ^ CAMELLIA_SUBKEY_R(12),
1127 dw = CAMELLIA_RL8(dw);/* round 9 */
1128 CAMELLIA_SUBKEY_R(12) = CAMELLIA_SUBKEY_L(12) ^ dw,
1129 CAMELLIA_SUBKEY_L(12) = dw;
1130 dw = CAMELLIA_SUBKEY_L(13) ^ CAMELLIA_SUBKEY_R(13),
1131 dw = CAMELLIA_RL8(dw);/* round 10 */
1132 CAMELLIA_SUBKEY_R(13) = CAMELLIA_SUBKEY_L(13) ^ dw,
1133 CAMELLIA_SUBKEY_L(13) = dw;
1134 dw = CAMELLIA_SUBKEY_L(14) ^ CAMELLIA_SUBKEY_R(14),
1135 dw = CAMELLIA_RL8(dw);/* round 11 */
1136 CAMELLIA_SUBKEY_R(14) = CAMELLIA_SUBKEY_L(14) ^ dw,
1137 CAMELLIA_SUBKEY_L(14) = dw;
1138 dw = CAMELLIA_SUBKEY_L(15) ^ CAMELLIA_SUBKEY_R(15),
1139 dw = CAMELLIA_RL8(dw);/* round 12 */
1140 CAMELLIA_SUBKEY_R(15) = CAMELLIA_SUBKEY_L(15) ^ dw,
1141 CAMELLIA_SUBKEY_L(15) = dw;
1142 dw = CAMELLIA_SUBKEY_L(18) ^ CAMELLIA_SUBKEY_R(18),
1143 dw = CAMELLIA_RL8(dw);/* round 13 */
1144 CAMELLIA_SUBKEY_R(18) = CAMELLIA_SUBKEY_L(18) ^ dw,
1145 CAMELLIA_SUBKEY_L(18) = dw;
1146 dw = CAMELLIA_SUBKEY_L(19) ^ CAMELLIA_SUBKEY_R(19),
1147 dw = CAMELLIA_RL8(dw);/* round 14 */
1148 CAMELLIA_SUBKEY_R(19) = CAMELLIA_SUBKEY_L(19) ^ dw,
1149 CAMELLIA_SUBKEY_L(19) = dw;
1150 dw = CAMELLIA_SUBKEY_L(20) ^ CAMELLIA_SUBKEY_R(20),
1151 dw = CAMELLIA_RL8(dw);/* round 15 */
1152 CAMELLIA_SUBKEY_R(20) = CAMELLIA_SUBKEY_L(20) ^ dw,
1153 CAMELLIA_SUBKEY_L(20) = dw;
1154 dw = CAMELLIA_SUBKEY_L(21) ^ CAMELLIA_SUBKEY_R(21),
1155 dw = CAMELLIA_RL8(dw);/* round 16 */
1156 CAMELLIA_SUBKEY_R(21) = CAMELLIA_SUBKEY_L(21) ^ dw,
1157 CAMELLIA_SUBKEY_L(21) = dw;
1158 dw = CAMELLIA_SUBKEY_L(22) ^ CAMELLIA_SUBKEY_R(22),
1159 dw = CAMELLIA_RL8(dw);/* round 17 */
1160 CAMELLIA_SUBKEY_R(22) = CAMELLIA_SUBKEY_L(22) ^ dw,
1161 CAMELLIA_SUBKEY_L(22) = dw;
1162 dw = CAMELLIA_SUBKEY_L(23) ^ CAMELLIA_SUBKEY_R(23),
1163 dw = CAMELLIA_RL8(dw);/* round 18 */
1164 CAMELLIA_SUBKEY_R(23) = CAMELLIA_SUBKEY_L(23) ^ dw,
1165 CAMELLIA_SUBKEY_L(23) = dw;
1166 dw = CAMELLIA_SUBKEY_L(26) ^ CAMELLIA_SUBKEY_R(26),
1167 dw = CAMELLIA_RL8(dw);/* round 19 */
1168 CAMELLIA_SUBKEY_R(26) = CAMELLIA_SUBKEY_L(26) ^ dw,
1169 CAMELLIA_SUBKEY_L(26) = dw;
1170 dw = CAMELLIA_SUBKEY_L(27) ^ CAMELLIA_SUBKEY_R(27),
1171 dw = CAMELLIA_RL8(dw);/* round 20 */
1172 CAMELLIA_SUBKEY_R(27) = CAMELLIA_SUBKEY_L(27) ^ dw,
1173 CAMELLIA_SUBKEY_L(27) = dw;
1174 dw = CAMELLIA_SUBKEY_L(28) ^ CAMELLIA_SUBKEY_R(28),
1175 dw = CAMELLIA_RL8(dw);/* round 21 */
1176 CAMELLIA_SUBKEY_R(28) = CAMELLIA_SUBKEY_L(28) ^ dw,
1177 CAMELLIA_SUBKEY_L(28) = dw;
1178 dw = CAMELLIA_SUBKEY_L(29) ^ CAMELLIA_SUBKEY_R(29),
1179 dw = CAMELLIA_RL8(dw);/* round 22 */
1180 CAMELLIA_SUBKEY_R(29) = CAMELLIA_SUBKEY_L(29) ^ dw,
1181 CAMELLIA_SUBKEY_L(29) = dw;
1182 dw = CAMELLIA_SUBKEY_L(30) ^ CAMELLIA_SUBKEY_R(30),
1183 dw = CAMELLIA_RL8(dw);/* round 23 */
1184 CAMELLIA_SUBKEY_R(30) = CAMELLIA_SUBKEY_L(30) ^ dw,
1185 CAMELLIA_SUBKEY_L(30) = dw;
1186 dw = CAMELLIA_SUBKEY_L(31) ^ CAMELLIA_SUBKEY_R(31),
1187 dw = CAMELLIA_RL8(dw);/* round 24 */
1188 CAMELLIA_SUBKEY_R(31) = CAMELLIA_SUBKEY_L(31) ^ dw,
1189 CAMELLIA_SUBKEY_L(31) = dw;
1190
1191 return;
1192}
1193
1194static void camellia_setup192(const unsigned char *key, u32 *subkey)
1195{
1196 unsigned char kk[32];
1197 u32 krll, krlr, krrl,krrr;
1198
1199 memcpy(kk, key, 24);
1200 memcpy((unsigned char *)&krll, key+16,4);
1201 memcpy((unsigned char *)&krlr, key+20,4);
1202 krrl = ~krll;
1203 krrr = ~krlr;
1204 memcpy(kk+24, (unsigned char *)&krrl, 4);
1205 memcpy(kk+28, (unsigned char *)&krrr, 4);
1206 camellia_setup256(kk, subkey);
1207 return;
1208}
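/*
 * For 24-byte keys the 256-bit schedule is reused: bytes 0..23 are copied
 * verbatim and the final 8 bytes are the bitwise complement of bytes 16..23,
 * matching the Camellia rule that the low half of KR is the complement of
 * its high half for 192-bit keys.  The two u32 complements above are
 * equivalent to this byte-wise sketch:
 *
 *	int i;
 *
 *	for (i = 0; i < 8; i++)
 *		kk[24 + i] = ~key[16 + i];
 */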
1209
1210
1211/**
1212 * Camellia encryption and decryption for 128-bit keys
1213 */
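/*
 * CAMELLIA_ROUNDSM and CAMELLIA_FLS below are macros from earlier in this
 * file: ROUNDSM is assumed to perform one Feistel round (sbox lookups and
 * P-function diffusion, with the round-key XOR folded in at the end) and
 * XOR the result into the other pair of words, while FLS applies the
 * FL/FL^-1 layer that separates each group of six rounds.
 */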
1214static void camellia_encrypt128(const u32 *subkey, __be32 *io_text)
1215{
1216 u32 il,ir,t0,t1; /* temporary variables */
1217
1218 u32 io[4];
1219
1220 io[0] = be32_to_cpu(io_text[0]);
1221 io[1] = be32_to_cpu(io_text[1]);
1222 io[2] = be32_to_cpu(io_text[2]);
1223 io[3] = be32_to_cpu(io_text[3]);
1224
1225 /* pre whitening but absorb kw2 */
1226 io[0] ^= CAMELLIA_SUBKEY_L(0);
1227 io[1] ^= CAMELLIA_SUBKEY_R(0);
1228 /* main iteration */
1229
1230 CAMELLIA_ROUNDSM(io[0],io[1],
1231 CAMELLIA_SUBKEY_L(2),CAMELLIA_SUBKEY_R(2),
1232 io[2],io[3],il,ir,t0,t1);
1233 CAMELLIA_ROUNDSM(io[2],io[3],
1234 CAMELLIA_SUBKEY_L(3),CAMELLIA_SUBKEY_R(3),
1235 io[0],io[1],il,ir,t0,t1);
1236 CAMELLIA_ROUNDSM(io[0],io[1],
1237 CAMELLIA_SUBKEY_L(4),CAMELLIA_SUBKEY_R(4),
1238 io[2],io[3],il,ir,t0,t1);
1239 CAMELLIA_ROUNDSM(io[2],io[3],
1240 CAMELLIA_SUBKEY_L(5),CAMELLIA_SUBKEY_R(5),
1241 io[0],io[1],il,ir,t0,t1);
1242 CAMELLIA_ROUNDSM(io[0],io[1],
1243 CAMELLIA_SUBKEY_L(6),CAMELLIA_SUBKEY_R(6),
1244 io[2],io[3],il,ir,t0,t1);
1245 CAMELLIA_ROUNDSM(io[2],io[3],
1246 CAMELLIA_SUBKEY_L(7),CAMELLIA_SUBKEY_R(7),
1247 io[0],io[1],il,ir,t0,t1);
1248
1249 CAMELLIA_FLS(io[0],io[1],io[2],io[3],
1250 CAMELLIA_SUBKEY_L(8),CAMELLIA_SUBKEY_R(8),
1251 CAMELLIA_SUBKEY_L(9),CAMELLIA_SUBKEY_R(9),
1252 t0,t1,il,ir);
1253
1254 CAMELLIA_ROUNDSM(io[0],io[1],
1255 CAMELLIA_SUBKEY_L(10),CAMELLIA_SUBKEY_R(10),
1256 io[2],io[3],il,ir,t0,t1);
1257 CAMELLIA_ROUNDSM(io[2],io[3],
1258 CAMELLIA_SUBKEY_L(11),CAMELLIA_SUBKEY_R(11),
1259 io[0],io[1],il,ir,t0,t1);
1260 CAMELLIA_ROUNDSM(io[0],io[1],
1261 CAMELLIA_SUBKEY_L(12),CAMELLIA_SUBKEY_R(12),
1262 io[2],io[3],il,ir,t0,t1);
1263 CAMELLIA_ROUNDSM(io[2],io[3],
1264 CAMELLIA_SUBKEY_L(13),CAMELLIA_SUBKEY_R(13),
1265 io[0],io[1],il,ir,t0,t1);
1266 CAMELLIA_ROUNDSM(io[0],io[1],
1267 CAMELLIA_SUBKEY_L(14),CAMELLIA_SUBKEY_R(14),
1268 io[2],io[3],il,ir,t0,t1);
1269 CAMELLIA_ROUNDSM(io[2],io[3],
1270 CAMELLIA_SUBKEY_L(15),CAMELLIA_SUBKEY_R(15),
1271 io[0],io[1],il,ir,t0,t1);
1272
1273 CAMELLIA_FLS(io[0],io[1],io[2],io[3],
1274 CAMELLIA_SUBKEY_L(16),CAMELLIA_SUBKEY_R(16),
1275 CAMELLIA_SUBKEY_L(17),CAMELLIA_SUBKEY_R(17),
1276 t0,t1,il,ir);
1277
1278 CAMELLIA_ROUNDSM(io[0],io[1],
1279 CAMELLIA_SUBKEY_L(18),CAMELLIA_SUBKEY_R(18),
1280 io[2],io[3],il,ir,t0,t1);
1281 CAMELLIA_ROUNDSM(io[2],io[3],
1282 CAMELLIA_SUBKEY_L(19),CAMELLIA_SUBKEY_R(19),
1283 io[0],io[1],il,ir,t0,t1);
1284 CAMELLIA_ROUNDSM(io[0],io[1],
1285 CAMELLIA_SUBKEY_L(20),CAMELLIA_SUBKEY_R(20),
1286 io[2],io[3],il,ir,t0,t1);
1287 CAMELLIA_ROUNDSM(io[2],io[3],
1288 CAMELLIA_SUBKEY_L(21),CAMELLIA_SUBKEY_R(21),
1289 io[0],io[1],il,ir,t0,t1);
1290 CAMELLIA_ROUNDSM(io[0],io[1],
1291 CAMELLIA_SUBKEY_L(22),CAMELLIA_SUBKEY_R(22),
1292 io[2],io[3],il,ir,t0,t1);
1293 CAMELLIA_ROUNDSM(io[2],io[3],
1294 CAMELLIA_SUBKEY_L(23),CAMELLIA_SUBKEY_R(23),
1295 io[0],io[1],il,ir,t0,t1);
1296
1297 /* post whitening but kw4 */
1298 io[2] ^= CAMELLIA_SUBKEY_L(24);
1299 io[3] ^= CAMELLIA_SUBKEY_R(24);
1300
1301 t0 = io[0];
1302 t1 = io[1];
1303 io[0] = io[2];
1304 io[1] = io[3];
1305 io[2] = t0;
1306 io[3] = t1;
1307
1308 io_text[0] = cpu_to_be32(io[0]);
1309 io_text[1] = cpu_to_be32(io[1]);
1310 io_text[2] = cpu_to_be32(io[2]);
1311 io_text[3] = cpu_to_be32(io[3]);
1312
1313 return;
1314}
1315
1316static void camellia_decrypt128(const u32 *subkey, __be32 *io_text)
1317{
1318 u32 il,ir,t0,t1; /* temporary variables */
1319
1320 u32 io[4];
1321
1322 io[0] = be32_to_cpu(io_text[0]);
1323 io[1] = be32_to_cpu(io_text[1]);
1324 io[2] = be32_to_cpu(io_text[2]);
1325 io[3] = be32_to_cpu(io_text[3]);
1326
1327 /* pre whitening but absorb kw4 */
1328 io[0] ^= CAMELLIA_SUBKEY_L(24);
1329 io[1] ^= CAMELLIA_SUBKEY_R(24);
1330
1331 /* main iteration */
1332 CAMELLIA_ROUNDSM(io[0],io[1],
1333 CAMELLIA_SUBKEY_L(23),CAMELLIA_SUBKEY_R(23),
1334 io[2],io[3],il,ir,t0,t1);
1335 CAMELLIA_ROUNDSM(io[2],io[3],
1336 CAMELLIA_SUBKEY_L(22),CAMELLIA_SUBKEY_R(22),
1337 io[0],io[1],il,ir,t0,t1);
1338 CAMELLIA_ROUNDSM(io[0],io[1],
1339 CAMELLIA_SUBKEY_L(21),CAMELLIA_SUBKEY_R(21),
1340 io[2],io[3],il,ir,t0,t1);
1341 CAMELLIA_ROUNDSM(io[2],io[3],
1342 CAMELLIA_SUBKEY_L(20),CAMELLIA_SUBKEY_R(20),
1343 io[0],io[1],il,ir,t0,t1);
1344 CAMELLIA_ROUNDSM(io[0],io[1],
1345 CAMELLIA_SUBKEY_L(19),CAMELLIA_SUBKEY_R(19),
1346 io[2],io[3],il,ir,t0,t1);
1347 CAMELLIA_ROUNDSM(io[2],io[3],
1348 CAMELLIA_SUBKEY_L(18),CAMELLIA_SUBKEY_R(18),
1349 io[0],io[1],il,ir,t0,t1);
1350
1351 CAMELLIA_FLS(io[0],io[1],io[2],io[3],
1352 CAMELLIA_SUBKEY_L(17),CAMELLIA_SUBKEY_R(17),
1353 CAMELLIA_SUBKEY_L(16),CAMELLIA_SUBKEY_R(16),
1354 t0,t1,il,ir);
1355
1356 CAMELLIA_ROUNDSM(io[0],io[1],
1357 CAMELLIA_SUBKEY_L(15),CAMELLIA_SUBKEY_R(15),
1358 io[2],io[3],il,ir,t0,t1);
1359 CAMELLIA_ROUNDSM(io[2],io[3],
1360 CAMELLIA_SUBKEY_L(14),CAMELLIA_SUBKEY_R(14),
1361 io[0],io[1],il,ir,t0,t1);
1362 CAMELLIA_ROUNDSM(io[0],io[1],
1363 CAMELLIA_SUBKEY_L(13),CAMELLIA_SUBKEY_R(13),
1364 io[2],io[3],il,ir,t0,t1);
1365 CAMELLIA_ROUNDSM(io[2],io[3],
1366 CAMELLIA_SUBKEY_L(12),CAMELLIA_SUBKEY_R(12),
1367 io[0],io[1],il,ir,t0,t1);
1368 CAMELLIA_ROUNDSM(io[0],io[1],
1369 CAMELLIA_SUBKEY_L(11),CAMELLIA_SUBKEY_R(11),
1370 io[2],io[3],il,ir,t0,t1);
1371 CAMELLIA_ROUNDSM(io[2],io[3],
1372 CAMELLIA_SUBKEY_L(10),CAMELLIA_SUBKEY_R(10),
1373 io[0],io[1],il,ir,t0,t1);
1374
1375 CAMELLIA_FLS(io[0],io[1],io[2],io[3],
1376 CAMELLIA_SUBKEY_L(9),CAMELLIA_SUBKEY_R(9),
1377 CAMELLIA_SUBKEY_L(8),CAMELLIA_SUBKEY_R(8),
1378 t0,t1,il,ir);
1379
1380 CAMELLIA_ROUNDSM(io[0],io[1],
1381 CAMELLIA_SUBKEY_L(7),CAMELLIA_SUBKEY_R(7),
1382 io[2],io[3],il,ir,t0,t1);
1383 CAMELLIA_ROUNDSM(io[2],io[3],
1384 CAMELLIA_SUBKEY_L(6),CAMELLIA_SUBKEY_R(6),
1385 io[0],io[1],il,ir,t0,t1);
1386 CAMELLIA_ROUNDSM(io[0],io[1],
1387 CAMELLIA_SUBKEY_L(5),CAMELLIA_SUBKEY_R(5),
1388 io[2],io[3],il,ir,t0,t1);
1389 CAMELLIA_ROUNDSM(io[2],io[3],
1390 CAMELLIA_SUBKEY_L(4),CAMELLIA_SUBKEY_R(4),
1391 io[0],io[1],il,ir,t0,t1);
1392 CAMELLIA_ROUNDSM(io[0],io[1],
1393 CAMELLIA_SUBKEY_L(3),CAMELLIA_SUBKEY_R(3),
1394 io[2],io[3],il,ir,t0,t1);
1395 CAMELLIA_ROUNDSM(io[2],io[3],
1396 CAMELLIA_SUBKEY_L(2),CAMELLIA_SUBKEY_R(2),
1397 io[0],io[1],il,ir,t0,t1);
1398
1399 /* post whitening but kw2 */
1400 io[2] ^= CAMELLIA_SUBKEY_L(0);
1401 io[3] ^= CAMELLIA_SUBKEY_R(0);
1402
1403 t0 = io[0];
1404 t1 = io[1];
1405 io[0] = io[2];
1406 io[1] = io[3];
1407 io[2] = t0;
1408 io[3] = t1;
1409
1410 io_text[0] = cpu_to_be32(io[0]);
1411 io_text[1] = cpu_to_be32(io[1]);
1412 io_text[2] = cpu_to_be32(io[2]);
1413 io_text[3] = cpu_to_be32(io[3]);
1414
1415 return;
1416}
1417
1418
1419/**
1420 * Encryption and decryption for 192- and 256-bit keys
1421 */
1422static void camellia_encrypt256(const u32 *subkey, __be32 *io_text)
1423{
1424 u32 il,ir,t0,t1; /* temporary variables */
1425
1426 u32 io[4];
1427
1428 io[0] = be32_to_cpu(io_text[0]);
1429 io[1] = be32_to_cpu(io_text[1]);
1430 io[2] = be32_to_cpu(io_text[2]);
1431 io[3] = be32_to_cpu(io_text[3]);
1432
1433 /* pre whitening but absorb kw2 */
1434 io[0] ^= CAMELLIA_SUBKEY_L(0);
1435 io[1] ^= CAMELLIA_SUBKEY_R(0);
1436
1437 /* main iteration */
1438 CAMELLIA_ROUNDSM(io[0],io[1],
1439 CAMELLIA_SUBKEY_L(2),CAMELLIA_SUBKEY_R(2),
1440 io[2],io[3],il,ir,t0,t1);
1441 CAMELLIA_ROUNDSM(io[2],io[3],
1442 CAMELLIA_SUBKEY_L(3),CAMELLIA_SUBKEY_R(3),
1443 io[0],io[1],il,ir,t0,t1);
1444 CAMELLIA_ROUNDSM(io[0],io[1],
1445 CAMELLIA_SUBKEY_L(4),CAMELLIA_SUBKEY_R(4),
1446 io[2],io[3],il,ir,t0,t1);
1447 CAMELLIA_ROUNDSM(io[2],io[3],
1448 CAMELLIA_SUBKEY_L(5),CAMELLIA_SUBKEY_R(5),
1449 io[0],io[1],il,ir,t0,t1);
1450 CAMELLIA_ROUNDSM(io[0],io[1],
1451 CAMELLIA_SUBKEY_L(6),CAMELLIA_SUBKEY_R(6),
1452 io[2],io[3],il,ir,t0,t1);
1453 CAMELLIA_ROUNDSM(io[2],io[3],
1454 CAMELLIA_SUBKEY_L(7),CAMELLIA_SUBKEY_R(7),
1455 io[0],io[1],il,ir,t0,t1);
1456
1457 CAMELLIA_FLS(io[0],io[1],io[2],io[3],
1458 CAMELLIA_SUBKEY_L(8),CAMELLIA_SUBKEY_R(8),
1459 CAMELLIA_SUBKEY_L(9),CAMELLIA_SUBKEY_R(9),
1460 t0,t1,il,ir);
1461
1462 CAMELLIA_ROUNDSM(io[0],io[1],
1463 CAMELLIA_SUBKEY_L(10),CAMELLIA_SUBKEY_R(10),
1464 io[2],io[3],il,ir,t0,t1);
1465 CAMELLIA_ROUNDSM(io[2],io[3],
1466 CAMELLIA_SUBKEY_L(11),CAMELLIA_SUBKEY_R(11),
1467 io[0],io[1],il,ir,t0,t1);
1468 CAMELLIA_ROUNDSM(io[0],io[1],
1469 CAMELLIA_SUBKEY_L(12),CAMELLIA_SUBKEY_R(12),
1470 io[2],io[3],il,ir,t0,t1);
1471 CAMELLIA_ROUNDSM(io[2],io[3],
1472 CAMELLIA_SUBKEY_L(13),CAMELLIA_SUBKEY_R(13),
1473 io[0],io[1],il,ir,t0,t1);
1474 CAMELLIA_ROUNDSM(io[0],io[1],
1475 CAMELLIA_SUBKEY_L(14),CAMELLIA_SUBKEY_R(14),
1476 io[2],io[3],il,ir,t0,t1);
1477 CAMELLIA_ROUNDSM(io[2],io[3],
1478 CAMELLIA_SUBKEY_L(15),CAMELLIA_SUBKEY_R(15),
1479 io[0],io[1],il,ir,t0,t1);
1480
1481 CAMELLIA_FLS(io[0],io[1],io[2],io[3],
1482 CAMELLIA_SUBKEY_L(16),CAMELLIA_SUBKEY_R(16),
1483 CAMELLIA_SUBKEY_L(17),CAMELLIA_SUBKEY_R(17),
1484 t0,t1,il,ir);
1485
1486 CAMELLIA_ROUNDSM(io[0],io[1],
1487 CAMELLIA_SUBKEY_L(18),CAMELLIA_SUBKEY_R(18),
1488 io[2],io[3],il,ir,t0,t1);
1489 CAMELLIA_ROUNDSM(io[2],io[3],
1490 CAMELLIA_SUBKEY_L(19),CAMELLIA_SUBKEY_R(19),
1491 io[0],io[1],il,ir,t0,t1);
1492 CAMELLIA_ROUNDSM(io[0],io[1],
1493 CAMELLIA_SUBKEY_L(20),CAMELLIA_SUBKEY_R(20),
1494 io[2],io[3],il,ir,t0,t1);
1495 CAMELLIA_ROUNDSM(io[2],io[3],
1496 CAMELLIA_SUBKEY_L(21),CAMELLIA_SUBKEY_R(21),
1497 io[0],io[1],il,ir,t0,t1);
1498 CAMELLIA_ROUNDSM(io[0],io[1],
1499 CAMELLIA_SUBKEY_L(22),CAMELLIA_SUBKEY_R(22),
1500 io[2],io[3],il,ir,t0,t1);
1501 CAMELLIA_ROUNDSM(io[2],io[3],
1502 CAMELLIA_SUBKEY_L(23),CAMELLIA_SUBKEY_R(23),
1503 io[0],io[1],il,ir,t0,t1);
1504
1505 CAMELLIA_FLS(io[0],io[1],io[2],io[3],
1506 CAMELLIA_SUBKEY_L(24),CAMELLIA_SUBKEY_R(24),
1507 CAMELLIA_SUBKEY_L(25),CAMELLIA_SUBKEY_R(25),
1508 t0,t1,il,ir);
1509
1510 CAMELLIA_ROUNDSM(io[0],io[1],
1511 CAMELLIA_SUBKEY_L(26),CAMELLIA_SUBKEY_R(26),
1512 io[2],io[3],il,ir,t0,t1);
1513 CAMELLIA_ROUNDSM(io[2],io[3],
1514 CAMELLIA_SUBKEY_L(27),CAMELLIA_SUBKEY_R(27),
1515 io[0],io[1],il,ir,t0,t1);
1516 CAMELLIA_ROUNDSM(io[0],io[1],
1517 CAMELLIA_SUBKEY_L(28),CAMELLIA_SUBKEY_R(28),
1518 io[2],io[3],il,ir,t0,t1);
1519 CAMELLIA_ROUNDSM(io[2],io[3],
1520 CAMELLIA_SUBKEY_L(29),CAMELLIA_SUBKEY_R(29),
1521 io[0],io[1],il,ir,t0,t1);
1522 CAMELLIA_ROUNDSM(io[0],io[1],
1523 CAMELLIA_SUBKEY_L(30),CAMELLIA_SUBKEY_R(30),
1524 io[2],io[3],il,ir,t0,t1);
1525 CAMELLIA_ROUNDSM(io[2],io[3],
1526 CAMELLIA_SUBKEY_L(31),CAMELLIA_SUBKEY_R(31),
1527 io[0],io[1],il,ir,t0,t1);
1528
1529 /* post whitening but kw4 */
1530 io[2] ^= CAMELLIA_SUBKEY_L(32);
1531 io[3] ^= CAMELLIA_SUBKEY_R(32);
1532
1533 t0 = io[0];
1534 t1 = io[1];
1535 io[0] = io[2];
1536 io[1] = io[3];
1537 io[2] = t0;
1538 io[3] = t1;
1539
1540 io_text[0] = cpu_to_be32(io[0]);
1541 io_text[1] = cpu_to_be32(io[1]);
1542 io_text[2] = cpu_to_be32(io[2]);
1543 io_text[3] = cpu_to_be32(io[3]);
1544
1545 return;
1546}
1547
1548
1549static void camellia_decrypt256(const u32 *subkey, __be32 *io_text)
1550{
1551 u32 il,ir,t0,t1; /* temporary variables */
1552
1553 u32 io[4];
1554
1555 io[0] = be32_to_cpu(io_text[0]);
1556 io[1] = be32_to_cpu(io_text[1]);
1557 io[2] = be32_to_cpu(io_text[2]);
1558 io[3] = be32_to_cpu(io_text[3]);
1559
1560 /* pre whitening but absorb kw4 */
1561 io[0] ^= CAMELLIA_SUBKEY_L(32);
1562 io[1] ^= CAMELLIA_SUBKEY_R(32);
1563
1564 /* main iteration */
1565 CAMELLIA_ROUNDSM(io[0],io[1],
1566 CAMELLIA_SUBKEY_L(31),CAMELLIA_SUBKEY_R(31),
1567 io[2],io[3],il,ir,t0,t1);
1568 CAMELLIA_ROUNDSM(io[2],io[3],
1569 CAMELLIA_SUBKEY_L(30),CAMELLIA_SUBKEY_R(30),
1570 io[0],io[1],il,ir,t0,t1);
1571 CAMELLIA_ROUNDSM(io[0],io[1],
1572 CAMELLIA_SUBKEY_L(29),CAMELLIA_SUBKEY_R(29),
1573 io[2],io[3],il,ir,t0,t1);
1574 CAMELLIA_ROUNDSM(io[2],io[3],
1575 CAMELLIA_SUBKEY_L(28),CAMELLIA_SUBKEY_R(28),
1576 io[0],io[1],il,ir,t0,t1);
1577 CAMELLIA_ROUNDSM(io[0],io[1],
1578 CAMELLIA_SUBKEY_L(27),CAMELLIA_SUBKEY_R(27),
1579 io[2],io[3],il,ir,t0,t1);
1580 CAMELLIA_ROUNDSM(io[2],io[3],
1581 CAMELLIA_SUBKEY_L(26),CAMELLIA_SUBKEY_R(26),
1582 io[0],io[1],il,ir,t0,t1);
1583
1584 CAMELLIA_FLS(io[0],io[1],io[2],io[3],
1585 CAMELLIA_SUBKEY_L(25),CAMELLIA_SUBKEY_R(25),
1586 CAMELLIA_SUBKEY_L(24),CAMELLIA_SUBKEY_R(24),
1587 t0,t1,il,ir);
1588
1589 CAMELLIA_ROUNDSM(io[0],io[1],
1590 CAMELLIA_SUBKEY_L(23),CAMELLIA_SUBKEY_R(23),
1591 io[2],io[3],il,ir,t0,t1);
1592 CAMELLIA_ROUNDSM(io[2],io[3],
1593 CAMELLIA_SUBKEY_L(22),CAMELLIA_SUBKEY_R(22),
1594 io[0],io[1],il,ir,t0,t1);
1595 CAMELLIA_ROUNDSM(io[0],io[1],
1596 CAMELLIA_SUBKEY_L(21),CAMELLIA_SUBKEY_R(21),
1597 io[2],io[3],il,ir,t0,t1);
1598 CAMELLIA_ROUNDSM(io[2],io[3],
1599 CAMELLIA_SUBKEY_L(20),CAMELLIA_SUBKEY_R(20),
1600 io[0],io[1],il,ir,t0,t1);
1601 CAMELLIA_ROUNDSM(io[0],io[1],
1602 CAMELLIA_SUBKEY_L(19),CAMELLIA_SUBKEY_R(19),
1603 io[2],io[3],il,ir,t0,t1);
1604 CAMELLIA_ROUNDSM(io[2],io[3],
1605 CAMELLIA_SUBKEY_L(18),CAMELLIA_SUBKEY_R(18),
1606 io[0],io[1],il,ir,t0,t1);
1607
1608 CAMELLIA_FLS(io[0],io[1],io[2],io[3],
1609 CAMELLIA_SUBKEY_L(17),CAMELLIA_SUBKEY_R(17),
1610 CAMELLIA_SUBKEY_L(16),CAMELLIA_SUBKEY_R(16),
1611 t0,t1,il,ir);
1612
1613 CAMELLIA_ROUNDSM(io[0],io[1],
1614 CAMELLIA_SUBKEY_L(15),CAMELLIA_SUBKEY_R(15),
1615 io[2],io[3],il,ir,t0,t1);
1616 CAMELLIA_ROUNDSM(io[2],io[3],
1617 CAMELLIA_SUBKEY_L(14),CAMELLIA_SUBKEY_R(14),
1618 io[0],io[1],il,ir,t0,t1);
1619 CAMELLIA_ROUNDSM(io[0],io[1],
1620 CAMELLIA_SUBKEY_L(13),CAMELLIA_SUBKEY_R(13),
1621 io[2],io[3],il,ir,t0,t1);
1622 CAMELLIA_ROUNDSM(io[2],io[3],
1623 CAMELLIA_SUBKEY_L(12),CAMELLIA_SUBKEY_R(12),
1624 io[0],io[1],il,ir,t0,t1);
1625 CAMELLIA_ROUNDSM(io[0],io[1],
1626 CAMELLIA_SUBKEY_L(11),CAMELLIA_SUBKEY_R(11),
1627 io[2],io[3],il,ir,t0,t1);
1628 CAMELLIA_ROUNDSM(io[2],io[3],
1629 CAMELLIA_SUBKEY_L(10),CAMELLIA_SUBKEY_R(10),
1630 io[0],io[1],il,ir,t0,t1);
1631
1632 CAMELLIA_FLS(io[0],io[1],io[2],io[3],
1633 CAMELLIA_SUBKEY_L(9),CAMELLIA_SUBKEY_R(9),
1634 CAMELLIA_SUBKEY_L(8),CAMELLIA_SUBKEY_R(8),
1635 t0,t1,il,ir);
1636
1637 CAMELLIA_ROUNDSM(io[0],io[1],
1638 CAMELLIA_SUBKEY_L(7),CAMELLIA_SUBKEY_R(7),
1639 io[2],io[3],il,ir,t0,t1);
1640 CAMELLIA_ROUNDSM(io[2],io[3],
1641 CAMELLIA_SUBKEY_L(6),CAMELLIA_SUBKEY_R(6),
1642 io[0],io[1],il,ir,t0,t1);
1643 CAMELLIA_ROUNDSM(io[0],io[1],
1644 CAMELLIA_SUBKEY_L(5),CAMELLIA_SUBKEY_R(5),
1645 io[2],io[3],il,ir,t0,t1);
1646 CAMELLIA_ROUNDSM(io[2],io[3],
1647 CAMELLIA_SUBKEY_L(4),CAMELLIA_SUBKEY_R(4),
1648 io[0],io[1],il,ir,t0,t1);
1649 CAMELLIA_ROUNDSM(io[0],io[1],
1650 CAMELLIA_SUBKEY_L(3),CAMELLIA_SUBKEY_R(3),
1651 io[2],io[3],il,ir,t0,t1);
1652 CAMELLIA_ROUNDSM(io[2],io[3],
1653 CAMELLIA_SUBKEY_L(2),CAMELLIA_SUBKEY_R(2),
1654 io[0],io[1],il,ir,t0,t1);
1655
1656 /* post whitening but kw2 */
1657 io[2] ^= CAMELLIA_SUBKEY_L(0);
1658 io[3] ^= CAMELLIA_SUBKEY_R(0);
1659
1660 t0 = io[0];
1661 t1 = io[1];
1662 io[0] = io[2];
1663 io[1] = io[3];
1664 io[2] = t0;
1665 io[3] = t1;
1666
1667 io_text[0] = cpu_to_be32(io[0]);
1668 io_text[1] = cpu_to_be32(io[1]);
1669 io_text[2] = cpu_to_be32(io[2]);
1670 io_text[3] = cpu_to_be32(io[3]);
1671
1672 return;
1673}
1674
1675
1676static int
1677camellia_set_key(struct crypto_tfm *tfm, const u8 *in_key,
1678 unsigned int key_len)
1679{
1680 struct camellia_ctx *cctx = crypto_tfm_ctx(tfm);
1681 const unsigned char *key = (const unsigned char *)in_key;
1682 u32 *flags = &tfm->crt_flags;
1683
1684 if (key_len != 16 && key_len != 24 && key_len != 32) {
1685 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
1686 return -EINVAL;
1687 }
1688
1689 cctx->key_length = key_len;
1690
1691 switch(key_len) {
1692 case 16:
1693 camellia_setup128(key, cctx->key_table);
1694 break;
1695 case 24:
1696 camellia_setup192(key, cctx->key_table);
1697 break;
1698 case 32:
1699 camellia_setup256(key, cctx->key_table);
1700 break;
1701 default:
1702 break;
1703 }
1704
1705 return 0;
1706}
1707
1708
1709static void camellia_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
1710{
1711 const struct camellia_ctx *cctx = crypto_tfm_ctx(tfm);
1712 const __be32 *src = (const __be32 *)in;
1713 __be32 *dst = (__be32 *)out;
1714
1715 __be32 tmp[4];
1716
1717 memcpy(tmp, src, CAMELLIA_BLOCK_SIZE);
1718
1719 switch (cctx->key_length) {
1720 case 16:
1721 camellia_encrypt128(cctx->key_table, tmp);
1722 break;
1723 case 24:
1724 /* fall through */
1725 case 32:
1726 camellia_encrypt256(cctx->key_table, tmp);
1727 break;
1728 default:
1729 break;
1730 }
1731
1732 memcpy(dst, tmp, CAMELLIA_BLOCK_SIZE);
1733}
1734
1735
1736static void camellia_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
1737{
1738 const struct camellia_ctx *cctx = crypto_tfm_ctx(tfm);
1739 const __be32 *src = (const __be32 *)in;
1740 __be32 *dst = (__be32 *)out;
1741
1742 __be32 tmp[4];
1743
1744 memcpy(tmp, src, CAMELLIA_BLOCK_SIZE);
1745
1746 switch (cctx->key_length) {
1747 case 16:
1748 camellia_decrypt128(cctx->key_table, tmp);
1749 break;
1750 case 24:
1751 /* fall through */
1752 case 32:
1753 camellia_decrypt256(cctx->key_table, tmp);
1754 break;
1755 default:
1756 break;
1757 }
1758
1759 memcpy(dst, tmp, CAMELLIA_BLOCK_SIZE);
1760}
1761
1762
1763static struct crypto_alg camellia_alg = {
1764 .cra_name = "camellia",
1765 .cra_driver_name = "camellia-generic",
1766 .cra_priority = 100,
1767 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
1768 .cra_blocksize = CAMELLIA_BLOCK_SIZE,
1769 .cra_ctxsize = sizeof(struct camellia_ctx),
1770 .cra_alignmask = 3,
1771 .cra_module = THIS_MODULE,
1772 .cra_list = LIST_HEAD_INIT(camellia_alg.cra_list),
1773 .cra_u = {
1774 .cipher = {
1775 .cia_min_keysize = CAMELLIA_MIN_KEY_SIZE,
1776 .cia_max_keysize = CAMELLIA_MAX_KEY_SIZE,
1777 .cia_setkey = camellia_set_key,
1778 .cia_encrypt = camellia_encrypt,
1779 .cia_decrypt = camellia_decrypt
1780 }
1781 }
1782};
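/*
 * A minimal usage sketch of this cipher through the kernel crypto API
 * (error handling trimmed; "camellia" resolves to camellia-generic once
 * this module is loaded):
 *
 *	struct crypto_cipher *tfm;
 *	u8 key[16], in[CAMELLIA_BLOCK_SIZE], out[CAMELLIA_BLOCK_SIZE];
 *
 *	tfm = crypto_alloc_cipher("camellia", 0, 0);
 *	if (!IS_ERR(tfm)) {
 *		crypto_cipher_setkey(tfm, key, sizeof(key));
 *		crypto_cipher_encrypt_one(tfm, out, in);
 *		crypto_free_cipher(tfm);
 *	}
 */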
1783
1784static int __init camellia_init(void)
1785{
1786 return crypto_register_alg(&camellia_alg);
1787}
1788
1789
1790static void __exit camellia_fini(void)
1791{
1792 crypto_unregister_alg(&camellia_alg);
1793}
1794
1795
1796module_init(camellia_init);
1797module_exit(camellia_fini);
1798
1799
1800MODULE_DESCRIPTION("Camellia Cipher Algorithm");
1801MODULE_LICENSE("GPL");
diff --git a/crypto/cbc.c b/crypto/cbc.c
index f5542b4db387..136fea7e7000 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -243,6 +243,7 @@ static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
 	struct crypto_instance *inst = (void *)tfm->__crt_alg;
 	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
 	struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_cipher *cipher;
 
 	switch (crypto_tfm_alg_blocksize(tfm)) {
 	case 8:
@@ -260,11 +261,11 @@ static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
 		ctx->xor = xor_quad;
 	}
 
-	tfm = crypto_spawn_tfm(spawn);
-	if (IS_ERR(tfm))
-		return PTR_ERR(tfm);
+	cipher = crypto_spawn_cipher(spawn);
+	if (IS_ERR(cipher))
+		return PTR_ERR(cipher);
 
-	ctx->child = crypto_cipher_cast(tfm);
+	ctx->child = cipher;
 	return 0;
 }
 
diff --git a/crypto/cipher.c b/crypto/cipher.c
index 9e03701cfdcc..333aab2f0277 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -12,274 +12,13 @@
12 * any later version.
13 *
14 */
15#include <linux/compiler.h> 15
16#include <linux/kernel.h>
17#include <linux/crypto.h>
18#include <linux/errno.h>
19#include <linux/mm.h> 19#include <linux/scatterlist.h>
20#include <linux/slab.h>
21#include <linux/string.h>
22#include <asm/scatterlist.h>
23#include "internal.h" 21#include "internal.h"
24#include "scatterwalk.h"
25
26struct cipher_alg_compat {
27 unsigned int cia_min_keysize;
28 unsigned int cia_max_keysize;
29 int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
30 unsigned int keylen);
31 void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
32 void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
33
34 unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc,
35 u8 *dst, const u8 *src,
36 unsigned int nbytes);
37 unsigned int (*cia_decrypt_ecb)(const struct cipher_desc *desc,
38 u8 *dst, const u8 *src,
39 unsigned int nbytes);
40 unsigned int (*cia_encrypt_cbc)(const struct cipher_desc *desc,
41 u8 *dst, const u8 *src,
42 unsigned int nbytes);
43 unsigned int (*cia_decrypt_cbc)(const struct cipher_desc *desc,
44 u8 *dst, const u8 *src,
45 unsigned int nbytes);
46};
47
48static inline void xor_64(u8 *a, const u8 *b)
49{
50 ((u32 *)a)[0] ^= ((u32 *)b)[0];
51 ((u32 *)a)[1] ^= ((u32 *)b)[1];
52}
53
54static inline void xor_128(u8 *a, const u8 *b)
55{
56 ((u32 *)a)[0] ^= ((u32 *)b)[0];
57 ((u32 *)a)[1] ^= ((u32 *)b)[1];
58 ((u32 *)a)[2] ^= ((u32 *)b)[2];
59 ((u32 *)a)[3] ^= ((u32 *)b)[3];
60}
61
62static unsigned int crypt_slow(const struct cipher_desc *desc,
63 struct scatter_walk *in,
64 struct scatter_walk *out, unsigned int bsize)
65{
66 unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);
67 u8 buffer[bsize * 2 + alignmask];
68 u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
69 u8 *dst = src + bsize;
70
71 scatterwalk_copychunks(src, in, bsize, 0);
72 desc->prfn(desc, dst, src, bsize);
73 scatterwalk_copychunks(dst, out, bsize, 1);
74
75 return bsize;
76}
77
78static inline unsigned int crypt_fast(const struct cipher_desc *desc,
79 struct scatter_walk *in,
80 struct scatter_walk *out,
81 unsigned int nbytes, u8 *tmp)
82{
83 u8 *src, *dst;
84 u8 *real_src, *real_dst;
85
86 real_src = scatterwalk_map(in, 0);
87 real_dst = scatterwalk_map(out, 1);
88
89 src = real_src;
90 dst = scatterwalk_samebuf(in, out) ? src : real_dst;
91
92 if (tmp) {
93 memcpy(tmp, src, nbytes);
94 src = tmp;
95 dst = tmp;
96 }
97
98 nbytes = desc->prfn(desc, dst, src, nbytes);
99
100 if (tmp)
101 memcpy(real_dst, tmp, nbytes);
102
103 scatterwalk_unmap(real_src, 0);
104 scatterwalk_unmap(real_dst, 1);
105
106 scatterwalk_advance(in, nbytes);
107 scatterwalk_advance(out, nbytes);
108
109 return nbytes;
110}
111
112/*
113 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
114 * multiple page boundaries by using temporary blocks. In user context,
115 * the kernel is given a chance to schedule us once per page.
116 */
117static int crypt(const struct cipher_desc *desc,
118 struct scatterlist *dst,
119 struct scatterlist *src,
120 unsigned int nbytes)
121{
122 struct scatter_walk walk_in, walk_out;
123 struct crypto_tfm *tfm = desc->tfm;
124 const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
125 unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
126 unsigned long buffer = 0;
127
128 if (!nbytes)
129 return 0;
130
131 if (nbytes % bsize) {
132 tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
133 return -EINVAL;
134 }
135
136 scatterwalk_start(&walk_in, src);
137 scatterwalk_start(&walk_out, dst);
138
139 for(;;) {
140 unsigned int n = nbytes;
141 u8 *tmp = NULL;
142
143 if (!scatterwalk_aligned(&walk_in, alignmask) ||
144 !scatterwalk_aligned(&walk_out, alignmask)) {
145 if (!buffer) {
146 buffer = __get_free_page(GFP_ATOMIC);
147 if (!buffer)
148 n = 0;
149 }
150 tmp = (u8 *)buffer;
151 }
152
153 n = scatterwalk_clamp(&walk_in, n);
154 n = scatterwalk_clamp(&walk_out, n);
155
156 if (likely(n >= bsize))
157 n = crypt_fast(desc, &walk_in, &walk_out, n, tmp);
158 else
159 n = crypt_slow(desc, &walk_in, &walk_out, bsize);
160
161 nbytes -= n;
162
163 scatterwalk_done(&walk_in, 0, nbytes);
164 scatterwalk_done(&walk_out, 1, nbytes);
165
166 if (!nbytes)
167 break;
168
169 crypto_yield(tfm->crt_flags);
170 }
171
172 if (buffer)
173 free_page(buffer);
174
175 return 0;
176}
177
178static int crypt_iv_unaligned(struct cipher_desc *desc,
179 struct scatterlist *dst,
180 struct scatterlist *src,
181 unsigned int nbytes)
182{
183 struct crypto_tfm *tfm = desc->tfm;
184 unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
185 u8 *iv = desc->info;
186
187 if (unlikely(((unsigned long)iv & alignmask))) {
188 unsigned int ivsize = tfm->crt_cipher.cit_ivsize;
189 u8 buffer[ivsize + alignmask];
190 u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
191 int err;
192
193 desc->info = memcpy(tmp, iv, ivsize);
194 err = crypt(desc, dst, src, nbytes);
195 memcpy(iv, tmp, ivsize);
196
197 return err;
198 }
199
200 return crypt(desc, dst, src, nbytes);
201}
202
203static unsigned int cbc_process_encrypt(const struct cipher_desc *desc,
204 u8 *dst, const u8 *src,
205 unsigned int nbytes)
206{
207 struct crypto_tfm *tfm = desc->tfm;
208 void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
209 int bsize = crypto_tfm_alg_blocksize(tfm);
210
211 void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
212 u8 *iv = desc->info;
213 unsigned int done = 0;
214
215 nbytes -= bsize;
216
217 do {
218 xor(iv, src);
219 fn(tfm, dst, iv);
220 memcpy(iv, dst, bsize);
221
222 src += bsize;
223 dst += bsize;
224 } while ((done += bsize) <= nbytes);
225
226 return done;
227}
228
229static unsigned int cbc_process_decrypt(const struct cipher_desc *desc,
230 u8 *dst, const u8 *src,
231 unsigned int nbytes)
232{
233 struct crypto_tfm *tfm = desc->tfm;
234 void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
235 int bsize = crypto_tfm_alg_blocksize(tfm);
236 unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);
237
238 u8 stack[src == dst ? bsize + alignmask : 0];
239 u8 *buf = (u8 *)ALIGN((unsigned long)stack, alignmask + 1);
240 u8 **dst_p = src == dst ? &buf : &dst;
241
242 void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
243 u8 *iv = desc->info;
244 unsigned int done = 0;
245
246 nbytes -= bsize;
247
248 do {
249 u8 *tmp_dst = *dst_p;
250
251 fn(tfm, tmp_dst, src);
252 xor(tmp_dst, iv);
253 memcpy(iv, src, bsize);
254 if (tmp_dst != dst)
255 memcpy(dst, tmp_dst, bsize);
256
257 src += bsize;
258 dst += bsize;
259 } while ((done += bsize) <= nbytes);
260
261 return done;
262}
263
264static unsigned int ecb_process(const struct cipher_desc *desc, u8 *dst,
265 const u8 *src, unsigned int nbytes)
266{
267 struct crypto_tfm *tfm = desc->tfm;
268 int bsize = crypto_tfm_alg_blocksize(tfm);
269 void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
270 unsigned int done = 0;
271
272 nbytes -= bsize;
273
274 do {
275 fn(tfm, dst, src);
276
277 src += bsize;
278 dst += bsize;
279 } while ((done += bsize) <= nbytes);
280
281 return done;
282}
283
284static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
285{
@@ -293,122 +32,6 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
293 return cia->cia_setkey(tfm, key, keylen);
294}
295
296static int ecb_encrypt(struct crypto_tfm *tfm,
297 struct scatterlist *dst,
298 struct scatterlist *src, unsigned int nbytes)
299{
300 struct cipher_desc desc;
301 struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
302
303 desc.tfm = tfm;
304 desc.crfn = cipher->cia_encrypt;
305 desc.prfn = cipher->cia_encrypt_ecb ?: ecb_process;
306
307 return crypt(&desc, dst, src, nbytes);
308}
309
310static int ecb_decrypt(struct crypto_tfm *tfm,
311 struct scatterlist *dst,
312 struct scatterlist *src,
313 unsigned int nbytes)
314{
315 struct cipher_desc desc;
316 struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
317
318 desc.tfm = tfm;
319 desc.crfn = cipher->cia_decrypt;
320 desc.prfn = cipher->cia_decrypt_ecb ?: ecb_process;
321
322 return crypt(&desc, dst, src, nbytes);
323}
324
325static int cbc_encrypt(struct crypto_tfm *tfm,
326 struct scatterlist *dst,
327 struct scatterlist *src,
328 unsigned int nbytes)
329{
330 struct cipher_desc desc;
331 struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
332
333 desc.tfm = tfm;
334 desc.crfn = cipher->cia_encrypt;
335 desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt;
336 desc.info = tfm->crt_cipher.cit_iv;
337
338 return crypt(&desc, dst, src, nbytes);
339}
340
341static int cbc_encrypt_iv(struct crypto_tfm *tfm,
342 struct scatterlist *dst,
343 struct scatterlist *src,
344 unsigned int nbytes, u8 *iv)
345{
346 struct cipher_desc desc;
347 struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
348
349 desc.tfm = tfm;
350 desc.crfn = cipher->cia_encrypt;
351 desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt;
352 desc.info = iv;
353
354 return crypt_iv_unaligned(&desc, dst, src, nbytes);
355}
356
357static int cbc_decrypt(struct crypto_tfm *tfm,
358 struct scatterlist *dst,
359 struct scatterlist *src,
360 unsigned int nbytes)
361{
362 struct cipher_desc desc;
363 struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
364
365 desc.tfm = tfm;
366 desc.crfn = cipher->cia_decrypt;
367 desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt;
368 desc.info = tfm->crt_cipher.cit_iv;
369
370 return crypt(&desc, dst, src, nbytes);
371}
372
373static int cbc_decrypt_iv(struct crypto_tfm *tfm,
374 struct scatterlist *dst,
375 struct scatterlist *src,
376 unsigned int nbytes, u8 *iv)
377{
378 struct cipher_desc desc;
379 struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
380
381 desc.tfm = tfm;
382 desc.crfn = cipher->cia_decrypt;
383 desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt;
384 desc.info = iv;
385
386 return crypt_iv_unaligned(&desc, dst, src, nbytes);
387}
388
389static int nocrypt(struct crypto_tfm *tfm,
390 struct scatterlist *dst,
391 struct scatterlist *src,
392 unsigned int nbytes)
393{
394 return -ENOSYS;
395}
396
397static int nocrypt_iv(struct crypto_tfm *tfm,
398 struct scatterlist *dst,
399 struct scatterlist *src,
400 unsigned int nbytes, u8 *iv)
401{
402 return -ENOSYS;
403}
404
405int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
406{
407 u32 mode = flags & CRYPTO_TFM_MODE_MASK;
408 tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB;
409 return 0;
410}
411
412static void cipher_crypt_unaligned(void (*fn)(struct crypto_tfm *, u8 *,
413 const u8 *),
414 struct crypto_tfm *tfm,
@@ -454,7 +77,6 @@ static void cipher_decrypt_unaligned(struct crypto_tfm *tfm,
454
455int crypto_init_cipher_ops(struct crypto_tfm *tfm)
456{
457 int ret = 0;
458 struct cipher_tfm *ops = &tfm->crt_cipher;
459 struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
460
@@ -464,70 +86,7 @@ int crypto_init_cipher_ops(struct crypto_tfm *tfm)
464 ops->cit_decrypt_one = crypto_tfm_alg_alignmask(tfm) ?
465 cipher_decrypt_unaligned : cipher->cia_decrypt;
466
467 switch (tfm->crt_cipher.cit_mode) { 89 return 0;
468 case CRYPTO_TFM_MODE_ECB:
469 ops->cit_encrypt = ecb_encrypt;
470 ops->cit_decrypt = ecb_decrypt;
471 ops->cit_encrypt_iv = nocrypt_iv;
472 ops->cit_decrypt_iv = nocrypt_iv;
473 break;
474
475 case CRYPTO_TFM_MODE_CBC:
476 ops->cit_encrypt = cbc_encrypt;
477 ops->cit_decrypt = cbc_decrypt;
478 ops->cit_encrypt_iv = cbc_encrypt_iv;
479 ops->cit_decrypt_iv = cbc_decrypt_iv;
480 break;
481
482 case CRYPTO_TFM_MODE_CFB:
483 ops->cit_encrypt = nocrypt;
484 ops->cit_decrypt = nocrypt;
485 ops->cit_encrypt_iv = nocrypt_iv;
486 ops->cit_decrypt_iv = nocrypt_iv;
487 break;
488
489 case CRYPTO_TFM_MODE_CTR:
490 ops->cit_encrypt = nocrypt;
491 ops->cit_decrypt = nocrypt;
492 ops->cit_encrypt_iv = nocrypt_iv;
493 ops->cit_decrypt_iv = nocrypt_iv;
494 break;
495
496 default:
497 BUG();
498 }
499
500 if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {
501 unsigned long align;
502 unsigned long addr;
503
504 switch (crypto_tfm_alg_blocksize(tfm)) {
505 case 8:
506 ops->cit_xor_block = xor_64;
507 break;
508
509 case 16:
510 ops->cit_xor_block = xor_128;
511 break;
512
513 default:
514 printk(KERN_WARNING "%s: block size %u not supported\n",
515 crypto_tfm_alg_name(tfm),
516 crypto_tfm_alg_blocksize(tfm));
517 ret = -EINVAL;
518 goto out;
519 }
520
521 ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
522 align = crypto_tfm_alg_alignmask(tfm) + 1;
523 addr = (unsigned long)crypto_tfm_ctx(tfm);
524 addr = ALIGN(addr, align);
525 addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
526 ops->cit_iv = (void *)addr;
527 }
528
529out:
530 return ret;
531}
532
533void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
diff --git a/crypto/compress.c b/crypto/compress.c
index eca182aa3380..0a6570048c1e 100644
--- a/crypto/compress.c
+++ b/crypto/compress.c
@@ -34,11 +34,6 @@ static int crypto_decompress(struct crypto_tfm *tfm,
 						      dlen);
 }
 
-int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags)
-{
-	return flags ? -EINVAL : 0;
-}
-
 int crypto_init_compress_ops(struct crypto_tfm *tfm)
 {
 	struct compress_tfm *ops = &tfm->crt_compress;
diff --git a/crypto/digest.c b/crypto/digest.c
index 8f4593268ce0..1bf7414aeb9e 100644
--- a/crypto/digest.c
+++ b/crypto/digest.c
@@ -14,7 +14,9 @@
 
 #include <linux/mm.h>
 #include <linux/errno.h>
+#include <linux/hardirq.h>
 #include <linux/highmem.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/scatterlist.h>
 
@@ -29,8 +31,8 @@ static int init(struct hash_desc *desc)
 	return 0;
 }
 
-static int update(struct hash_desc *desc,
+static int update2(struct hash_desc *desc,
 		  struct scatterlist *sg, unsigned int nbytes)
 {
 	struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
 	unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
@@ -81,6 +83,14 @@ static int update(struct hash_desc *desc,
 	return 0;
 }
 
+static int update(struct hash_desc *desc,
+		  struct scatterlist *sg, unsigned int nbytes)
+{
+	if (WARN_ON_ONCE(in_irq()))
+		return -EDEADLK;
+	return update2(desc, sg, nbytes);
+}
+
 static int final(struct hash_desc *desc, u8 *out)
 {
 	struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
@@ -118,16 +128,14 @@ static int setkey(struct crypto_hash *hash, const u8 *key, unsigned int keylen)
 static int digest(struct hash_desc *desc,
 		  struct scatterlist *sg, unsigned int nbytes, u8 *out)
 {
+	if (WARN_ON_ONCE(in_irq()))
+		return -EDEADLK;
+
 	init(desc);
-	update(desc, sg, nbytes);
+	update2(desc, sg, nbytes);
 	return final(desc, out);
 }
 
-int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags)
-{
-	return flags ? -EINVAL : 0;
-}
-
 int crypto_init_digest_ops(struct crypto_tfm *tfm)
 {
 	struct hash_tfm *ops = &tfm->crt_hash;
diff --git a/crypto/ecb.c b/crypto/ecb.c
index f239aa9c4017..839a0aed8c22 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -99,12 +99,13 @@ static int crypto_ecb_init_tfm(struct crypto_tfm *tfm)
 	struct crypto_instance *inst = (void *)tfm->__crt_alg;
 	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
 	struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_cipher *cipher;
 
-	tfm = crypto_spawn_tfm(spawn);
-	if (IS_ERR(tfm))
-		return PTR_ERR(tfm);
+	cipher = crypto_spawn_cipher(spawn);
+	if (IS_ERR(cipher))
+		return PTR_ERR(cipher);
 
-	ctx->child = crypto_cipher_cast(tfm);
+	ctx->child = cipher;
 	return 0;
 }
 
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
new file mode 100644
index 000000000000..9c2bb535b09a
--- /dev/null
+++ b/crypto/fcrypt.c
@@ -0,0 +1,423 @@
1/* FCrypt encryption algorithm
2 *
3 * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Based on code:
12 *
13 * Copyright (c) 1995 - 2000 Kungliga Tekniska Högskolan
14 * (Royal Institute of Technology, Stockholm, Sweden).
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 *
21 * 1. Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 *
24 * 2. Redistributions in binary form must reproduce the above copyright
25 * notice, this list of conditions and the following disclaimer in the
26 * documentation and/or other materials provided with the distribution.
27 *
28 * 3. Neither the name of the Institute nor the names of its contributors
29 * may be used to endorse or promote products derived from this software
30 * without specific prior written permission.
31 *
32 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
33 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
36 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42 * SUCH DAMAGE.
43 */
44
45#include <asm/byteorder.h>
46#include <linux/bitops.h>
47#include <linux/init.h>
48#include <linux/module.h>
49#include <linux/crypto.h>
50
51#define ROUNDS 16
52
53struct fcrypt_ctx {
54 u32 sched[ROUNDS];
55};
56
57/* Rotate right two 32 bit numbers as a 56 bit number */
58#define ror56(hi, lo, n) \
59do { \
60 u32 t = lo & ((1 << n) - 1); \
61 lo = (lo >> n) | ((hi & ((1 << n) - 1)) << (32 - n)); \
62 hi = (hi >> n) | (t << (24-n)); \
63} while(0)
64
65/* Rotate right one 64 bit number as a 56 bit number */
66#define ror56_64(k, n) \
67do { \
68 k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n)); \
69} while(0)
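/*
 * For example, rotating the 56-bit value 0x01 right by one place moves
 * bit 0 up to bit 55: after ror56_64(k, 1) with k == 0x01, k ends up as
 * 0x80000000000000.
 */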
70
71/*
72 * Sboxes for Feistel network derived from
73 * /afs/transarc.com/public/afsps/afs.rel31b.export-src/rxkad/sboxes.h
74 */
75#undef Z
76#define Z(x) __constant_be32_to_cpu(x << 3)
77static const u32 sbox0[256] = {
78 Z(0xea), Z(0x7f), Z(0xb2), Z(0x64), Z(0x9d), Z(0xb0), Z(0xd9), Z(0x11),
79 Z(0xcd), Z(0x86), Z(0x86), Z(0x91), Z(0x0a), Z(0xb2), Z(0x93), Z(0x06),
80 Z(0x0e), Z(0x06), Z(0xd2), Z(0x65), Z(0x73), Z(0xc5), Z(0x28), Z(0x60),
81 Z(0xf2), Z(0x20), Z(0xb5), Z(0x38), Z(0x7e), Z(0xda), Z(0x9f), Z(0xe3),
82 Z(0xd2), Z(0xcf), Z(0xc4), Z(0x3c), Z(0x61), Z(0xff), Z(0x4a), Z(0x4a),
83 Z(0x35), Z(0xac), Z(0xaa), Z(0x5f), Z(0x2b), Z(0xbb), Z(0xbc), Z(0x53),
84 Z(0x4e), Z(0x9d), Z(0x78), Z(0xa3), Z(0xdc), Z(0x09), Z(0x32), Z(0x10),
85 Z(0xc6), Z(0x6f), Z(0x66), Z(0xd6), Z(0xab), Z(0xa9), Z(0xaf), Z(0xfd),
86 Z(0x3b), Z(0x95), Z(0xe8), Z(0x34), Z(0x9a), Z(0x81), Z(0x72), Z(0x80),
87 Z(0x9c), Z(0xf3), Z(0xec), Z(0xda), Z(0x9f), Z(0x26), Z(0x76), Z(0x15),
88 Z(0x3e), Z(0x55), Z(0x4d), Z(0xde), Z(0x84), Z(0xee), Z(0xad), Z(0xc7),
89 Z(0xf1), Z(0x6b), Z(0x3d), Z(0xd3), Z(0x04), Z(0x49), Z(0xaa), Z(0x24),
90 Z(0x0b), Z(0x8a), Z(0x83), Z(0xba), Z(0xfa), Z(0x85), Z(0xa0), Z(0xa8),
91 Z(0xb1), Z(0xd4), Z(0x01), Z(0xd8), Z(0x70), Z(0x64), Z(0xf0), Z(0x51),
92 Z(0xd2), Z(0xc3), Z(0xa7), Z(0x75), Z(0x8c), Z(0xa5), Z(0x64), Z(0xef),
93 Z(0x10), Z(0x4e), Z(0xb7), Z(0xc6), Z(0x61), Z(0x03), Z(0xeb), Z(0x44),
94 Z(0x3d), Z(0xe5), Z(0xb3), Z(0x5b), Z(0xae), Z(0xd5), Z(0xad), Z(0x1d),
95 Z(0xfa), Z(0x5a), Z(0x1e), Z(0x33), Z(0xab), Z(0x93), Z(0xa2), Z(0xb7),
96 Z(0xe7), Z(0xa8), Z(0x45), Z(0xa4), Z(0xcd), Z(0x29), Z(0x63), Z(0x44),
97 Z(0xb6), Z(0x69), Z(0x7e), Z(0x2e), Z(0x62), Z(0x03), Z(0xc8), Z(0xe0),
98 Z(0x17), Z(0xbb), Z(0xc7), Z(0xf3), Z(0x3f), Z(0x36), Z(0xba), Z(0x71),
99 Z(0x8e), Z(0x97), Z(0x65), Z(0x60), Z(0x69), Z(0xb6), Z(0xf6), Z(0xe6),
100 Z(0x6e), Z(0xe0), Z(0x81), Z(0x59), Z(0xe8), Z(0xaf), Z(0xdd), Z(0x95),
101 Z(0x22), Z(0x99), Z(0xfd), Z(0x63), Z(0x19), Z(0x74), Z(0x61), Z(0xb1),
102 Z(0xb6), Z(0x5b), Z(0xae), Z(0x54), Z(0xb3), Z(0x70), Z(0xff), Z(0xc6),
103 Z(0x3b), Z(0x3e), Z(0xc1), Z(0xd7), Z(0xe1), Z(0x0e), Z(0x76), Z(0xe5),
104 Z(0x36), Z(0x4f), Z(0x59), Z(0xc7), Z(0x08), Z(0x6e), Z(0x82), Z(0xa6),
105 Z(0x93), Z(0xc4), Z(0xaa), Z(0x26), Z(0x49), Z(0xe0), Z(0x21), Z(0x64),
106 Z(0x07), Z(0x9f), Z(0x64), Z(0x81), Z(0x9c), Z(0xbf), Z(0xf9), Z(0xd1),
107 Z(0x43), Z(0xf8), Z(0xb6), Z(0xb9), Z(0xf1), Z(0x24), Z(0x75), Z(0x03),
108 Z(0xe4), Z(0xb0), Z(0x99), Z(0x46), Z(0x3d), Z(0xf5), Z(0xd1), Z(0x39),
109 Z(0x72), Z(0x12), Z(0xf6), Z(0xba), Z(0x0c), Z(0x0d), Z(0x42), Z(0x2e)
110};
111
112#undef Z
113#define Z(x) __constant_be32_to_cpu((x << 27) | (x >> 5))
114static const u32 sbox1[256] = {
115 Z(0x77), Z(0x14), Z(0xa6), Z(0xfe), Z(0xb2), Z(0x5e), Z(0x8c), Z(0x3e),
116 Z(0x67), Z(0x6c), Z(0xa1), Z(0x0d), Z(0xc2), Z(0xa2), Z(0xc1), Z(0x85),
117 Z(0x6c), Z(0x7b), Z(0x67), Z(0xc6), Z(0x23), Z(0xe3), Z(0xf2), Z(0x89),
118 Z(0x50), Z(0x9c), Z(0x03), Z(0xb7), Z(0x73), Z(0xe6), Z(0xe1), Z(0x39),
119 Z(0x31), Z(0x2c), Z(0x27), Z(0x9f), Z(0xa5), Z(0x69), Z(0x44), Z(0xd6),
120 Z(0x23), Z(0x83), Z(0x98), Z(0x7d), Z(0x3c), Z(0xb4), Z(0x2d), Z(0x99),
121 Z(0x1c), Z(0x1f), Z(0x8c), Z(0x20), Z(0x03), Z(0x7c), Z(0x5f), Z(0xad),
122 Z(0xf4), Z(0xfa), Z(0x95), Z(0xca), Z(0x76), Z(0x44), Z(0xcd), Z(0xb6),
123 Z(0xb8), Z(0xa1), Z(0xa1), Z(0xbe), Z(0x9e), Z(0x54), Z(0x8f), Z(0x0b),
124 Z(0x16), Z(0x74), Z(0x31), Z(0x8a), Z(0x23), Z(0x17), Z(0x04), Z(0xfa),
125 Z(0x79), Z(0x84), Z(0xb1), Z(0xf5), Z(0x13), Z(0xab), Z(0xb5), Z(0x2e),
126 Z(0xaa), Z(0x0c), Z(0x60), Z(0x6b), Z(0x5b), Z(0xc4), Z(0x4b), Z(0xbc),
127 Z(0xe2), Z(0xaf), Z(0x45), Z(0x73), Z(0xfa), Z(0xc9), Z(0x49), Z(0xcd),
128 Z(0x00), Z(0x92), Z(0x7d), Z(0x97), Z(0x7a), Z(0x18), Z(0x60), Z(0x3d),
129 Z(0xcf), Z(0x5b), Z(0xde), Z(0xc6), Z(0xe2), Z(0xe6), Z(0xbb), Z(0x8b),
130 Z(0x06), Z(0xda), Z(0x08), Z(0x15), Z(0x1b), Z(0x88), Z(0x6a), Z(0x17),
131 Z(0x89), Z(0xd0), Z(0xa9), Z(0xc1), Z(0xc9), Z(0x70), Z(0x6b), Z(0xe5),
132 Z(0x43), Z(0xf4), Z(0x68), Z(0xc8), Z(0xd3), Z(0x84), Z(0x28), Z(0x0a),
133 Z(0x52), Z(0x66), Z(0xa3), Z(0xca), Z(0xf2), Z(0xe3), Z(0x7f), Z(0x7a),
134 Z(0x31), Z(0xf7), Z(0x88), Z(0x94), Z(0x5e), Z(0x9c), Z(0x63), Z(0xd5),
135 Z(0x24), Z(0x66), Z(0xfc), Z(0xb3), Z(0x57), Z(0x25), Z(0xbe), Z(0x89),
136 Z(0x44), Z(0xc4), Z(0xe0), Z(0x8f), Z(0x23), Z(0x3c), Z(0x12), Z(0x52),
137 Z(0xf5), Z(0x1e), Z(0xf4), Z(0xcb), Z(0x18), Z(0x33), Z(0x1f), Z(0xf8),
138 Z(0x69), Z(0x10), Z(0x9d), Z(0xd3), Z(0xf7), Z(0x28), Z(0xf8), Z(0x30),
139 Z(0x05), Z(0x5e), Z(0x32), Z(0xc0), Z(0xd5), Z(0x19), Z(0xbd), Z(0x45),
140 Z(0x8b), Z(0x5b), Z(0xfd), Z(0xbc), Z(0xe2), Z(0x5c), Z(0xa9), Z(0x96),
141 Z(0xef), Z(0x70), Z(0xcf), Z(0xc2), Z(0x2a), Z(0xb3), Z(0x61), Z(0xad),
142 Z(0x80), Z(0x48), Z(0x81), Z(0xb7), Z(0x1d), Z(0x43), Z(0xd9), Z(0xd7),
143 Z(0x45), Z(0xf0), Z(0xd8), Z(0x8a), Z(0x59), Z(0x7c), Z(0x57), Z(0xc1),
144 Z(0x79), Z(0xc7), Z(0x34), Z(0xd6), Z(0x43), Z(0xdf), Z(0xe4), Z(0x78),
145 Z(0x16), Z(0x06), Z(0xda), Z(0x92), Z(0x76), Z(0x51), Z(0xe1), Z(0xd4),
146 Z(0x70), Z(0x03), Z(0xe0), Z(0x2f), Z(0x96), Z(0x91), Z(0x82), Z(0x80)
147};
148
149#undef Z
150#define Z(x) __constant_be32_to_cpu(x << 11)
151static const u32 sbox2[256] = {
152 Z(0xf0), Z(0x37), Z(0x24), Z(0x53), Z(0x2a), Z(0x03), Z(0x83), Z(0x86),
153 Z(0xd1), Z(0xec), Z(0x50), Z(0xf0), Z(0x42), Z(0x78), Z(0x2f), Z(0x6d),
154 Z(0xbf), Z(0x80), Z(0x87), Z(0x27), Z(0x95), Z(0xe2), Z(0xc5), Z(0x5d),
155 Z(0xf9), Z(0x6f), Z(0xdb), Z(0xb4), Z(0x65), Z(0x6e), Z(0xe7), Z(0x24),
156 Z(0xc8), Z(0x1a), Z(0xbb), Z(0x49), Z(0xb5), Z(0x0a), Z(0x7d), Z(0xb9),
157 Z(0xe8), Z(0xdc), Z(0xb7), Z(0xd9), Z(0x45), Z(0x20), Z(0x1b), Z(0xce),
158 Z(0x59), Z(0x9d), Z(0x6b), Z(0xbd), Z(0x0e), Z(0x8f), Z(0xa3), Z(0xa9),
159 Z(0xbc), Z(0x74), Z(0xa6), Z(0xf6), Z(0x7f), Z(0x5f), Z(0xb1), Z(0x68),
160 Z(0x84), Z(0xbc), Z(0xa9), Z(0xfd), Z(0x55), Z(0x50), Z(0xe9), Z(0xb6),
161 Z(0x13), Z(0x5e), Z(0x07), Z(0xb8), Z(0x95), Z(0x02), Z(0xc0), Z(0xd0),
162 Z(0x6a), Z(0x1a), Z(0x85), Z(0xbd), Z(0xb6), Z(0xfd), Z(0xfe), Z(0x17),
163 Z(0x3f), Z(0x09), Z(0xa3), Z(0x8d), Z(0xfb), Z(0xed), Z(0xda), Z(0x1d),
164 Z(0x6d), Z(0x1c), Z(0x6c), Z(0x01), Z(0x5a), Z(0xe5), Z(0x71), Z(0x3e),
165 Z(0x8b), Z(0x6b), Z(0xbe), Z(0x29), Z(0xeb), Z(0x12), Z(0x19), Z(0x34),
166 Z(0xcd), Z(0xb3), Z(0xbd), Z(0x35), Z(0xea), Z(0x4b), Z(0xd5), Z(0xae),
167 Z(0x2a), Z(0x79), Z(0x5a), Z(0xa5), Z(0x32), Z(0x12), Z(0x7b), Z(0xdc),
168 Z(0x2c), Z(0xd0), Z(0x22), Z(0x4b), Z(0xb1), Z(0x85), Z(0x59), Z(0x80),
169 Z(0xc0), Z(0x30), Z(0x9f), Z(0x73), Z(0xd3), Z(0x14), Z(0x48), Z(0x40),
170 Z(0x07), Z(0x2d), Z(0x8f), Z(0x80), Z(0x0f), Z(0xce), Z(0x0b), Z(0x5e),
171 Z(0xb7), Z(0x5e), Z(0xac), Z(0x24), Z(0x94), Z(0x4a), Z(0x18), Z(0x15),
172 Z(0x05), Z(0xe8), Z(0x02), Z(0x77), Z(0xa9), Z(0xc7), Z(0x40), Z(0x45),
173 Z(0x89), Z(0xd1), Z(0xea), Z(0xde), Z(0x0c), Z(0x79), Z(0x2a), Z(0x99),
174 Z(0x6c), Z(0x3e), Z(0x95), Z(0xdd), Z(0x8c), Z(0x7d), Z(0xad), Z(0x6f),
175 Z(0xdc), Z(0xff), Z(0xfd), Z(0x62), Z(0x47), Z(0xb3), Z(0x21), Z(0x8a),
176 Z(0xec), Z(0x8e), Z(0x19), Z(0x18), Z(0xb4), Z(0x6e), Z(0x3d), Z(0xfd),
177 Z(0x74), Z(0x54), Z(0x1e), Z(0x04), Z(0x85), Z(0xd8), Z(0xbc), Z(0x1f),
178 Z(0x56), Z(0xe7), Z(0x3a), Z(0x56), Z(0x67), Z(0xd6), Z(0xc8), Z(0xa5),
179 Z(0xf3), Z(0x8e), Z(0xde), Z(0xae), Z(0x37), Z(0x49), Z(0xb7), Z(0xfa),
180 Z(0xc8), Z(0xf4), Z(0x1f), Z(0xe0), Z(0x2a), Z(0x9b), Z(0x15), Z(0xd1),
181 Z(0x34), Z(0x0e), Z(0xb5), Z(0xe0), Z(0x44), Z(0x78), Z(0x84), Z(0x59),
182 Z(0x56), Z(0x68), Z(0x77), Z(0xa5), Z(0x14), Z(0x06), Z(0xf5), Z(0x2f),
183 Z(0x8c), Z(0x8a), Z(0x73), Z(0x80), Z(0x76), Z(0xb4), Z(0x10), Z(0x86)
184};
185
186#undef Z
187#define Z(x) __constant_be32_to_cpu(x << 19)
188static const u32 sbox3[256] = {
189 Z(0xa9), Z(0x2a), Z(0x48), Z(0x51), Z(0x84), Z(0x7e), Z(0x49), Z(0xe2),
190 Z(0xb5), Z(0xb7), Z(0x42), Z(0x33), Z(0x7d), Z(0x5d), Z(0xa6), Z(0x12),
191 Z(0x44), Z(0x48), Z(0x6d), Z(0x28), Z(0xaa), Z(0x20), Z(0x6d), Z(0x57),
192 Z(0xd6), Z(0x6b), Z(0x5d), Z(0x72), Z(0xf0), Z(0x92), Z(0x5a), Z(0x1b),
193 Z(0x53), Z(0x80), Z(0x24), Z(0x70), Z(0x9a), Z(0xcc), Z(0xa7), Z(0x66),
194 Z(0xa1), Z(0x01), Z(0xa5), Z(0x41), Z(0x97), Z(0x41), Z(0x31), Z(0x82),
195 Z(0xf1), Z(0x14), Z(0xcf), Z(0x53), Z(0x0d), Z(0xa0), Z(0x10), Z(0xcc),
196 Z(0x2a), Z(0x7d), Z(0xd2), Z(0xbf), Z(0x4b), Z(0x1a), Z(0xdb), Z(0x16),
197 Z(0x47), Z(0xf6), Z(0x51), Z(0x36), Z(0xed), Z(0xf3), Z(0xb9), Z(0x1a),
198 Z(0xa7), Z(0xdf), Z(0x29), Z(0x43), Z(0x01), Z(0x54), Z(0x70), Z(0xa4),
199 Z(0xbf), Z(0xd4), Z(0x0b), Z(0x53), Z(0x44), Z(0x60), Z(0x9e), Z(0x23),
200 Z(0xa1), Z(0x18), Z(0x68), Z(0x4f), Z(0xf0), Z(0x2f), Z(0x82), Z(0xc2),
201 Z(0x2a), Z(0x41), Z(0xb2), Z(0x42), Z(0x0c), Z(0xed), Z(0x0c), Z(0x1d),
202 Z(0x13), Z(0x3a), Z(0x3c), Z(0x6e), Z(0x35), Z(0xdc), Z(0x60), Z(0x65),
203 Z(0x85), Z(0xe9), Z(0x64), Z(0x02), Z(0x9a), Z(0x3f), Z(0x9f), Z(0x87),
204 Z(0x96), Z(0xdf), Z(0xbe), Z(0xf2), Z(0xcb), Z(0xe5), Z(0x6c), Z(0xd4),
205 Z(0x5a), Z(0x83), Z(0xbf), Z(0x92), Z(0x1b), Z(0x94), Z(0x00), Z(0x42),
206 Z(0xcf), Z(0x4b), Z(0x00), Z(0x75), Z(0xba), Z(0x8f), Z(0x76), Z(0x5f),
207 Z(0x5d), Z(0x3a), Z(0x4d), Z(0x09), Z(0x12), Z(0x08), Z(0x38), Z(0x95),
208 Z(0x17), Z(0xe4), Z(0x01), Z(0x1d), Z(0x4c), Z(0xa9), Z(0xcc), Z(0x85),
209 Z(0x82), Z(0x4c), Z(0x9d), Z(0x2f), Z(0x3b), Z(0x66), Z(0xa1), Z(0x34),
210 Z(0x10), Z(0xcd), Z(0x59), Z(0x89), Z(0xa5), Z(0x31), Z(0xcf), Z(0x05),
211 Z(0xc8), Z(0x84), Z(0xfa), Z(0xc7), Z(0xba), Z(0x4e), Z(0x8b), Z(0x1a),
212 Z(0x19), Z(0xf1), Z(0xa1), Z(0x3b), Z(0x18), Z(0x12), Z(0x17), Z(0xb0),
213 Z(0x98), Z(0x8d), Z(0x0b), Z(0x23), Z(0xc3), Z(0x3a), Z(0x2d), Z(0x20),
214 Z(0xdf), Z(0x13), Z(0xa0), Z(0xa8), Z(0x4c), Z(0x0d), Z(0x6c), Z(0x2f),
215 Z(0x47), Z(0x13), Z(0x13), Z(0x52), Z(0x1f), Z(0x2d), Z(0xf5), Z(0x79),
216 Z(0x3d), Z(0xa2), Z(0x54), Z(0xbd), Z(0x69), Z(0xc8), Z(0x6b), Z(0xf3),
217 Z(0x05), Z(0x28), Z(0xf1), Z(0x16), Z(0x46), Z(0x40), Z(0xb0), Z(0x11),
218 Z(0xd3), Z(0xb7), Z(0x95), Z(0x49), Z(0xcf), Z(0xc3), Z(0x1d), Z(0x8f),
219 Z(0xd8), Z(0xe1), Z(0x73), Z(0xdb), Z(0xad), Z(0xc8), Z(0xc9), Z(0xa9),
220 Z(0xa1), Z(0xc2), Z(0xc5), Z(0xe3), Z(0xba), Z(0xfc), Z(0x0e), Z(0x25)
221};
222
223/*
224 * This is a 16-round Feistel network; each round applies the permutation F_ENCRYPT
225 */
226#define F_ENCRYPT(R, L, sched) \
227do { \
228 union lc4 { u32 l; u8 c[4]; } u; \
229 u.l = sched ^ R; \
230 L ^= sbox0[u.c[0]] ^ sbox1[u.c[1]] ^ sbox2[u.c[2]] ^ sbox3[u.c[3]]; \
231} while(0)
232
233/*
234 * Encrypt a single 64-bit block
235 */
236static void fcrypt_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
237{
238 const struct fcrypt_ctx *ctx = crypto_tfm_ctx(tfm);
239 struct {
240 u32 l, r;
241 } X;
242
243 memcpy(&X, src, sizeof(X));
244
245 F_ENCRYPT(X.r, X.l, ctx->sched[0x0]);
246 F_ENCRYPT(X.l, X.r, ctx->sched[0x1]);
247 F_ENCRYPT(X.r, X.l, ctx->sched[0x2]);
248 F_ENCRYPT(X.l, X.r, ctx->sched[0x3]);
249 F_ENCRYPT(X.r, X.l, ctx->sched[0x4]);
250 F_ENCRYPT(X.l, X.r, ctx->sched[0x5]);
251 F_ENCRYPT(X.r, X.l, ctx->sched[0x6]);
252 F_ENCRYPT(X.l, X.r, ctx->sched[0x7]);
253 F_ENCRYPT(X.r, X.l, ctx->sched[0x8]);
254 F_ENCRYPT(X.l, X.r, ctx->sched[0x9]);
255 F_ENCRYPT(X.r, X.l, ctx->sched[0xa]);
256 F_ENCRYPT(X.l, X.r, ctx->sched[0xb]);
257 F_ENCRYPT(X.r, X.l, ctx->sched[0xc]);
258 F_ENCRYPT(X.l, X.r, ctx->sched[0xd]);
259 F_ENCRYPT(X.r, X.l, ctx->sched[0xe]);
260 F_ENCRYPT(X.l, X.r, ctx->sched[0xf]);
261
262 memcpy(dst, &X, sizeof(X));
263}
264
265/*
266 * Decrypt a single 64-bit block: the same rounds with the subkeys taken in reverse order
267 */
268static void fcrypt_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
269{
270 const struct fcrypt_ctx *ctx = crypto_tfm_ctx(tfm);
271 struct {
272 u32 l, r;
273 } X;
274
275 memcpy(&X, src, sizeof(X));
276
277 F_ENCRYPT(X.l, X.r, ctx->sched[0xf]);
278 F_ENCRYPT(X.r, X.l, ctx->sched[0xe]);
279 F_ENCRYPT(X.l, X.r, ctx->sched[0xd]);
280 F_ENCRYPT(X.r, X.l, ctx->sched[0xc]);
281 F_ENCRYPT(X.l, X.r, ctx->sched[0xb]);
282 F_ENCRYPT(X.r, X.l, ctx->sched[0xa]);
283 F_ENCRYPT(X.l, X.r, ctx->sched[0x9]);
284 F_ENCRYPT(X.r, X.l, ctx->sched[0x8]);
285 F_ENCRYPT(X.l, X.r, ctx->sched[0x7]);
286 F_ENCRYPT(X.r, X.l, ctx->sched[0x6]);
287 F_ENCRYPT(X.l, X.r, ctx->sched[0x5]);
288 F_ENCRYPT(X.r, X.l, ctx->sched[0x4]);
289 F_ENCRYPT(X.l, X.r, ctx->sched[0x3]);
290 F_ENCRYPT(X.r, X.l, ctx->sched[0x2]);
291 F_ENCRYPT(X.l, X.r, ctx->sched[0x1]);
292 F_ENCRYPT(X.r, X.l, ctx->sched[0x0]);
293
294 memcpy(dst, &X, sizeof(X));
295}
296
297/*
298 * Generate a key schedule from the key; the least significant bit in each key
299 * byte is parity and shall be ignored. This leaves 56 significant bits in the
300 * key to scatter over the 16 key schedules. For each schedule, extract the
301 * low-order 32 bits to use as the schedule, then rotate right by 11 bits.
302 */
303static int fcrypt_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
304{
305 struct fcrypt_ctx *ctx = crypto_tfm_ctx(tfm);
306
307#if BITS_PER_LONG == 64 /* the 64-bit version can also be used for 32-bit
308 * kernels - it seems to be faster but the code is
309 * larger */
310
311 u64 k; /* k holds all 56 non-parity bits */
312
313 /* discard the parity bits */
314 k = (*key++) >> 1;
315 k <<= 7;
316 k |= (*key++) >> 1;
317 k <<= 7;
318 k |= (*key++) >> 1;
319 k <<= 7;
320 k |= (*key++) >> 1;
321 k <<= 7;
322 k |= (*key++) >> 1;
323 k <<= 7;
324 k |= (*key++) >> 1;
325 k <<= 7;
326 k |= (*key++) >> 1;
327 k <<= 7;
328 k |= (*key) >> 1;
329
330 /* Use the low 32 bits of k as each subkey, rotating the 56-bit value right by 11 bits between subkeys */
331 ctx->sched[0x0] = be32_to_cpu(k); ror56_64(k, 11);
332 ctx->sched[0x1] = be32_to_cpu(k); ror56_64(k, 11);
333 ctx->sched[0x2] = be32_to_cpu(k); ror56_64(k, 11);
334 ctx->sched[0x3] = be32_to_cpu(k); ror56_64(k, 11);
335 ctx->sched[0x4] = be32_to_cpu(k); ror56_64(k, 11);
336 ctx->sched[0x5] = be32_to_cpu(k); ror56_64(k, 11);
337 ctx->sched[0x6] = be32_to_cpu(k); ror56_64(k, 11);
338 ctx->sched[0x7] = be32_to_cpu(k); ror56_64(k, 11);
339 ctx->sched[0x8] = be32_to_cpu(k); ror56_64(k, 11);
340 ctx->sched[0x9] = be32_to_cpu(k); ror56_64(k, 11);
341 ctx->sched[0xa] = be32_to_cpu(k); ror56_64(k, 11);
342 ctx->sched[0xb] = be32_to_cpu(k); ror56_64(k, 11);
343 ctx->sched[0xc] = be32_to_cpu(k); ror56_64(k, 11);
344 ctx->sched[0xd] = be32_to_cpu(k); ror56_64(k, 11);
345 ctx->sched[0xe] = be32_to_cpu(k); ror56_64(k, 11);
346 ctx->sched[0xf] = be32_to_cpu(k);
347
348 return 0;
349#else
350 u32 hi, lo; /* hi is upper 24 bits and lo lower 32, total 56 */
351
352 /* discard the parity bits */
353 lo = (*key++) >> 1;
354 lo <<= 7;
355 lo |= (*key++) >> 1;
356 lo <<= 7;
357 lo |= (*key++) >> 1;
358 lo <<= 7;
359 lo |= (*key++) >> 1;
360 hi = lo >> 4;
361 lo &= 0xf;
362 lo <<= 7;
363 lo |= (*key++) >> 1;
364 lo <<= 7;
365 lo |= (*key++) >> 1;
366 lo <<= 7;
367 lo |= (*key++) >> 1;
368 lo <<= 7;
369 lo |= (*key) >> 1;
370
371 /* Use the low 32 bits of hi:lo as each subkey, rotating the 56-bit value right by 11 bits between subkeys */
372 ctx->sched[0x0] = be32_to_cpu(lo); ror56(hi, lo, 11);
373 ctx->sched[0x1] = be32_to_cpu(lo); ror56(hi, lo, 11);
374 ctx->sched[0x2] = be32_to_cpu(lo); ror56(hi, lo, 11);
375 ctx->sched[0x3] = be32_to_cpu(lo); ror56(hi, lo, 11);
376 ctx->sched[0x4] = be32_to_cpu(lo); ror56(hi, lo, 11);
377 ctx->sched[0x5] = be32_to_cpu(lo); ror56(hi, lo, 11);
378 ctx->sched[0x6] = be32_to_cpu(lo); ror56(hi, lo, 11);
379 ctx->sched[0x7] = be32_to_cpu(lo); ror56(hi, lo, 11);
380 ctx->sched[0x8] = be32_to_cpu(lo); ror56(hi, lo, 11);
381 ctx->sched[0x9] = be32_to_cpu(lo); ror56(hi, lo, 11);
382 ctx->sched[0xa] = be32_to_cpu(lo); ror56(hi, lo, 11);
383 ctx->sched[0xb] = be32_to_cpu(lo); ror56(hi, lo, 11);
384 ctx->sched[0xc] = be32_to_cpu(lo); ror56(hi, lo, 11);
385 ctx->sched[0xd] = be32_to_cpu(lo); ror56(hi, lo, 11);
386 ctx->sched[0xe] = be32_to_cpu(lo); ror56(hi, lo, 11);
387 ctx->sched[0xf] = be32_to_cpu(lo);
388 return 0;
389#endif
390}
391
392static struct crypto_alg fcrypt_alg = {
393 .cra_name = "fcrypt",
394 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
395 .cra_blocksize = 8,
396 .cra_ctxsize = sizeof(struct fcrypt_ctx),
397 .cra_module = THIS_MODULE,
398 .cra_alignmask = 3,
399 .cra_list = LIST_HEAD_INIT(fcrypt_alg.cra_list),
400 .cra_u = { .cipher = {
401 .cia_min_keysize = 8,
402 .cia_max_keysize = 8,
403 .cia_setkey = fcrypt_setkey,
404 .cia_encrypt = fcrypt_encrypt,
405 .cia_decrypt = fcrypt_decrypt } }
406};
407
408static int __init init(void)
409{
410 return crypto_register_alg(&fcrypt_alg);
411}
412
413static void __exit fini(void)
414{
415 crypto_unregister_alg(&fcrypt_alg);
416}
417
418module_init(init);
419module_exit(fini);
420
421MODULE_LICENSE("Dual BSD/GPL");
422MODULE_DESCRIPTION("FCrypt Cipher Algorithm");
423MODULE_AUTHOR("David Howells <dhowells@redhat.com>");
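The F_ENCRYPT macro above is the entire fcrypt round function: the 32-bit subkey is XORed into the right half, the four bytes of that value index the four pre-rotated S-boxes, and the XOR of the lookups is folded into the left half. The unrolled bodies of fcrypt_encrypt() and fcrypt_decrypt() are equivalent to walking the 16 subkeys with the two halves alternating roles, decryption simply taking the subkeys in reverse order. The sketch below restates that structure as plain functions; it is illustrative only (the helper names fcrypt_round() and fcrypt_encrypt_loop() are not part of the kernel source) and it assumes the sbox0..sbox3 tables and struct fcrypt_ctx defined in this file.

/* One Feistel round: mix a 32-bit subkey into R, then XOR the four
 * S-box lookups (one per byte of the mixed word) into L and return it. */
static u32 fcrypt_round(u32 l, u32 r, u32 subkey)
{
	union { u32 w; u8 c[4]; } u;

	u.w = subkey ^ r;
	return l ^ sbox0[u.c[0]] ^ sbox1[u.c[1]] ^ sbox2[u.c[2]] ^ sbox3[u.c[3]];
}

/* Loop form of the unrolled 16-round encryption above: even rounds
 * fold into X.l, odd rounds fold into X.r. */
static void fcrypt_encrypt_loop(const struct fcrypt_ctx *ctx, u32 *xl, u32 *xr)
{
	int i;

	for (i = 0; i < 16; i += 2) {
		*xl = fcrypt_round(*xl, *xr, ctx->sched[i]);
		*xr = fcrypt_round(*xr, *xl, ctx->sched[i + 1]);
	}
}

fcrypt_decrypt() above is the same ladder run backwards, starting from ctx->sched[0xf], so no separate decryption sketch is needed.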
diff --git a/crypto/hash.c b/crypto/hash.c
index cdec23d885fe..12c4514f3478 100644
--- a/crypto/hash.c
+++ b/crypto/hash.c
@@ -16,12 +16,13 @@
16 16
17#include "internal.h" 17#include "internal.h"
18 18
19static unsigned int crypto_hash_ctxsize(struct crypto_alg *alg) 19static unsigned int crypto_hash_ctxsize(struct crypto_alg *alg, u32 type,
20 u32 mask)
20{ 21{
21 return alg->cra_ctxsize; 22 return alg->cra_ctxsize;
22} 23}
23 24
24static int crypto_init_hash_ops(struct crypto_tfm *tfm) 25static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
25{ 26{
26 struct hash_tfm *crt = &tfm->crt_hash; 27 struct hash_tfm *crt = &tfm->crt_hash;
27 struct hash_alg *alg = &tfm->__crt_alg->cra_hash; 28 struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
diff --git a/crypto/hmac.c b/crypto/hmac.c
index b521bcd2b2c6..44187c5ee593 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -172,15 +172,16 @@ static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg,
172 172
173static int hmac_init_tfm(struct crypto_tfm *tfm) 173static int hmac_init_tfm(struct crypto_tfm *tfm)
174{ 174{
175 struct crypto_hash *hash;
175 struct crypto_instance *inst = (void *)tfm->__crt_alg; 176 struct crypto_instance *inst = (void *)tfm->__crt_alg;
176 struct crypto_spawn *spawn = crypto_instance_ctx(inst); 177 struct crypto_spawn *spawn = crypto_instance_ctx(inst);
177 struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm)); 178 struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm));
178 179
179 tfm = crypto_spawn_tfm(spawn); 180 hash = crypto_spawn_hash(spawn);
180 if (IS_ERR(tfm)) 181 if (IS_ERR(hash))
181 return PTR_ERR(tfm); 182 return PTR_ERR(hash);
182 183
183 ctx->child = crypto_hash_cast(tfm); 184 ctx->child = hash;
184 return 0; 185 return 0;
185} 186}
186 187
diff --git a/crypto/internal.h b/crypto/internal.h
index 2da6ad4f3593..60acad9788c5 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -83,8 +83,7 @@ static inline void crypto_exit_proc(void)
83{ } 83{ }
84#endif 84#endif
85 85
86static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg, 86static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg)
87 int flags)
88{ 87{
89 unsigned int len = alg->cra_ctxsize; 88 unsigned int len = alg->cra_ctxsize;
90 89
@@ -96,23 +95,12 @@ static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg,
96 return len; 95 return len;
97} 96}
98 97
99static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg, 98static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg)
100 int flags)
101{ 99{
102 unsigned int len = alg->cra_ctxsize; 100 return alg->cra_ctxsize;
103
104 switch (flags & CRYPTO_TFM_MODE_MASK) {
105 case CRYPTO_TFM_MODE_CBC:
106 len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
107 len += alg->cra_blocksize;
108 break;
109 }
110
111 return len;
112} 101}
113 102
114static inline unsigned int crypto_compress_ctxsize(struct crypto_alg *alg, 103static inline unsigned int crypto_compress_ctxsize(struct crypto_alg *alg)
115 int flags)
116{ 104{
117 return alg->cra_ctxsize; 105 return alg->cra_ctxsize;
118} 106}
@@ -121,10 +109,6 @@ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg);
121struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask); 109struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask);
122struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); 110struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
123 111
124int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags);
125int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags);
126int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags);
127
128int crypto_init_digest_ops(struct crypto_tfm *tfm); 112int crypto_init_digest_ops(struct crypto_tfm *tfm);
129int crypto_init_cipher_ops(struct crypto_tfm *tfm); 113int crypto_init_cipher_ops(struct crypto_tfm *tfm);
130int crypto_init_compress_ops(struct crypto_tfm *tfm); 114int crypto_init_compress_ops(struct crypto_tfm *tfm);
@@ -136,7 +120,8 @@ void crypto_exit_compress_ops(struct crypto_tfm *tfm);
136void crypto_larval_error(const char *name, u32 type, u32 mask); 120void crypto_larval_error(const char *name, u32 type, u32 mask);
137 121
138void crypto_shoot_alg(struct crypto_alg *alg); 122void crypto_shoot_alg(struct crypto_alg *alg);
139struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 flags); 123struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
124 u32 mask);
140 125
141int crypto_register_instance(struct crypto_template *tmpl, 126int crypto_register_instance(struct crypto_template *tmpl,
142 struct crypto_instance *inst); 127 struct crypto_instance *inst);
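Taken together, the internal.h changes above complete the switch from the old per-tfm flag words to an explicit (type, mask) pair: the context-size helpers lose their flags argument, the crypto_init_*_flags() prototypes go away, and __crypto_alloc_tfm() now receives the type and mask that were used during lookup. As a hedged illustration of the convention those two values encode (not a function from the kernel tree): an algorithm matches a request when its cra_flags agree with the requested type on every bit selected by the mask.

/* Illustration only: match an algorithm's flags against a (type, mask)
 * request by comparing just the bits the mask selects. */
static inline int crypto_alg_type_matches(const struct crypto_alg *alg,
					  u32 type, u32 mask)
{
	return ((alg->cra_flags ^ type) & mask) == 0;
}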
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 56642586d84f..b4105080ac7a 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -201,21 +201,22 @@ static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
201 201
202static int init_tfm(struct crypto_tfm *tfm) 202static int init_tfm(struct crypto_tfm *tfm)
203{ 203{
204 struct crypto_cipher *cipher;
204 struct crypto_instance *inst = (void *)tfm->__crt_alg; 205 struct crypto_instance *inst = (void *)tfm->__crt_alg;
205 struct crypto_spawn *spawn = crypto_instance_ctx(inst); 206 struct crypto_spawn *spawn = crypto_instance_ctx(inst);
206 struct priv *ctx = crypto_tfm_ctx(tfm); 207 struct priv *ctx = crypto_tfm_ctx(tfm);
207 u32 *flags = &tfm->crt_flags; 208 u32 *flags = &tfm->crt_flags;
208 209
209 tfm = crypto_spawn_tfm(spawn); 210 cipher = crypto_spawn_cipher(spawn);
210 if (IS_ERR(tfm)) 211 if (IS_ERR(cipher))
211 return PTR_ERR(tfm); 212 return PTR_ERR(cipher);
212 213
213 if (crypto_tfm_alg_blocksize(tfm) != 16) { 214 if (crypto_cipher_blocksize(cipher) != 16) {
214 *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; 215 *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
215 return -EINVAL; 216 return -EINVAL;
216 } 217 }
217 218
218 ctx->child = crypto_cipher_cast(tfm); 219 ctx->child = cipher;
219 return 0; 220 return 0;
220} 221}
221 222
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
new file mode 100644
index 000000000000..5174d7fdad6e
--- /dev/null
+++ b/crypto/pcbc.c
@@ -0,0 +1,349 @@
1/*
2 * PCBC: Propagating Cipher Block Chaining mode
3 *
4 * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
6 *
7 * Derived from cbc.c
8 * - Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 */
16
17#include <crypto/algapi.h>
18#include <linux/err.h>
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/scatterlist.h>
23#include <linux/slab.h>
24
25struct crypto_pcbc_ctx {
26 struct crypto_cipher *child;
27 void (*xor)(u8 *dst, const u8 *src, unsigned int bs);
28};
29
30static int crypto_pcbc_setkey(struct crypto_tfm *parent, const u8 *key,
31 unsigned int keylen)
32{
33 struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(parent);
34 struct crypto_cipher *child = ctx->child;
35 int err;
36
37 crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
38 crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
39 CRYPTO_TFM_REQ_MASK);
40 err = crypto_cipher_setkey(child, key, keylen);
41 crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
42 CRYPTO_TFM_RES_MASK);
43 return err;
44}
45
46static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
47 struct blkcipher_walk *walk,
48 struct crypto_cipher *tfm,
49 void (*xor)(u8 *, const u8 *,
50 unsigned int))
51{
52 void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
53 crypto_cipher_alg(tfm)->cia_encrypt;
54 int bsize = crypto_cipher_blocksize(tfm);
55 unsigned int nbytes = walk->nbytes;
56 u8 *src = walk->src.virt.addr;
57 u8 *dst = walk->dst.virt.addr;
58 u8 *iv = walk->iv;
59
60 do {
61 xor(iv, src, bsize);
62 fn(crypto_cipher_tfm(tfm), dst, iv);
63 memcpy(iv, dst, bsize);
64 xor(iv, src, bsize);
65
66 src += bsize;
67 dst += bsize;
68 } while ((nbytes -= bsize) >= bsize);
69
70 return nbytes;
71}
72
73static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
74 struct blkcipher_walk *walk,
75 struct crypto_cipher *tfm,
76 void (*xor)(u8 *, const u8 *,
77 unsigned int))
78{
79 void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
80 crypto_cipher_alg(tfm)->cia_encrypt;
81 int bsize = crypto_cipher_blocksize(tfm);
82 unsigned int nbytes = walk->nbytes;
83 u8 *src = walk->src.virt.addr;
84 u8 *iv = walk->iv;
85 u8 tmpbuf[bsize];
86
87 do {
88 memcpy(tmpbuf, src, bsize);
89 xor(iv, tmpbuf, bsize);
90 fn(crypto_cipher_tfm(tfm), src, iv);
91 memcpy(iv, src, bsize);
92 xor(iv, tmpbuf, bsize);
93
94 src += bsize;
95 } while ((nbytes -= bsize) >= bsize);
96
97 memcpy(walk->iv, iv, bsize);
98
99 return nbytes;
100}
101
102static int crypto_pcbc_encrypt(struct blkcipher_desc *desc,
103 struct scatterlist *dst, struct scatterlist *src,
104 unsigned int nbytes)
105{
106 struct blkcipher_walk walk;
107 struct crypto_blkcipher *tfm = desc->tfm;
108 struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
109 struct crypto_cipher *child = ctx->child;
110 void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
111 int err;
112
113 blkcipher_walk_init(&walk, dst, src, nbytes);
114 err = blkcipher_walk_virt(desc, &walk);
115
116 while ((nbytes = walk.nbytes)) {
117 if (walk.src.virt.addr == walk.dst.virt.addr)
118 nbytes = crypto_pcbc_encrypt_inplace(desc, &walk, child,
119 xor);
120 else
121 nbytes = crypto_pcbc_encrypt_segment(desc, &walk, child,
122 xor);
123 err = blkcipher_walk_done(desc, &walk, nbytes);
124 }
125
126 return err;
127}
128
129static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
130 struct blkcipher_walk *walk,
131 struct crypto_cipher *tfm,
132 void (*xor)(u8 *, const u8 *,
133 unsigned int))
134{
135 void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
136 crypto_cipher_alg(tfm)->cia_decrypt;
137 int bsize = crypto_cipher_blocksize(tfm);
138 unsigned int nbytes = walk->nbytes;
139 u8 *src = walk->src.virt.addr;
140 u8 *dst = walk->dst.virt.addr;
141 u8 *iv = walk->iv;
142
143 do {
144 fn(crypto_cipher_tfm(tfm), dst, src);
145 xor(dst, iv, bsize);
146 memcpy(iv, src, bsize);
147 xor(iv, dst, bsize);
148
149 src += bsize;
150 dst += bsize;
151 } while ((nbytes -= bsize) >= bsize);
152
153 memcpy(walk->iv, iv, bsize);
154
155 return nbytes;
156}
157
158static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc,
159 struct blkcipher_walk *walk,
160 struct crypto_cipher *tfm,
161 void (*xor)(u8 *, const u8 *,
162 unsigned int))
163{
164 void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
165 crypto_cipher_alg(tfm)->cia_decrypt;
166 int bsize = crypto_cipher_blocksize(tfm);
167 unsigned int nbytes = walk->nbytes;
168 u8 *src = walk->src.virt.addr;
169 u8 *iv = walk->iv;
170 u8 tmpbuf[bsize];
171
172 do {
173 memcpy(tmpbuf, src, bsize);
174 fn(crypto_cipher_tfm(tfm), src, src);
175 xor(src, iv, bsize);
176 memcpy(iv, tmpbuf, bsize);
177 xor(iv, src, bsize);
178
179 src += bsize;
180 } while ((nbytes -= bsize) >= bsize);
181
182 memcpy(walk->iv, iv, bsize);
183
184 return nbytes;
185}
186
187static int crypto_pcbc_decrypt(struct blkcipher_desc *desc,
188 struct scatterlist *dst, struct scatterlist *src,
189 unsigned int nbytes)
190{
191 struct blkcipher_walk walk;
192 struct crypto_blkcipher *tfm = desc->tfm;
193 struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
194 struct crypto_cipher *child = ctx->child;
195 void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
196 int err;
197
198 blkcipher_walk_init(&walk, dst, src, nbytes);
199 err = blkcipher_walk_virt(desc, &walk);
200
201 while ((nbytes = walk.nbytes)) {
202 if (walk.src.virt.addr == walk.dst.virt.addr)
203 nbytes = crypto_pcbc_decrypt_inplace(desc, &walk, child,
204 xor);
205 else
206 nbytes = crypto_pcbc_decrypt_segment(desc, &walk, child,
207 xor);
208 err = blkcipher_walk_done(desc, &walk, nbytes);
209 }
210
211 return err;
212}
213
214static void xor_byte(u8 *a, const u8 *b, unsigned int bs)
215{
216 do {
217 *a++ ^= *b++;
218 } while (--bs);
219}
220
221static void xor_quad(u8 *dst, const u8 *src, unsigned int bs)
222{
223 u32 *a = (u32 *)dst;
224 u32 *b = (u32 *)src;
225
226 do {
227 *a++ ^= *b++;
228 } while ((bs -= 4));
229}
230
231static void xor_64(u8 *a, const u8 *b, unsigned int bs)
232{
233 ((u32 *)a)[0] ^= ((u32 *)b)[0];
234 ((u32 *)a)[1] ^= ((u32 *)b)[1];
235}
236
237static void xor_128(u8 *a, const u8 *b, unsigned int bs)
238{
239 ((u32 *)a)[0] ^= ((u32 *)b)[0];
240 ((u32 *)a)[1] ^= ((u32 *)b)[1];
241 ((u32 *)a)[2] ^= ((u32 *)b)[2];
242 ((u32 *)a)[3] ^= ((u32 *)b)[3];
243}
244
245static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm)
246{
247 struct crypto_instance *inst = (void *)tfm->__crt_alg;
248 struct crypto_spawn *spawn = crypto_instance_ctx(inst);
249 struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm);
250 struct crypto_cipher *cipher;
251
252 switch (crypto_tfm_alg_blocksize(tfm)) {
253 case 8:
254 ctx->xor = xor_64;
255 break;
256
257 case 16:
258 ctx->xor = xor_128;
259 break;
260
261 default:
262 if (crypto_tfm_alg_blocksize(tfm) % 4)
263 ctx->xor = xor_byte;
264 else
265 ctx->xor = xor_quad;
266 }
267
268 cipher = crypto_spawn_cipher(spawn);
269 if (IS_ERR(cipher))
270 return PTR_ERR(cipher);
271
272 ctx->child = cipher;
273 return 0;
274}
275
276static void crypto_pcbc_exit_tfm(struct crypto_tfm *tfm)
277{
278 struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm);
279 crypto_free_cipher(ctx->child);
280}
281
282static struct crypto_instance *crypto_pcbc_alloc(void *param, unsigned int len)
283{
284 struct crypto_instance *inst;
285 struct crypto_alg *alg;
286
287 alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER,
288 CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
289 if (IS_ERR(alg))
290 return ERR_PTR(PTR_ERR(alg));
291
292 inst = crypto_alloc_instance("pcbc", alg);
293 if (IS_ERR(inst))
294 goto out_put_alg;
295
296 inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
297 inst->alg.cra_priority = alg->cra_priority;
298 inst->alg.cra_blocksize = alg->cra_blocksize;
299 inst->alg.cra_alignmask = alg->cra_alignmask;
300 inst->alg.cra_type = &crypto_blkcipher_type;
301
302 if (!(alg->cra_blocksize % 4))
303 inst->alg.cra_alignmask |= 3;
304 inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
305 inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
306 inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
307
308 inst->alg.cra_ctxsize = sizeof(struct crypto_pcbc_ctx);
309
310 inst->alg.cra_init = crypto_pcbc_init_tfm;
311 inst->alg.cra_exit = crypto_pcbc_exit_tfm;
312
313 inst->alg.cra_blkcipher.setkey = crypto_pcbc_setkey;
314 inst->alg.cra_blkcipher.encrypt = crypto_pcbc_encrypt;
315 inst->alg.cra_blkcipher.decrypt = crypto_pcbc_decrypt;
316
317out_put_alg:
318 crypto_mod_put(alg);
319 return inst;
320}
321
322static void crypto_pcbc_free(struct crypto_instance *inst)
323{
324 crypto_drop_spawn(crypto_instance_ctx(inst));
325 kfree(inst);
326}
327
328static struct crypto_template crypto_pcbc_tmpl = {
329 .name = "pcbc",
330 .alloc = crypto_pcbc_alloc,
331 .free = crypto_pcbc_free,
332 .module = THIS_MODULE,
333};
334
335static int __init crypto_pcbc_module_init(void)
336{
337 return crypto_register_template(&crypto_pcbc_tmpl);
338}
339
340static void __exit crypto_pcbc_module_exit(void)
341{
342 crypto_unregister_template(&crypto_pcbc_tmpl);
343}
344
345module_init(crypto_pcbc_module_init);
346module_exit(crypto_pcbc_module_exit);
347
348MODULE_LICENSE("GPL");
349MODULE_DESCRIPTION("PCBC block cipher algorithm");
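PCBC chains on the XOR of the previous plaintext and ciphertext blocks rather than on the previous ciphertext alone: C[n] = E(P[n] ^ P[n-1] ^ C[n-1]) with the IV standing in for P[-1] ^ C[-1], and decryption mirrors it as P[n] = D(C[n]) ^ P[n-1] ^ C[n-1]. That is exactly what the *_segment() and *_inplace() helpers above compute, keeping the running P[n] ^ C[n] value in walk->iv. A minimal standalone sketch of the encryption side, with a plain function pointer standing in for the underlying cipher and a byte-wise XOR throughout, might look like this (the names are illustrative, not kernel API):

/* chain must initially hold the IV; on return it holds P[n] ^ C[n] of
 * the last block, ready for a further call.  nbytes must be a multiple
 * of the block size bs. */
static void pcbc_encrypt_sketch(void *cipher_ctx,
				void (*encrypt_block)(void *ctx, u8 *dst,
						      const u8 *src),
				u8 *chain, u8 *dst, const u8 *src,
				unsigned int nbytes, unsigned int bs)
{
	unsigned int i;

	while (nbytes >= bs) {
		for (i = 0; i < bs; i++)		/* chain ^= P[n] */
			chain[i] ^= src[i];
		encrypt_block(cipher_ctx, dst, chain);	/* C[n] = E(chain) */
		for (i = 0; i < bs; i++)		/* chain = P[n] ^ C[n] */
			chain[i] = src[i] ^ dst[i];
		src += bs;
		dst += bs;
		nbytes -= bs;
	}
}

The in-place variants above perform the same arithmetic but must first copy the plaintext block into tmpbuf, because the cipher output overwrites src before the new chaining value can be formed.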
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index d671e8942b1f..f5e9da319ece 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -12,6 +12,7 @@
12 * Software Foundation; either version 2 of the License, or (at your option) 12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version. 13 * any later version.
14 * 14 *
15 * 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests
15 * 2004-08-09 Added cipher speed tests (Reyk Floeter <reyk@vantronix.net>) 16 * 2004-08-09 Added cipher speed tests (Reyk Floeter <reyk@vantronix.net>)
16 * 2003-09-14 Rewritten by Kartikey Mahendra Bhatt 17 * 2003-09-14 Rewritten by Kartikey Mahendra Bhatt
17 * 18 *
@@ -71,7 +72,8 @@ static char *check[] = {
71 "des", "md5", "des3_ede", "rot13", "sha1", "sha256", "blowfish", 72 "des", "md5", "des3_ede", "rot13", "sha1", "sha256", "blowfish",
72 "twofish", "serpent", "sha384", "sha512", "md4", "aes", "cast6", 73 "twofish", "serpent", "sha384", "sha512", "md4", "aes", "cast6",
73 "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", 74 "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
74 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", NULL 75 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
76 "camellia", NULL
75}; 77};
76 78
77static void hexdump(unsigned char *buf, unsigned int len) 79static void hexdump(unsigned char *buf, unsigned int len)
@@ -765,7 +767,7 @@ static void test_deflate(void)
765 memcpy(tvmem, deflate_comp_tv_template, tsize); 767 memcpy(tvmem, deflate_comp_tv_template, tsize);
766 tv = (void *)tvmem; 768 tv = (void *)tvmem;
767 769
768 tfm = crypto_alloc_tfm("deflate", 0); 770 tfm = crypto_alloc_comp("deflate", 0, CRYPTO_ALG_ASYNC);
769 if (tfm == NULL) { 771 if (tfm == NULL) {
770 printk("failed to load transform for deflate\n"); 772 printk("failed to load transform for deflate\n");
771 return; 773 return;
@@ -964,6 +966,26 @@ static void do_test(void)
964 test_cipher("ecb(xeta)", DECRYPT, xeta_dec_tv_template, 966 test_cipher("ecb(xeta)", DECRYPT, xeta_dec_tv_template,
965 XETA_DEC_TEST_VECTORS); 967 XETA_DEC_TEST_VECTORS);
966 968
969 //FCrypt
970 test_cipher("pcbc(fcrypt)", ENCRYPT, fcrypt_pcbc_enc_tv_template,
971 FCRYPT_ENC_TEST_VECTORS);
972 test_cipher("pcbc(fcrypt)", DECRYPT, fcrypt_pcbc_dec_tv_template,
973 FCRYPT_DEC_TEST_VECTORS);
974
975 //CAMELLIA
976 test_cipher("ecb(camellia)", ENCRYPT,
977 camellia_enc_tv_template,
978 CAMELLIA_ENC_TEST_VECTORS);
979 test_cipher("ecb(camellia)", DECRYPT,
980 camellia_dec_tv_template,
981 CAMELLIA_DEC_TEST_VECTORS);
982 test_cipher("cbc(camellia)", ENCRYPT,
983 camellia_cbc_enc_tv_template,
984 CAMELLIA_CBC_ENC_TEST_VECTORS);
985 test_cipher("cbc(camellia)", DECRYPT,
986 camellia_cbc_dec_tv_template,
987 CAMELLIA_CBC_DEC_TEST_VECTORS);
988
967 test_hash("sha384", sha384_tv_template, SHA384_TEST_VECTORS); 989 test_hash("sha384", sha384_tv_template, SHA384_TEST_VECTORS);
968 test_hash("sha512", sha512_tv_template, SHA512_TEST_VECTORS); 990 test_hash("sha512", sha512_tv_template, SHA512_TEST_VECTORS);
969 test_hash("wp512", wp512_tv_template, WP512_TEST_VECTORS); 991 test_hash("wp512", wp512_tv_template, WP512_TEST_VECTORS);
@@ -980,6 +1002,10 @@ static void do_test(void)
980 HMAC_SHA1_TEST_VECTORS); 1002 HMAC_SHA1_TEST_VECTORS);
981 test_hash("hmac(sha256)", hmac_sha256_tv_template, 1003 test_hash("hmac(sha256)", hmac_sha256_tv_template,
982 HMAC_SHA256_TEST_VECTORS); 1004 HMAC_SHA256_TEST_VECTORS);
1005 test_hash("hmac(sha384)", hmac_sha384_tv_template,
1006 HMAC_SHA384_TEST_VECTORS);
1007 test_hash("hmac(sha512)", hmac_sha512_tv_template,
1008 HMAC_SHA512_TEST_VECTORS);
983 1009
984 test_hash("xcbc(aes)", aes_xcbc128_tv_template, 1010 test_hash("xcbc(aes)", aes_xcbc128_tv_template,
985 XCBC_AES_TEST_VECTORS); 1011 XCBC_AES_TEST_VECTORS);
@@ -1177,6 +1203,28 @@ static void do_test(void)
1177 XETA_DEC_TEST_VECTORS); 1203 XETA_DEC_TEST_VECTORS);
1178 break; 1204 break;
1179 1205
1206 case 31:
1207 test_cipher("pcbc(fcrypt)", ENCRYPT, fcrypt_pcbc_enc_tv_template,
1208 FCRYPT_ENC_TEST_VECTORS);
1209 test_cipher("pcbc(fcrypt)", DECRYPT, fcrypt_pcbc_dec_tv_template,
1210 FCRYPT_DEC_TEST_VECTORS);
1211 break;
1212
1213 case 32:
1214 test_cipher("ecb(camellia)", ENCRYPT,
1215 camellia_enc_tv_template,
1216 CAMELLIA_ENC_TEST_VECTORS);
1217 test_cipher("ecb(camellia)", DECRYPT,
1218 camellia_dec_tv_template,
1219 CAMELLIA_DEC_TEST_VECTORS);
1220 test_cipher("cbc(camellia)", ENCRYPT,
1221 camellia_cbc_enc_tv_template,
1222 CAMELLIA_CBC_ENC_TEST_VECTORS);
1223 test_cipher("cbc(camellia)", DECRYPT,
1224 camellia_cbc_dec_tv_template,
1225 CAMELLIA_CBC_DEC_TEST_VECTORS);
1226 break;
1227
1180 case 100: 1228 case 100:
1181 test_hash("hmac(md5)", hmac_md5_tv_template, 1229 test_hash("hmac(md5)", hmac_md5_tv_template,
1182 HMAC_MD5_TEST_VECTORS); 1230 HMAC_MD5_TEST_VECTORS);
@@ -1192,6 +1240,16 @@ static void do_test(void)
1192 HMAC_SHA256_TEST_VECTORS); 1240 HMAC_SHA256_TEST_VECTORS);
1193 break; 1241 break;
1194 1242
1243 case 103:
1244 test_hash("hmac(sha384)", hmac_sha384_tv_template,
1245 HMAC_SHA384_TEST_VECTORS);
1246 break;
1247
1248 case 104:
1249 test_hash("hmac(sha512)", hmac_sha512_tv_template,
1250 HMAC_SHA512_TEST_VECTORS);
1251 break;
1252
1195 1253
1196 case 200: 1254 case 200:
1197 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, 1255 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
@@ -1260,6 +1318,17 @@ static void do_test(void)
1260 des_speed_template); 1318 des_speed_template);
1261 break; 1319 break;
1262 1320
1321 case 205:
1322 test_cipher_speed("ecb(camellia)", ENCRYPT, sec, NULL, 0,
1323 camellia_speed_template);
1324 test_cipher_speed("ecb(camellia)", DECRYPT, sec, NULL, 0,
1325 camellia_speed_template);
1326 test_cipher_speed("cbc(camellia)", ENCRYPT, sec, NULL, 0,
1327 camellia_speed_template);
1328 test_cipher_speed("cbc(camellia)", DECRYPT, sec, NULL, 0,
1329 camellia_speed_template);
1330 break;
1331
1263 case 300: 1332 case 300:
1264 /* fall through */ 1333 /* fall through */
1265 1334
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 48a81362cb85..887527bd5bc6 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -12,6 +12,7 @@
12 * Software Foundation; either version 2 of the License, or (at your option) 12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version. 13 * any later version.
14 * 14 *
15 * 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests
15 * 2004-08-09 Cipher speed tests by Reyk Floeter <reyk@vantronix.net> 16 * 2004-08-09 Cipher speed tests by Reyk Floeter <reyk@vantronix.net>
16 * 2003-09-14 Changes by Kartikey Mahendra Bhatt 17 * 2003-09-14 Changes by Kartikey Mahendra Bhatt
17 * 18 *
@@ -27,7 +28,7 @@
27 28
28struct hash_testvec { 29struct hash_testvec {
29 /* only used with keyed hash algorithms */ 30 /* only used with keyed hash algorithms */
30 char key[128] __attribute__ ((__aligned__(4))); 31 char key[132] __attribute__ ((__aligned__(4)));
31 char plaintext[240]; 32 char plaintext[240];
32 char digest[MAX_DIGEST_SIZE]; 33 char digest[MAX_DIGEST_SIZE];
33 unsigned char tap[MAX_TAP]; 34 unsigned char tap[MAX_TAP];
@@ -1002,6 +1003,248 @@ static struct hash_testvec aes_xcbc128_tv_template[] = {
1002}; 1003};
1003 1004
1004/* 1005/*
1006 * SHA384 HMAC test vectors from RFC4231
1007 */
1008
1009#define HMAC_SHA384_TEST_VECTORS 4
1010
1011static struct hash_testvec hmac_sha384_tv_template[] = {
1012 {
1013 .key = { 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
1014 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
1015 0x0b, 0x0b, 0x0b, 0x0b }, // (20 bytes)
1016 .ksize = 20,
1017 .plaintext = { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65 }, // ("Hi There")
1018 .psize = 8,
1019 .digest = { 0xaf, 0xd0, 0x39, 0x44, 0xd8, 0x48, 0x95, 0x62,
1020 0x6b, 0x08, 0x25, 0xf4, 0xab, 0x46, 0x90, 0x7f,
1021 0x15, 0xf9, 0xda, 0xdb, 0xe4, 0x10, 0x1e, 0xc6,
1022 0x82, 0xaa, 0x03, 0x4c, 0x7c, 0xeb, 0xc5, 0x9c,
1023 0xfa, 0xea, 0x9e, 0xa9, 0x07, 0x6e, 0xde, 0x7f,
1024 0x4a, 0xf1, 0x52, 0xe8, 0xb2, 0xfa, 0x9c, 0xb6 },
1025 }, {
1026 .key = { 0x4a, 0x65, 0x66, 0x65 }, // ("Jefe")
1027 .ksize = 4,
1028 .plaintext = { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
1029 0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20, // ("what do ya want ")
1030 0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
1031 0x69, 0x6e, 0x67, 0x3f }, // ("for nothing?")
1032 .psize = 28,
1033 .digest = { 0xaf, 0x45, 0xd2, 0xe3, 0x76, 0x48, 0x40, 0x31,
1034 0x61, 0x7f, 0x78, 0xd2, 0xb5, 0x8a, 0x6b, 0x1b,
1035 0x9c, 0x7e, 0xf4, 0x64, 0xf5, 0xa0, 0x1b, 0x47,
1036 0xe4, 0x2e, 0xc3, 0x73, 0x63, 0x22, 0x44, 0x5e,
1037 0x8e, 0x22, 0x40, 0xca, 0x5e, 0x69, 0xe2, 0xc7,
1038 0x8b, 0x32, 0x39, 0xec, 0xfa, 0xb2, 0x16, 0x49 },
1039 .np = 4,
1040 .tap = { 7, 7, 7, 7 }
1041 }, {
1042 .key = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1043 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1044 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1045 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1046 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1047 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1048 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1049 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1050 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1051 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1052 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1053 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1054 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1055 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1056 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1057 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1058 0xaa, 0xaa, 0xaa }, // (131 bytes)
1059 .ksize = 131,
1060 .plaintext = { 0x54, 0x65, 0x73, 0x74, 0x20, 0x55, 0x73, 0x69,
1061 0x6e, 0x67, 0x20, 0x4c, 0x61, 0x72, 0x67, 0x65, // ("Test Using Large")
1062 0x72, 0x20, 0x54, 0x68, 0x61, 0x6e, 0x20, 0x42,
1063 0x6c, 0x6f, 0x63, 0x6b, 0x2d, 0x53, 0x69, 0x7a, // ("r Than Block-Siz")
1064 0x65, 0x20, 0x4b, 0x65, 0x79, 0x20, 0x2d, 0x20,
1065 0x48, 0x61, 0x73, 0x68, 0x20, 0x4b, 0x65, 0x79, // ("e Key - Hash Key")
1066 0x20, 0x46, 0x69, 0x72, 0x73, 0x74 }, // (" First")
1067 .psize = 54,
1068 .digest = { 0x4e, 0xce, 0x08, 0x44, 0x85, 0x81, 0x3e, 0x90,
1069 0x88, 0xd2, 0xc6, 0x3a, 0x04, 0x1b, 0xc5, 0xb4,
1070 0x4f, 0x9e, 0xf1, 0x01, 0x2a, 0x2b, 0x58, 0x8f,
1071 0x3c, 0xd1, 0x1f, 0x05, 0x03, 0x3a, 0xc4, 0xc6,
1072 0x0c, 0x2e, 0xf6, 0xab, 0x40, 0x30, 0xfe, 0x82,
1073 0x96, 0x24, 0x8d, 0xf1, 0x63, 0xf4, 0x49, 0x52 },
1074 }, {
1075 .key = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1076 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1077 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1078 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1079 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1080 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1081 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1082 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1083 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1084 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1085 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1086 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1087 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1088 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1089 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1090 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1091 0xaa, 0xaa, 0xaa }, // (131 bytes)
1092 .ksize = 131,
1093 .plaintext = { 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20,
1094 0x61, 0x20, 0x74, 0x65, 0x73, 0x74, 0x20, 0x75, // ("This is a test u")
1095 0x73, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x6c,
1096 0x61, 0x72, 0x67, 0x65, 0x72, 0x20, 0x74, 0x68, // ("sing a larger th")
1097 0x61, 0x6e, 0x20, 0x62, 0x6c, 0x6f, 0x63, 0x6b,
1098 0x2d, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x6b, 0x65, // ("an block-size ke")
1099 0x79, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x61, 0x20,
1100 0x6c, 0x61, 0x72, 0x67, 0x65, 0x72, 0x20, 0x74, // ("y and a larger t")
1101 0x68, 0x61, 0x6e, 0x20, 0x62, 0x6c, 0x6f, 0x63,
1102 0x6b, 0x2d, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x64, // ("han block-size d")
1103 0x61, 0x74, 0x61, 0x2e, 0x20, 0x54, 0x68, 0x65,
1104 0x20, 0x6b, 0x65, 0x79, 0x20, 0x6e, 0x65, 0x65, // ("ata. The key nee")
1105 0x64, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x65,
1106 0x20, 0x68, 0x61, 0x73, 0x68, 0x65, 0x64, 0x20, // ("ds to be hashed ")
1107 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x20, 0x62,
1108 0x65, 0x69, 0x6e, 0x67, 0x20, 0x75, 0x73, 0x65, // ("before being use")
1109 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65,
1110 0x20, 0x48, 0x4d, 0x41, 0x43, 0x20, 0x61, 0x6c, // ("d by the HMAC al")
1111 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x2e }, // ("gorithm.")
1112 .psize = 152,
1113 .digest = { 0x66, 0x17, 0x17, 0x8e, 0x94, 0x1f, 0x02, 0x0d,
1114 0x35, 0x1e, 0x2f, 0x25, 0x4e, 0x8f, 0xd3, 0x2c,
1115 0x60, 0x24, 0x20, 0xfe, 0xb0, 0xb8, 0xfb, 0x9a,
1116 0xdc, 0xce, 0xbb, 0x82, 0x46, 0x1e, 0x99, 0xc5,
1117 0xa6, 0x78, 0xcc, 0x31, 0xe7, 0x99, 0x17, 0x6d,
1118 0x38, 0x60, 0xe6, 0x11, 0x0c, 0x46, 0x52, 0x3e },
1119 },
1120};
1121
1122/*
1123 * SHA512 HMAC test vectors from RFC4231
1124 */
1125
1126#define HMAC_SHA512_TEST_VECTORS 4
1127
1128static struct hash_testvec hmac_sha512_tv_template[] = {
1129 {
1130 .key = { 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
1131 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
1132 0x0b, 0x0b, 0x0b, 0x0b }, // (20 bytes)
1133 .ksize = 20,
1134 .plaintext = { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65 }, // ("Hi There")
1135 .psize = 8,
1136 .digest = { 0x87, 0xaa, 0x7c, 0xde, 0xa5, 0xef, 0x61, 0x9d,
1137 0x4f, 0xf0, 0xb4, 0x24, 0x1a, 0x1d, 0x6c, 0xb0,
1138 0x23, 0x79, 0xf4, 0xe2, 0xce, 0x4e, 0xc2, 0x78,
1139 0x7a, 0xd0, 0xb3, 0x05, 0x45, 0xe1, 0x7c, 0xde,
1140 0xda, 0xa8, 0x33, 0xb7, 0xd6, 0xb8, 0xa7, 0x02,
1141 0x03, 0x8b, 0x27, 0x4e, 0xae, 0xa3, 0xf4, 0xe4,
1142 0xbe, 0x9d, 0x91, 0x4e, 0xeb, 0x61, 0xf1, 0x70,
1143 0x2e, 0x69, 0x6c, 0x20, 0x3a, 0x12, 0x68, 0x54 },
1144 }, {
1145 .key = { 0x4a, 0x65, 0x66, 0x65 }, // ("Jefe")
1146 .ksize = 4,
1147 .plaintext = { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
1148 0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20, // ("what do ya want ")
1149 0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
1150 0x69, 0x6e, 0x67, 0x3f }, // ("for nothing?")
1151 .psize = 28,
1152 .digest = { 0x16, 0x4b, 0x7a, 0x7b, 0xfc, 0xf8, 0x19, 0xe2,
1153 0xe3, 0x95, 0xfb, 0xe7, 0x3b, 0x56, 0xe0, 0xa3,
1154 0x87, 0xbd, 0x64, 0x22, 0x2e, 0x83, 0x1f, 0xd6,
1155 0x10, 0x27, 0x0c, 0xd7, 0xea, 0x25, 0x05, 0x54,
1156 0x97, 0x58, 0xbf, 0x75, 0xc0, 0x5a, 0x99, 0x4a,
1157 0x6d, 0x03, 0x4f, 0x65, 0xf8, 0xf0, 0xe6, 0xfd,
1158 0xca, 0xea, 0xb1, 0xa3, 0x4d, 0x4a, 0x6b, 0x4b,
1159 0x63, 0x6e, 0x07, 0x0a, 0x38, 0xbc, 0xe7, 0x37 },
1160 .np = 4,
1161 .tap = { 7, 7, 7, 7 }
1162 }, {
1163 .key = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1164 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1165 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1166 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1167 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1168 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1169 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1170 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1171 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1172 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1173 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1174 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1175 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1176 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1177 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1178 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1179 0xaa, 0xaa, 0xaa }, // (131 bytes)
1180 .ksize = 131,
1181 .plaintext = { 0x54, 0x65, 0x73, 0x74, 0x20, 0x55, 0x73, 0x69,
1182 0x6e, 0x67, 0x20, 0x4c, 0x61, 0x72, 0x67, 0x65, // ("Test Using Large")
1183 0x72, 0x20, 0x54, 0x68, 0x61, 0x6e, 0x20, 0x42,
1184 0x6c, 0x6f, 0x63, 0x6b, 0x2d, 0x53, 0x69, 0x7a, // ("r Than Block-Siz")
1185 0x65, 0x20, 0x4b, 0x65, 0x79, 0x20, 0x2d, 0x20,
1186 0x48, 0x61, 0x73, 0x68, 0x20, 0x4b, 0x65, 0x79, // ("e Key - Hash Key")
1187 0x20, 0x46, 0x69, 0x72, 0x73, 0x74 }, // (" First")
1188 .psize = 54,
1189 .digest = { 0x80, 0xb2, 0x42, 0x63, 0xc7, 0xc1, 0xa3, 0xeb,
1190 0xb7, 0x14, 0x93, 0xc1, 0xdd, 0x7b, 0xe8, 0xb4,
1191 0x9b, 0x46, 0xd1, 0xf4, 0x1b, 0x4a, 0xee, 0xc1,
1192 0x12, 0x1b, 0x01, 0x37, 0x83, 0xf8, 0xf3, 0x52,
1193 0x6b, 0x56, 0xd0, 0x37, 0xe0, 0x5f, 0x25, 0x98,
1194 0xbd, 0x0f, 0xd2, 0x21, 0x5d, 0x6a, 0x1e, 0x52,
1195 0x95, 0xe6, 0x4f, 0x73, 0xf6, 0x3f, 0x0a, 0xec,
1196 0x8b, 0x91, 0x5a, 0x98, 0x5d, 0x78, 0x65, 0x98 },
1197 }, {
1198 .key = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1199 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1200 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1201 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1202 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1203 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1204 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1205 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1206 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1207 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1208 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1209 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1210 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1211 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1212 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1213 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
1214 0xaa, 0xaa, 0xaa }, // (131 bytes)
1215 .ksize = 131,
1216 .plaintext = { 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20,
1217 0x61, 0x20, 0x74, 0x65, 0x73, 0x74, 0x20, 0x75, // ("This is a test u")
1218 0x73, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x6c,
1219 0x61, 0x72, 0x67, 0x65, 0x72, 0x20, 0x74, 0x68, // ("sing a larger th")
1220 0x61, 0x6e, 0x20, 0x62, 0x6c, 0x6f, 0x63, 0x6b,
1221 0x2d, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x6b, 0x65, // ("an block-size ke")
1222 0x79, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x61, 0x20,
1223 0x6c, 0x61, 0x72, 0x67, 0x65, 0x72, 0x20, 0x74, // ("y and a larger t")
1224 0x68, 0x61, 0x6e, 0x20, 0x62, 0x6c, 0x6f, 0x63,
1225 0x6b, 0x2d, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x64, // ("han block-size d")
1226 0x61, 0x74, 0x61, 0x2e, 0x20, 0x54, 0x68, 0x65,
1227 0x20, 0x6b, 0x65, 0x79, 0x20, 0x6e, 0x65, 0x65, // ("ata. The key nee")
1228 0x64, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x65,
1229 0x20, 0x68, 0x61, 0x73, 0x68, 0x65, 0x64, 0x20, // ("ds to be hashed ")
1230 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x20, 0x62,
1231 0x65, 0x69, 0x6e, 0x67, 0x20, 0x75, 0x73, 0x65, // ("before being use")
1232 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65,
1233 0x20, 0x48, 0x4d, 0x41, 0x43, 0x20, 0x61, 0x6c, // ("d by the HMAC al")
1234 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x2e }, // ("gorithm.")
1235 .psize = 152,
1236 .digest = { 0xe3, 0x7b, 0x6a, 0x77, 0x5d, 0xc8, 0x7d, 0xba,
1237 0xa4, 0xdf, 0xa9, 0xf9, 0x6e, 0x5e, 0x3f, 0xfd,
1238 0xde, 0xbd, 0x71, 0xf8, 0x86, 0x72, 0x89, 0x86,
1239 0x5d, 0xf5, 0xa3, 0x2d, 0x20, 0xcd, 0xc9, 0x44,
1240 0xb6, 0x02, 0x2c, 0xac, 0x3c, 0x49, 0x82, 0xb1,
1241 0x0d, 0x5e, 0xeb, 0x55, 0xc3, 0xe4, 0xde, 0x15,
1242 0x13, 0x46, 0x76, 0xfb, 0x6d, 0xe0, 0x44, 0x60,
1243 0x65, 0xc9, 0x74, 0x40, 0xfa, 0x8c, 0x6a, 0x58 },
1244 },
1245};
1246
1247/*
1005 * DES test vectors. 1248 * DES test vectors.
1006 */ 1249 */
1007#define DES_ENC_TEST_VECTORS 10 1250#define DES_ENC_TEST_VECTORS 10
@@ -3316,6 +3559,278 @@ static struct cipher_testvec xeta_dec_tv_template[] = {
3316 } 3559 }
3317}; 3560};
3318 3561
3562/*
3563 * FCrypt test vectors
3564 */
3565#define FCRYPT_ENC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_enc_tv_template)
3566#define FCRYPT_DEC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_dec_tv_template)
3567
3568static struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
3569 { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
3570 .key = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
3571 .klen = 8,
3572 .iv = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
3573 .input = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
3574 .ilen = 8,
3575 .result = { 0x0E, 0x09, 0x00, 0xC7, 0x3E, 0xF7, 0xED, 0x41 },
3576 .rlen = 8,
3577 }, {
3578 .key = { 0x11, 0x44, 0x77, 0xAA, 0xDD, 0x00, 0x33, 0x66 },
3579 .klen = 8,
3580 .iv = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
3581 .input = { 0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0 },
3582 .ilen = 8,
3583 .result = { 0xD8, 0xED, 0x78, 0x74, 0x77, 0xEC, 0x06, 0x80 },
3584 .rlen = 8,
3585 }, { /* From Arla */
3586 .key = { 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87 },
3587 .klen = 8,
3588 .iv = { 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
3589 .input = "The quick brown fox jumps over the lazy dogs.\0\0",
3590 .ilen = 48,
3591 .result = { 0x00, 0xf0, 0x0e, 0x11, 0x75, 0xe6, 0x23, 0x82,
3592 0xee, 0xac, 0x98, 0x62, 0x44, 0x51, 0xe4, 0x84,
3593 0xc3, 0x59, 0xd8, 0xaa, 0x64, 0x60, 0xae, 0xf7,
3594 0xd2, 0xd9, 0x13, 0x79, 0x72, 0xa3, 0x45, 0x03,
3595 0x23, 0xb5, 0x62, 0xd7, 0x0c, 0xf5, 0x27, 0xd1,
3596 0xf8, 0x91, 0x3c, 0xac, 0x44, 0x22, 0x92, 0xef },
3597 .rlen = 48,
3598 }, {
3599 .key = { 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
3600 .klen = 8,
3601 .iv = { 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87 },
3602 .input = "The quick brown fox jumps over the lazy dogs.\0\0",
3603 .ilen = 48,
3604 .result = { 0xca, 0x90, 0xf5, 0x9d, 0xcb, 0xd4, 0xd2, 0x3c,
3605 0x01, 0x88, 0x7f, 0x3e, 0x31, 0x6e, 0x62, 0x9d,
3606 0xd8, 0xe0, 0x57, 0xa3, 0x06, 0x3a, 0x42, 0x58,
3607 0x2a, 0x28, 0xfe, 0x72, 0x52, 0x2f, 0xdd, 0xe0,
3608 0x19, 0x89, 0x09, 0x1c, 0x2a, 0x8e, 0x8c, 0x94,
3609 0xfc, 0xc7, 0x68, 0xe4, 0x88, 0xaa, 0xde, 0x0f },
3610 .rlen = 48,
3611 }, { /* split-page version */
3612 .key = { 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
3613 .klen = 8,
3614 .iv = { 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87 },
3615 .input = "The quick brown fox jumps over the lazy dogs.\0\0",
3616 .ilen = 48,
3617 .result = { 0xca, 0x90, 0xf5, 0x9d, 0xcb, 0xd4, 0xd2, 0x3c,
3618 0x01, 0x88, 0x7f, 0x3e, 0x31, 0x6e, 0x62, 0x9d,
3619 0xd8, 0xe0, 0x57, 0xa3, 0x06, 0x3a, 0x42, 0x58,
3620 0x2a, 0x28, 0xfe, 0x72, 0x52, 0x2f, 0xdd, 0xe0,
3621 0x19, 0x89, 0x09, 0x1c, 0x2a, 0x8e, 0x8c, 0x94,
3622 0xfc, 0xc7, 0x68, 0xe4, 0x88, 0xaa, 0xde, 0x0f },
3623 .rlen = 48,
3624 .np = 2,
3625 .tap = { 20, 28 },
3626 }
3627};
3628
3629static struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
3630 { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
3631 .key = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
3632 .klen = 8,
3633 .iv = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
3634 .input = { 0x0E, 0x09, 0x00, 0xC7, 0x3E, 0xF7, 0xED, 0x41 },
3635 .ilen = 8,
3636 .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
3637 .rlen = 8,
3638 }, {
3639 .key = { 0x11, 0x44, 0x77, 0xAA, 0xDD, 0x00, 0x33, 0x66 },
3640 .klen = 8,
3641 .iv = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
3642 .input = { 0xD8, 0xED, 0x78, 0x74, 0x77, 0xEC, 0x06, 0x80 },
3643 .ilen = 8,
3644 .result = { 0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0 },
3645 .rlen = 8,
3646 }, { /* From Arla */
3647 .key = { 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87 },
3648 .klen = 8,
3649 .iv = { 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
3650 .input = { 0x00, 0xf0, 0x0e, 0x11, 0x75, 0xe6, 0x23, 0x82,
3651 0xee, 0xac, 0x98, 0x62, 0x44, 0x51, 0xe4, 0x84,
3652 0xc3, 0x59, 0xd8, 0xaa, 0x64, 0x60, 0xae, 0xf7,
3653 0xd2, 0xd9, 0x13, 0x79, 0x72, 0xa3, 0x45, 0x03,
3654 0x23, 0xb5, 0x62, 0xd7, 0x0c, 0xf5, 0x27, 0xd1,
3655 0xf8, 0x91, 0x3c, 0xac, 0x44, 0x22, 0x92, 0xef },
3656 .ilen = 48,
3657 .result = "The quick brown fox jumps over the lazy dogs.\0\0",
3658 .rlen = 48,
3659 }, {
3660 .key = { 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
3661 .klen = 8,
3662 .iv = { 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87 },
3663 .input = { 0xca, 0x90, 0xf5, 0x9d, 0xcb, 0xd4, 0xd2, 0x3c,
3664 0x01, 0x88, 0x7f, 0x3e, 0x31, 0x6e, 0x62, 0x9d,
3665 0xd8, 0xe0, 0x57, 0xa3, 0x06, 0x3a, 0x42, 0x58,
3666 0x2a, 0x28, 0xfe, 0x72, 0x52, 0x2f, 0xdd, 0xe0,
3667 0x19, 0x89, 0x09, 0x1c, 0x2a, 0x8e, 0x8c, 0x94,
3668 0xfc, 0xc7, 0x68, 0xe4, 0x88, 0xaa, 0xde, 0x0f },
3669 .ilen = 48,
3670 .result = "The quick brown fox jumps over the lazy dogs.\0\0",
3671 .rlen = 48,
3672 }, { /* split-page version */
3673 .key = { 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
3674 .klen = 8,
3675 .iv = { 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87 },
3676 .input = { 0xca, 0x90, 0xf5, 0x9d, 0xcb, 0xd4, 0xd2, 0x3c,
3677 0x01, 0x88, 0x7f, 0x3e, 0x31, 0x6e, 0x62, 0x9d,
3678 0xd8, 0xe0, 0x57, 0xa3, 0x06, 0x3a, 0x42, 0x58,
3679 0x2a, 0x28, 0xfe, 0x72, 0x52, 0x2f, 0xdd, 0xe0,
3680 0x19, 0x89, 0x09, 0x1c, 0x2a, 0x8e, 0x8c, 0x94,
3681 0xfc, 0xc7, 0x68, 0xe4, 0x88, 0xaa, 0xde, 0x0f },
3682 .ilen = 48,
3683 .result = "The quick brown fox jumps over the lazy dogs.\0\0",
3684 .rlen = 48,
3685 .np = 2,
3686 .tap = { 20, 28 },
3687 }
3688};
3689
3690/*
3691 * CAMELLIA test vectors.
3692 */
3693#define CAMELLIA_ENC_TEST_VECTORS 3
3694#define CAMELLIA_DEC_TEST_VECTORS 3
3695#define CAMELLIA_CBC_ENC_TEST_VECTORS 2
3696#define CAMELLIA_CBC_DEC_TEST_VECTORS 2
3697
3698static struct cipher_testvec camellia_enc_tv_template[] = {
3699 {
3700 .key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
3701 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
3702 .klen = 16,
3703 .input = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
3704 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
3705 .ilen = 16,
3706 .result = { 0x67, 0x67, 0x31, 0x38, 0x54, 0x96, 0x69, 0x73,
3707 0x08, 0x57, 0x06, 0x56, 0x48, 0xea, 0xbe, 0x43 },
3708 .rlen = 16,
3709 }, {
3710 .key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
3711 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10,
3712 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 },
3713 .klen = 24,
3714 .input = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
3715 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
3716 .ilen = 16,
3717 .result = { 0xb4, 0x99, 0x34, 0x01, 0xb3, 0xe9, 0x96, 0xf8,
3718 0x4e, 0xe5, 0xce, 0xe7, 0xd7, 0x9b, 0x09, 0xb9 },
3719 .rlen = 16,
3720 }, {
3721 .key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
3722 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10,
3723 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
3724 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff },
3725 .klen = 32,
3726 .input = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
3727 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
3728 .ilen = 16,
3729 .result = { 0x9a, 0xcc, 0x23, 0x7d, 0xff, 0x16, 0xd7, 0x6c,
3730 0x20, 0xef, 0x7c, 0x91, 0x9e, 0x3a, 0x75, 0x09 },
3731 .rlen = 16,
3732 },
3733};
3734
3735static struct cipher_testvec camellia_dec_tv_template[] = {
3736 {
3737 .key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
3738 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
3739 .klen = 16,
3740 .input = { 0x67, 0x67, 0x31, 0x38, 0x54, 0x96, 0x69, 0x73,
3741 0x08, 0x57, 0x06, 0x56, 0x48, 0xea, 0xbe, 0x43 },
3742 .ilen = 16,
3743 .result = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
3744 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
3745 .rlen = 16,
3746 }, {
3747 .key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
3748 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10,
3749 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 },
3750 .klen = 24,
3751 .input = { 0xb4, 0x99, 0x34, 0x01, 0xb3, 0xe9, 0x96, 0xf8,
3752 0x4e, 0xe5, 0xce, 0xe7, 0xd7, 0x9b, 0x09, 0xb9 },
3753 .ilen = 16,
3754 .result = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
3755 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
3756 .rlen = 16,
3757 }, {
3758 .key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
3759 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10,
3760 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
3761 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff },
3762 .klen = 32,
3763 .input = { 0x9a, 0xcc, 0x23, 0x7d, 0xff, 0x16, 0xd7, 0x6c,
3764 0x20, 0xef, 0x7c, 0x91, 0x9e, 0x3a, 0x75, 0x09 },
3765 .ilen = 16,
3766 .result = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
3767 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
3768 .rlen = 16,
3769 },
3770};
3771
3772static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
3773 {
3774 .key = { 0x06, 0xa9, 0x21, 0x40, 0x36, 0xb8, 0xa1, 0x5b,
3775 0x51, 0x2e, 0x03, 0xd5, 0x34, 0x12, 0x00, 0x06 },
3776 .klen = 16,
3777 .iv = { 0x3d, 0xaf, 0xba, 0x42, 0x9d, 0x9e, 0xb4, 0x30,
3778 0xb4, 0x22, 0xda, 0x80, 0x2c, 0x9f, 0xac, 0x41 },
3779 .input = { "Single block msg" },
3780 .ilen = 16,
3781 .result = { 0xea, 0x32, 0x12, 0x76, 0x3b, 0x50, 0x10, 0xe7,
3782 0x18, 0xf6, 0xfd, 0x5d, 0xf6, 0x8f, 0x13, 0x51 },
3783 .rlen = 16,
3784 }, {
3785 .key = { 0xc2, 0x86, 0x69, 0x6d, 0x88, 0x7c, 0x9a, 0xa0,
3786 0x61, 0x1b, 0xbb, 0x3e, 0x20, 0x25, 0xa4, 0x5a },
3787 .klen = 16,
3788 .iv = { 0x56, 0x2e, 0x17, 0x99, 0x6d, 0x09, 0x3d, 0x28,
3789 0xdd, 0xb3, 0xba, 0x69, 0x5a, 0x2e, 0x6f, 0x58 },
3790 .input = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
3791 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
3792 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
3793 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
3794 .ilen = 32,
3795 .result = { 0xa5, 0xdf, 0x6e, 0x50, 0xda, 0x70, 0x6c, 0x01,
3796 0x4a, 0xab, 0xf3, 0xf2, 0xd6, 0xfc, 0x6c, 0xfd,
3797 0x19, 0xb4, 0x3e, 0x57, 0x1c, 0x02, 0x5e, 0xa0,
3798 0x15, 0x78, 0xe0, 0x5e, 0xf2, 0xcb, 0x87, 0x16 },
3799 .rlen = 32,
3800 },
3801};
3802
3803static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
3804 {
3805 .key = { 0x06, 0xa9, 0x21, 0x40, 0x36, 0xb8, 0xa1, 0x5b,
3806 0x51, 0x2e, 0x03, 0xd5, 0x34, 0x12, 0x00, 0x06 },
3807 .klen = 16,
3808 .iv = { 0x3d, 0xaf, 0xba, 0x42, 0x9d, 0x9e, 0xb4, 0x30,
3809 0xb4, 0x22, 0xda, 0x80, 0x2c, 0x9f, 0xac, 0x41 },
3810 .input = { 0xea, 0x32, 0x12, 0x76, 0x3b, 0x50, 0x10, 0xe7,
3811 0x18, 0xf6, 0xfd, 0x5d, 0xf6, 0x8f, 0x13, 0x51 },
3812 .ilen = 16,
3813 .result = { "Single block msg" },
3814 .rlen = 16,
3815 }, {
3816 .key = { 0xc2, 0x86, 0x69, 0x6d, 0x88, 0x7c, 0x9a, 0xa0,
3817 0x61, 0x1b, 0xbb, 0x3e, 0x20, 0x25, 0xa4, 0x5a },
3818 .klen = 16,
3819 .iv = { 0x56, 0x2e, 0x17, 0x99, 0x6d, 0x09, 0x3d, 0x28,
3820 0xdd, 0xb3, 0xba, 0x69, 0x5a, 0x2e, 0x6f, 0x58 },
3821 .input = { 0xa5, 0xdf, 0x6e, 0x50, 0xda, 0x70, 0x6c, 0x01,
3822 0x4a, 0xab, 0xf3, 0xf2, 0xd6, 0xfc, 0x6c, 0xfd,
3823 0x19, 0xb4, 0x3e, 0x57, 0x1c, 0x02, 0x5e, 0xa0,
3824 0x15, 0x78, 0xe0, 0x5e, 0xf2, 0xcb, 0x87, 0x16 },
3825 .ilen = 32,
3826 .result = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
3827 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
3828 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
3829 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
3830 .rlen = 32,
3831 },
3832};
3833
3319/* 3834/*
3320 * Compression stuff. 3835 * Compression stuff.
3321 */ 3836 */
@@ -3769,4 +4284,25 @@ static struct hash_speed generic_hash_speed_template[] = {
3769 { .blen = 0, .plen = 0, } 4284 { .blen = 0, .plen = 0, }
3770}; 4285};
3771 4286
4287static struct cipher_speed camellia_speed_template[] = {
4288 { .klen = 16, .blen = 16, },
4289 { .klen = 16, .blen = 64, },
4290 { .klen = 16, .blen = 256, },
4291 { .klen = 16, .blen = 1024, },
4292 { .klen = 16, .blen = 8192, },
4293 { .klen = 24, .blen = 16, },
4294 { .klen = 24, .blen = 64, },
4295 { .klen = 24, .blen = 256, },
4296 { .klen = 24, .blen = 1024, },
4297 { .klen = 24, .blen = 8192, },
4298 { .klen = 32, .blen = 16, },
4299 { .klen = 32, .blen = 64, },
4300 { .klen = 32, .blen = 256, },
4301 { .klen = 32, .blen = 1024, },
4302 { .klen = 32, .blen = 8192, },
4303
4304 /* End marker */
4305 { .klen = 0, .blen = 0, }
4306};
4307
3772#endif /* _CRYPTO_TCRYPT_H */ 4308#endif /* _CRYPTO_TCRYPT_H */
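
The camellia_speed_template added above follows the same convention as the other speed tables in tcrypt.h: the klen/blen pairs are consumed in order until the all-zero end marker is reached, and the real consumer is the speed-test code in crypto/tcrypt.c. A minimal userspace sketch of that iteration pattern, using a stand-in for the kernel's struct cipher_speed and only a few of the entries from the table above:

#include <stdio.h>

/* Stand-in for struct cipher_speed from tcrypt.h. */
struct cipher_speed {
	unsigned int klen;	/* key length in bytes    */
	unsigned int blen;	/* buffer length in bytes */
};

static const struct cipher_speed camellia_speed_template[] = {
	{ .klen = 16, .blen =   16, },
	{ .klen = 24, .blen = 1024, },
	{ .klen = 32, .blen = 8192, },
	/* End marker */
	{ .klen = 0,  .blen = 0, },
};

int main(void)
{
	const struct cipher_speed *p;

	/* The speed test walks the table until it reaches the { 0, 0 } terminator. */
	for (p = camellia_speed_template; p->klen; p++)
		printf("would time camellia: klen=%u blen=%u\n",
		       p->klen, p->blen);
	return 0;
}
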
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index 9347eb6bcf69..53e8ccbf0f5f 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/crypto.h> 22#include <linux/crypto.h>
23#include <linux/err.h> 23#include <linux/err.h>
24#include <linux/hardirq.h>
24#include <linux/kernel.h> 25#include <linux/kernel.h>
25#include <linux/mm.h> 26#include <linux/mm.h>
26#include <linux/rtnetlink.h> 27#include <linux/rtnetlink.h>
@@ -47,7 +48,7 @@ static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
47 * +------------------------ 48 * +------------------------
48 */ 49 */
49struct crypto_xcbc_ctx { 50struct crypto_xcbc_ctx {
50 struct crypto_tfm *child; 51 struct crypto_cipher *child;
51 u8 *odds; 52 u8 *odds;
52 u8 *prev; 53 u8 *prev;
53 u8 *key; 54 u8 *key;
@@ -75,8 +76,7 @@ static int _crypto_xcbc_digest_setkey(struct crypto_hash *parent,
75 if ((err = crypto_cipher_setkey(ctx->child, ctx->key, ctx->keylen))) 76 if ((err = crypto_cipher_setkey(ctx->child, ctx->key, ctx->keylen)))
76 return err; 77 return err;
77 78
78 ctx->child->__crt_alg->cra_cipher.cia_encrypt(ctx->child, key1, 79 crypto_cipher_encrypt_one(ctx->child, key1, ctx->consts);
79 ctx->consts);
80 80
81 return crypto_cipher_setkey(ctx->child, key1, bs); 81 return crypto_cipher_setkey(ctx->child, key1, bs);
82} 82}
@@ -86,7 +86,7 @@ static int crypto_xcbc_digest_setkey(struct crypto_hash *parent,
86{ 86{
87 struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent); 87 struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
88 88
89 if (keylen != crypto_tfm_alg_blocksize(ctx->child)) 89 if (keylen != crypto_cipher_blocksize(ctx->child))
90 return -EINVAL; 90 return -EINVAL;
91 91
92 ctx->keylen = keylen; 92 ctx->keylen = keylen;
@@ -108,13 +108,13 @@ static int crypto_xcbc_digest_init(struct hash_desc *pdesc)
108 return 0; 108 return 0;
109} 109}
110 110
111static int crypto_xcbc_digest_update(struct hash_desc *pdesc, 111static int crypto_xcbc_digest_update2(struct hash_desc *pdesc,
112 struct scatterlist *sg, 112 struct scatterlist *sg,
113 unsigned int nbytes) 113 unsigned int nbytes)
114{ 114{
115 struct crypto_hash *parent = pdesc->tfm; 115 struct crypto_hash *parent = pdesc->tfm;
116 struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent); 116 struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
117 struct crypto_tfm *tfm = ctx->child; 117 struct crypto_cipher *tfm = ctx->child;
118 int bs = crypto_hash_blocksize(parent); 118 int bs = crypto_hash_blocksize(parent);
119 unsigned int i = 0; 119 unsigned int i = 0;
120 120
@@ -142,7 +142,7 @@ static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
142 offset += len; 142 offset += len;
143 143
144 crypto_kunmap(p, 0); 144 crypto_kunmap(p, 0);
145 crypto_yield(tfm->crt_flags); 145 crypto_yield(pdesc->flags);
146 continue; 146 continue;
147 } 147 }
148 148
@@ -152,7 +152,7 @@ static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
152 p += bs - ctx->len; 152 p += bs - ctx->len;
153 153
154 ctx->xor(ctx->prev, ctx->odds, bs); 154 ctx->xor(ctx->prev, ctx->odds, bs);
155 tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, ctx->prev, ctx->prev); 155 crypto_cipher_encrypt_one(tfm, ctx->prev, ctx->prev);
156 156
157 /* clearing the length */ 157 /* clearing the length */
158 ctx->len = 0; 158 ctx->len = 0;
@@ -160,7 +160,8 @@ static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
160 /* encrypting the rest of data */ 160 /* encrypting the rest of data */
161 while (len > bs) { 161 while (len > bs) {
162 ctx->xor(ctx->prev, p, bs); 162 ctx->xor(ctx->prev, p, bs);
163 tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, ctx->prev, ctx->prev); 163 crypto_cipher_encrypt_one(tfm, ctx->prev,
164 ctx->prev);
164 p += bs; 165 p += bs;
165 len -= bs; 166 len -= bs;
166 } 167 }
@@ -171,7 +172,7 @@ static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
171 ctx->len = len; 172 ctx->len = len;
172 } 173 }
173 crypto_kunmap(p, 0); 174 crypto_kunmap(p, 0);
174 crypto_yield(tfm->crt_flags); 175 crypto_yield(pdesc->flags);
175 slen -= min(slen, ((unsigned int)(PAGE_SIZE)) - offset); 176 slen -= min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
176 offset = 0; 177 offset = 0;
177 pg++; 178 pg++;
@@ -183,11 +184,20 @@ static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
183 return 0; 184 return 0;
184} 185}
185 186
187static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
188 struct scatterlist *sg,
189 unsigned int nbytes)
190{
191 if (WARN_ON_ONCE(in_irq()))
192 return -EDEADLK;
193 return crypto_xcbc_digest_update2(pdesc, sg, nbytes);
194}
195
186static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out) 196static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
187{ 197{
188 struct crypto_hash *parent = pdesc->tfm; 198 struct crypto_hash *parent = pdesc->tfm;
189 struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent); 199 struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
190 struct crypto_tfm *tfm = ctx->child; 200 struct crypto_cipher *tfm = ctx->child;
191 int bs = crypto_hash_blocksize(parent); 201 int bs = crypto_hash_blocksize(parent);
192 int err = 0; 202 int err = 0;
193 203
@@ -197,13 +207,14 @@ static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
197 if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0) 207 if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
198 return err; 208 return err;
199 209
200 tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, key2, (const u8*)(ctx->consts+bs)); 210 crypto_cipher_encrypt_one(tfm, key2,
211 (u8 *)(ctx->consts + bs));
201 212
202 ctx->xor(ctx->prev, ctx->odds, bs); 213 ctx->xor(ctx->prev, ctx->odds, bs);
203 ctx->xor(ctx->prev, key2, bs); 214 ctx->xor(ctx->prev, key2, bs);
204 _crypto_xcbc_digest_setkey(parent, ctx); 215 _crypto_xcbc_digest_setkey(parent, ctx);
205 216
206 tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, out, ctx->prev); 217 crypto_cipher_encrypt_one(tfm, out, ctx->prev);
207 } else { 218 } else {
208 u8 key3[bs]; 219 u8 key3[bs];
209 unsigned int rlen; 220 unsigned int rlen;
@@ -218,14 +229,15 @@ static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
218 if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0) 229 if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
219 return err; 230 return err;
220 231
221 tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, key3, (const u8*)(ctx->consts+bs*2)); 232 crypto_cipher_encrypt_one(tfm, key3,
233 (u8 *)(ctx->consts + bs * 2));
222 234
223 ctx->xor(ctx->prev, ctx->odds, bs); 235 ctx->xor(ctx->prev, ctx->odds, bs);
224 ctx->xor(ctx->prev, key3, bs); 236 ctx->xor(ctx->prev, key3, bs);
225 237
226 _crypto_xcbc_digest_setkey(parent, ctx); 238 _crypto_xcbc_digest_setkey(parent, ctx);
227 239
228 tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, out, ctx->prev); 240 crypto_cipher_encrypt_one(tfm, out, ctx->prev);
229 } 241 }
230 242
231 return 0; 243 return 0;
@@ -234,21 +246,25 @@ static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
234static int crypto_xcbc_digest(struct hash_desc *pdesc, 246static int crypto_xcbc_digest(struct hash_desc *pdesc,
235 struct scatterlist *sg, unsigned int nbytes, u8 *out) 247 struct scatterlist *sg, unsigned int nbytes, u8 *out)
236{ 248{
249 if (WARN_ON_ONCE(in_irq()))
250 return -EDEADLK;
251
237 crypto_xcbc_digest_init(pdesc); 252 crypto_xcbc_digest_init(pdesc);
238 crypto_xcbc_digest_update(pdesc, sg, nbytes); 253 crypto_xcbc_digest_update2(pdesc, sg, nbytes);
239 return crypto_xcbc_digest_final(pdesc, out); 254 return crypto_xcbc_digest_final(pdesc, out);
240} 255}
241 256
242static int xcbc_init_tfm(struct crypto_tfm *tfm) 257static int xcbc_init_tfm(struct crypto_tfm *tfm)
243{ 258{
259 struct crypto_cipher *cipher;
244 struct crypto_instance *inst = (void *)tfm->__crt_alg; 260 struct crypto_instance *inst = (void *)tfm->__crt_alg;
245 struct crypto_spawn *spawn = crypto_instance_ctx(inst); 261 struct crypto_spawn *spawn = crypto_instance_ctx(inst);
246 struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm)); 262 struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm));
247 int bs = crypto_hash_blocksize(__crypto_hash_cast(tfm)); 263 int bs = crypto_hash_blocksize(__crypto_hash_cast(tfm));
248 264
249 tfm = crypto_spawn_tfm(spawn); 265 cipher = crypto_spawn_cipher(spawn);
250 if (IS_ERR(tfm)) 266 if (IS_ERR(cipher))
251 return PTR_ERR(tfm); 267 return PTR_ERR(cipher);
252 268
253 switch(bs) { 269 switch(bs) {
254 case 16: 270 case 16:
@@ -258,7 +274,7 @@ static int xcbc_init_tfm(struct crypto_tfm *tfm)
258 return -EINVAL; 274 return -EINVAL;
259 } 275 }
260 276
261 ctx->child = crypto_cipher_cast(tfm); 277 ctx->child = cipher;
262 ctx->odds = (u8*)(ctx+1); 278 ctx->odds = (u8*)(ctx+1);
263 ctx->prev = ctx->odds + bs; 279 ctx->prev = ctx->odds + bs;
264 ctx->key = ctx->prev + bs; 280 ctx->key = ctx->prev + bs;
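
The xcbc.c conversion above keeps the RFC 3566 key-derivation shape (K1, K2 and K3 are obtained by encrypting three fixed constant blocks under the user key), but now routes every block-cipher call through the crypto_cipher_encrypt_one() helper on a crypto_cipher handle instead of reaching into cia_encrypt directly. A rough userspace sketch of that derivation follows; the XOR "cipher" is only a placeholder so the example stays self-contained, where the kernel code runs the real AES transform:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BS 16	/* AES block size, the only block size crypto/xcbc.c accepts */

/*
 * Placeholder for crypto_cipher_encrypt_one(): XOR-with-key is used only
 * so this sketch compiles and runs; the kernel code encrypts with AES.
 */
static void encrypt_one(const uint8_t key[BS], uint8_t dst[BS],
			const uint8_t src[BS])
{
	int i;

	for (i = 0; i < BS; i++)
		dst[i] = src[i] ^ key[i];
}

int main(void)
{
	/* RFC 3566: K1 = E_K(0x01^16), K2 = E_K(0x02^16), K3 = E_K(0x03^16) */
	uint8_t consts[3 * BS];
	uint8_t key[BS] = { 0 }, k1[BS], k2[BS], k3[BS];

	memset(consts, 0x01, BS);
	memset(consts + BS, 0x02, BS);
	memset(consts + 2 * BS, 0x03, BS);

	encrypt_one(key, k1, consts);		/* keys the CBC-MAC itself               */
	encrypt_one(key, k2, consts + BS);	/* mixed in when the last block is full   */
	encrypt_one(key, k3, consts + 2 * BS);	/* mixed in when the last block is padded */

	printf("k1[0]=%02x k2[0]=%02x k3[0]=%02x\n", k1[0], k2[0], k3[0]);
	return 0;
}
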
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 43a68398656f..31ea405f2eeb 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -457,7 +457,7 @@ static struct pci_driver geode_aes_driver = {
457static int __init 457static int __init
458geode_aes_init(void) 458geode_aes_init(void)
459{ 459{
460 return pci_module_init(&geode_aes_driver); 460 return pci_register_driver(&geode_aes_driver);
461} 461}
462 462
463static void __exit 463static void __exit
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 32923162179e..217a2eedee0a 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -184,7 +184,7 @@ static int tlb_initialize(struct bonding *bond)
184 184
185 spin_lock_init(&(bond_info->tx_hashtbl_lock)); 185 spin_lock_init(&(bond_info->tx_hashtbl_lock));
186 186
187 new_hashtbl = kmalloc(size, GFP_KERNEL); 187 new_hashtbl = kzalloc(size, GFP_KERNEL);
188 if (!new_hashtbl) { 188 if (!new_hashtbl) {
189 printk(KERN_ERR DRV_NAME 189 printk(KERN_ERR DRV_NAME
190 ": %s: Error: Failed to allocate TLB hash table\n", 190 ": %s: Error: Failed to allocate TLB hash table\n",
@@ -195,8 +195,6 @@ static int tlb_initialize(struct bonding *bond)
195 195
196 bond_info->tx_hashtbl = new_hashtbl; 196 bond_info->tx_hashtbl = new_hashtbl;
197 197
198 memset(bond_info->tx_hashtbl, 0, size);
199
200 for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) { 198 for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
201 tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1); 199 tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1);
202 } 200 }
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d3801a00d3d5..8ce8fec615ba 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1343,14 +1343,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1343 "inaccurate.\n", bond_dev->name, slave_dev->name); 1343 "inaccurate.\n", bond_dev->name, slave_dev->name);
1344 } 1344 }
1345 1345
1346 new_slave = kmalloc(sizeof(struct slave), GFP_KERNEL); 1346 new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
1347 if (!new_slave) { 1347 if (!new_slave) {
1348 res = -ENOMEM; 1348 res = -ENOMEM;
1349 goto err_undo_flags; 1349 goto err_undo_flags;
1350 } 1350 }
1351 1351
1352 memset(new_slave, 0, sizeof(struct slave));
1353
1354 /* save slave's original flags before calling 1352 /* save slave's original flags before calling
1355 * netdev_set_master and dev_open 1353 * netdev_set_master and dev_open
1356 */ 1354 */
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index a0806d262fc6..2f4b1de7a2b4 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -1343,15 +1343,12 @@ static int __init slip_init(void)
1343 printk(KERN_INFO "SLIP linefill/keepalive option.\n"); 1343 printk(KERN_INFO "SLIP linefill/keepalive option.\n");
1344#endif 1344#endif
1345 1345
1346 slip_devs = kmalloc(sizeof(struct net_device *)*slip_maxdev, GFP_KERNEL); 1346 slip_devs = kzalloc(sizeof(struct net_device *)*slip_maxdev, GFP_KERNEL);
1347 if (!slip_devs) { 1347 if (!slip_devs) {
1348 printk(KERN_ERR "SLIP: Can't allocate slip devices array! Uaargh! (-> No SLIP available)\n"); 1348 printk(KERN_ERR "SLIP: Can't allocate slip devices array! Uaargh! (-> No SLIP available)\n");
1349 return -ENOMEM; 1349 return -ENOMEM;
1350 } 1350 }
1351 1351
1352 /* Clear the pointer array, we allocate devices when we need them */
1353 memset(slip_devs, 0, sizeof(struct net_device *)*slip_maxdev);
1354
1355 /* Fill in our line protocol discipline, and register it */ 1352 /* Fill in our line protocol discipline, and register it */
1356 if ((status = tty_register_ldisc(N_SLIP, &sl_ldisc)) != 0) { 1353 if ((status = tty_register_ldisc(N_SLIP, &sl_ldisc)) != 0) {
1357 printk(KERN_ERR "SLIP: can't register line discipline (err = %d)\n", status); 1354 printk(KERN_ERR "SLIP: can't register line discipline (err = %d)\n", status);
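
The three allocation hunks above (bond_alb.c, bond_main.c and slip.c) are the same mechanical cleanup: a kmalloc() followed by a memset(..., 0, ...) is collapsed into a single kzalloc(), which allocates and zeroes in one call. The userspace analogue, shown here purely to illustrate the pattern, is malloc()+memset() versus calloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t n = 64 * sizeof(void *);

	/* Open-coded zeroing, as the old bonding/slip code did. */
	unsigned char *a = malloc(n);
	if (!a)
		return 1;
	memset(a, 0, n);

	/* Allocate-and-zero in one call, the analogue of kzalloc(). */
	unsigned char *b = calloc(1, n);
	if (!b) {
		free(a);
		return 1;
	}

	printf("both buffers start zeroed: %d %d\n", a[0], b[0]);
	free(a);
	free(b);
	return 0;
}
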
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 135c0987deae..e136bae61970 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3380,7 +3380,7 @@ next_pkt:
3380 } 3380 }
3381next_pkt_nopost: 3381next_pkt_nopost:
3382 sw_idx++; 3382 sw_idx++;
3383 sw_idx %= TG3_RX_RCB_RING_SIZE(tp); 3383 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
3384 3384
3385 /* Refresh hw_idx to see if there is new work */ 3385 /* Refresh hw_idx to see if there is new work */
3386 if (sw_idx == hw_idx) { 3386 if (sw_idx == hw_idx) {
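
The tg3.c hunk replaces a modulo wrap of the RX return-ring index with a bitwise AND against (ring size - 1), avoiding an integer division in the receive hot path. The two forms are interchangeable only when the ring size is a power of two, which this change assumes. A small check of that equivalence:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned int ring_size = 512;	/* assumed power-of-two size */
	unsigned int idx;

	/* For power-of-two sizes, masking and modulo wrap identically. */
	for (idx = 0; idx < 4 * ring_size; idx++)
		assert((idx % ring_size) == (idx & (ring_size - 1)));

	printf("mask wrap matches modulo wrap for ring_size=%u\n", ring_size);
	return 0;
}
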
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index a138b1510093..3a1a958fb5f2 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Character device driver for reading z/VM *MONITOR service records. 4 * Character device driver for reading z/VM *MONITOR service records.
5 * 5 *
6 * Copyright (C) 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH. 6 * Copyright 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH.
7 * 7 *
8 * Author: Gerald Schaefer <geraldsc@de.ibm.com> 8 * Author: Gerald Schaefer <geraldsc@de.ibm.com>
9 */ 9 */
@@ -22,7 +22,7 @@
22#include <asm/ebcdic.h> 22#include <asm/ebcdic.h>
23#include <asm/extmem.h> 23#include <asm/extmem.h>
24#include <linux/poll.h> 24#include <linux/poll.h>
25#include "../net/iucv.h" 25#include <net/iucv/iucv.h>
26 26
27 27
28//#define MON_DEBUG /* Debug messages on/off */ 28//#define MON_DEBUG /* Debug messages on/off */
@@ -50,14 +50,13 @@ static char mon_dcss_name[9] = "MONDCSS\0";
50struct mon_msg { 50struct mon_msg {
51 u32 pos; 51 u32 pos;
52 u32 mca_offset; 52 u32 mca_offset;
53 iucv_MessagePending local_eib; 53 struct iucv_message msg;
54 char msglim_reached; 54 char msglim_reached;
55 char replied_msglim; 55 char replied_msglim;
56}; 56};
57 57
58struct mon_private { 58struct mon_private {
59 u16 pathid; 59 struct iucv_path *path;
60 iucv_handle_t iucv_handle;
61 struct mon_msg *msg_array[MON_MSGLIM]; 60 struct mon_msg *msg_array[MON_MSGLIM];
62 unsigned int write_index; 61 unsigned int write_index;
63 unsigned int read_index; 62 unsigned int read_index;
@@ -75,8 +74,6 @@ static unsigned long mon_dcss_end;
75static DECLARE_WAIT_QUEUE_HEAD(mon_read_wait_queue); 74static DECLARE_WAIT_QUEUE_HEAD(mon_read_wait_queue);
76static DECLARE_WAIT_QUEUE_HEAD(mon_conn_wait_queue); 75static DECLARE_WAIT_QUEUE_HEAD(mon_conn_wait_queue);
77 76
78static u8 iucv_host[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
79
80static u8 user_data_connect[16] = { 77static u8 user_data_connect[16] = {
81 /* Version code, must be 0x01 for shared mode */ 78 /* Version code, must be 0x01 for shared mode */
82 0x01, 79 0x01,
@@ -100,8 +97,7 @@ static u8 user_data_sever[16] = {
100 * Create the 8 bytes EBCDIC DCSS segment name from 97 * Create the 8 bytes EBCDIC DCSS segment name from
101 * an ASCII name, incl. padding 98 * an ASCII name, incl. padding
102 */ 99 */
103static inline void 100static inline void dcss_mkname(char *ascii_name, char *ebcdic_name)
104dcss_mkname(char *ascii_name, char *ebcdic_name)
105{ 101{
106 int i; 102 int i;
107 103
@@ -119,8 +115,7 @@ dcss_mkname(char *ascii_name, char *ebcdic_name)
119 * print appropriate error message for segment_load()/segment_type() 115 * print appropriate error message for segment_load()/segment_type()
120 * return code 116 * return code
121 */ 117 */
122static void 118static void mon_segment_warn(int rc, char* seg_name)
123mon_segment_warn(int rc, char* seg_name)
124{ 119{
125 switch (rc) { 120 switch (rc) {
126 case -ENOENT: 121 case -ENOENT:
@@ -166,44 +161,37 @@ mon_segment_warn(int rc, char* seg_name)
166 } 161 }
167} 162}
168 163
169static inline unsigned long 164static inline unsigned long mon_mca_start(struct mon_msg *monmsg)
170mon_mca_start(struct mon_msg *monmsg)
171{ 165{
172 return monmsg->local_eib.ln1msg1.iprmmsg1_u32; 166 return *(u32 *) &monmsg->msg.rmmsg;
173} 167}
174 168
175static inline unsigned long 169static inline unsigned long mon_mca_end(struct mon_msg *monmsg)
176mon_mca_end(struct mon_msg *monmsg)
177{ 170{
178 return monmsg->local_eib.ln1msg2.ipbfln1f; 171 return *(u32 *) &monmsg->msg.rmmsg[4];
179} 172}
180 173
181static inline u8 174static inline u8 mon_mca_type(struct mon_msg *monmsg, u8 index)
182mon_mca_type(struct mon_msg *monmsg, u8 index)
183{ 175{
184 return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index); 176 return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index);
185} 177}
186 178
187static inline u32 179static inline u32 mon_mca_size(struct mon_msg *monmsg)
188mon_mca_size(struct mon_msg *monmsg)
189{ 180{
190 return mon_mca_end(monmsg) - mon_mca_start(monmsg) + 1; 181 return mon_mca_end(monmsg) - mon_mca_start(monmsg) + 1;
191} 182}
192 183
193static inline u32 184static inline u32 mon_rec_start(struct mon_msg *monmsg)
194mon_rec_start(struct mon_msg *monmsg)
195{ 185{
196 return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4)); 186 return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4));
197} 187}
198 188
199static inline u32 189static inline u32 mon_rec_end(struct mon_msg *monmsg)
200mon_rec_end(struct mon_msg *monmsg)
201{ 190{
202 return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8)); 191 return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8));
203} 192}
204 193
205static inline int 194static inline int mon_check_mca(struct mon_msg *monmsg)
206mon_check_mca(struct mon_msg *monmsg)
207{ 195{
208 if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) || 196 if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) ||
209 (mon_rec_start(monmsg) < mon_dcss_start) || 197 (mon_rec_start(monmsg) < mon_dcss_start) ||
@@ -221,20 +209,17 @@ mon_check_mca(struct mon_msg *monmsg)
221 return 0; 209 return 0;
222} 210}
223 211
224static inline int 212static inline int mon_send_reply(struct mon_msg *monmsg,
225mon_send_reply(struct mon_msg *monmsg, struct mon_private *monpriv) 213 struct mon_private *monpriv)
226{ 214{
227 u8 prmmsg[8];
228 int rc; 215 int rc;
229 216
230 P_DEBUG("read, REPLY: pathid = 0x%04X, msgid = 0x%08X, trgcls = " 217 P_DEBUG("read, REPLY: pathid = 0x%04X, msgid = 0x%08X, trgcls = "
231 "0x%08X\n\n", 218 "0x%08X\n\n",
232 monmsg->local_eib.ippathid, monmsg->local_eib.ipmsgid, 219 monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class);
233 monmsg->local_eib.iptrgcls); 220
234 rc = iucv_reply_prmmsg(monmsg->local_eib.ippathid, 221 rc = iucv_message_reply(monpriv->path, &monmsg->msg,
235 monmsg->local_eib.ipmsgid, 222 IUCV_IPRMDATA, NULL, 0);
236 monmsg->local_eib.iptrgcls,
237 0, prmmsg);
238 atomic_dec(&monpriv->msglim_count); 223 atomic_dec(&monpriv->msglim_count);
239 if (likely(!monmsg->msglim_reached)) { 224 if (likely(!monmsg->msglim_reached)) {
240 monmsg->pos = 0; 225 monmsg->pos = 0;
@@ -251,10 +236,19 @@ mon_send_reply(struct mon_msg *monmsg, struct mon_private *monpriv)
251 return 0; 236 return 0;
252} 237}
253 238
254static inline struct mon_private * 239static inline void mon_free_mem(struct mon_private *monpriv)
255mon_alloc_mem(void) 240{
241 int i;
242
243 for (i = 0; i < MON_MSGLIM; i++)
244 if (monpriv->msg_array[i])
245 kfree(monpriv->msg_array[i]);
246 kfree(monpriv);
247}
248
249static inline struct mon_private *mon_alloc_mem(void)
256{ 250{
257 int i,j; 251 int i;
258 struct mon_private *monpriv; 252 struct mon_private *monpriv;
259 253
260 monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL); 254 monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
@@ -267,16 +261,15 @@ mon_alloc_mem(void)
267 GFP_KERNEL); 261 GFP_KERNEL);
268 if (!monpriv->msg_array[i]) { 262 if (!monpriv->msg_array[i]) {
269 P_ERROR("open, no memory for msg_array\n"); 263 P_ERROR("open, no memory for msg_array\n");
270 for (j = 0; j < i; j++) 264 mon_free_mem(monpriv);
271 kfree(monpriv->msg_array[j]);
272 return NULL; 265 return NULL;
273 } 266 }
274 } 267 }
275 return monpriv; 268 return monpriv;
276} 269}
277 270
278static inline void 271static inline void mon_read_debug(struct mon_msg *monmsg,
279mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv) 272 struct mon_private *monpriv)
280{ 273{
281#ifdef MON_DEBUG 274#ifdef MON_DEBUG
282 u8 msg_type[2], mca_type; 275 u8 msg_type[2], mca_type;
@@ -284,7 +277,7 @@ mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv)
284 277
285 records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1; 278 records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1;
286 279
287 memcpy(msg_type, &monmsg->local_eib.iptrgcls, 2); 280 memcpy(msg_type, &monmsg->msg.class, 2);
288 EBCASC(msg_type, 2); 281 EBCASC(msg_type, 2);
289 mca_type = mon_mca_type(monmsg, 0); 282 mca_type = mon_mca_type(monmsg, 0);
290 EBCASC(&mca_type, 1); 283 EBCASC(&mca_type, 1);
@@ -292,8 +285,7 @@ mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv)
292 P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n", 285 P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n",
293 monpriv->read_index, monpriv->write_index); 286 monpriv->read_index, monpriv->write_index);
294 P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n", 287 P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n",
295 monmsg->local_eib.ippathid, monmsg->local_eib.ipmsgid, 288 monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class);
296 monmsg->local_eib.iptrgcls);
297 P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n", 289 P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n",
298 msg_type[0], msg_type[1], mca_type ? mca_type : 'X', 290 msg_type[0], msg_type[1], mca_type ? mca_type : 'X',
299 mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2)); 291 mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2));
@@ -306,8 +298,7 @@ mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv)
306#endif 298#endif
307} 299}
308 300
309static inline void 301static inline void mon_next_mca(struct mon_msg *monmsg)
310mon_next_mca(struct mon_msg *monmsg)
311{ 302{
312 if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12)) 303 if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12))
313 return; 304 return;
@@ -316,8 +307,7 @@ mon_next_mca(struct mon_msg *monmsg)
316 monmsg->pos = 0; 307 monmsg->pos = 0;
317} 308}
318 309
319static inline struct mon_msg * 310static inline struct mon_msg *mon_next_message(struct mon_private *monpriv)
320mon_next_message(struct mon_private *monpriv)
321{ 311{
322 struct mon_msg *monmsg; 312 struct mon_msg *monmsg;
323 313
@@ -342,39 +332,37 @@ mon_next_message(struct mon_private *monpriv)
342/****************************************************************************** 332/******************************************************************************
343 * IUCV handler * 333 * IUCV handler *
344 *****************************************************************************/ 334 *****************************************************************************/
345static void 335static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
346mon_iucv_ConnectionComplete(iucv_ConnectionComplete *eib, void *pgm_data)
347{ 336{
348 struct mon_private *monpriv = (struct mon_private *) pgm_data; 337 struct mon_private *monpriv = path->private;
349 338
350 P_DEBUG("IUCV connection completed\n"); 339 P_DEBUG("IUCV connection completed\n");
351 P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = " 340 P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = "
352 "0x%02X, Sample = 0x%02X\n", 341 "0x%02X, Sample = 0x%02X\n",
353 eib->ipuser[0], eib->ipuser[1], eib->ipuser[2]); 342 ipuser[0], ipuser[1], ipuser[2]);
354 atomic_set(&monpriv->iucv_connected, 1); 343 atomic_set(&monpriv->iucv_connected, 1);
355 wake_up(&mon_conn_wait_queue); 344 wake_up(&mon_conn_wait_queue);
356} 345}
357 346
358static void 347static void mon_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
359mon_iucv_ConnectionSevered(iucv_ConnectionSevered *eib, void *pgm_data)
360{ 348{
361 struct mon_private *monpriv = (struct mon_private *) pgm_data; 349 struct mon_private *monpriv = path->private;
362 350
363 P_ERROR("IUCV connection severed with rc = 0x%X\n", 351 P_ERROR("IUCV connection severed with rc = 0x%X\n", ipuser[0]);
364 (u8) eib->ipuser[0]); 352 iucv_path_sever(path, NULL);
365 atomic_set(&monpriv->iucv_severed, 1); 353 atomic_set(&monpriv->iucv_severed, 1);
366 wake_up(&mon_conn_wait_queue); 354 wake_up(&mon_conn_wait_queue);
367 wake_up_interruptible(&mon_read_wait_queue); 355 wake_up_interruptible(&mon_read_wait_queue);
368} 356}
369 357
370static void 358static void mon_iucv_message_pending(struct iucv_path *path,
371mon_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data) 359 struct iucv_message *msg)
372{ 360{
373 struct mon_private *monpriv = (struct mon_private *) pgm_data; 361 struct mon_private *monpriv = path->private;
374 362
375 P_DEBUG("IUCV message pending\n"); 363 P_DEBUG("IUCV message pending\n");
376 memcpy(&monpriv->msg_array[monpriv->write_index]->local_eib, eib, 364 memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
377 sizeof(iucv_MessagePending)); 365 msg, sizeof(*msg));
378 if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) { 366 if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
379 P_WARNING("IUCV message pending, message limit (%i) reached\n", 367 P_WARNING("IUCV message pending, message limit (%i) reached\n",
380 MON_MSGLIM); 368 MON_MSGLIM);
@@ -385,54 +373,45 @@ mon_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data)
385 wake_up_interruptible(&mon_read_wait_queue); 373 wake_up_interruptible(&mon_read_wait_queue);
386} 374}
387 375
388static iucv_interrupt_ops_t mon_iucvops = { 376static struct iucv_handler monreader_iucv_handler = {
389 .ConnectionComplete = mon_iucv_ConnectionComplete, 377 .path_complete = mon_iucv_path_complete,
390 .ConnectionSevered = mon_iucv_ConnectionSevered, 378 .path_severed = mon_iucv_path_severed,
391 .MessagePending = mon_iucv_MessagePending, 379 .message_pending = mon_iucv_message_pending,
392}; 380};
393 381
394/****************************************************************************** 382/******************************************************************************
395 * file operations * 383 * file operations *
396 *****************************************************************************/ 384 *****************************************************************************/
397static int 385static int mon_open(struct inode *inode, struct file *filp)
398mon_open(struct inode *inode, struct file *filp)
399{ 386{
400 int rc, i;
401 struct mon_private *monpriv; 387 struct mon_private *monpriv;
388 int rc;
402 389
403 /* 390 /*
404 * only one user allowed 391 * only one user allowed
405 */ 392 */
393 rc = -EBUSY;
406 if (test_and_set_bit(MON_IN_USE, &mon_in_use)) 394 if (test_and_set_bit(MON_IN_USE, &mon_in_use))
407 return -EBUSY; 395 goto out;
408 396
397 rc = -ENOMEM;
409 monpriv = mon_alloc_mem(); 398 monpriv = mon_alloc_mem();
410 if (!monpriv) 399 if (!monpriv)
411 return -ENOMEM; 400 goto out_use;
412 401
413 /* 402 /*
414 * Register with IUCV and connect to *MONITOR service 403 * Connect to *MONITOR service
415 */ 404 */
416 monpriv->iucv_handle = iucv_register_program("my_monreader ", 405 monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
417 MON_SERVICE, 406 if (!monpriv->path)
418 NULL, 407 goto out_priv;
419 &mon_iucvops, 408 rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
420 monpriv); 409 MON_SERVICE, NULL, user_data_connect, monpriv);
421 if (!monpriv->iucv_handle) {
422 P_ERROR("failed to register with iucv driver\n");
423 rc = -EIO;
424 goto out_error;
425 }
426 P_INFO("open, registered with IUCV\n");
427
428 rc = iucv_connect(&monpriv->pathid, MON_MSGLIM, user_data_connect,
429 MON_SERVICE, iucv_host, IPRMDATA, NULL, NULL,
430 monpriv->iucv_handle, NULL);
431 if (rc) { 410 if (rc) {
432 P_ERROR("iucv connection to *MONITOR failed with " 411 P_ERROR("iucv connection to *MONITOR failed with "
433 "IPUSER SEVER code = %i\n", rc); 412 "IPUSER SEVER code = %i\n", rc);
434 rc = -EIO; 413 rc = -EIO;
435 goto out_unregister; 414 goto out_path;
436 } 415 }
437 /* 416 /*
438 * Wait for connection confirmation 417 * Wait for connection confirmation
@@ -444,24 +423,23 @@ mon_open(struct inode *inode, struct file *filp)
444 atomic_set(&monpriv->iucv_severed, 0); 423 atomic_set(&monpriv->iucv_severed, 0);
445 atomic_set(&monpriv->iucv_connected, 0); 424 atomic_set(&monpriv->iucv_connected, 0);
446 rc = -EIO; 425 rc = -EIO;
447 goto out_unregister; 426 goto out_path;
448 } 427 }
449 P_INFO("open, established connection to *MONITOR service\n\n"); 428 P_INFO("open, established connection to *MONITOR service\n\n");
450 filp->private_data = monpriv; 429 filp->private_data = monpriv;
451 return nonseekable_open(inode, filp); 430 return nonseekable_open(inode, filp);
452 431
453out_unregister: 432out_path:
454 iucv_unregister_program(monpriv->iucv_handle); 433 kfree(monpriv->path);
455out_error: 434out_priv:
456 for (i = 0; i < MON_MSGLIM; i++) 435 mon_free_mem(monpriv);
457 kfree(monpriv->msg_array[i]); 436out_use:
458 kfree(monpriv);
459 clear_bit(MON_IN_USE, &mon_in_use); 437 clear_bit(MON_IN_USE, &mon_in_use);
438out:
460 return rc; 439 return rc;
461} 440}
462 441
463static int 442static int mon_close(struct inode *inode, struct file *filp)
464mon_close(struct inode *inode, struct file *filp)
465{ 443{
466 int rc, i; 444 int rc, i;
467 struct mon_private *monpriv = filp->private_data; 445 struct mon_private *monpriv = filp->private_data;
@@ -469,18 +447,12 @@ mon_close(struct inode *inode, struct file *filp)
469 /* 447 /*
470 * Close IUCV connection and unregister 448 * Close IUCV connection and unregister
471 */ 449 */
472 rc = iucv_sever(monpriv->pathid, user_data_sever); 450 rc = iucv_path_sever(monpriv->path, user_data_sever);
473 if (rc) 451 if (rc)
474 P_ERROR("close, iucv_sever failed with rc = %i\n", rc); 452 P_ERROR("close, iucv_sever failed with rc = %i\n", rc);
475 else 453 else
476 P_INFO("close, terminated connection to *MONITOR service\n"); 454 P_INFO("close, terminated connection to *MONITOR service\n");
477 455
478 rc = iucv_unregister_program(monpriv->iucv_handle);
479 if (rc)
480 P_ERROR("close, iucv_unregister failed with rc = %i\n", rc);
481 else
482 P_INFO("close, unregistered with IUCV\n");
483
484 atomic_set(&monpriv->iucv_severed, 0); 456 atomic_set(&monpriv->iucv_severed, 0);
485 atomic_set(&monpriv->iucv_connected, 0); 457 atomic_set(&monpriv->iucv_connected, 0);
486 atomic_set(&monpriv->read_ready, 0); 458 atomic_set(&monpriv->read_ready, 0);
@@ -495,8 +467,8 @@ mon_close(struct inode *inode, struct file *filp)
495 return 0; 467 return 0;
496} 468}
497 469
498static ssize_t 470static ssize_t mon_read(struct file *filp, char __user *data,
499mon_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) 471 size_t count, loff_t *ppos)
500{ 472{
501 struct mon_private *monpriv = filp->private_data; 473 struct mon_private *monpriv = filp->private_data;
502 struct mon_msg *monmsg; 474 struct mon_msg *monmsg;
@@ -563,8 +535,7 @@ out_copy:
563 return count; 535 return count;
564} 536}
565 537
566static unsigned int 538static unsigned int mon_poll(struct file *filp, struct poll_table_struct *p)
567mon_poll(struct file *filp, struct poll_table_struct *p)
568{ 539{
569 struct mon_private *monpriv = filp->private_data; 540 struct mon_private *monpriv = filp->private_data;
570 541
@@ -593,8 +564,7 @@ static struct miscdevice mon_dev = {
593/****************************************************************************** 564/******************************************************************************
594 * module init/exit * 565 * module init/exit *
595 *****************************************************************************/ 566 *****************************************************************************/
596static int __init 567static int __init mon_init(void)
597mon_init(void)
598{ 568{
599 int rc; 569 int rc;
600 570
@@ -603,22 +573,34 @@ mon_init(void)
603 return -ENODEV; 573 return -ENODEV;
604 } 574 }
605 575
576 /*
577 * Register with IUCV and connect to *MONITOR service
578 */
579 rc = iucv_register(&monreader_iucv_handler, 1);
580 if (rc) {
581 P_ERROR("failed to register with iucv driver\n");
582 return rc;
583 }
584 P_INFO("open, registered with IUCV\n");
585
606 rc = segment_type(mon_dcss_name); 586 rc = segment_type(mon_dcss_name);
607 if (rc < 0) { 587 if (rc < 0) {
608 mon_segment_warn(rc, mon_dcss_name); 588 mon_segment_warn(rc, mon_dcss_name);
609 return rc; 589 goto out_iucv;
610 } 590 }
611 if (rc != SEG_TYPE_SC) { 591 if (rc != SEG_TYPE_SC) {
612 P_ERROR("segment %s has unsupported type, should be SC\n", 592 P_ERROR("segment %s has unsupported type, should be SC\n",
613 mon_dcss_name); 593 mon_dcss_name);
614 return -EINVAL; 594 rc = -EINVAL;
595 goto out_iucv;
615 } 596 }
616 597
617 rc = segment_load(mon_dcss_name, SEGMENT_SHARED, 598 rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
618 &mon_dcss_start, &mon_dcss_end); 599 &mon_dcss_start, &mon_dcss_end);
619 if (rc < 0) { 600 if (rc < 0) {
620 mon_segment_warn(rc, mon_dcss_name); 601 mon_segment_warn(rc, mon_dcss_name);
621 return -EINVAL; 602 rc = -EINVAL;
603 goto out_iucv;
622 } 604 }
623 dcss_mkname(mon_dcss_name, &user_data_connect[8]); 605 dcss_mkname(mon_dcss_name, &user_data_connect[8]);
624 606
@@ -634,14 +616,16 @@ mon_init(void)
634 616
635out: 617out:
636 segment_unload(mon_dcss_name); 618 segment_unload(mon_dcss_name);
619out_iucv:
620 iucv_unregister(&monreader_iucv_handler, 1);
637 return rc; 621 return rc;
638} 622}
639 623
640static void __exit 624static void __exit mon_exit(void)
641mon_exit(void)
642{ 625{
643 segment_unload(mon_dcss_name); 626 segment_unload(mon_dcss_name);
644 WARN_ON(misc_deregister(&mon_dev) != 0); 627 WARN_ON(misc_deregister(&mon_dev) != 0);
628 iucv_unregister(&monreader_iucv_handler, 1);
645 return; 629 return;
646} 630}
647 631
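
Both s390 drivers in this patch (monreader above, vmlogrdr below) move from the old per-program iucv_register_program()/iucv_connect() interface to the rewritten IUCV base: the driver registers a struct iucv_handler, a table of path_complete/path_severed/message_pending callbacks, once at init time, and each connection is represented by an iucv_path whose ->private field points back at the driver's per-connection data. A stripped-down userspace model of that callback-table shape follows; the structure and callback names mirror the diff, but the types are stand-ins and the dispatch in main() is invented purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins mirroring the shape of the new IUCV structures. */
struct iucv_path {
	uint16_t pathid;
	void *private;		/* per-connection driver data */
};

struct iucv_message {
	uint32_t id;
	uint32_t class;
	uint32_t length;
};

/* Callback table, analogous to struct iucv_handler in the diff. */
struct iucv_handler {
	void (*path_complete)(struct iucv_path *, uint8_t ipuser[16]);
	void (*path_severed)(struct iucv_path *, uint8_t ipuser[16]);
	void (*message_pending)(struct iucv_path *, struct iucv_message *);
};

static void demo_message_pending(struct iucv_path *path,
				 struct iucv_message *msg)
{
	printf("path %u: message id=%u len=%u for %s\n",
	       (unsigned int)path->pathid, (unsigned int)msg->id,
	       (unsigned int)msg->length, (const char *)path->private);
}

static const struct iucv_handler demo_handler = {
	.message_pending = demo_message_pending,
};

int main(void)
{
	struct iucv_path path = { .pathid = 7, .private = "monreader" };
	struct iucv_message msg = { .id = 1, .class = 0, .length = 4096 };

	/* Invented dispatch: the real IUCV base invokes these callbacks
	 * from its interrupt handling, not from a main() loop. */
	if (demo_handler.message_pending)
		demo_handler.message_pending(&path, &msg);
	return 0;
}
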
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 4f894dc2373b..8432a76b961e 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -3,7 +3,7 @@
3 * character device driver for reading z/VM system service records 3 * character device driver for reading z/VM system service records
4 * 4 *
5 * 5 *
6 * Copyright (C) 2004 IBM Corporation 6 * Copyright 2004 IBM Corporation
7 * character device driver for reading z/VM system service records, 7 * character device driver for reading z/VM system service records,
8 * Version 1.0 8 * Version 1.0
9 * Author(s): Xenia Tkatschow <xenia@us.ibm.com> 9 * Author(s): Xenia Tkatschow <xenia@us.ibm.com>
@@ -21,7 +21,7 @@
21#include <asm/cpcmd.h> 21#include <asm/cpcmd.h>
22#include <asm/debug.h> 22#include <asm/debug.h>
23#include <asm/ebcdic.h> 23#include <asm/ebcdic.h>
24#include "../net/iucv.h" 24#include <net/iucv/iucv.h>
25#include <linux/kmod.h> 25#include <linux/kmod.h>
26#include <linux/cdev.h> 26#include <linux/cdev.h>
27#include <linux/device.h> 27#include <linux/device.h>
@@ -60,12 +60,11 @@ struct vmlogrdr_priv_t {
60 char system_service[8]; 60 char system_service[8];
61 char internal_name[8]; 61 char internal_name[8];
62 char recording_name[8]; 62 char recording_name[8];
63 u16 pathid; 63 struct iucv_path *path;
64 int connection_established; 64 int connection_established;
65 int iucv_path_severed; 65 int iucv_path_severed;
66 iucv_MessagePending local_interrupt_buffer; 66 struct iucv_message local_interrupt_buffer;
67 atomic_t receive_ready; 67 atomic_t receive_ready;
68 iucv_handle_t iucv_handle;
69 int minor_num; 68 int minor_num;
70 char * buffer; 69 char * buffer;
71 char * current_position; 70 char * current_position;
@@ -97,37 +96,19 @@ static struct file_operations vmlogrdr_fops = {
97}; 96};
98 97
99 98
100static u8 iucvMagic[16] = { 99static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 ipuser[16]);
101 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 100static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 ipuser[16]);
102 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 101static void vmlogrdr_iucv_message_pending(struct iucv_path *,
103}; 102 struct iucv_message *);
104 103
105 104
106static u8 mask[] = { 105static struct iucv_handler vmlogrdr_iucv_handler = {
107 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 106 .path_complete = vmlogrdr_iucv_path_complete,
108 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 107 .path_severed = vmlogrdr_iucv_path_severed,
109 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 108 .message_pending = vmlogrdr_iucv_message_pending,
110 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
111}; 109};
112 110
113 111
114static u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
115
116
117static void
118vmlogrdr_iucv_ConnectionComplete(iucv_ConnectionComplete *eib, void *pgm_data);
119static void
120vmlogrdr_iucv_ConnectionSevered(iucv_ConnectionSevered *eib, void *pgm_data);
121static void
122vmlogrdr_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data);
123
124
125static iucv_interrupt_ops_t vmlogrdr_iucvops = {
126 .ConnectionComplete = vmlogrdr_iucv_ConnectionComplete,
127 .ConnectionSevered = vmlogrdr_iucv_ConnectionSevered,
128 .MessagePending = vmlogrdr_iucv_MessagePending,
129};
130
131static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue); 112static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
132static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue); 113static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
133 114
@@ -176,28 +157,29 @@ static struct cdev *vmlogrdr_cdev = NULL;
176static int recording_class_AB; 157static int recording_class_AB;
177 158
178 159
179static void 160static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
180vmlogrdr_iucv_ConnectionComplete (iucv_ConnectionComplete * eib,
181 void * pgm_data)
182{ 161{
183 struct vmlogrdr_priv_t * logptr = pgm_data; 162 struct vmlogrdr_priv_t * logptr = path->private;
163
184 spin_lock(&logptr->priv_lock); 164 spin_lock(&logptr->priv_lock);
185 logptr->connection_established = 1; 165 logptr->connection_established = 1;
186 spin_unlock(&logptr->priv_lock); 166 spin_unlock(&logptr->priv_lock);
187 wake_up(&conn_wait_queue); 167 wake_up(&conn_wait_queue);
188 return;
189} 168}
190 169
191 170
192static void 171static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
193vmlogrdr_iucv_ConnectionSevered (iucv_ConnectionSevered * eib, void * pgm_data)
194{ 172{
195 u8 reason = (u8) eib->ipuser[8]; 173 struct vmlogrdr_priv_t * logptr = path->private;
196 struct vmlogrdr_priv_t * logptr = pgm_data; 174 u8 reason = (u8) ipuser[8];
197 175
198 printk (KERN_ERR "vmlogrdr: connection severed with" 176 printk (KERN_ERR "vmlogrdr: connection severed with"
199 " reason %i\n", reason); 177 " reason %i\n", reason);
200 178
179 iucv_path_sever(path, NULL);
180 kfree(path);
181 logptr->path = NULL;
182
201 spin_lock(&logptr->priv_lock); 183 spin_lock(&logptr->priv_lock);
202 logptr->connection_established = 0; 184 logptr->connection_established = 0;
203 logptr->iucv_path_severed = 1; 185 logptr->iucv_path_severed = 1;
@@ -209,10 +191,10 @@ vmlogrdr_iucv_ConnectionSevered (iucv_ConnectionSevered * eib, void * pgm_data)
209} 191}
210 192
211 193
212static void 194static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
213vmlogrdr_iucv_MessagePending (iucv_MessagePending * eib, void * pgm_data) 195 struct iucv_message *msg)
214{ 196{
215 struct vmlogrdr_priv_t * logptr = pgm_data; 197 struct vmlogrdr_priv_t * logptr = path->private;
216 198
217 /* 199 /*
218 * This function is the bottom half so it should be quick. 200 * This function is the bottom half so it should be quick.
@@ -220,15 +202,15 @@ vmlogrdr_iucv_MessagePending (iucv_MessagePending * eib, void * pgm_data)
220 * the usage count 202 * the usage count
221 */ 203 */
222 spin_lock(&logptr->priv_lock); 204 spin_lock(&logptr->priv_lock);
223 memcpy(&(logptr->local_interrupt_buffer), eib, sizeof(*eib)); 205 memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
224 atomic_inc(&logptr->receive_ready); 206 atomic_inc(&logptr->receive_ready);
225 spin_unlock(&logptr->priv_lock); 207 spin_unlock(&logptr->priv_lock);
226 wake_up_interruptible(&read_wait_queue); 208 wake_up_interruptible(&read_wait_queue);
227} 209}
228 210
229 211
230static int 212static int vmlogrdr_get_recording_class_AB(void)
231vmlogrdr_get_recording_class_AB(void) { 213{
232 char cp_command[]="QUERY COMMAND RECORDING "; 214 char cp_command[]="QUERY COMMAND RECORDING ";
233 char cp_response[80]; 215 char cp_response[80];
234 char *tail; 216 char *tail;
@@ -258,8 +240,9 @@ vmlogrdr_get_recording_class_AB(void) {
258} 240}
259 241
260 242
261static int 243static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
262vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, int action, int purge) { 244 int action, int purge)
245{
263 246
264 char cp_command[80]; 247 char cp_command[80];
265 char cp_response[160]; 248 char cp_response[160];
@@ -317,8 +300,7 @@ vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, int action, int purge) {
317} 300}
318 301
319 302
320static int 303static int vmlogrdr_open (struct inode *inode, struct file *filp)
321vmlogrdr_open (struct inode *inode, struct file *filp)
322{ 304{
323 int dev_num = 0; 305 int dev_num = 0;
324 struct vmlogrdr_priv_t * logptr = NULL; 306 struct vmlogrdr_priv_t * logptr = NULL;
@@ -328,10 +310,7 @@ vmlogrdr_open (struct inode *inode, struct file *filp)
328 dev_num = iminor(inode); 310 dev_num = iminor(inode);
329 if (dev_num > MAXMINOR) 311 if (dev_num > MAXMINOR)
330 return -ENODEV; 312 return -ENODEV;
331
332 logptr = &sys_ser[dev_num]; 313 logptr = &sys_ser[dev_num];
333 if (logptr == NULL)
334 return -ENODEV;
335 314
336 /* 315 /*
337 * only allow for blocking reads to be open 316 * only allow for blocking reads to be open
@@ -344,52 +323,38 @@ vmlogrdr_open (struct inode *inode, struct file *filp)
344 if (logptr->dev_in_use) { 323 if (logptr->dev_in_use) {
345 spin_unlock_bh(&logptr->priv_lock); 324 spin_unlock_bh(&logptr->priv_lock);
346 return -EBUSY; 325 return -EBUSY;
347 } else {
348 logptr->dev_in_use = 1;
349 spin_unlock_bh(&logptr->priv_lock);
350 } 326 }
351 327 logptr->dev_in_use = 1;
328 logptr->connection_established = 0;
329 logptr->iucv_path_severed = 0;
352 atomic_set(&logptr->receive_ready, 0); 330 atomic_set(&logptr->receive_ready, 0);
353 logptr->buffer_free = 1; 331 logptr->buffer_free = 1;
332 spin_unlock_bh(&logptr->priv_lock);
354 333
355 /* set the file options */ 334 /* set the file options */
356 filp->private_data = logptr; 335 filp->private_data = logptr;
357 filp->f_op = &vmlogrdr_fops; 336 filp->f_op = &vmlogrdr_fops;
358 337
359 /* start recording for this service*/ 338 /* start recording for this service*/
360 ret=0; 339 if (logptr->autorecording) {
361 if (logptr->autorecording)
362 ret = vmlogrdr_recording(logptr,1,logptr->autopurge); 340 ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
363 if (ret) 341 if (ret)
364 printk (KERN_WARNING "vmlogrdr: failed to start " 342 printk (KERN_WARNING "vmlogrdr: failed to start "
365 "recording automatically\n"); 343 "recording automatically\n");
366
367 /* Register with iucv driver */
368 logptr->iucv_handle = iucv_register_program(iucvMagic,
369 logptr->system_service, mask, &vmlogrdr_iucvops,
370 logptr);
371
372 if (logptr->iucv_handle == NULL) {
373 printk (KERN_ERR "vmlogrdr: failed to register with"
374 "iucv driver\n");
375 goto not_registered;
376 } 344 }
377 345
378 /* create connection to the system service */ 346 /* create connection to the system service */
379 spin_lock_bh(&logptr->priv_lock); 347 logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
380 logptr->connection_established = 0; 348 if (!logptr->path)
381 logptr->iucv_path_severed = 0; 349 goto out_dev;
382 spin_unlock_bh(&logptr->priv_lock); 350 connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
383 351 logptr->system_service, NULL, NULL,
384 connect_rc = iucv_connect (&(logptr->pathid), 10, iucvMagic, 352 logptr);
385 logptr->system_service, iucv_host, 0,
386 NULL, NULL,
387 logptr->iucv_handle, NULL);
388 if (connect_rc) { 353 if (connect_rc) {
389 printk (KERN_ERR "vmlogrdr: iucv connection to %s " 354 printk (KERN_ERR "vmlogrdr: iucv connection to %s "
390 "failed with rc %i \n", logptr->system_service, 355 "failed with rc %i \n", logptr->system_service,
391 connect_rc); 356 connect_rc);
392 goto not_connected; 357 goto out_path;
393 } 358 }
394 359
395 /* We've issued the connect and now we must wait for a 360 /* We've issued the connect and now we must wait for a
@@ -398,35 +363,28 @@ vmlogrdr_open (struct inode *inode, struct file *filp)
398 */ 363 */
399 wait_event(conn_wait_queue, (logptr->connection_established) 364 wait_event(conn_wait_queue, (logptr->connection_established)
400 || (logptr->iucv_path_severed)); 365 || (logptr->iucv_path_severed));
401 if (logptr->iucv_path_severed) { 366 if (logptr->iucv_path_severed)
402 goto not_connected; 367 goto out_record;
403 }
404
405 return nonseekable_open(inode, filp); 368 return nonseekable_open(inode, filp);
406 369
407not_connected: 370out_record:
408 iucv_unregister_program(logptr->iucv_handle);
409 logptr->iucv_handle = NULL;
410not_registered:
411 if (logptr->autorecording) 371 if (logptr->autorecording)
412 vmlogrdr_recording(logptr,0,logptr->autopurge); 372 vmlogrdr_recording(logptr,0,logptr->autopurge);
373out_path:
374 kfree(logptr->path); /* kfree(NULL) is ok. */
375 logptr->path = NULL;
376out_dev:
413 logptr->dev_in_use = 0; 377 logptr->dev_in_use = 0;
414 return -EIO; 378 return -EIO;
415
416
417} 379}
418 380
419 381
420static int 382static int vmlogrdr_release (struct inode *inode, struct file *filp)
421vmlogrdr_release (struct inode *inode, struct file *filp)
422{ 383{
423 int ret; 384 int ret;
424 385
425 struct vmlogrdr_priv_t * logptr = filp->private_data; 386 struct vmlogrdr_priv_t * logptr = filp->private_data;
426 387
427 iucv_unregister_program(logptr->iucv_handle);
428 logptr->iucv_handle = NULL;
429
430 if (logptr->autorecording) { 388 if (logptr->autorecording) {
431 ret = vmlogrdr_recording(logptr,0,logptr->autopurge); 389 ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
432 if (ret) 390 if (ret)
@@ -439,8 +397,8 @@ vmlogrdr_release (struct inode *inode, struct file *filp)
439} 397}
440 398
441 399
442static int 400static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
443vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) { 401{
444 int rc, *temp; 402 int rc, *temp;
445 /* we need to keep track of two data sizes here: 403 /* we need to keep track of two data sizes here:
446 * The number of bytes we need to receive from iucv and 404 * The number of bytes we need to receive from iucv and
@@ -461,8 +419,7 @@ vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) {
461 * We need to return the total length of the record 419 * We need to return the total length of the record
462 * + size of FENCE in the first 4 bytes of the buffer. 420 * + size of FENCE in the first 4 bytes of the buffer.
463 */ 421 */
464 iucv_data_count = 422 iucv_data_count = priv->local_interrupt_buffer.length;
465 priv->local_interrupt_buffer.ln1msg2.ipbfln1f;
466 user_data_count = sizeof(int); 423 user_data_count = sizeof(int);
467 temp = (int*)priv->buffer; 424 temp = (int*)priv->buffer;
468 *temp= iucv_data_count + sizeof(FENCE); 425 *temp= iucv_data_count + sizeof(FENCE);
@@ -474,14 +431,10 @@ vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) {
474 */ 431 */
475 if (iucv_data_count > NET_BUFFER_SIZE) 432 if (iucv_data_count > NET_BUFFER_SIZE)
476 iucv_data_count = NET_BUFFER_SIZE; 433 iucv_data_count = NET_BUFFER_SIZE;
477 rc = iucv_receive(priv->pathid, 434 rc = iucv_message_receive(priv->path,
478 priv->local_interrupt_buffer.ipmsgid, 435 &priv->local_interrupt_buffer,
479 priv->local_interrupt_buffer.iptrgcls, 436 0, buffer, iucv_data_count,
480 buffer, 437 &priv->residual_length);
481 iucv_data_count,
482 NULL,
483 NULL,
484 &priv->residual_length);
485 spin_unlock_bh(&priv->priv_lock); 438 spin_unlock_bh(&priv->priv_lock);
486 /* An rc of 5 indicates that the record was bigger than 439
487 * the buffer, which is OK for us. A 9 indicates that the 440 * the buffer, which is OK for us. A 9 indicates that the
@@ -513,8 +466,8 @@ vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) {
513} 466}
514 467
515 468
516static ssize_t 469static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
517vmlogrdr_read(struct file *filp, char __user *data, size_t count, loff_t * ppos) 470 size_t count, loff_t * ppos)
518{ 471{
519 int rc; 472 int rc;
520 struct vmlogrdr_priv_t * priv = filp->private_data; 473 struct vmlogrdr_priv_t * priv = filp->private_data;
@@ -546,8 +499,10 @@ vmlogrdr_read(struct file *filp, char __user *data, size_t count, loff_t * ppos)
546 return count; 499 return count;
547} 500}
548 501
549static ssize_t 502static ssize_t vmlogrdr_autopurge_store(struct device * dev,
550vmlogrdr_autopurge_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) { 503 struct device_attribute *attr,
504 const char * buf, size_t count)
505{
551 struct vmlogrdr_priv_t *priv = dev->driver_data; 506 struct vmlogrdr_priv_t *priv = dev->driver_data;
552 ssize_t ret = count; 507 ssize_t ret = count;
553 508
@@ -565,8 +520,10 @@ vmlogrdr_autopurge_store(struct device * dev, struct device_attribute *attr, con
565} 520}
566 521
567 522
568static ssize_t 523static ssize_t vmlogrdr_autopurge_show(struct device *dev,
569vmlogrdr_autopurge_show(struct device *dev, struct device_attribute *attr, char *buf) { 524 struct device_attribute *attr,
525 char *buf)
526{
570 struct vmlogrdr_priv_t *priv = dev->driver_data; 527 struct vmlogrdr_priv_t *priv = dev->driver_data;
571 return sprintf(buf, "%u\n", priv->autopurge); 528 return sprintf(buf, "%u\n", priv->autopurge);
572} 529}
@@ -576,8 +533,10 @@ static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
576 vmlogrdr_autopurge_store); 533 vmlogrdr_autopurge_store);
577 534
578 535
579static ssize_t 536static ssize_t vmlogrdr_purge_store(struct device * dev,
580vmlogrdr_purge_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) { 537 struct device_attribute *attr,
538 const char * buf, size_t count)
539{
581 540
582 char cp_command[80]; 541 char cp_command[80];
583 char cp_response[80]; 542 char cp_response[80];
@@ -617,9 +576,10 @@ vmlogrdr_purge_store(struct device * dev, struct device_attribute *attr, const c
617static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store); 576static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
618 577
619 578
620static ssize_t 579static ssize_t vmlogrdr_autorecording_store(struct device *dev,
621vmlogrdr_autorecording_store(struct device *dev, struct device_attribute *attr, const char *buf, 580 struct device_attribute *attr,
622 size_t count) { 581 const char *buf, size_t count)
582{
623 struct vmlogrdr_priv_t *priv = dev->driver_data; 583 struct vmlogrdr_priv_t *priv = dev->driver_data;
624 ssize_t ret = count; 584 ssize_t ret = count;
625 585
@@ -637,8 +597,10 @@ vmlogrdr_autorecording_store(struct device *dev, struct device_attribute *attr,
637} 597}
638 598
639 599
640static ssize_t 600static ssize_t vmlogrdr_autorecording_show(struct device *dev,
641vmlogrdr_autorecording_show(struct device *dev, struct device_attribute *attr, char *buf) { 601 struct device_attribute *attr,
602 char *buf)
603{
642 struct vmlogrdr_priv_t *priv = dev->driver_data; 604 struct vmlogrdr_priv_t *priv = dev->driver_data;
643 return sprintf(buf, "%u\n", priv->autorecording); 605 return sprintf(buf, "%u\n", priv->autorecording);
644} 606}
@@ -648,9 +610,10 @@ static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
648 vmlogrdr_autorecording_store); 610 vmlogrdr_autorecording_store);
649 611
650 612
651static ssize_t 613static ssize_t vmlogrdr_recording_store(struct device * dev,
652vmlogrdr_recording_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) { 614 struct device_attribute *attr,
653 615 const char * buf, size_t count)
616{
654 struct vmlogrdr_priv_t *priv = dev->driver_data; 617 struct vmlogrdr_priv_t *priv = dev->driver_data;
655 ssize_t ret; 618 ssize_t ret;
656 619
@@ -675,8 +638,9 @@ vmlogrdr_recording_store(struct device * dev, struct device_attribute *attr, con
675static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store); 638static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
676 639
677 640
678static ssize_t 641static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
679vmlogrdr_recording_status_show(struct device_driver *driver, char *buf) { 642 char *buf)
643{
680 644
681 char cp_command[] = "QUERY RECORDING "; 645 char cp_command[] = "QUERY RECORDING ";
682 int len; 646 int len;
@@ -709,52 +673,63 @@ static struct device_driver vmlogrdr_driver = {
709}; 673};
710 674
711 675
712static int 676static int vmlogrdr_register_driver(void)
713vmlogrdr_register_driver(void) { 677{
714 int ret; 678 int ret;
715 679
680 /* Register with iucv driver */
681 ret = iucv_register(&vmlogrdr_iucv_handler, 1);
682 if (ret) {
 683	printk(KERN_ERR "vmlogrdr: failed to register with "
 684	       "iucv driver\n");
685 goto out;
686 }
687
716 ret = driver_register(&vmlogrdr_driver); 688 ret = driver_register(&vmlogrdr_driver);
717 if (ret) { 689 if (ret) {
718 printk(KERN_ERR "vmlogrdr: failed to register driver.\n"); 690 printk(KERN_ERR "vmlogrdr: failed to register driver.\n");
719 return ret; 691 goto out_iucv;
720 } 692 }
721 693
722 ret = driver_create_file(&vmlogrdr_driver, 694 ret = driver_create_file(&vmlogrdr_driver,
723 &driver_attr_recording_status); 695 &driver_attr_recording_status);
724 if (ret) { 696 if (ret) {
725 printk(KERN_ERR "vmlogrdr: failed to add driver attribute.\n"); 697 printk(KERN_ERR "vmlogrdr: failed to add driver attribute.\n");
726 goto unregdriver; 698 goto out_driver;
727 } 699 }
728 700
729 vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr"); 701 vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
730 if (IS_ERR(vmlogrdr_class)) { 702 if (IS_ERR(vmlogrdr_class)) {
731 printk(KERN_ERR "vmlogrdr: failed to create class.\n"); 703 printk(KERN_ERR "vmlogrdr: failed to create class.\n");
732 ret=PTR_ERR(vmlogrdr_class); 704 ret = PTR_ERR(vmlogrdr_class);
733 vmlogrdr_class=NULL; 705 vmlogrdr_class = NULL;
734 goto unregattr; 706 goto out_attr;
735 } 707 }
736 return 0; 708 return 0;
737 709
738unregattr: 710out_attr:
739 driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status); 711 driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
740unregdriver: 712out_driver:
741 driver_unregister(&vmlogrdr_driver); 713 driver_unregister(&vmlogrdr_driver);
714out_iucv:
715 iucv_unregister(&vmlogrdr_iucv_handler, 1);
716out:
742 return ret; 717 return ret;
743} 718}
744 719
745 720
746static void 721static void vmlogrdr_unregister_driver(void)
747vmlogrdr_unregister_driver(void) { 722{
748 class_destroy(vmlogrdr_class); 723 class_destroy(vmlogrdr_class);
749 vmlogrdr_class = NULL; 724 vmlogrdr_class = NULL;
750 driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status); 725 driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
751 driver_unregister(&vmlogrdr_driver); 726 driver_unregister(&vmlogrdr_driver);
752 return; 727 iucv_unregister(&vmlogrdr_iucv_handler, 1);
753} 728}
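The reworked registration path now hooks into the rewritten IUCV base code first and unwinds in strict reverse order through the out_* labels, with vmlogrdr_unregister_driver() tearing things down in the mirror-image order. A minimal, generic sketch of that unwind idiom (step_a/step_b/undo_a are purely illustrative names, not part of this driver):

    /* Generic goto-unwind sketch; step_a/step_b/undo_a are hypothetical. */
    static int step_a(void);
    static int step_b(void);
    static void undo_a(void);

    static int register_everything(void)
    {
            int ret;

            ret = step_a();                 /* e.g. iucv_register() */
            if (ret)
                    goto out;
            ret = step_b();                 /* e.g. driver_register() */
            if (ret)
                    goto out_a;
            return 0;

    out_a:
            undo_a();                       /* undo step_a, reverse order */
    out:
            return ret;
    }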
754 729
755 730
756static int 731static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
757vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) { 732{
758 struct device *dev; 733 struct device *dev;
759 int ret; 734 int ret;
760 735
@@ -803,9 +778,10 @@ vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) {
803} 778}
804 779
805 780
806static int 781static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
807vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv ) { 782{
808 class_device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num)); 783 class_device_destroy(vmlogrdr_class,
784 MKDEV(vmlogrdr_major, priv->minor_num));
809 if (priv->device != NULL) { 785 if (priv->device != NULL) {
810 sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group); 786 sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group);
811 device_unregister(priv->device); 787 device_unregister(priv->device);
@@ -815,8 +791,8 @@ vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv ) {
815} 791}
816 792
817 793
818static int 794static int vmlogrdr_register_cdev(dev_t dev)
819vmlogrdr_register_cdev(dev_t dev) { 795{
820 int rc = 0; 796 int rc = 0;
821 vmlogrdr_cdev = cdev_alloc(); 797 vmlogrdr_cdev = cdev_alloc();
822 if (!vmlogrdr_cdev) { 798 if (!vmlogrdr_cdev) {
@@ -836,9 +812,10 @@ vmlogrdr_register_cdev(dev_t dev) {
836} 812}
837 813
838 814
839static void 815static void vmlogrdr_cleanup(void)
840vmlogrdr_cleanup(void) { 816{
841 int i; 817 int i;
818
842 if (vmlogrdr_cdev) { 819 if (vmlogrdr_cdev) {
843 cdev_del(vmlogrdr_cdev); 820 cdev_del(vmlogrdr_cdev);
844 vmlogrdr_cdev=NULL; 821 vmlogrdr_cdev=NULL;
@@ -855,8 +832,7 @@ vmlogrdr_cleanup(void) {
855} 832}
856 833
857 834
858static int 835static int vmlogrdr_init(void)
859vmlogrdr_init(void)
860{ 836{
861 int rc; 837 int rc;
862 int i; 838 int i;
@@ -906,8 +882,7 @@ cleanup:
906} 882}
907 883
908 884
909static void 885static void vmlogrdr_exit(void)
910vmlogrdr_exit(void)
911{ 886{
912 vmlogrdr_cleanup(); 887 vmlogrdr_cleanup();
913 printk (KERN_INFO "vmlogrdr: driver unloaded\n"); 888 printk (KERN_INFO "vmlogrdr: driver unloaded\n");
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 52625153a4f0..f98fa465df0a 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -22,13 +22,6 @@ config CTC
22 available. This option is also available as a module which will be 22 available. This option is also available as a module which will be
23 called ctc.ko. If you do not know what it is, it's safe to say "Y". 23 called ctc.ko. If you do not know what it is, it's safe to say "Y".
24 24
25config IUCV
26 tristate "IUCV support (VM only)"
27 help
28 Select this option if you want to use inter-user communication
29 under VM or VIF. If unsure, say "Y" to enable a fast communication
30 link between VM guests.
31
32config NETIUCV 25config NETIUCV
33 tristate "IUCV network device support (VM only)" 26 tristate "IUCV network device support (VM only)"
34 depends on IUCV && NETDEVICES 27 depends on IUCV && NETDEVICES
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index 4777e36a922f..bbe3ab2e93d9 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -4,7 +4,6 @@
4 4
5ctc-objs := ctcmain.o ctcdbug.o 5ctc-objs := ctcmain.o ctcdbug.o
6 6
7obj-$(CONFIG_IUCV) += iucv.o
8obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o 7obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
9obj-$(CONFIG_SMSGIUCV) += smsgiucv.o 8obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
10obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o 9obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c
deleted file mode 100644
index 229aeb5fc399..000000000000
--- a/drivers/s390/net/iucv.c
+++ /dev/null
@@ -1,2540 +0,0 @@
1/*
2 * IUCV network driver
3 *
4 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s):
6 * Original source:
7 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
8 * Xenia Tkatschow (xenia@us.ibm.com)
9 * 2Gb awareness and general cleanup:
10 * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
11 *
12 * Documentation used:
13 * The original source
14 * CP Programming Service, IBM document # SC24-5760
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2, or (at your option)
19 * any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
29 *
30 */
31
32/* #define DEBUG */
33
34#include <linux/module.h>
35#include <linux/moduleparam.h>
36
37#include <linux/spinlock.h>
38#include <linux/kernel.h>
39#include <linux/slab.h>
40#include <linux/init.h>
41#include <linux/interrupt.h>
42#include <linux/list.h>
43#include <linux/errno.h>
44#include <linux/err.h>
45#include <linux/device.h>
46#include <asm/atomic.h>
47#include "iucv.h"
48#include <asm/io.h>
49#include <asm/s390_ext.h>
50#include <asm/ebcdic.h>
51#include <asm/smp.h>
52#include <asm/s390_rdev.h>
53
54/* FLAGS:
55 * All flags are defined in the field IPFLAGS1 of each function
56 * and can be found in CP Programming Services.
57 * IPSRCCLS - Indicates you have specified a source class
58 * IPFGMCL - Indicates you have specified a target class
59 * IPFGPID - Indicates you have specified a pathid
60 * IPFGMID - Indicates you have specified a message ID
61 * IPANSLST - Indicates that you are using an address list for
62 * reply data
63 * IPBUFLST - Indicates that you are using an address list for
64 * message data
65 */
66
67#define IPSRCCLS 0x01
68#define IPFGMCL 0x01
69#define IPFGPID 0x02
70#define IPFGMID 0x04
71#define IPANSLST 0x08
72#define IPBUFLST 0x40
73
74static int
75iucv_bus_match (struct device *dev, struct device_driver *drv)
76{
77 return 0;
78}
79
80struct bus_type iucv_bus = {
81 .name = "iucv",
82 .match = iucv_bus_match,
83};
84
85struct device *iucv_root;
86
87/* General IUCV interrupt structure */
88typedef struct {
89 __u16 ippathid;
90 __u8 res1;
91 __u8 iptype;
92 __u32 res2;
93 __u8 ipvmid[8];
94 __u8 res3[24];
95} iucv_GeneralInterrupt;
96
97static iucv_GeneralInterrupt *iucv_external_int_buffer = NULL;
98
99/* Spin Lock declaration */
100
101static DEFINE_SPINLOCK(iucv_lock);
102
103static int messagesDisabled = 0;
104
105/***************INTERRUPT HANDLING ***************/
106
107typedef struct {
108 struct list_head queue;
109 iucv_GeneralInterrupt data;
110} iucv_irqdata;
111
112static struct list_head iucv_irq_queue;
113static DEFINE_SPINLOCK(iucv_irq_queue_lock);
114
115/*
 116 * Internal function prototypes
117 */
118static void iucv_tasklet_handler(unsigned long);
119static void iucv_irq_handler(__u16);
120
121static DECLARE_TASKLET(iucv_tasklet,iucv_tasklet_handler,0);
122
123/************ FUNCTION ID'S ****************************/
124
125#define ACCEPT 10
126#define CONNECT 11
127#define DECLARE_BUFFER 12
128#define PURGE 9
129#define QUERY 0
130#define QUIESCE 13
131#define RECEIVE 5
132#define REJECT 8
133#define REPLY 6
134#define RESUME 14
135#define RETRIEVE_BUFFER 2
136#define SEND 4
137#define SETMASK 16
138#define SEVER 15
139
140/**
141 * Structure: handler
142 * members: list - list management.
143 * structure: id
144 * userid - 8 char array of machine identification
145 * user_data - 16 char array for user identification
 146 * mask - 24 char array used to compare the two previous fields
147 * interrupt_table - vector of interrupt functions.
148 * pgm_data - ulong, application data that is passed
149 * to the interrupt handlers
150*/
151typedef struct handler_t {
152 struct list_head list;
153 struct {
154 __u8 userid[8];
155 __u8 user_data[16];
156 __u8 mask[24];
157 } id;
158 iucv_interrupt_ops_t *interrupt_table;
159 void *pgm_data;
160} handler;
161
162/**
163 * iucv_handler_table: List of registered handlers.
164 */
165static struct list_head iucv_handler_table;
166
167/**
168 * iucv_pathid_table: an array of *handler pointing into
169 * iucv_handler_table for fast indexing by pathid;
170 */
171static handler **iucv_pathid_table;
172
173static unsigned long max_connections;
174
175/**
176 * iucv_cpuid: contains the logical cpu number of the cpu which
177 * has declared the iucv buffer by issuing DECLARE_BUFFER.
178 * If no cpu has done the initialization iucv_cpuid contains -1.
179 */
180static int iucv_cpuid = -1;
181/**
182 * register_flag: is 0 when external interrupt has not been registered
183 */
184static int register_flag;
185
186/****************FIVE 40-BYTE PARAMETER STRUCTURES******************/
187/* Data struct 1: iparml_control
188 * Used for iucv_accept
189 * iucv_connect
190 * iucv_quiesce
191 * iucv_resume
192 * iucv_sever
193 * iucv_retrieve_buffer
194 * Data struct 2: iparml_dpl (data in parameter list)
195 * Used for iucv_send_prmmsg
196 * iucv_send2way_prmmsg
197 * iucv_send2way_prmmsg_array
198 * iucv_reply_prmmsg
199 * Data struct 3: iparml_db (data in a buffer)
200 * Used for iucv_receive
201 * iucv_receive_array
202 * iucv_reject
203 * iucv_reply
204 * iucv_reply_array
205 * iucv_send
206 * iucv_send_array
207 * iucv_send2way
208 * iucv_send2way_array
209 * iucv_declare_buffer
210 * Data struct 4: iparml_purge
211 * Used for iucv_purge
212 * iucv_query
213 * Data struct 5: iparml_set_mask
214 * Used for iucv_set_mask
215 */
216
217typedef struct {
218 __u16 ippathid;
219 __u8 ipflags1;
220 __u8 iprcode;
221 __u16 ipmsglim;
222 __u16 res1;
223 __u8 ipvmid[8];
224 __u8 ipuser[16];
225 __u8 iptarget[8];
226} iparml_control;
227
228typedef struct {
229 __u16 ippathid;
230 __u8 ipflags1;
231 __u8 iprcode;
232 __u32 ipmsgid;
233 __u32 iptrgcls;
234 __u8 iprmmsg[8];
235 __u32 ipsrccls;
236 __u32 ipmsgtag;
237 __u32 ipbfadr2;
238 __u32 ipbfln2f;
239 __u32 res;
240} iparml_dpl;
241
242typedef struct {
243 __u16 ippathid;
244 __u8 ipflags1;
245 __u8 iprcode;
246 __u32 ipmsgid;
247 __u32 iptrgcls;
248 __u32 ipbfadr1;
249 __u32 ipbfln1f;
250 __u32 ipsrccls;
251 __u32 ipmsgtag;
252 __u32 ipbfadr2;
253 __u32 ipbfln2f;
254 __u32 res;
255} iparml_db;
256
257typedef struct {
258 __u16 ippathid;
259 __u8 ipflags1;
260 __u8 iprcode;
261 __u32 ipmsgid;
262 __u8 ipaudit[3];
263 __u8 res1[5];
264 __u32 res2;
265 __u32 ipsrccls;
266 __u32 ipmsgtag;
267 __u32 res3[3];
268} iparml_purge;
269
270typedef struct {
271 __u8 ipmask;
272 __u8 res1[2];
273 __u8 iprcode;
274 __u32 res2[9];
275} iparml_set_mask;
276
277typedef struct {
278 union {
279 iparml_control p_ctrl;
280 iparml_dpl p_dpl;
281 iparml_db p_db;
282 iparml_purge p_purge;
283 iparml_set_mask p_set_mask;
284 } param;
285 atomic_t in_use;
286 __u32 res;
287} __attribute__ ((aligned(8))) iucv_param;
288#define PARAM_POOL_SIZE (PAGE_SIZE / sizeof(iucv_param))
289
290static iucv_param * iucv_param_pool;
291
292MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
293MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
294MODULE_LICENSE("GPL");
295
296/*
297 * Debugging stuff
298 *******************************************************************************/
299
300
301#ifdef DEBUG
302static int debuglevel = 0;
303
304module_param(debuglevel, int, 0);
305MODULE_PARM_DESC(debuglevel,
306 "Specifies the debug level (0=off ... 3=all)");
307
308static void
309iucv_dumpit(char *title, void *buf, int len)
310{
311 int i;
312 __u8 *p = (__u8 *)buf;
313
314 if (debuglevel < 3)
315 return;
316
317 printk(KERN_DEBUG "%s\n", title);
318 printk(" ");
319 for (i = 0; i < len; i++) {
320 if (!(i % 16) && i != 0)
321 printk ("\n ");
322 else if (!(i % 4) && i != 0)
323 printk(" ");
324 printk("%02X", *p++);
325 }
326 if (len % 16)
327 printk ("\n");
328 return;
329}
330#define iucv_debug(lvl, fmt, args...) \
331do { \
332 if (debuglevel >= lvl) \
333 printk(KERN_DEBUG "%s: " fmt "\n", __FUNCTION__ , ## args); \
334} while (0)
335
336#else
337
338#define iucv_debug(lvl, fmt, args...) do { } while (0)
339#define iucv_dumpit(title, buf, len) do { } while (0)
340
341#endif
342
343/*
344 * Internal functions
345 *******************************************************************************/
346
347/**
348 * print start banner
349 */
350static void
351iucv_banner(void)
352{
353 printk(KERN_INFO "IUCV lowlevel driver initialized\n");
354}
355
356/**
357 * iucv_init - Initialization
358 *
359 * Allocates and initializes various data structures.
360 */
361static int
362iucv_init(void)
363{
364 int ret;
365
366 if (iucv_external_int_buffer)
367 return 0;
368
369 if (!MACHINE_IS_VM) {
370 printk(KERN_ERR "IUCV: IUCV connection needs VM as base\n");
371 return -EPROTONOSUPPORT;
372 }
373
374 ret = bus_register(&iucv_bus);
375 if (ret) {
376 printk(KERN_ERR "IUCV: failed to register bus.\n");
377 return ret;
378 }
379
380 iucv_root = s390_root_dev_register("iucv");
381 if (IS_ERR(iucv_root)) {
382 printk(KERN_ERR "IUCV: failed to register iucv root.\n");
383 bus_unregister(&iucv_bus);
384 return PTR_ERR(iucv_root);
385 }
386
 387	/* Note: GFP_DMA is used to get memory below 2G */
388 iucv_external_int_buffer = kzalloc(sizeof(iucv_GeneralInterrupt),
389 GFP_KERNEL|GFP_DMA);
390 if (!iucv_external_int_buffer) {
391 printk(KERN_WARNING
392 "%s: Could not allocate external interrupt buffer\n",
393 __FUNCTION__);
394 s390_root_dev_unregister(iucv_root);
395 bus_unregister(&iucv_bus);
396 return -ENOMEM;
397 }
398
399 /* Initialize parameter pool */
400 iucv_param_pool = kzalloc(sizeof(iucv_param) * PARAM_POOL_SIZE,
401 GFP_KERNEL|GFP_DMA);
402 if (!iucv_param_pool) {
403 printk(KERN_WARNING "%s: Could not allocate param pool\n",
404 __FUNCTION__);
405 kfree(iucv_external_int_buffer);
406 iucv_external_int_buffer = NULL;
407 s390_root_dev_unregister(iucv_root);
408 bus_unregister(&iucv_bus);
409 return -ENOMEM;
410 }
411
412 /* Initialize irq queue */
413 INIT_LIST_HEAD(&iucv_irq_queue);
414
415 /* Initialize handler table */
416 INIT_LIST_HEAD(&iucv_handler_table);
417
418 iucv_banner();
419 return 0;
420}
421
422/**
423 * iucv_exit - De-Initialization
424 *
425 * Frees everything allocated from iucv_init.
426 */
427static int iucv_retrieve_buffer (void);
428
429static void
430iucv_exit(void)
431{
432 iucv_retrieve_buffer();
433 kfree(iucv_external_int_buffer);
434 iucv_external_int_buffer = NULL;
435 kfree(iucv_param_pool);
436 iucv_param_pool = NULL;
437 s390_root_dev_unregister(iucv_root);
438 bus_unregister(&iucv_bus);
439 printk(KERN_INFO "IUCV lowlevel driver unloaded\n");
440}
441
442/**
443 * grab_param: - Get a parameter buffer from the pre-allocated pool.
444 *
445 * This function searches for an unused element in the pre-allocated pool
446 * of parameter buffers. If one is found, it marks it "in use" and returns
447 * a pointer to it. The calling function is responsible for releasing it
448 * when it has finished its usage.
449 *
450 * Returns: A pointer to iucv_param.
451 */
452static __inline__ iucv_param *
453grab_param(void)
454{
455 iucv_param *ptr;
456 static int hint = 0;
457
458 ptr = iucv_param_pool + hint;
459 do {
460 ptr++;
461 if (ptr >= iucv_param_pool + PARAM_POOL_SIZE)
462 ptr = iucv_param_pool;
463 } while (atomic_cmpxchg(&ptr->in_use, 0, 1) != 0);
464 hint = ptr - iucv_param_pool;
465
466 memset(&ptr->param, 0, sizeof(ptr->param));
467 return ptr;
468}
469
470/**
471 * release_param - Release a parameter buffer.
472 * @p: A pointer to a struct iucv_param, previously obtained by calling
473 * grab_param().
474 *
475 * This function marks the specified parameter buffer "unused".
476 */
477static __inline__ void
478release_param(void *p)
479{
480 atomic_set(&((iucv_param *)p)->in_use, 0);
481}
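The pool discipline described above is always the same three steps: grab a slot, fill the 40-byte parameter area, issue the call, then release the slot. A minimal sketch, using the QUIESCE call defined in this file (the pathid and user data come from the caller):

    /* Sketch of the grab/fill/call/release discipline used throughout this file. */
    static ulong issue_quiesce(__u16 pathid, __u8 user_data[16])
    {
            iparml_control *parm;
            ulong rc;

            parm = (iparml_control *) grab_param();  /* zeroed, marked in_use */
            parm->ippathid = pathid;
            memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
            rc = b2f0(QUIESCE, parm);                /* IUCV instruction to CP */
            release_param(parm);                     /* slot reusable again */
            return rc;
    }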
482
483/**
484 * iucv_add_handler: - Add a new handler
 485 * @new_handler: handler that is being entered into the chain.
 486 *
 487 * Places the new handler on iucv_handler_table if an identical handler is
 488 * not found.
489 *
490 * Returns: 0 on success, !0 on failure (handler already in chain).
491 */
492static int
493iucv_add_handler (handler *new)
494{
495 ulong flags;
496
497 iucv_debug(1, "entering");
498 iucv_dumpit("handler:", new, sizeof(handler));
499
500 spin_lock_irqsave (&iucv_lock, flags);
501 if (!list_empty(&iucv_handler_table)) {
502 struct list_head *lh;
503
504 /**
505 * Search list for handler with identical id. If one
506 * is found, the new handler is _not_ added.
507 */
508 list_for_each(lh, &iucv_handler_table) {
509 handler *h = list_entry(lh, handler, list);
510 if (!memcmp(&new->id, &h->id, sizeof(h->id))) {
511 iucv_debug(1, "ret 1");
512 spin_unlock_irqrestore (&iucv_lock, flags);
513 return 1;
514 }
515 }
516 }
517 /**
518 * If we get here, no handler was found.
519 */
520 INIT_LIST_HEAD(&new->list);
521 list_add(&new->list, &iucv_handler_table);
522 spin_unlock_irqrestore (&iucv_lock, flags);
523
524 iucv_debug(1, "exiting");
525 return 0;
526}
527
528/**
529 * b2f0:
530 * @code: identifier of IUCV call to CP.
531 * @parm: pointer to 40 byte iparml area passed to CP
532 *
533 * Calls CP to execute IUCV commands.
534 *
535 * Returns: return code from CP's IUCV call
536 */
537static inline ulong b2f0(__u32 code, void *parm)
538{
539 register unsigned long reg0 asm ("0");
540 register unsigned long reg1 asm ("1");
541 iucv_dumpit("iparml before b2f0 call:", parm, sizeof(iucv_param));
542
543 reg0 = code;
544 reg1 = virt_to_phys(parm);
545 asm volatile(".long 0xb2f01000" : : "d" (reg0), "a" (reg1));
546
547 iucv_dumpit("iparml after b2f0 call:", parm, sizeof(iucv_param));
548
549 return (unsigned long)*((__u8 *)(parm + 3));
550}
551
552/*
553 * Name: iucv_add_pathid
554 * Purpose: Adds a path id to the system.
555 * Input: pathid - pathid that is going to be entered into system
556 * handle - address of handler that the pathid will be associated
557 * with.
558 * pgm_data - token passed in by application.
559 * Output: 0: successful addition of pathid
560 * - EINVAL - pathid entry is being used by another application
561 * - ENOMEM - storage allocation for a new pathid table failed
562*/
563static int
564__iucv_add_pathid(__u16 pathid, handler *handler)
565{
566
567 iucv_debug(1, "entering");
568
569 iucv_debug(1, "handler is pointing to %p", handler);
570
571 if (pathid > (max_connections - 1))
572 return -EINVAL;
573
574 if (iucv_pathid_table[pathid]) {
575 iucv_debug(1, "pathid entry is %p", iucv_pathid_table[pathid]);
576 printk(KERN_WARNING
577 "%s: Pathid being used, error.\n", __FUNCTION__);
578 return -EINVAL;
579 }
580 iucv_pathid_table[pathid] = handler;
581
582 iucv_debug(1, "exiting");
583 return 0;
584} /* end of add_pathid function */
585
586static int
587iucv_add_pathid(__u16 pathid, handler *handler)
588{
589 ulong flags;
590 int rc;
591
592 spin_lock_irqsave (&iucv_lock, flags);
593 rc = __iucv_add_pathid(pathid, handler);
594 spin_unlock_irqrestore (&iucv_lock, flags);
595 return rc;
596}
597
598static void
599iucv_remove_pathid(__u16 pathid)
600{
601 ulong flags;
602
603 if (pathid > (max_connections - 1))
604 return;
605
606 spin_lock_irqsave (&iucv_lock, flags);
607 iucv_pathid_table[pathid] = NULL;
608 spin_unlock_irqrestore (&iucv_lock, flags);
609}
610
611/**
612 * iucv_declare_buffer_cpuid
613 * Register at VM for subsequent IUCV operations. This is executed
614 * on the reserved CPU iucv_cpuid. Called from iucv_declare_buffer().
615 */
616static void
617iucv_declare_buffer_cpuid (void *result)
618{
619 iparml_db *parm;
620
621 parm = (iparml_db *)grab_param();
622 parm->ipbfadr1 = virt_to_phys(iucv_external_int_buffer);
623 if ((*((ulong *)result) = b2f0(DECLARE_BUFFER, parm)) == 1)
624 *((ulong *)result) = parm->iprcode;
625 release_param(parm);
626}
627
628/**
629 * iucv_retrieve_buffer_cpuid:
630 * Unregister IUCV usage at VM. This is always executed on the same
631 * cpu that registered the buffer to VM.
632 * Called from iucv_retrieve_buffer().
633 */
634static void
635iucv_retrieve_buffer_cpuid (void *cpu)
636{
637 iparml_control *parm;
638
639 parm = (iparml_control *)grab_param();
640 b2f0(RETRIEVE_BUFFER, parm);
641 release_param(parm);
642}
643
644/**
645 * Name: iucv_declare_buffer
 646 * Purpose: Specifies the guest's real address of the external
 647 * interrupt buffer.
648 * Input: void
649 * Output: iprcode - return code from b2f0 call
650 */
651static int
652iucv_declare_buffer (void)
653{
654 unsigned long flags;
655 ulong b2f0_result;
656
657 iucv_debug(1, "entering");
658 b2f0_result = -ENODEV;
659 spin_lock_irqsave (&iucv_lock, flags);
660 if (iucv_cpuid == -1) {
661 /* Reserve any cpu for use by iucv. */
662 iucv_cpuid = smp_get_cpu(CPU_MASK_ALL);
663 spin_unlock_irqrestore (&iucv_lock, flags);
664 smp_call_function_on(iucv_declare_buffer_cpuid,
665 &b2f0_result, 0, 1, iucv_cpuid);
666 if (b2f0_result) {
667 smp_put_cpu(iucv_cpuid);
668 iucv_cpuid = -1;
669 }
670 iucv_debug(1, "Address of EIB = %p", iucv_external_int_buffer);
671 } else {
672 spin_unlock_irqrestore (&iucv_lock, flags);
673 b2f0_result = 0;
674 }
675 iucv_debug(1, "exiting");
676 return b2f0_result;
677}
678
679/**
680 * iucv_retrieve_buffer:
681 *
682 * Terminates all use of IUCV.
683 * Returns: return code from CP
684 */
685static int
686iucv_retrieve_buffer (void)
687{
688 iucv_debug(1, "entering");
689 if (iucv_cpuid != -1) {
690 smp_call_function_on(iucv_retrieve_buffer_cpuid,
691 NULL, 0, 1, iucv_cpuid);
692 /* Release the cpu reserved by iucv_declare_buffer. */
693 smp_put_cpu(iucv_cpuid);
694 iucv_cpuid = -1;
695 }
696 iucv_debug(1, "exiting");
697 return 0;
698}
699
700/**
701 * iucv_remove_handler:
702 * @users_handler: handler to be removed
703 *
704 * Remove handler when application unregisters.
705 */
706static void
707iucv_remove_handler(handler *handler)
708{
709 unsigned long flags;
710
711 if ((!iucv_pathid_table) || (!handler))
712 return;
713
714 iucv_debug(1, "entering");
715
716 spin_lock_irqsave (&iucv_lock, flags);
717 list_del(&handler->list);
718 if (list_empty(&iucv_handler_table)) {
719 if (register_flag) {
720 unregister_external_interrupt(0x4000, iucv_irq_handler);
721 register_flag = 0;
722 }
723 }
724 spin_unlock_irqrestore (&iucv_lock, flags);
725
726 iucv_debug(1, "exiting");
727 return;
728}
729
730/**
731 * iucv_register_program:
732 * @pgmname: user identification
733 * @userid: machine identification
734 * @pgmmask: Indicates which bits in the pgmname and userid combined will be
735 * used to determine who is given control.
736 * @ops: Address of interrupt handler table.
737 * @pgm_data: Application data to be passed to interrupt handlers.
738 *
739 * Registers an application with IUCV.
740 * Returns:
741 * The address of handler, or NULL on failure.
742 * NOTE on pgmmask:
743 * If pgmname, userid and pgmmask are provided, pgmmask is entered into the
744 * handler as is.
745 * If pgmmask is NULL, the internal mask is set to all 0xff's
746 * When userid is NULL, the first 8 bytes of the internal mask are forced
747 * to 0x00.
748 * If pgmmask and userid are NULL, the first 8 bytes of the internal mask
749 * are forced to 0x00 and the last 16 bytes to 0xff.
750 */
751
752iucv_handle_t
753iucv_register_program (__u8 pgmname[16],
754 __u8 userid[8],
755 __u8 pgmmask[24],
756 iucv_interrupt_ops_t * ops, void *pgm_data)
757{
758 ulong rc = 0; /* return code from function calls */
759 handler *new_handler;
760
761 iucv_debug(1, "entering");
762
763 if (ops == NULL) {
764 /* interrupt table is not defined */
765 printk(KERN_WARNING "%s: Interrupt table is not defined, "
766 "exiting\n", __FUNCTION__);
767 return NULL;
768 }
769 if (!pgmname) {
770 printk(KERN_WARNING "%s: pgmname not provided\n", __FUNCTION__);
771 return NULL;
772 }
773
774 /* Allocate handler entry */
775 new_handler = kmalloc(sizeof(handler), GFP_ATOMIC);
776 if (new_handler == NULL) {
777 printk(KERN_WARNING "%s: storage allocation for new handler "
778 "failed.\n", __FUNCTION__);
779 return NULL;
780 }
781
782 if (!iucv_pathid_table) {
783 if (iucv_init()) {
784 kfree(new_handler);
785 return NULL;
786 }
787
788 max_connections = iucv_query_maxconn();
789 iucv_pathid_table = kcalloc(max_connections, sizeof(handler *),
790 GFP_ATOMIC);
791 if (iucv_pathid_table == NULL) {
792 printk(KERN_WARNING "%s: iucv_pathid_table storage "
793 "allocation failed\n", __FUNCTION__);
794 kfree(new_handler);
795 return NULL;
796 }
797 }
798 memset(new_handler, 0, sizeof (handler));
799 memcpy(new_handler->id.user_data, pgmname,
800 sizeof (new_handler->id.user_data));
801 if (userid) {
802 memcpy (new_handler->id.userid, userid,
803 sizeof (new_handler->id.userid));
804 ASCEBC (new_handler->id.userid,
805 sizeof (new_handler->id.userid));
806 EBC_TOUPPER (new_handler->id.userid,
807 sizeof (new_handler->id.userid));
808
809 if (pgmmask) {
810 memcpy (new_handler->id.mask, pgmmask,
811 sizeof (new_handler->id.mask));
812 } else {
813 memset (new_handler->id.mask, 0xFF,
814 sizeof (new_handler->id.mask));
815 }
816 } else {
817 if (pgmmask) {
818 memcpy (new_handler->id.mask, pgmmask,
819 sizeof (new_handler->id.mask));
820 } else {
821 memset (new_handler->id.mask, 0xFF,
822 sizeof (new_handler->id.mask));
823 }
824 memset (new_handler->id.userid, 0x00,
825 sizeof (new_handler->id.userid));
826 }
827 /* fill in the rest of handler */
828 new_handler->pgm_data = pgm_data;
829 new_handler->interrupt_table = ops;
830
831 /*
832 * Check if someone else is registered with same pgmname, userid
833 * and mask. If someone is already registered with same pgmname,
834 * userid and mask, registration will fail and NULL will be returned
835 * to the application.
836 * If identical handler not found, then handler is added to list.
837 */
838 rc = iucv_add_handler(new_handler);
839 if (rc) {
840 printk(KERN_WARNING "%s: Someone already registered with same "
841 "pgmname, userid, pgmmask\n", __FUNCTION__);
842 kfree (new_handler);
843 return NULL;
844 }
845
846 rc = iucv_declare_buffer();
847 if (rc) {
848 char *err = "Unknown";
849 iucv_remove_handler(new_handler);
850 kfree(new_handler);
851 switch(rc) {
852 case 0x03:
853 err = "Directory error";
854 break;
855 case 0x0a:
856 err = "Invalid length";
857 break;
858 case 0x13:
859 err = "Buffer already exists";
860 break;
861 case 0x3e:
862 err = "Buffer overlap";
863 break;
864 case 0x5c:
865 err = "Paging or storage error";
866 break;
867 }
868 printk(KERN_WARNING "%s: iucv_declare_buffer "
869 "returned error 0x%02lx (%s)\n", __FUNCTION__, rc, err);
870 return NULL;
871 }
872 if (!register_flag) {
873 /* request the 0x4000 external interrupt */
874 rc = register_external_interrupt (0x4000, iucv_irq_handler);
875 if (rc) {
876 iucv_remove_handler(new_handler);
877 kfree (new_handler);
878 printk(KERN_WARNING "%s: "
879 "register_external_interrupt returned %ld\n",
880 __FUNCTION__, rc);
881 return NULL;
882
883 }
884 register_flag = 1;
885 }
886 iucv_debug(1, "exiting");
887 return new_handler;
888} /* end of register function */
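For context, a module using this (now removed) interface would register itself roughly as follows; the my_ops, my_handle and MYSERVICE names are hypothetical, and the callback members of iucv_interrupt_ops_t are left empty because they are declared in the old iucv.h, not shown here:

    /* Hypothetical caller of the old registration API. */
    static iucv_interrupt_ops_t my_ops = {
            /* connection pending/complete and message pending callbacks
             * would be filled in from the declarations in iucv.h */
    };
    static iucv_handle_t my_handle;

    static int my_register(void)
    {
            __u8 pgmname[16];

            memset(pgmname, ' ', sizeof(pgmname));
            memcpy(pgmname, "MYSERVICE", 9);
            /* NULL userid/pgmmask: accept connections from any user id */
            my_handle = iucv_register_program(pgmname, NULL, NULL, &my_ops, NULL);
            return my_handle ? 0 : -EIO;
    }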
889
890/**
891 * iucv_unregister_program:
892 * @handle: address of handler
893 *
894 * Unregister application with IUCV.
895 * Returns:
896 * 0 on success, -EINVAL, if specified handle is invalid.
897 */
898
899int
900iucv_unregister_program (iucv_handle_t handle)
901{
902 handler *h = NULL;
903 struct list_head *lh;
904 int i;
905 ulong flags;
906
907 iucv_debug(1, "entering");
908 iucv_debug(1, "address of handler is %p", h);
909
910 /* Checking if handle is valid */
911 spin_lock_irqsave (&iucv_lock, flags);
912 list_for_each(lh, &iucv_handler_table) {
913 if ((handler *)handle == list_entry(lh, handler, list)) {
914 h = (handler *)handle;
915 break;
916 }
917 }
918 if (!h) {
919 spin_unlock_irqrestore (&iucv_lock, flags);
920 if (handle)
921 printk(KERN_WARNING
922 "%s: Handler not found in iucv_handler_table.\n",
923 __FUNCTION__);
924 else
925 printk(KERN_WARNING
926 "%s: NULL handle passed by application.\n",
927 __FUNCTION__);
928 return -EINVAL;
929 }
930
931 /**
 932 * First, walk through iucv_pathid_table and sever any pathid which is
933 * still pointing to the handler to be removed.
934 */
935 for (i = 0; i < max_connections; i++)
936 if (iucv_pathid_table[i] == h) {
937 spin_unlock_irqrestore (&iucv_lock, flags);
938 iucv_sever(i, h->id.user_data);
939 spin_lock_irqsave(&iucv_lock, flags);
940 }
941 spin_unlock_irqrestore (&iucv_lock, flags);
942
943 iucv_remove_handler(h);
944 kfree(h);
945
946 iucv_debug(1, "exiting");
947 return 0;
948}
949
950/**
951 * iucv_accept:
952 * @pathid: Path identification number
953 * @msglim_reqstd: The number of outstanding messages requested.
954 * @user_data: Data specified by the iucv_connect function.
955 * @flags1: Contains options for this path.
956 * - IPPRTY (0x20) Specifies if you want to send priority message.
957 * - IPRMDATA (0x80) Specifies whether your program can handle a message
958 * in the parameter list.
959 * - IPQUSCE (0x40) Specifies whether you want to quiesce the path being
960 * established.
961 * @handle: Address of handler.
962 * @pgm_data: Application data passed to interrupt handlers.
963 * @flags1_out: Pointer to an int. If not NULL, on return the options for
964 * the path are stored at the given location:
965 * - IPPRTY (0x20) Indicates you may send a priority message.
966 * @msglim: Pointer to an __u16. If not NULL, on return the maximum
967 * number of outstanding messages is stored at the given
968 * location.
969 *
970 * This function is issued after the user receives a Connection Pending external
971 * interrupt and now wishes to complete the IUCV communication path.
972 * Returns:
973 * return code from CP
974 */
975int
976iucv_accept(__u16 pathid, __u16 msglim_reqstd,
977 __u8 user_data[16], int flags1,
978 iucv_handle_t handle, void *pgm_data,
979 int *flags1_out, __u16 * msglim)
980{
981 ulong b2f0_result = 0;
982 ulong flags;
983 struct list_head *lh;
984 handler *h = NULL;
985 iparml_control *parm;
986
987 iucv_debug(1, "entering");
988 iucv_debug(1, "pathid = %d", pathid);
989
990 /* Checking if handle is valid */
991 spin_lock_irqsave (&iucv_lock, flags);
992 list_for_each(lh, &iucv_handler_table) {
993 if ((handler *)handle == list_entry(lh, handler, list)) {
994 h = (handler *)handle;
995 break;
996 }
997 }
998 spin_unlock_irqrestore (&iucv_lock, flags);
999
1000 if (!h) {
1001 if (handle)
1002 printk(KERN_WARNING
1003 "%s: Handler not found in iucv_handler_table.\n",
1004 __FUNCTION__);
1005 else
1006 printk(KERN_WARNING
1007 "%s: NULL handle passed by application.\n",
1008 __FUNCTION__);
1009 return -EINVAL;
1010 }
1011
1012 parm = (iparml_control *)grab_param();
1013
1014 parm->ippathid = pathid;
1015 parm->ipmsglim = msglim_reqstd;
1016 if (user_data)
1017 memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
1018
1019 parm->ipflags1 = (__u8)flags1;
1020 b2f0_result = b2f0(ACCEPT, parm);
1021
1022 if (!b2f0_result) {
1023 if (msglim)
1024 *msglim = parm->ipmsglim;
1025 if (pgm_data)
1026 h->pgm_data = pgm_data;
1027 if (flags1_out)
1028 *flags1_out = (parm->ipflags1 & IPPRTY) ? IPPRTY : 0;
1029 }
1030 release_param(parm);
1031
1032 iucv_debug(1, "exiting");
1033 return b2f0_result;
1034}
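A path is typically accepted from inside the connection-pending callback registered through the ops table; a sketch (my_handle is the handle returned by iucv_register_program(), and the requested message limit of 10 is arbitrary):

    /* Sketch: completing a pending connection. */
    static void my_connection_pending(__u16 pathid)
    {
            __u16 msglim;
            int flags_out;

            if (iucv_accept(pathid, 10, NULL, 0, my_handle, NULL,
                            &flags_out, &msglim))
                    printk(KERN_WARNING "accept on pathid %d failed\n", pathid);
    }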
1035
1036/**
1037 * iucv_connect:
1038 * @pathid: Path identification number
1039 * @msglim_reqstd: Number of outstanding messages requested
1040 * @user_data: 16-byte user data
1041 * @userid: 8-byte of user identification
1042 * @system_name: 8-byte identifying the system name
1043 * @flags1: Specifies options for this path:
1044 * - IPPRTY (0x20) Specifies if you want to send priority message.
1045 * - IPRMDATA (0x80) Specifies whether your program can handle a message
1046 * in the parameter list.
1047 * - IPQUSCE (0x40) Specifies whether you want to quiesce the path being
1048 * established.
1049 * - IPLOCAL (0x01) Allows an application to force the partner to be on the
1050 * local system. If local is specified then target class
1051 * cannot be specified.
1052 * @flags1_out: Pointer to an int. If not NULL, on return the options for
1053 * the path are stored at the given location:
1054 * - IPPRTY (0x20) Indicates you may send a priority message.
1055 * @msglim: Pointer to an __u16. If not NULL, on return the maximum
1056 * number of outstanding messages is stored at the given
1057 * location.
1058 * @handle: Address of handler.
1059 * @pgm_data: Application data to be passed to interrupt handlers.
1060 *
1061 * This function establishes an IUCV path. Although the connect may complete
1062 * successfully, you are not able to use the path until you receive an IUCV
1063 * Connection Complete external interrupt.
1064 * Returns: return code from CP, or one of the following
1065 * - ENOMEM
1066 * - return code from iucv_declare_buffer
1067 * - EINVAL - invalid handle passed by application
1068 * - EINVAL - pathid address is NULL
1069 * - ENOMEM - pathid table storage allocation failed
1070 * - return code from internal function add_pathid
1071 */
1072int
1073iucv_connect (__u16 *pathid, __u16 msglim_reqstd,
1074 __u8 user_data[16], __u8 userid[8],
1075 __u8 system_name[8], int flags1,
1076 int *flags1_out, __u16 * msglim,
1077 iucv_handle_t handle, void *pgm_data)
1078{
1079 iparml_control *parm;
1080 iparml_control local_parm;
1081 struct list_head *lh;
1082 ulong b2f0_result = 0;
1083 ulong flags;
1084 int add_pathid_result = 0;
1085 handler *h = NULL;
1086 __u8 no_memory[16] = "NO MEMORY";
1087
1088 iucv_debug(1, "entering");
1089
1090 /* Checking if handle is valid */
1091 spin_lock_irqsave (&iucv_lock, flags);
1092 list_for_each(lh, &iucv_handler_table) {
1093 if ((handler *)handle == list_entry(lh, handler, list)) {
1094 h = (handler *)handle;
1095 break;
1096 }
1097 }
1098 spin_unlock_irqrestore (&iucv_lock, flags);
1099
1100 if (!h) {
1101 if (handle)
1102 printk(KERN_WARNING
1103 "%s: Handler not found in iucv_handler_table.\n",
1104 __FUNCTION__);
1105 else
1106 printk(KERN_WARNING
1107 "%s: NULL handle passed by application.\n",
1108 __FUNCTION__);
1109 return -EINVAL;
1110 }
1111
1112 if (pathid == NULL) {
1113 printk(KERN_WARNING "%s: NULL pathid pointer\n",
1114 __FUNCTION__);
1115 return -EINVAL;
1116 }
1117
1118 parm = (iparml_control *)grab_param();
1119
1120 parm->ipmsglim = msglim_reqstd;
1121
1122 if (user_data)
1123 memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
1124
1125 if (userid) {
1126 memcpy(parm->ipvmid, userid, sizeof(parm->ipvmid));
1127 ASCEBC(parm->ipvmid, sizeof(parm->ipvmid));
1128 EBC_TOUPPER(parm->ipvmid, sizeof(parm->ipvmid));
1129 }
1130
1131 if (system_name) {
1132 memcpy(parm->iptarget, system_name, sizeof(parm->iptarget));
1133 ASCEBC(parm->iptarget, sizeof(parm->iptarget));
1134 EBC_TOUPPER(parm->iptarget, sizeof(parm->iptarget));
1135 }
1136
1137 /* In order to establish an IUCV connection, the procedure is:
1138 *
1139 * b2f0(CONNECT)
1140 * take the ippathid from the b2f0 call
1141 * register the handler to the ippathid
1142 *
1143 * Unfortunately, the ConnectionEstablished message gets sent after the
1144 * b2f0(CONNECT) call but before the register is handled.
1145 *
1146 * In order for this race condition to be eliminated, the IUCV Control
1147 * Interrupts must be disabled for the above procedure.
1148 *
1149 * David Kennedy <dkennedy@linuxcare.com>
1150 */
1151
1152 /* Enable everything but IUCV Control messages */
1153 iucv_setmask(~(AllInterrupts));
1154 messagesDisabled = 1;
1155
1156 spin_lock_irqsave (&iucv_lock, flags);
1157 parm->ipflags1 = (__u8)flags1;
1158 b2f0_result = b2f0(CONNECT, parm);
1159 memcpy(&local_parm, parm, sizeof(local_parm));
1160 release_param(parm);
1161 parm = &local_parm;
1162 if (!b2f0_result)
1163 add_pathid_result = __iucv_add_pathid(parm->ippathid, h);
1164 spin_unlock_irqrestore (&iucv_lock, flags);
1165
1166 if (b2f0_result) {
1167 iucv_setmask(~0);
1168 messagesDisabled = 0;
1169 return b2f0_result;
1170 }
1171
1172 *pathid = parm->ippathid;
1173
1174 /* Enable everything again */
1175 iucv_setmask(IUCVControlInterruptsFlag);
1176
1177 if (msglim)
1178 *msglim = parm->ipmsglim;
1179 if (flags1_out)
1180 *flags1_out = (parm->ipflags1 & IPPRTY) ? IPPRTY : 0;
1181
1182 if (add_pathid_result) {
1183 iucv_sever(*pathid, no_memory);
1184 printk(KERN_WARNING "%s: add_pathid failed with rc ="
1185 " %d\n", __FUNCTION__, add_pathid_result);
1186 return(add_pathid_result);
1187 }
1188
1189 iucv_debug(1, "exiting");
1190 return b2f0_result;
1191}
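Conversely, the initiating side establishes a path as sketched below; the peer user id, the message limit of 10 and my_handle are illustrative, and the path must not be used until the ConnectionComplete interrupt arrives, as noted above:

    /* Sketch: initiating a connection with the old API. */
    static int my_connect(__u16 *pathid)
    {
            __u16 msglim;
            int flags_out;
            __u8 peer[8];

            memcpy(peer, "PEERUSER", 8);
            /* NULL user_data/system_name are allowed; flags1 = 0 means no options */
            return iucv_connect(pathid, 10, NULL, peer, NULL, 0,
                                &flags_out, &msglim, my_handle, NULL);
    }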
1192
1193/**
1194 * iucv_purge:
1195 * @pathid: Path identification number
1196 * @msgid: Message ID of message to purge.
1197 * @srccls: Message class of the message to purge.
1198 * @audit: Pointer to an __u32. If not NULL, on return, information about
1199 * asynchronous errors that may have affected the normal completion
 1200 * of this message is stored at the given location.
1201 *
1202 * Cancels a message you have sent.
1203 * Returns: return code from CP
1204 */
1205int
1206iucv_purge (__u16 pathid, __u32 msgid, __u32 srccls, __u32 *audit)
1207{
1208 iparml_purge *parm;
1209 ulong b2f0_result = 0;
1210
1211 iucv_debug(1, "entering");
1212 iucv_debug(1, "pathid = %d", pathid);
1213
1214 parm = (iparml_purge *)grab_param();
1215
1216 parm->ipmsgid = msgid;
1217 parm->ippathid = pathid;
1218 parm->ipsrccls = srccls;
1219 parm->ipflags1 |= (IPSRCCLS | IPFGMID | IPFGPID);
1220 b2f0_result = b2f0(PURGE, parm);
1221
1222 if (!b2f0_result && audit) {
1223 memcpy(audit, parm->ipaudit, sizeof(parm->ipaudit));
1224 /* parm->ipaudit has only 3 bytes */
1225 *audit >>= 8;
1226 }
1227
1228 release_param(parm);
1229
1230 iucv_debug(1, "b2f0_result = %ld", b2f0_result);
1231 iucv_debug(1, "exiting");
1232 return b2f0_result;
1233}
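A caller that wants to cancel a message it has already sent would use the call as sketched below; note that the returned audit value has already been shifted right by 8 bits because ipaudit is only three bytes, as the code above does:

    /* Sketch: cancelling a previously sent message. */
    static int my_cancel(__u16 pathid, __u32 msgid, __u32 srccls)
    {
            __u32 audit;
            int rc;

            rc = iucv_purge(pathid, msgid, srccls, &audit);
            if (!rc)
                    printk(KERN_DEBUG "purge audit bits: 0x%06x\n", audit);
            return rc;
    }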
1234
1235/**
1236 * iucv_query_generic:
1237 * @want_maxconn: Flag, describing which value is to be returned.
1238 *
1239 * Helper function for iucv_query_maxconn() and iucv_query_bufsize().
1240 *
1241 * Returns: The buffersize, if want_maxconn is 0; the maximum number of
1242 * connections, if want_maxconn is 1 or an error-code < 0 on failure.
1243 */
1244static int
1245iucv_query_generic(int want_maxconn)
1246{
1247 register unsigned long reg0 asm ("0");
1248 register unsigned long reg1 asm ("1");
1249 iparml_purge *parm = (iparml_purge *)grab_param();
1250 int bufsize, maxconn;
1251 int ccode;
1252
1253 /**
1254 * Call b2f0 and store R0 (max buffer size),
1255 * R1 (max connections) and CC.
1256 */
1257 reg0 = QUERY;
1258 reg1 = virt_to_phys(parm);
1259 asm volatile(
1260 " .long 0xb2f01000\n"
1261 " ipm %0\n"
1262 " srl %0,28\n"
1263 : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
1264 bufsize = reg0;
1265 maxconn = reg1;
1266 release_param(parm);
1267
1268 if (ccode)
1269 return -EPERM;
1270 if (want_maxconn)
1271 return maxconn;
1272 return bufsize;
1273}
1274
1275/**
1276 * iucv_query_maxconn:
1277 *
 1278 * Determines the maximum number of connections that may be established.
 1279 *
 1280 * Returns: Maximum number of connections that can be established.
1281 */
1282ulong
1283iucv_query_maxconn(void)
1284{
1285 return iucv_query_generic(1);
1286}
1287
1288/**
1289 * iucv_query_bufsize:
1290 *
1291 * Determines the size of the external interrupt buffer.
1292 *
1293 * Returns: Size of external interrupt buffer.
1294 */
1295ulong
1296iucv_query_bufsize (void)
1297{
1298 return iucv_query_generic(0);
1299}
1300
1301/**
1302 * iucv_quiesce:
1303 * @pathid: Path identification number
1304 * @user_data: 16-byte user data
1305 *
1306 * Temporarily suspends incoming messages on an IUCV path.
1307 * You can later reactivate the path by invoking the iucv_resume function.
1308 * Returns: return code from CP
1309 */
1310int
1311iucv_quiesce (__u16 pathid, __u8 user_data[16])
1312{
1313 iparml_control *parm;
1314 ulong b2f0_result = 0;
1315
1316 iucv_debug(1, "entering");
1317 iucv_debug(1, "pathid = %d", pathid);
1318
1319 parm = (iparml_control *)grab_param();
1320
1321 memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
1322 parm->ippathid = pathid;
1323
1324 b2f0_result = b2f0(QUIESCE, parm);
1325 release_param(parm);
1326
1327 iucv_debug(1, "b2f0_result = %ld", b2f0_result);
1328 iucv_debug(1, "exiting");
1329
1330 return b2f0_result;
1331}
1332
1333/**
1334 * iucv_receive:
1335 * @pathid: Path identification number.
1336 * @buffer: Address of buffer to receive. Must be below 2G.
1337 * @buflen: Length of buffer to receive.
1338 * @msgid: Specifies the message ID.
1339 * @trgcls: Specifies target class.
1340 * @flags1_out: Receives options for path on return.
1341 * - IPNORPY (0x10) Specifies whether a reply is required
1342 * - IPPRTY (0x20) Specifies if you want to send priority message
1343 * - IPRMDATA (0x80) Specifies the data is contained in the parameter list
1344 * @residual_buffer: Receives the address of buffer updated by the number
1345 * of bytes you have received on return.
1346 * @residual_length: On return, receives one of the following values:
1347 * - 0 If the receive buffer is the same length as
1348 * the message.
1349 * - Remaining bytes in buffer If the receive buffer is longer than the
1350 * message.
1351 * - Remaining bytes in message If the receive buffer is shorter than the
1352 * message.
1353 *
1354 * This function receives messages that are being sent to you over established
1355 * paths.
1356 * Returns: return code from CP IUCV call; If the receive buffer is shorter
1357 * than the message, always 5
1358 * -EINVAL - buffer address is pointing to NULL
1359 */
1360int
1361iucv_receive (__u16 pathid, __u32 msgid, __u32 trgcls,
1362 void *buffer, ulong buflen,
1363 int *flags1_out, ulong * residual_buffer, ulong * residual_length)
1364{
1365 iparml_db *parm;
1366 ulong b2f0_result;
1367 int moved = 0; /* number of bytes moved from parmlist to buffer */
1368
1369 iucv_debug(2, "entering");
1370
1371 if (!buffer)
1372 return -EINVAL;
1373
1374 parm = (iparml_db *)grab_param();
1375
1376 parm->ipbfadr1 = (__u32) (addr_t) buffer;
1377 parm->ipbfln1f = (__u32) ((ulong) buflen);
1378 parm->ipmsgid = msgid;
1379 parm->ippathid = pathid;
1380 parm->iptrgcls = trgcls;
1381 parm->ipflags1 = (IPFGPID | IPFGMID | IPFGMCL);
1382
1383 b2f0_result = b2f0(RECEIVE, parm);
1384
1385 if (!b2f0_result || b2f0_result == 5) {
1386 if (flags1_out) {
1387 iucv_debug(2, "*flags1_out = %d", *flags1_out);
1388 *flags1_out = (parm->ipflags1 & (~0x07));
1389 iucv_debug(2, "*flags1_out = %d", *flags1_out);
1390 }
1391
1392 if (!(parm->ipflags1 & IPRMDATA)) { /*msg not in parmlist */
1393 if (residual_length)
1394 *residual_length = parm->ipbfln1f;
1395
1396 if (residual_buffer)
1397 *residual_buffer = parm->ipbfadr1;
1398 } else {
1399 moved = min_t (unsigned long, buflen, 8);
1400
1401 memcpy ((char *) buffer,
1402 (char *) &parm->ipbfadr1, moved);
1403
1404 if (buflen < 8)
1405 b2f0_result = 5;
1406
1407 if (residual_length)
1408 *residual_length = abs (buflen - 8);
1409
1410 if (residual_buffer)
1411 *residual_buffer = (ulong) (buffer + moved);
1412 }
1413 }
1414 release_param(parm);
1415
1416 iucv_debug(2, "exiting");
1417 return b2f0_result;
1418}
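A typical receive path, driven from the message-pending interrupt, looks roughly like this; the buffer must be allocated below 2G as the kerneldoc above requires, and the 256-byte size is arbitrary:

    /* Sketch: receiving a message into a buffer below 2G. */
    static int my_receive(__u16 pathid, __u32 msgid, __u32 trgcls)
    {
            ulong residual_buf, residual_len;
            int flags_out, rc;
            void *buf = kmalloc(256, GFP_KERNEL | GFP_DMA);

            if (!buf)
                    return -ENOMEM;
            rc = iucv_receive(pathid, msgid, trgcls, buf, 256,
                              &flags_out, &residual_buf, &residual_len);
            if (rc == 5)
                    printk(KERN_WARNING "message truncated, %lu bytes left\n",
                           residual_len);
            kfree(buf);
            return rc;
    }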
1419
1420/*
1421 * Name: iucv_receive_array
1422 * Purpose: This function receives messages that are being sent to you
1423 * over established paths.
1424 * Input: pathid - path identification number
1425 * buffer - address of array of buffers
1426 * buflen - total length of buffers
1427 * msgid - specifies the message ID.
1428 * trgcls - specifies target class
1429 * Output:
1430 * flags1_out: Options for path.
1431 * IPNORPY - 0x10 specifies whether a reply is required
1432 * IPPRTY - 0x20 specifies if you want to send priority message
1433 * IPRMDATA - 0x80 specifies the data is contained in the parameter list
1434 * residual_buffer - address points to the current list entry IUCV
1435 * is working on.
1436 * residual_length -
1437 * Contains one of the following values, if the receive buffer is:
1438 * The same length as the message, this field is zero.
1439 * Longer than the message, this field contains the number of
1440 * bytes remaining in the buffer.
1441 * Shorter than the message, this field contains the residual
1442 * count (that is, the number of bytes remaining in the
 1443 * message that does not fit into the buffer). In this case
1444 * b2f0_result = 5.
1445 * Return: b2f0_result - return code from CP
1446 * (-EINVAL) - buffer address is NULL
1447 */
1448int
1449iucv_receive_array (__u16 pathid,
1450 __u32 msgid, __u32 trgcls,
1451 iucv_array_t * buffer, ulong buflen,
1452 int *flags1_out,
1453 ulong * residual_buffer, ulong * residual_length)
1454{
1455 iparml_db *parm;
1456 ulong b2f0_result;
1457 int i = 0, moved = 0, need_to_move = 8, dyn_len;
1458
1459 iucv_debug(2, "entering");
1460
1461 if (!buffer)
1462 return -EINVAL;
1463
1464 parm = (iparml_db *)grab_param();
1465
1466 parm->ipbfadr1 = (__u32) ((ulong) buffer);
1467 parm->ipbfln1f = (__u32) buflen;
1468 parm->ipmsgid = msgid;
1469 parm->ippathid = pathid;
1470 parm->iptrgcls = trgcls;
1471 parm->ipflags1 = (IPBUFLST | IPFGPID | IPFGMID | IPFGMCL);
1472
1473 b2f0_result = b2f0(RECEIVE, parm);
1474
1475 if (!b2f0_result || b2f0_result == 5) {
1476
1477 if (flags1_out) {
1478 iucv_debug(2, "*flags1_out = %d", *flags1_out);
1479 *flags1_out = (parm->ipflags1 & (~0x07));
1480 iucv_debug(2, "*flags1_out = %d", *flags1_out);
1481 }
1482
1483 if (!(parm->ipflags1 & IPRMDATA)) { /*msg not in parmlist */
1484
1485 if (residual_length)
1486 *residual_length = parm->ipbfln1f;
1487
1488 if (residual_buffer)
1489 *residual_buffer = parm->ipbfadr1;
1490
1491 } else {
1492 /* copy msg from parmlist to users array. */
1493
1494 while ((moved < 8) && (moved < buflen)) {
1495 dyn_len =
1496 min_t (unsigned int,
1497 (buffer + i)->length, need_to_move);
1498
1499 memcpy ((char *)((ulong)((buffer + i)->address)),
1500 ((char *) &parm->ipbfadr1) + moved,
1501 dyn_len);
1502
1503 moved += dyn_len;
1504 need_to_move -= dyn_len;
1505
1506 (buffer + i)->address =
1507 (__u32)
1508 ((ulong)(__u8 *) ((ulong)(buffer + i)->address)
1509 + dyn_len);
1510
1511 (buffer + i)->length -= dyn_len;
1512 i++;
1513 }
1514
1515 if (need_to_move) /* buflen < 8 bytes */
1516 b2f0_result = 5;
1517
1518 if (residual_length)
1519 *residual_length = abs (buflen - 8);
1520
1521 if (residual_buffer) {
1522 if (!moved)
1523 *residual_buffer = (ulong) buffer;
1524 else
1525 *residual_buffer =
1526 (ulong) (buffer + (i - 1));
1527 }
1528
1529 }
1530 }
1531 release_param(parm);
1532
1533 iucv_debug(2, "exiting");
1534 return b2f0_result;
1535}
1536
1537/**
1538 * iucv_reject:
1539 * @pathid: Path identification number.
1540 * @msgid: Message ID of the message to reject.
1541 * @trgcls: Target class of the message to reject.
1542 * Returns: return code from CP
1543 *
1544 * Refuses a specified message. Between the time you are notified of a
1545 * message and the time that you complete the message, the message may
1546 * be rejected.
1547 */
1548int
1549iucv_reject (__u16 pathid, __u32 msgid, __u32 trgcls)
1550{
1551 iparml_db *parm;
1552 ulong b2f0_result = 0;
1553
1554 iucv_debug(1, "entering");
1555 iucv_debug(1, "pathid = %d", pathid);
1556
1557 parm = (iparml_db *)grab_param();
1558
1559 parm->ippathid = pathid;
1560 parm->ipmsgid = msgid;
1561 parm->iptrgcls = trgcls;
1562 parm->ipflags1 = (IPFGMCL | IPFGMID | IPFGPID);
1563
1564 b2f0_result = b2f0(REJECT, parm);
1565 release_param(parm);
1566
1567 iucv_debug(1, "b2f0_result = %ld", b2f0_result);
1568 iucv_debug(1, "exiting");
1569
1570 return b2f0_result;
1571}
1572
1573/*
1574 * Name: iucv_reply
1575 * Purpose: This function responds to the two-way messages that you
1576 * receive. You must identify completely the message to
1577 * which you wish to reply. ie, pathid, msgid, and trgcls.
1578 * Input: pathid - path identification number
1579 * msgid - specifies the message ID.
1580 * trgcls - specifies target class
1581 * flags1 - option for path
1582 * IPPRTY- 0x20 - specifies if you want to send priority message
1583 * buffer - address of reply buffer
1584 * buflen - length of reply buffer
1585 * Output: ipbfadr2 - Address of buffer updated by the number
1586 * of bytes you have moved.
1587 * ipbfln2f - Contains one of the following values:
1588 * If the answer buffer is the same length as the reply, this field
1589 * contains zero.
1590 * If the answer buffer is longer than the reply, this field contains
1591 * the number of bytes remaining in the buffer.
1592 * If the answer buffer is shorter than the reply, this field contains
 1593 * a residual count (that is, the number of bytes remaining in the
 1594 * reply that does not fit into the buffer). In this
1595 * case b2f0_result = 5.
1596 * Return: b2f0_result - return code from CP
1597 * (-EINVAL) - buffer address is NULL
1598 */
1599int
1600iucv_reply (__u16 pathid,
1601 __u32 msgid, __u32 trgcls,
1602 int flags1,
1603 void *buffer, ulong buflen, ulong * ipbfadr2, ulong * ipbfln2f)
1604{
1605 iparml_db *parm;
1606 ulong b2f0_result;
1607
1608 iucv_debug(2, "entering");
1609
1610 if (!buffer)
1611 return -EINVAL;
1612
1613 parm = (iparml_db *)grab_param();
1614
1615 parm->ipbfadr2 = (__u32) ((ulong) buffer);
1616 parm->ipbfln2f = (__u32) buflen; /* length of message */
1617 parm->ippathid = pathid;
1618 parm->ipmsgid = msgid;
1619 parm->iptrgcls = trgcls;
1620 parm->ipflags1 = (__u8) flags1; /* priority message */
1621
1622 b2f0_result = b2f0(REPLY, parm);
1623
1624 if ((!b2f0_result) || (b2f0_result == 5)) {
1625 if (ipbfadr2)
1626 *ipbfadr2 = parm->ipbfadr2;
1627 if (ipbfln2f)
1628 *ipbfln2f = parm->ipbfln2f;
1629 }
1630 release_param(parm);
1631
1632 iucv_debug(2, "exiting");
1633
1634 return b2f0_result;
1635}
1636
1637/*
1638 * Name: iucv_reply_array
1639 * Purpose: This function responds to the two-way messages that you
1640 * receive. You must identify completely the message to
1641 * which you wish to reply. ie, pathid, msgid, and trgcls.
1642 * The array identifies a list of addresses and lengths of
1643 * discontiguous buffers that contains the reply data.
1644 * Input: pathid - path identification number
1645 * msgid - specifies the message ID.
1646 * trgcls - specifies target class
1647 * flags1 - option for path
1648 * IPPRTY- specifies if you want to send priority message
1649 * buffer - address of array of reply buffers
1650 * buflen - total length of reply buffers
1651 * Output: ipbfadr2 - Address of buffer which IUCV is currently working on.
1652 * ipbfln2f - Contains one of the following values:
1653 * If the answer buffer is the same length as the reply, this field
1654 * contains zero.
1655 * If the answer buffer is longer than the reply, this field contains
1656 * the number of bytes remaining in the buffer.
1657 * If the answer buffer is shorter than the reply, this field contains
 1658 * a residual count (that is, the number of bytes remaining in the
 1659 * reply that does not fit into the buffer). In this
1660 * case b2f0_result = 5.
1661 * Return: b2f0_result - return code from CP
1662 * (-EINVAL) - buffer address is NULL
1663*/
1664int
1665iucv_reply_array (__u16 pathid,
1666 __u32 msgid, __u32 trgcls,
1667 int flags1,
1668 iucv_array_t * buffer,
1669 ulong buflen, ulong * ipbfadr2, ulong * ipbfln2f)
1670{
1671 iparml_db *parm;
1672 ulong b2f0_result;
1673
1674 iucv_debug(2, "entering");
1675
1676 if (!buffer)
1677 return -EINVAL;
1678
1679 parm = (iparml_db *)grab_param();
1680
1681 parm->ipbfadr2 = (__u32) ((ulong) buffer);
1682 parm->ipbfln2f = buflen; /* length of message */
1683 parm->ippathid = pathid;
1684 parm->ipmsgid = msgid;
1685 parm->iptrgcls = trgcls;
1686 parm->ipflags1 = (IPANSLST | flags1);
1687
1688 b2f0_result = b2f0(REPLY, parm);
1689
1690 if ((!b2f0_result) || (b2f0_result == 5)) {
1691
1692 if (ipbfadr2)
1693 *ipbfadr2 = parm->ipbfadr2;
1694 if (ipbfln2f)
1695 *ipbfln2f = parm->ipbfln2f;
1696 }
1697 release_param(parm);
1698
1699 iucv_debug(2, "exiting");
1700
1701 return b2f0_result;
1702}
1703
1704/*
1705 * Name: iucv_reply_prmmsg
1706 * Purpose: This function responds to the two-way messages that you
1707 * receive. You must identify completely the message to
1708 * which you wish to reply. ie, pathid, msgid, and trgcls.
1709 * Prmmsg signifies the data is moved into the
1710 * parameter list.
1711 * Input: pathid - path identification number
1712 * msgid - specifies the message ID.
1713 * trgcls - specifies target class
1714 * flags1 - option for path
1715 * IPPRTY- specifies if you want to send priority message
1716 * prmmsg - 8-bytes of data to be placed into the parameter
1717 * list.
1718 * Output: NA
1719 * Return: b2f0_result - return code from CP
1720*/
1721int
1722iucv_reply_prmmsg (__u16 pathid,
1723 __u32 msgid, __u32 trgcls, int flags1, __u8 prmmsg[8])
1724{
1725 iparml_dpl *parm;
1726 ulong b2f0_result;
1727
1728 iucv_debug(2, "entering");
1729
1730 parm = (iparml_dpl *)grab_param();
1731
1732 parm->ippathid = pathid;
1733 parm->ipmsgid = msgid;
1734 parm->iptrgcls = trgcls;
1735 memcpy(parm->iprmmsg, prmmsg, sizeof (parm->iprmmsg));
1736 parm->ipflags1 = (IPRMDATA | flags1);
1737
1738 b2f0_result = b2f0(REPLY, parm);
1739 release_param(parm);
1740
1741 iucv_debug(2, "exiting");
1742
1743 return b2f0_result;
1744}
1745
1746/**
1747 * iucv_resume:
1748 * @pathid: Path identification number
1749 * @user_data: 16-byte of user data
1750 *
1751 * This function restores communication over a quiesced path.
1752 * Returns: return code from CP
1753 */
1754int
1755iucv_resume (__u16 pathid, __u8 user_data[16])
1756{
1757 iparml_control *parm;
1758 ulong b2f0_result = 0;
1759
1760 iucv_debug(1, "entering");
1761 iucv_debug(1, "pathid = %d", pathid);
1762
1763 parm = (iparml_control *)grab_param();
1764
 1765	memcpy (parm->ipuser, user_data, sizeof (parm->ipuser));
1766 parm->ippathid = pathid;
1767
1768 b2f0_result = b2f0(RESUME, parm);
1769 release_param(parm);
1770
1771 iucv_debug(1, "exiting");
1772
1773 return b2f0_result;
1774}
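iucv_quiesce() and iucv_resume() are meant to be used as a pair on the same path; a sketch (the 16-byte token is opaque user data delivered with the corresponding interrupts on the peer side):

    /* Sketch: temporarily stopping and restarting traffic on a path. */
    static void my_pause_path(__u16 pathid)
    {
            __u8 token[16] = { 0 };

            iucv_quiesce(pathid, token);    /* peer sees the path quiesced */
            /* ... drain or reconfigure ... */
            iucv_resume(pathid, token);     /* incoming messages flow again */
    }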
1775
1776/*
1777 * Name: iucv_send
1778 * Purpose: sends messages
1779 * Input: pathid - ushort, pathid
1780 * msgid - ulong *, id of message returned to caller
1781 * trgcls - ulong, target message class
1782 * srccls - ulong, source message class
1783 * msgtag - ulong, message tag
1784 * flags1 - Contains options for this path.
1785 * IPPRTY - 0x20 - specifies if you want to send a priority message.
1786 * buffer - pointer to buffer
1787 * buflen - ulong, length of buffer
1788 * Output: b2f0_result - return code from b2f0 call
1789 * msgid - returns message id
1790 */
1791int
1792iucv_send (__u16 pathid, __u32 * msgid,
1793 __u32 trgcls, __u32 srccls,
1794 __u32 msgtag, int flags1, void *buffer, ulong buflen)
1795{
1796 iparml_db *parm;
1797 ulong b2f0_result;
1798
1799 iucv_debug(2, "entering");
1800
1801 if (!buffer)
1802 return -EINVAL;
1803
1804 parm = (iparml_db *)grab_param();
1805
1806 parm->ipbfadr1 = (__u32) ((ulong) buffer);
1807 parm->ippathid = pathid;
1808 parm->iptrgcls = trgcls;
1809 parm->ipbfln1f = (__u32) buflen; /* length of message */
1810 parm->ipsrccls = srccls;
1811 parm->ipmsgtag = msgtag;
1812 parm->ipflags1 = (IPNORPY | flags1); /* one way priority message */
1813
1814 b2f0_result = b2f0(SEND, parm);
1815
1816 if ((!b2f0_result) && (msgid))
1817 *msgid = parm->ipmsgid;
1818 release_param(parm);
1819
1820 iucv_debug(2, "exiting");
1821
1822 return b2f0_result;
1823}
1824
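/*
 * Illustrative sketch, not part of the original driver: a one-way send
 * of a contiguous buffer. CP returns the message id, which could later
 * be used to purge the message or to match a MessageComplete interrupt.
 */
static int example_send_oneway(__u16 pathid, void *data, ulong len)
{
	__u32 msgid;

	/* trgcls/srccls/msgtag 0, no IPPRTY requested */
	return iucv_send(pathid, &msgid, 0, 0, 0, 0, data, len);
}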
1825/*
1826 * Name: iucv_send_array
1827 * Purpose: This function transmits data to another application.
1828 * The contents of buffer is the address of the array of
1829 * addresses and lengths of discontiguous buffers that hold
1830 * the message text. This is a one-way message and the
1831 * receiver will not reply to the message.
1832 * Input: pathid - path identification number
1833 * trgcls - specifies target class
1834 * srccls - specifies the source message class
1835 * msgtag - specifies a tag to be associated with the message
1836 * flags1 - option for path
1837 * IPPRTY- specifies if you want to send priority message
1838 * buffer - address of array of send buffers
1839 * buflen - total length of send buffers
1840 * Output: msgid - specifies the message ID.
1841 * Return: b2f0_result - return code from CP
1842 * (-EINVAL) - buffer address is NULL
1843 */
1844int
1845iucv_send_array (__u16 pathid,
1846 __u32 * msgid,
1847 __u32 trgcls,
1848 __u32 srccls,
1849 __u32 msgtag, int flags1, iucv_array_t * buffer, ulong buflen)
1850{
1851 iparml_db *parm;
1852 ulong b2f0_result;
1853
1854 iucv_debug(2, "entering");
1855
1856 if (!buffer)
1857 return -EINVAL;
1858
1859 parm = (iparml_db *)grab_param();
1860
1861 parm->ippathid = pathid;
1862 parm->iptrgcls = trgcls;
1863 parm->ipbfadr1 = (__u32) ((ulong) buffer);
1864 parm->ipbfln1f = (__u32) buflen; /* length of message */
1865 parm->ipsrccls = srccls;
1866 parm->ipmsgtag = msgtag;
1867 parm->ipflags1 = (IPNORPY | IPBUFLST | flags1);
1868 b2f0_result = b2f0(SEND, parm);
1869
1870 if ((!b2f0_result) && (msgid))
1871 *msgid = parm->ipmsgid;
1872 release_param(parm);
1873
1874 iucv_debug(2, "exiting");
1875 return b2f0_result;
1876}
1877
1878/*
1879 * Name: iucv_send_prmmsg
1880 * Purpose: This function transmits data to another application.
1881 * Prmmsg specifies that the 8-bytes of data are to be moved
1882 * into the parameter list. This is a one-way message and the
1883 * receiver will not reply to the message.
1884 * Input: pathid - path identification number
1885 * trgcls - specifies target class
1886 * srccls - specifies the source message class
1887 * msgtag - specifies a tag to be associated with the message
1888 * flags1 - option for path
1889 * IPPRTY- specifies if you want to send priority message
1890 * prmmsg - 8-bytes of data to be placed into parameter list
1891 * Output: msgid - specifies the message ID.
1892 * Return: b2f0_result - return code from CP
1893*/
1894int
1895iucv_send_prmmsg (__u16 pathid,
1896 __u32 * msgid,
1897 __u32 trgcls,
1898 __u32 srccls, __u32 msgtag, int flags1, __u8 prmmsg[8])
1899{
1900 iparml_dpl *parm;
1901 ulong b2f0_result;
1902
1903 iucv_debug(2, "entering");
1904
1905 parm = (iparml_dpl *)grab_param();
1906
1907 parm->ippathid = pathid;
1908 parm->iptrgcls = trgcls;
1909 parm->ipsrccls = srccls;
1910 parm->ipmsgtag = msgtag;
1911 parm->ipflags1 = (IPRMDATA | IPNORPY | flags1);
1912 memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg));
1913
1914 b2f0_result = b2f0(SEND, parm);
1915
1916 if ((!b2f0_result) && (msgid))
1917 *msgid = parm->ipmsgid;
1918 release_param(parm);
1919
1920 iucv_debug(2, "exiting");
1921
1922 return b2f0_result;
1923}
1924
1925/*
1926 * Name: iucv_send2way
1927 * Purpose: This function transmits data to another application.
1928 * Data to be transmitted is in a buffer. The receiver
1929 * of the send is expected to reply to the message and
1930 * a buffer is provided into which IUCV moves the reply
1931 * to this message.
1932 * Input: pathid - path identification number
1933 * trgcls - specifies target class
1934 * srccls - specifies the source message class
1935 * msgtag - specifies a tag associated with the message
1936 * flags1 - option for path
1937 * IPPRTY- specifies if you want to send priority message
1938 * buffer - address of send buffer
1939 * buflen - length of send buffer
1940 * ansbuf - address of buffer to reply with
1941 * anslen - length of buffer to reply with
1942 * Output: msgid - specifies the message ID.
1943 * Return: b2f0_result - return code from CP
1944 * (-EINVAL) - buffer or ansbuf address is NULL
1945 */
1946int
1947iucv_send2way (__u16 pathid,
1948 __u32 * msgid,
1949 __u32 trgcls,
1950 __u32 srccls,
1951 __u32 msgtag,
1952 int flags1,
1953 void *buffer, ulong buflen, void *ansbuf, ulong anslen)
1954{
1955 iparml_db *parm;
1956 ulong b2f0_result;
1957
1958 iucv_debug(2, "entering");
1959
1960 if (!buffer || !ansbuf)
1961 return -EINVAL;
1962
1963 parm = (iparml_db *)grab_param();
1964
1965 parm->ippathid = pathid;
1966 parm->iptrgcls = trgcls;
1967 parm->ipbfadr1 = (__u32) ((ulong) buffer);
1968 parm->ipbfln1f = (__u32) buflen; /* length of message */
1969 parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
1970 parm->ipbfln2f = (__u32) anslen;
1971 parm->ipsrccls = srccls;
1972 parm->ipmsgtag = msgtag;
1973 parm->ipflags1 = flags1; /* priority message */
1974
1975 b2f0_result = b2f0(SEND, parm);
1976
1977 if ((!b2f0_result) && (msgid))
1978 *msgid = parm->ipmsgid;
1979 release_param(parm);
1980
1981 iucv_debug(2, "exiting");
1982
1983 return b2f0_result;
1984}
1985
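/*
 * Illustrative sketch, not part of the original driver: a two-way send.
 * IUCV moves the partner's reply into ansbuf; completion is signalled
 * later through the MessageComplete interrupt.
 */
static int example_send_twoway(__u16 pathid, void *req, ulong reqlen,
			       void *ansbuf, ulong anslen)
{
	__u32 msgid;

	return iucv_send2way(pathid, &msgid, 0, 0, 0, 0,
			     req, reqlen, ansbuf, anslen);
}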
1986/*
1987 * Name: iucv_send2way_array
1988 * Purpose: This function transmits data to another application.
1989 * The contents of buffer is the address of the array of
1990 * addresses and lengths of discontiguous buffers that hold
1991 * the message text. The receiver of the send is expected to
1992 * reply to the message and a buffer is provided into which
1993 * IUCV moves the reply to this message.
1994 * Input: pathid - path identification number
1995 * trgcls - specifies target class
1996 * srccls - specifies the source message class
1997 * msgtag - specifies a tag to be associated with the message
1998 * flags1 - option for path
1999 * IPPRTY- specifies if you want to send priority message
2000 * buffer - address of array of send buffers
2001 * buflen - total length of send buffers
2002 * ansbuf - address of buffer to reply with
2003 * anslen - length of buffer to reply with
2004 * Output: msgid - specifies the message ID.
2005 * Return: b2f0_result - return code from CP
2006 * (-EINVAL) - buffer address is NULL
2007 */
2008int
2009iucv_send2way_array (__u16 pathid,
2010 __u32 * msgid,
2011 __u32 trgcls,
2012 __u32 srccls,
2013 __u32 msgtag,
2014 int flags1,
2015 iucv_array_t * buffer,
2016 ulong buflen, iucv_array_t * ansbuf, ulong anslen)
2017{
2018 iparml_db *parm;
2019 ulong b2f0_result;
2020
2021 iucv_debug(2, "entering");
2022
2023 if (!buffer || !ansbuf)
2024 return -EINVAL;
2025
2026 parm = (iparml_db *)grab_param();
2027
2028 parm->ippathid = pathid;
2029 parm->iptrgcls = trgcls;
2030 parm->ipbfadr1 = (__u32) ((ulong) buffer);
2031 parm->ipbfln1f = (__u32) buflen; /* length of message */
2032 parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
2033 parm->ipbfln2f = (__u32) anslen;
2034 parm->ipsrccls = srccls;
2035 parm->ipmsgtag = msgtag;
2036 parm->ipflags1 = (IPBUFLST | IPANSLST | flags1);
2037 b2f0_result = b2f0(SEND, parm);
2038 if ((!b2f0_result) && (msgid))
2039 *msgid = parm->ipmsgid;
2040 release_param(parm);
2041
2042 iucv_debug(2, "exiting");
2043 return b2f0_result;
2044}
2045
2046/*
2047 * Name: iucv_send2way_prmmsg
2048 * Purpose: This function transmits data to another application.
2049 * Prmmsg specifies that the 8-bytes of data are to be moved
2050 * into the parameter list. This is a two-way message and the
2051 * receiver of the message is expected to reply. A buffer
2052 * is provided into which IUCV moves the reply to this
2053 * message.
2054 * Input: pathid - path identification number
2055 * trgcls - specifies target class
2056 * srccls - specifies the source message class
2057 * msgtag - specifies a tag to be associated with the message
2058 * flags1 - option for path
2059 * IPPRTY- specifies if you want to send priority message
2060 * prmmsg - 8-bytes of data to be placed in parameter list
2061 * ansbuf - address of buffer to reply with
2062 * anslen - length of buffer to reply with
2063 * Output: msgid - specifies the message ID.
2064 * Return: b2f0_result - return code from CP
2065 * (-EINVAL) - buffer address is NULL
2066*/
2067int
2068iucv_send2way_prmmsg (__u16 pathid,
2069 __u32 * msgid,
2070 __u32 trgcls,
2071 __u32 srccls,
2072 __u32 msgtag,
2073 ulong flags1, __u8 prmmsg[8], void *ansbuf, ulong anslen)
2074{
2075 iparml_dpl *parm;
2076 ulong b2f0_result;
2077
2078 iucv_debug(2, "entering");
2079
2080 if (!ansbuf)
2081 return -EINVAL;
2082
2083 parm = (iparml_dpl *)grab_param();
2084
2085 parm->ippathid = pathid;
2086 parm->iptrgcls = trgcls;
2087 parm->ipsrccls = srccls;
2088 parm->ipmsgtag = msgtag;
2089 parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
2090 parm->ipbfln2f = (__u32) anslen;
2091 parm->ipflags1 = (IPRMDATA | flags1); /* message in prmlist */
2092 memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg));
2093
2094 b2f0_result = b2f0(SEND, parm);
2095
2096 if ((!b2f0_result) && (msgid))
2097 *msgid = parm->ipmsgid;
2098 release_param(parm);
2099
2100 iucv_debug(2, "exiting");
2101
2102 return b2f0_result;
2103}
2104
2105/*
2106 * Name: iucv_send2way_prmmsg_array
2107 * Purpose: This function transmits data to another application.
2108 * Prmmsg specifies that the 8-bytes of data are to be moved
2109 * into the parameter list. This is a two-way message and the
2110 * receiver of the message is expected to reply. A buffer
2111 * is provided into which IUCV moves the reply to this
2112 * message. The contents of ansbuf is the address of the
2113 * array of addresses and lengths of discontiguous buffers
2114 * that contain the reply.
2115 * Input: pathid - path identification number
2116 * trgcls - specifies target class
2117 * srccls - specifies the source message class
2118 * msgtag - specifies a tag to be associated with the message
2119 * flags1 - option for path
2120 * IPPRTY- specifies if you want to send priority message
2121 * prmmsg - 8-bytes of data to be placed into the parameter list
2122 * ansbuf - address of buffer to reply with
2123 * anslen - length of buffer to reply with
2124 * Output: msgid - specifies the message ID.
2125 * Return: b2f0_result - return code from CP
2126 * (-EINVAL) - ansbuf address is NULL
2127 */
2128int
2129iucv_send2way_prmmsg_array (__u16 pathid,
2130 __u32 * msgid,
2131 __u32 trgcls,
2132 __u32 srccls,
2133 __u32 msgtag,
2134 int flags1,
2135 __u8 prmmsg[8],
2136 iucv_array_t * ansbuf, ulong anslen)
2137{
2138 iparml_dpl *parm;
2139 ulong b2f0_result;
2140
2141 iucv_debug(2, "entering");
2142
2143 if (!ansbuf)
2144 return -EINVAL;
2145
2146 parm = (iparml_dpl *)grab_param();
2147
2148 parm->ippathid = pathid;
2149 parm->iptrgcls = trgcls;
2150 parm->ipsrccls = srccls;
2151 parm->ipmsgtag = msgtag;
2152 parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
2153 parm->ipbfln2f = (__u32) anslen;
2154 parm->ipflags1 = (IPRMDATA | IPANSLST | flags1);
2155 memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg));
2156 b2f0_result = b2f0(SEND, parm);
2157 if ((!b2f0_result) && (msgid))
2158 *msgid = parm->ipmsgid;
2159 release_param(parm);
2160
2161 iucv_debug(2, "exiting");
2162 return b2f0_result;
2163}
2164
2165void
2166iucv_setmask_cpuid (void *result)
2167{
2168 iparml_set_mask *parm;
2169
2170 iucv_debug(1, "entering");
2171 parm = (iparml_set_mask *)grab_param();
2172 parm->ipmask = *((__u8*)result);
2173 *((ulong *)result) = b2f0(SETMASK, parm);
2174 release_param(parm);
2175
2176 iucv_debug(1, "b2f0_result = %ld", *((ulong *)result));
2177 iucv_debug(1, "exiting");
2178}
2179
2180/*
2181 * Name: iucv_setmask
2182 * Purpose: This function enables or disables the following IUCV
2183 * external interruptions: Nonpriority and priority message
2184 * interrupts, nonpriority and priority reply interrupts.
2185 * Input: SetMaskFlag - options for interrupts
2186 * 0x80 - Nonpriority_MessagePendingInterruptsFlag
2187 * 0x40 - Priority_MessagePendingInterruptsFlag
2188 * 0x20 - Nonpriority_MessageCompletionInterruptsFlag
2189 * 0x10 - Priority_MessageCompletionInterruptsFlag
2190 * 0x08 - IUCVControlInterruptsFlag
2191 * Output: NA
2192 * Return: b2f0_result - return code from CP
2193*/
2194int
2195iucv_setmask (int SetMaskFlag)
2196{
2197 union {
2198 ulong result;
2199 __u8 param;
2200 } u;
2201 int cpu;
2202
2203 u.param = SetMaskFlag;
2204 cpu = get_cpu();
2205 smp_call_function_on(iucv_setmask_cpuid, &u, 0, 1, iucv_cpuid);
2206 put_cpu();
2207
2208 return u.result;
2209}
2210
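/*
 * Illustrative sketch, not part of the original driver: temporarily
 * disable message and reply interrupts while keeping control interrupts,
 * then restore the full mask, using the flag bits documented above.
 */
static void example_mask_messages(void)
{
	if (iucv_setmask(0x08))		/* IUCVControlInterruptsFlag only */
		return;
	/* ... section that must not see message/reply interrupts ... */
	iucv_setmask(0xf8);		/* re-enable all five interrupt classes */
}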
2211/**
2212 * iucv_sever:
2213 * @pathid: Path identification number
2214 * @user_data: 16-byte of user data
2215 *
2216 * This function terminates an iucv path.
2217 * Returns: return code from CP
2218 */
2219int
2220iucv_sever(__u16 pathid, __u8 user_data[16])
2221{
2222 iparml_control *parm;
2223 ulong b2f0_result = 0;
2224
2225 iucv_debug(1, "entering");
2226 parm = (iparml_control *)grab_param();
2227
2228 memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
2229 parm->ippathid = pathid;
2230
2231 b2f0_result = b2f0(SEVER, parm);
2232
2233 if (!b2f0_result)
2234 iucv_remove_pathid(pathid);
2235 release_param(parm);
2236
2237 iucv_debug(1, "exiting");
2238 return b2f0_result;
2239}
2240
2241/*
2242 * Interrupt Handlers
2243 *******************************************************************************/
2244
2245/**
2246 * iucv_irq_handler:
2247 * @regs: Current registers
2248 * @code: irq code
2249 *
2250 * Handles external interrupts coming in from CP.
2251 * Places the interrupt buffer on a queue and schedules iucv_tasklet_handler().
2252 */
2253static void
2254iucv_irq_handler(__u16 code)
2255{
2256 iucv_irqdata *irqdata;
2257
2258 irqdata = kmalloc(sizeof(iucv_irqdata), GFP_ATOMIC);
2259 if (!irqdata) {
2260 printk(KERN_WARNING "%s: out of memory\n", __FUNCTION__);
2261 return;
2262 }
2263
2264 memcpy(&irqdata->data, iucv_external_int_buffer,
2265 sizeof(iucv_GeneralInterrupt));
2266
2267 spin_lock(&iucv_irq_queue_lock);
2268 list_add_tail(&irqdata->queue, &iucv_irq_queue);
2269 spin_unlock(&iucv_irq_queue_lock);
2270
2271 tasklet_schedule(&iucv_tasklet);
2272}
2273
2274/**
2275 * iucv_do_int:
2276 * @int_buf: Pointer to copy of external interrupt buffer
2277 *
2278 * The workhorse for handling interrupts queued by iucv_irq_handler().
2279 * This function is called from the bottom half iucv_tasklet_handler().
2280 */
2281static void
2282iucv_do_int(iucv_GeneralInterrupt * int_buf)
2283{
2284 handler *h = NULL;
2285 struct list_head *lh;
2286 ulong flags;
2287 iucv_interrupt_ops_t *interrupt = NULL; /* interrupt addresses */
2288 __u8 temp_buff1[24], temp_buff2[24]; /* masked handler id. */
2289 int rc = 0, j = 0;
2290 __u8 no_listener[16] = "NO LISTENER";
2291
2292 iucv_debug(2, "entering, pathid %d, type %02X",
2293 int_buf->ippathid, int_buf->iptype);
2294 iucv_dumpit("External Interrupt Buffer:",
2295 int_buf, sizeof(iucv_GeneralInterrupt));
2296
2297 ASCEBC (no_listener, 16);
2298
2299 if (int_buf->iptype != 01) {
2300 if ((int_buf->ippathid) > (max_connections - 1)) {
2301 printk(KERN_WARNING "%s: Got interrupt with pathid %d"
2302 " > max_connections (%ld)\n", __FUNCTION__,
2303 int_buf->ippathid, max_connections - 1);
2304 } else {
2305 h = iucv_pathid_table[int_buf->ippathid];
2306 interrupt = h->interrupt_table;
2307 iucv_dumpit("Handler:", h, sizeof(handler));
2308 }
2309 }
2310
2311 /* end of if statement */
2312 switch (int_buf->iptype) {
2313 case 0x01: /* connection pending */
2314 if (messagesDisabled) {
2315 iucv_setmask(~0);
2316 messagesDisabled = 0;
2317 }
2318 spin_lock_irqsave(&iucv_lock, flags);
2319 list_for_each(lh, &iucv_handler_table) {
2320 h = list_entry(lh, handler, list);
2321 memcpy(temp_buff1, &(int_buf->ipvmid), 24);
2322 memcpy(temp_buff2, &(h->id.userid), 24);
2323 for (j = 0; j < 24; j++) {
2324 temp_buff1[j] &= (h->id.mask)[j];
2325 temp_buff2[j] &= (h->id.mask)[j];
2326 }
2327
2328 iucv_dumpit("temp_buff1:",
2329 temp_buff1, sizeof(temp_buff1));
2330 iucv_dumpit("temp_buff2",
2331 temp_buff2, sizeof(temp_buff2));
2332
2333 if (!memcmp (temp_buff1, temp_buff2, 24)) {
2334
2335 iucv_debug(2,
2336 "found a matching handler");
2337 break;
2338 } else
2339 h = NULL;
2340 }
2341 spin_unlock_irqrestore (&iucv_lock, flags);
2342 if (h) {
2343 /* ADD PATH TO PATHID TABLE */
2344 rc = iucv_add_pathid(int_buf->ippathid, h);
2345 if (rc) {
2346 iucv_sever (int_buf->ippathid,
2347 no_listener);
2348 iucv_debug(1,
2349 "add_pathid failed, rc = %d",
2350 rc);
2351 } else {
2352 interrupt = h->interrupt_table;
2353 if (interrupt->ConnectionPending) {
2354 EBCASC (int_buf->ipvmid, 8);
2355 interrupt->ConnectionPending(
2356 (iucv_ConnectionPending *)int_buf,
2357 h->pgm_data);
2358 } else
2359 iucv_sever(int_buf->ippathid,
2360 no_listener);
2361 }
2362 } else
2363 iucv_sever(int_buf->ippathid, no_listener);
2364 break;
2365
2366 case 0x02: /*connection complete */
2367 if (messagesDisabled) {
2368 iucv_setmask(~0);
2369 messagesDisabled = 0;
2370 }
2371 if (h) {
2372 if (interrupt->ConnectionComplete)
2373 {
2374 interrupt->ConnectionComplete(
2375 (iucv_ConnectionComplete *)int_buf,
2376 h->pgm_data);
2377 }
2378 else
2379 iucv_debug(1,
2380 "ConnectionComplete not called");
2381 } else
2382 iucv_sever(int_buf->ippathid, no_listener);
2383 break;
2384
2385 case 0x03: /* connection severed */
2386 if (messagesDisabled) {
2387 iucv_setmask(~0);
2388 messagesDisabled = 0;
2389 }
2390 if (h) {
2391 if (interrupt->ConnectionSevered)
2392 interrupt->ConnectionSevered(
2393 (iucv_ConnectionSevered *)int_buf,
2394 h->pgm_data);
2395
2396 else
2397 iucv_sever (int_buf->ippathid, no_listener);
2398 } else
2399 iucv_sever(int_buf->ippathid, no_listener);
2400 break;
2401
2402 case 0x04: /* connection quiesced */
2403 if (messagesDisabled) {
2404 iucv_setmask(~0);
2405 messagesDisabled = 0;
2406 }
2407 if (h) {
2408 if (interrupt->ConnectionQuiesced)
2409 interrupt->ConnectionQuiesced(
2410 (iucv_ConnectionQuiesced *)int_buf,
2411 h->pgm_data);
2412 else
2413 iucv_debug(1,
2414 "ConnectionQuiesced not called");
2415 }
2416 break;
2417
2418 case 0x05: /* connection resumed */
2419 if (messagesDisabled) {
2420 iucv_setmask(~0);
2421 messagesDisabled = 0;
2422 }
2423 if (h) {
2424 if (interrupt->ConnectionResumed)
2425 interrupt->ConnectionResumed(
2426 (iucv_ConnectionResumed *)int_buf,
2427 h->pgm_data);
2428 else
2429 iucv_debug(1,
2430 "ConnectionResumed not called");
2431 }
2432 break;
2433
2434 case 0x06: /* priority message complete */
2435 case 0x07: /* nonpriority message complete */
2436 if (h) {
2437 if (interrupt->MessageComplete)
2438 interrupt->MessageComplete(
2439 (iucv_MessageComplete *)int_buf,
2440 h->pgm_data);
2441 else
2442 iucv_debug(2,
2443 "MessageComplete not called");
2444 }
2445 break;
2446
2447 case 0x08: /* priority message pending */
2448 case 0x09: /* nonpriority message pending */
2449 if (h) {
2450 if (interrupt->MessagePending)
2451 interrupt->MessagePending(
2452 (iucv_MessagePending *) int_buf,
2453 h->pgm_data);
2454 else
2455 iucv_debug(2,
2456 "MessagePending not called");
2457 }
2458 break;
2459 default: /* unknown iucv type */
2460 printk(KERN_WARNING "%s: unknown iucv interrupt\n",
2461 __FUNCTION__);
2462 break;
2463 } /* end switch */
2464
2465 iucv_debug(2, "exiting pathid %d, type %02X",
2466 int_buf->ippathid, int_buf->iptype);
2467
2468 return;
2469}
2470
2471/**
2472 * iucv_tasklet_handler:
2473 *
2474 * This function loops over the queue of irq buffers and runs iucv_do_int()
2475 * on every queue element.
2476 */
2477static void
2478iucv_tasklet_handler(unsigned long ignored)
2479{
2480 struct list_head head;
2481 struct list_head *next;
2482 ulong flags;
2483
2484 spin_lock_irqsave(&iucv_irq_queue_lock, flags);
2485 list_add(&head, &iucv_irq_queue);
2486 list_del_init(&iucv_irq_queue);
2487 spin_unlock_irqrestore (&iucv_irq_queue_lock, flags);
2488
2489 next = head.next;
2490 while (next != &head) {
2491 iucv_irqdata *p = list_entry(next, iucv_irqdata, queue);
2492
2493 next = next->next;
2494 iucv_do_int(&p->data);
2495 kfree(p);
2496 }
2497
2498 return;
2499}
2500
2501subsys_initcall(iucv_init);
2502module_exit(iucv_exit);
2503
2504/**
2505 * Export all public stuff
2506 */
2507EXPORT_SYMBOL (iucv_bus);
2508EXPORT_SYMBOL (iucv_root);
2509EXPORT_SYMBOL (iucv_accept);
2510EXPORT_SYMBOL (iucv_connect);
2511#if 0
2512EXPORT_SYMBOL (iucv_purge);
2513EXPORT_SYMBOL (iucv_query_maxconn);
2514EXPORT_SYMBOL (iucv_query_bufsize);
2515EXPORT_SYMBOL (iucv_quiesce);
2516#endif
2517EXPORT_SYMBOL (iucv_receive);
2518#if 0
2519EXPORT_SYMBOL (iucv_receive_array);
2520#endif
2521EXPORT_SYMBOL (iucv_reject);
2522#if 0
2523EXPORT_SYMBOL (iucv_reply);
2524EXPORT_SYMBOL (iucv_reply_array);
2525EXPORT_SYMBOL (iucv_resume);
2526#endif
2527EXPORT_SYMBOL (iucv_reply_prmmsg);
2528EXPORT_SYMBOL (iucv_send);
2529EXPORT_SYMBOL (iucv_send2way);
2530EXPORT_SYMBOL (iucv_send2way_array);
2531EXPORT_SYMBOL (iucv_send2way_prmmsg);
2532EXPORT_SYMBOL (iucv_send2way_prmmsg_array);
2533#if 0
2534EXPORT_SYMBOL (iucv_send_array);
2535EXPORT_SYMBOL (iucv_send_prmmsg);
2536EXPORT_SYMBOL (iucv_setmask);
2537#endif
2538EXPORT_SYMBOL (iucv_sever);
2539EXPORT_SYMBOL (iucv_register_program);
2540EXPORT_SYMBOL (iucv_unregister_program);
diff --git a/drivers/s390/net/iucv.h b/drivers/s390/net/iucv.h
deleted file mode 100644
index 5b6b1b7241c9..000000000000
--- a/drivers/s390/net/iucv.h
+++ /dev/null
@@ -1,849 +0,0 @@
1/*
2 * drivers/s390/net/iucv.h
3 * IUCV base support.
4 *
5 * S390 version
6 * Copyright (C) 2000 IBM Corporation
7 * Author(s):Alan Altmark (Alan_Altmark@us.ibm.com)
8 * Xenia Tkatschow (xenia@us.ibm.com)
9 *
10 *
11 * Functionality:
12 * To explore any of the IUCV functions, one must first register
13 * their program using iucv_register_program(). Once your program has
14 * successfully completed a register, it can exploit the other functions.
15 * For further reference on all IUCV functionality, refer to the
16 * CP Programming Services book, also available on the web
17 * through www.ibm.com/s390/vm/pubs, manual # SC24-5760
18 *
19 * Definition of Return Codes
20 * -All positive return codes including zero are reflected back
21 * from CP except for iucv_register_program. The definition of each
22 * return code can be found in CP Programming Services book.
23 * Also available on the web through www.ibm.com/s390/vm/pubs, manual # SC24-5760
24 * - Return Code of:
25 * (-EINVAL) Invalid value
26 * (-ENOMEM) storage allocation failed
27 * pgmmask defined in iucv_register_program will be set depending on input
28 * parameters.
29 *
30 */
31
32#include <linux/types.h>
33#include <asm/debug.h>
34
35/**
36 * Debug Facility stuff
37 */
38#define IUCV_DBF_SETUP_NAME "iucv_setup"
39#define IUCV_DBF_SETUP_LEN 32
40#define IUCV_DBF_SETUP_PAGES 2
41#define IUCV_DBF_SETUP_NR_AREAS 1
42#define IUCV_DBF_SETUP_LEVEL 3
43
44#define IUCV_DBF_DATA_NAME "iucv_data"
45#define IUCV_DBF_DATA_LEN 128
46#define IUCV_DBF_DATA_PAGES 2
47#define IUCV_DBF_DATA_NR_AREAS 1
48#define IUCV_DBF_DATA_LEVEL 2
49
50#define IUCV_DBF_TRACE_NAME "iucv_trace"
51#define IUCV_DBF_TRACE_LEN 16
52#define IUCV_DBF_TRACE_PAGES 4
53#define IUCV_DBF_TRACE_NR_AREAS 1
54#define IUCV_DBF_TRACE_LEVEL 3
55
56#define IUCV_DBF_TEXT(name,level,text) \
57 do { \
58 debug_text_event(iucv_dbf_##name,level,text); \
59 } while (0)
60
61#define IUCV_DBF_HEX(name,level,addr,len) \
62 do { \
63 debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
64 } while (0)
65
66DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
67
68#define IUCV_DBF_TEXT_(name,level,text...) \
69 do { \
70 char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \
71 sprintf(iucv_dbf_txt_buf, text); \
72 debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \
73 put_cpu_var(iucv_dbf_txt_buf); \
74 } while (0)
75
76#define IUCV_DBF_SPRINTF(name,level,text...) \
77 do { \
78 debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
79 debug_sprintf_event(iucv_dbf_trace, level, text ); \
80 } while (0)
81
82/**
83 * some more debug stuff
84 */
85#define IUCV_HEXDUMP16(importance,header,ptr) \
86PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
87 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
88 *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
89 *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
90 *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
91 *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
92 *(((char*)ptr)+12),*(((char*)ptr)+13), \
93 *(((char*)ptr)+14),*(((char*)ptr)+15)); \
94PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
95 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
96 *(((char*)ptr)+16),*(((char*)ptr)+17), \
97 *(((char*)ptr)+18),*(((char*)ptr)+19), \
98 *(((char*)ptr)+20),*(((char*)ptr)+21), \
99 *(((char*)ptr)+22),*(((char*)ptr)+23), \
100 *(((char*)ptr)+24),*(((char*)ptr)+25), \
101 *(((char*)ptr)+26),*(((char*)ptr)+27), \
102 *(((char*)ptr)+28),*(((char*)ptr)+29), \
103 *(((char*)ptr)+30),*(((char*)ptr)+31));
104
105static inline void
106iucv_hex_dump(unsigned char *buf, size_t len)
107{
108 size_t i;
109
110 for (i = 0; i < len; i++) {
111 if (i && !(i % 16))
112 printk("\n");
113 printk("%02x ", *(buf + i));
114 }
115 printk("\n");
116}
117/**
118 * end of debug stuff
119 */
120
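/*
 * Illustrative sketch, not part of the original header: how a driver
 * built on these macros might trace a received packet. It assumes the
 * driver has registered the iucv_dbf_trace and iucv_dbf_data areas with
 * the s390 debug facility (debug_register()) at init time.
 */
extern debug_info_t *iucv_dbf_trace;
extern debug_info_t *iucv_dbf_data;

static inline void example_trace_packet(void *pkt, int len)
{
	IUCV_DBF_TEXT(trace, 4, "rx pkt");
	IUCV_DBF_TEXT_(trace, 4, "rx len %d", len);
	IUCV_DBF_HEX(data, 2, pkt, len);
}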
121#define uchar unsigned char
122#define ushort unsigned short
123#define ulong unsigned long
124#define iucv_handle_t void *
125
126/* flags1:
127 * All flags are defined in the field IPFLAGS1 of each function
128 * and can be found in CP Programming Services.
129 * IPLOCAL - Indicates the connect can only be satisfied on the
130 * local system
131 * IPPRTY - Indicates a priority message
132 * IPQUSCE - Indicates you do not want to receive messages on a
133 * path until an iucv_resume is issued
134 * IPRMDATA - Indicates that the message is in the parameter list
135 */
136#define IPLOCAL 0x01
137#define IPPRTY 0x20
138#define IPQUSCE 0x40
139#define IPRMDATA 0x80
140
141/* flags1_out:
142 * All flags are defined in the output field of IPFLAGS1 for each function
143 * and can be found in CP Programming Services.
144 * IPNORPY - Specifies this is a one-way message and no reply is expected.
145 * IPPRTY - Indicates a priority message is permitted. Defined in flags1.
146 */
147#define IPNORPY 0x10
148
149#define Nonpriority_MessagePendingInterruptsFlag 0x80
150#define Priority_MessagePendingInterruptsFlag 0x40
151#define Nonpriority_MessageCompletionInterruptsFlag 0x20
152#define Priority_MessageCompletionInterruptsFlag 0x10
153#define IUCVControlInterruptsFlag 0x08
154#define AllInterrupts 0xf8
155/*
156 * Mapping of external interrupt buffers should be used with the corresponding
157 * interrupt types.
158 * Names: iucv_ConnectionPending -> connection pending
159 * iucv_ConnectionComplete -> connection complete
160 * iucv_ConnectionSevered -> connection severed
161 * iucv_ConnectionQuiesced -> connection quiesced
162 * iucv_ConnectionResumed -> connection resumed
163 * iucv_MessagePending -> message pending
164 * iucv_MessageComplete -> message complete
165 */
166typedef struct {
167 u16 ippathid;
168 uchar ipflags1;
169 uchar iptype;
170 u16 ipmsglim;
171 u16 res1;
172 uchar ipvmid[8];
173 uchar ipuser[16];
174 u32 res3;
175 uchar ippollfg;
176 uchar res4[3];
177} iucv_ConnectionPending;
178
179typedef struct {
180 u16 ippathid;
181 uchar ipflags1;
182 uchar iptype;
183 u16 ipmsglim;
184 u16 res1;
185 uchar res2[8];
186 uchar ipuser[16];
187 u32 res3;
188 uchar ippollfg;
189 uchar res4[3];
190} iucv_ConnectionComplete;
191
192typedef struct {
193 u16 ippathid;
194 uchar res1;
195 uchar iptype;
196 u32 res2;
197 uchar res3[8];
198 uchar ipuser[16];
199 u32 res4;
200 uchar ippollfg;
201 uchar res5[3];
202} iucv_ConnectionSevered;
203
204typedef struct {
205 u16 ippathid;
206 uchar res1;
207 uchar iptype;
208 u32 res2;
209 uchar res3[8];
210 uchar ipuser[16];
211 u32 res4;
212 uchar ippollfg;
213 uchar res5[3];
214} iucv_ConnectionQuiesced;
215
216typedef struct {
217 u16 ippathid;
218 uchar res1;
219 uchar iptype;
220 u32 res2;
221 uchar res3[8];
222 uchar ipuser[16];
223 u32 res4;
224 uchar ippollfg;
225 uchar res5[3];
226} iucv_ConnectionResumed;
227
228typedef struct {
229 u16 ippathid;
230 uchar ipflags1;
231 uchar iptype;
232 u32 ipmsgid;
233 u32 iptrgcls;
234 union u2 {
235 u32 iprmmsg1_u32;
236 uchar iprmmsg1[4];
237 } ln1msg1;
238 union u1 {
239 u32 ipbfln1f;
240 uchar iprmmsg2[4];
241 } ln1msg2;
242 u32 res1[3];
243 u32 ipbfln2f;
244 uchar ippollfg;
245 uchar res2[3];
246} iucv_MessagePending;
247
248typedef struct {
249 u16 ippathid;
250 uchar ipflags1;
251 uchar iptype;
252 u32 ipmsgid;
253 u32 ipaudit;
254 uchar iprmmsg[8];
255 u32 ipsrccls;
256 u32 ipmsgtag;
257 u32 res;
258 u32 ipbfln2f;
259 uchar ippollfg;
260 uchar res2[3];
261} iucv_MessageComplete;
262
263/*
264 * iucv_interrupt_ops_t: Is a vector of functions that handle
265 * IUCV interrupts.
266 * Parameter list:
267 * eib - is a pointer to a 40-byte area described
268 * with one of the structures above.
269 * pgm_data - this data is strictly for the
270 * interrupt handler that is passed by
271 * the application. This may be an address
272 * or token.
273*/
274typedef struct {
275 void (*ConnectionPending) (iucv_ConnectionPending * eib,
276 void *pgm_data);
277 void (*ConnectionComplete) (iucv_ConnectionComplete * eib,
278 void *pgm_data);
279 void (*ConnectionSevered) (iucv_ConnectionSevered * eib,
280 void *pgm_data);
281 void (*ConnectionQuiesced) (iucv_ConnectionQuiesced * eib,
282 void *pgm_data);
283 void (*ConnectionResumed) (iucv_ConnectionResumed * eib,
284 void *pgm_data);
285 void (*MessagePending) (iucv_MessagePending * eib, void *pgm_data);
286 void (*MessageComplete) (iucv_MessageComplete * eib, void *pgm_data);
287} iucv_interrupt_ops_t;
288
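/*
 * Illustrative sketch, not part of the original header: a minimal
 * interrupt ops vector. Only the callbacks a program cares about need
 * to be filled in; the handlers below are hypothetical placeholders.
 */
static void example_conn_pending(iucv_ConnectionPending *eib, void *pgm_data)
{
	/* typically: iucv_accept() the path or iucv_sever() it */
}

static void example_msg_pending(iucv_MessagePending *eib, void *pgm_data)
{
	/* typically: iucv_receive() or iucv_reject() the announced message */
}

static iucv_interrupt_ops_t example_ops = {
	.ConnectionPending = example_conn_pending,
	.MessagePending    = example_msg_pending,
};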
289/*
290 *iucv_array_t : Defines buffer array.
291 * Inside the array may be 31-bit addresses and 31-bit lengths.
292*/
293typedef struct {
294 u32 address;
295 u32 length;
296} iucv_array_t __attribute__ ((aligned (8)));
297
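/*
 * Illustrative sketch, not part of the original header: describing two
 * discontiguous buffers with an iucv_array_t list, as expected by the
 * *_array send/receive/reply calls. Returns the total length to pass
 * as buflen.
 */
static inline ulong example_fill_array(iucv_array_t desc[2],
				       void *b1, u32 l1, void *b2, u32 l2)
{
	desc[0].address = (u32)(ulong) b1;
	desc[0].length  = l1;
	desc[1].address = (u32)(ulong) b2;
	desc[1].length  = l2;
	return l1 + l2;
}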
298extern struct bus_type iucv_bus;
299extern struct device *iucv_root;
300
301/* -prototypes- */
302/*
303 * Name: iucv_register_program
304 * Purpose: Registers an application with IUCV
305 * Input: pgmname - program identification
306 * userid - machine identification
307 * pgmmask - indicates which bits in the pgmname and userid combined will be
308 * used to determine who is given control
309 * ops - address of vector of interrupt handlers
310 * pgm_data- application data passed to interrupt handlers
311 * Output: NA
312 * Return: address of handler
313 * (0) - Error occurred, registration not completed.
314 * NOTE: Exact cause of failure will be recorded in syslog.
315*/
316iucv_handle_t iucv_register_program (uchar pgmname[16],
317 uchar userid[8],
318 uchar pgmmask[24],
319 iucv_interrupt_ops_t * ops,
320 void *pgm_data);
321
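/*
 * Illustrative sketch, not part of the original header: registering a
 * program that accepts connections from any partner. A zero pgmmask
 * makes the pgmname/userid comparison match everything (the mask is
 * ANDed over both sides before comparing). All names are hypothetical;
 * example_ops is the vector sketched above.
 */
static iucv_handle_t example_register(void)
{
	static uchar pgmname[16] = "EXAMPLEPRG      ";	/* 16 bytes, blank padded */
	static uchar userid[8]   = "PEERVM  ";		/* ignored with a zero mask */
	static uchar mask[24];				/* zeroed: match anything */

	return iucv_register_program(pgmname, userid, mask, &example_ops, NULL);
}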
322/*
323 * Name: iucv_unregister_program
324 * Purpose: Unregister application with IUCV
325 * Input: address of handler
326 * Output: NA
327 * Return: (0) - Normal return
328 * (-EINVAL) - Internal error, wild pointer
329*/
330int iucv_unregister_program (iucv_handle_t handle);
331
332/*
333 * Name: iucv_accept
334 * Purpose: This function is issued after the user receives a Connection Pending external
335 * interrupt and now wishes to complete the IUCV communication path.
336 * Input: pathid - u16 , Path identification number
337 * msglim_reqstd - u16, The number of outstanding messages requested.
338 * user_data - uchar[16], Data specified by the iucv_connect function.
339 * flags1 - int, Contains options for this path.
340 * -IPPRTY - 0x20- Specifies if you want to send priority message.
341 * -IPRMDATA - 0x80, Specifies whether your program can handle a message
342 * in the parameter list.
343 * -IPQUSCE - 0x40, Specifies whether you want to quiesce the path being
344 * established.
345 * handle - iucv_handle_t, Address of handler.
346 * pgm_data - void *, Application data passed to interrupt handlers.
347 * flags1_out - int * Contains information about the path
348 * - IPPRTY - 0x20, Indicates you may send priority messages.
349 * msglim - *u16, Number of outstanding messages.
350 * Output: return code from CP IUCV call.
351*/
352
353int iucv_accept (u16 pathid,
354 u16 msglim_reqstd,
355 uchar user_data[16],
356 int flags1,
357 iucv_handle_t handle,
358 void *pgm_data, int *flags1_out, u16 * msglim);
359
360/*
361 * Name: iucv_connect
362 * Purpose: This function establishes an IUCV path. Although the connect may complete
363 * successfully, you are not able to use the path until you receive an IUCV
364 * Connection Complete external interrupt.
365 * Input: pathid - u16 *, Path identification number
366 * msglim_reqstd - u16, Number of outstanding messages requested
367 * user_data - uchar[16], 16-byte user data
368 * userid - uchar[8], User identification
369 * system_name - uchar[8], 8-byte identifying the system name
370 * flags1 - int, Contains options for this path.
371 * -IPPRTY - 0x20, Specifies if you want to send priority message.
372 * -IPRMDATA - 0x80, Specifies whether your program can handle a message
373 * in the parameter list.
374 * -IPQUSCE - 0x40, Specifies whether you want to quiesce the path being
375 * established.
376 * -IPLOCAL - 0x01, Allows an application to force the partner to be on
377 * the local system. If local is specified then target class cannot be
378 * specified.
379 * flags1_out - int * Contains information about the path
380 * - IPPRTY - 0x20, Indicates you may send priority messages.
381 * msglim - * u16, Number of outstanding messages
382 * handle - iucv_handle_t, Address of handler
383 * pgm_data - void *, Application data passed to interrupt handlers
384 * Output: return code from CP IUCV call
385 * rc - return code from iucv_declare_buffer
386 * -EINVAL - Invalid handle passed by application
387 * -EINVAL - Pathid address is NULL
388 * add_pathid_result - Return code from internal function add_pathid
389*/
390int
391 iucv_connect (u16 * pathid,
392 u16 msglim_reqstd,
393 uchar user_data[16],
394 uchar userid[8],
395 uchar system_name[8],
396 int flags1,
397 int *flags1_out,
398 u16 * msglim, iucv_handle_t handle, void *pgm_data);
399
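/*
 * Illustrative sketch, not part of the original header: opening a path
 * to a peer guest. The path is usable only after the ConnectionComplete
 * interrupt arrives. 'handle' is the value returned by
 * iucv_register_program(); the peer and node names are hypothetical.
 */
static int example_connect(iucv_handle_t handle, u16 *pathid)
{
	uchar user_data[16] = { 0 };
	uchar peer[8]   = "PEERVM  ";
	uchar target[8] = "NODEID  ";	/* target system name */
	int flags1_out;
	u16 msglim;

	return iucv_connect(pathid, 16 /* msglim_reqstd */, user_data, peer,
			    target, 0, &flags1_out, &msglim, handle, NULL);
}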
400/*
401 * Name: iucv_purge
402 * Purpose: This function cancels a message that you have sent.
403 * Input: pathid - Path identification number.
404 * msgid - Specifies the message ID of the message to be purged.
405 * srccls - Specifies the source message class.
406 * Output: audit - Contains information about asynchronous error
407 * that may have affected the normal completion
408 * of this message.
409 * Return: Return code from CP IUCV call.
410*/
411int iucv_purge (u16 pathid, u32 msgid, u32 srccls, __u32 *audit);
412/*
413 * Name: iucv_query_maxconn
414 * Purpose: This function determines the maximum number of communication paths you
415 * may establish.
416 * Return: maxconn - ulong, Maximum number of connection the virtual machine may
417 * establish.
418*/
419ulong iucv_query_maxconn (void);
420
421/*
422 * Name: iucv_query_bufsize
423 * Purpose: This function determines how large an external interrupt
424 * buffer IUCV requires to store information.
425 * Return: bufsize - ulong, Size of external interrupt buffer.
426 */
427ulong iucv_query_bufsize (void);
428
429/*
430 * Name: iucv_quiesce
431 * Purpose: This function temporarily suspends incoming messages on an
432 * IUCV path. You can later reactivate the path by invoking
433 * the iucv_resume function.
434 * Input: pathid - Path identification number
435 * user_data - 16-bytes of user data
436 * Output: NA
437 * Return: Return code from CP IUCV call.
438*/
439int iucv_quiesce (u16 pathid, uchar user_data[16]);
440
441/*
442 * Name: iucv_receive
443 * Purpose: This function receives messages that are being sent to you
444 * over established paths. Data will be returned in buffer for length of
445 * buflen.
446 * Input:
447 * pathid - Path identification number.
448 * buffer - Address of buffer to receive.
449 * buflen - Length of buffer to receive.
450 * msgid - Specifies the message ID.
451 * trgcls - Specifies target class.
452 * Output:
453 * flags1_out: int *, Contains information about this path.
454 * IPNORPY - 0x10 Specifies this is a one-way message and no reply is
455 * expected.
456 * IPPRTY - 0x20 Specifies if you want to send priority message.
457 * IPRMDATA - 0x80 specifies the data is contained in the parameter list
458 * residual_buffer - address of buffer updated by the number
459 * of bytes you have received.
460 * residual_length -
461 * Contains one of the following values, if the receive buffer is:
462 * The same length as the message, this field is zero.
463 * Longer than the message, this field contains the number of
464 * bytes remaining in the buffer.
465 * Shorter than the message, this field contains the residual
466 * count (that is, the number of bytes remaining in the
467 * message that does not fit into the buffer). In this
468 * case b2f0_result = 5.
469 * Return: Return code from CP IUCV call.
470 * (-EINVAL) - buffer address is pointing to NULL
471*/
472int iucv_receive (u16 pathid,
473 u32 msgid,
474 u32 trgcls,
475 void *buffer,
476 ulong buflen,
477 int *flags1_out,
478 ulong * residual_buffer, ulong * residual_length);
479
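/*
 * Illustrative sketch, not part of the original header: pulling a
 * pending message into a flat buffer from within a MessagePending
 * callback. Messages flagged IPRMDATA live in the parameter list and
 * are not handled by this sketch.
 */
static int example_receive(iucv_MessagePending *eib, void *buf, ulong buflen)
{
	int flags1_out;
	ulong residual_buffer, residual_length;

	return iucv_receive(eib->ippathid, eib->ipmsgid, eib->iptrgcls,
			    buf, buflen, &flags1_out,
			    &residual_buffer, &residual_length);
}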
480 /*
481 * Name: iucv_receive_array
482 * Purpose: This function receives messages that are being sent to you
483 * over established paths. Data will be returned in first buffer for
484 * length of first buffer.
485 * Input: pathid - Path identification number.
486 * msgid - specifies the message ID.
487 * trgcls - Specifies target class.
488 * buffer - Address of array of buffers.
489 * buflen - Total length of buffers.
490 * Output:
491 * flags1_out: int *, Contains information about this path.
492 * IPNORPY - 0x10 Specifies this is a one-way message and no reply is
493 * expected.
494 * IPPRTY - 0x20 Specifies if you want to send priority message.
495 * IPRMDATA - 0x80 specifies the data is contained in the parameter list
496 * residual_buffer - address points to the current list entry IUCV
497 * is working on.
498 * residual_length -
499 * Contains one of the following values, if the receive buffer is:
500 * The same length as the message, this field is zero.
501 * Longer than the message, this field contains the number of
502 * bytes remaining in the buffer.
503 * Shorter than the message, this field contains the residual
504 * count (that is, the number of bytes remaining in the
505 * message that does not fit into the buffer). In this
506 * case b2f0_result = 5.
507 * Return: Return code from CP IUCV call.
508 * (-EINVAL) - Buffer address is NULL.
509 */
510int iucv_receive_array (u16 pathid,
511 u32 msgid,
512 u32 trgcls,
513 iucv_array_t * buffer,
514 ulong buflen,
515 int *flags1_out,
516 ulong * residual_buffer, ulong * residual_length);
517
518/*
519 * Name: iucv_reject
520 * Purpose: The reject function refuses a specified message. Between the
521 * time you are notified of a message and the time that you
522 * complete the message, the message may be rejected.
523 * Input: pathid - Path identification number.
524 * msgid - Specifies the message ID.
525 * trgcls - Specifies target class.
526 * Output: NA
527 * Return: Return code from CP IUCV call.
528*/
529int iucv_reject (u16 pathid, u32 msgid, u32 trgcls);
530
531/*
532 * Name: iucv_reply
533 * Purpose: This function responds to the two-way messages that you
534 * receive. You must identify completely the message to
535 * which you wish to reply. ie, pathid, msgid, and trgcls.
536 * Input: pathid - Path identification number.
537 * msgid - Specifies the message ID.
538 * trgcls - Specifies target class.
539 * flags1 - Option for path.
540 * IPPRTY- 0x20, Specifies if you want to send priority message.
541 * buffer - Address of reply buffer.
542 * buflen - Length of reply buffer.
543 * Output: residual_buffer - Address of buffer updated by the number
544 * of bytes you have moved.
545 * residual_length - Contains one of the following values:
546 * If the answer buffer is the same length as the reply, this field
547 * contains zero.
548 * If the answer buffer is longer than the reply, this field contains
549 * the number of bytes remaining in the buffer.
550 * If the answer buffer is shorter than the reply, this field contains
551 * a residual count (that is, the number of bytes remaining in the
552 * reply that does not fit into the buffer). In this
553 * case b2f0_result = 5.
554 * Return: Return code from CP IUCV call.
555 * (-EINVAL) - Buffer address is NULL.
556*/
557int iucv_reply (u16 pathid,
558 u32 msgid,
559 u32 trgcls,
560 int flags1,
561 void *buffer, ulong buflen, ulong * residual_buffer,
562 ulong * residual_length);
563
564/*
565 * Name: iucv_reply_array
566 * Purpose: This function responds to the two-way messages that you
567 * receive. You must identify completely the message to
568 * which you wish to reply. ie, pathid, msgid, and trgcls.
569 * The array identifies a list of addresses and lengths of
570 * discontiguous buffers that contains the reply data.
571 * Input: pathid - Path identification number
572 * msgid - Specifies the message ID.
573 * trgcls - Specifies target class.
574 * flags1 - Option for path.
575 * IPPRTY- 0x20, Specifies if you want to send priority message.
576 * buffer - Address of array of reply buffers.
577 * buflen - Total length of reply buffers.
578 * Output: residual_buffer - Address of buffer which IUCV is currently working on.
579 * residual_length - Contains one of the following values:
580 * If the answer buffer is the same length as the reply, this field
581 * contains zero.
582 * If the answer buffer is longer than the reply, this field contains
583 * the number of bytes remaining in the buffer.
584 * If the answer buffer is shorter than the reply, this field contains
585 * a residual count (that is, the number of bytes remaining in the
586 * reply that does not fit into the buffer). In this
587 * case b2f0_result = 5.
588 * Return: Return code from CP IUCV call.
589 * (-EINVAL) - Buffer address is NULL.
590*/
591int iucv_reply_array (u16 pathid,
592 u32 msgid,
593 u32 trgcls,
594 int flags1,
595 iucv_array_t * buffer,
596 ulong buflen, ulong * residual_address,
597 ulong * residual_length);
598
599/*
600 * Name: iucv_reply_prmmsg
601 * Purpose: This function responds to the two-way messages that you
602 * receive. You must identify completely the message to
603 * which you wish to reply. ie, pathid, msgid, and trgcls.
604 * Prmmsg signifies the data is moved into the
605 * parameter list.
606 * Input: pathid - Path identification number.
607 * msgid - Specifies the message ID.
608 * trgcls - Specifies target class.
609 * flags1 - Option for path.
610 * IPPRTY- 0x20 Specifies if you want to send priority message.
611 * prmmsg - 8-bytes of data to be placed into the parameter
612 * list.
613 * Output: NA
614 * Return: Return code from CP IUCV call.
615*/
616int iucv_reply_prmmsg (u16 pathid,
617 u32 msgid, u32 trgcls, int flags1, uchar prmmsg[8]);
618
619/*
620 * Name: iucv_resume
621 * Purpose: This function restores communications over a quiesced path
622 * Input: pathid - Path identification number.
623 * user_data - 16-bytes of user data.
624 * Output: NA
625 * Return: Return code from CP IUCV call.
626*/
627int iucv_resume (u16 pathid, uchar user_data[16]);
628
629/*
630 * Name: iucv_send
631 * Purpose: This function transmits data to another application.
632 * Data to be transmitted is in a buffer and this is a
633 * one-way message and the receiver will not reply to the
634 * message.
635 * Input: pathid - Path identification number.
636 * trgcls - Specifies target class.
637 * srccls - Specifies the source message class.
638 * msgtag - Specifies a tag to be associated with the message.
639 * flags1 - Option for path.
640 * IPPRTY- 0x20 Specifies if you want to send priority message.
641 * buffer - Address of send buffer.
642 * buflen - Length of send buffer.
643 * Output: msgid - Specifies the message ID.
644 * Return: Return code from CP IUCV call.
645 * (-EINVAL) - Buffer address is NULL.
646*/
647int iucv_send (u16 pathid,
648 u32 * msgid,
649 u32 trgcls,
650 u32 srccls, u32 msgtag, int flags1, void *buffer, ulong buflen);
651
652/*
653 * Name: iucv_send_array
654 * Purpose: This function transmits data to another application.
655 * The contents of buffer is the address of the array of
656 * addresses and lengths of discontiguous buffers that hold
657 * the message text. This is a one-way message and the
658 * receiver will not reply to the message.
659 * Input: pathid - Path identification number.
660 * trgcls - Specifies target class.
661 * srccls - Specifies the source message class.
662 * msgtag - Specifies a tag to be associated witht the message.
663 * flags1 - Option for path.
664 * IPPRTY- specifies if you want to send priority message.
665 * buffer - Address of array of send buffers.
666 * buflen - Total length of send buffers.
667 * Output: msgid - Specifies the message ID.
668 * Return: Return code from CP IUCV call.
669 * (-EINVAL) - Buffer address is NULL.
670*/
671int iucv_send_array (u16 pathid,
672 u32 * msgid,
673 u32 trgcls,
674 u32 srccls,
675 u32 msgtag,
676 int flags1, iucv_array_t * buffer, ulong buflen);
677
678/*
679 * Name: iucv_send_prmmsg
680 * Purpose: This function transmits data to another application.
681 * Prmmsg specifies that the 8-bytes of data are to be moved
682 * into the parameter list. This is a one-way message and the
683 * receiver will not reply to the message.
684 * Input: pathid - Path identification number.
685 * trgcls - Specifies target class.
686 * srccls - Specifies the source message class.
687 * msgtag - Specifies a tag to be associated with the message.
688 * flags1 - Option for path.
689 * IPPRTY- 0x20 specifies if you want to send priority message.
690 * prmmsg - 8-bytes of data to be placed into parameter list.
691 * Output: msgid - Specifies the message ID.
692 * Return: Return code from CP IUCV call.
693*/
694int iucv_send_prmmsg (u16 pathid,
695 u32 * msgid,
696 u32 trgcls,
697 u32 srccls, u32 msgtag, int flags1, uchar prmmsg[8]);
698
699/*
700 * Name: iucv_send2way
701 * Purpose: This function transmits data to another application.
702 * Data to be transmitted is in a buffer. The receiver
703 * of the send is expected to reply to the message and
704 * a buffer is provided into which IUCV moves the reply
705 * to this message.
706 * Input: pathid - Path identification number.
707 * trgcls - Specifies target class.
708 * srccls - Specifies the source message class.
709 * msgtag - Specifies a tag associated with the message.
710 * flags1 - Option for path.
711 * IPPRTY- 0x20 Specifies if you want to send priority message.
712 * buffer - Address of send buffer.
713 * buflen - Length of send buffer.
714 * ansbuf - Address of buffer into which IUCV moves the reply of
715 * this message.
716 * anslen - Length of the reply buffer.
717 * Output: msgid - Specifies the message ID.
718 * Return: Return code from CP IUCV call.
719 * (-EINVAL) - Buffer or ansbuf address is NULL.
720*/
721int iucv_send2way (u16 pathid,
722 u32 * msgid,
723 u32 trgcls,
724 u32 srccls,
725 u32 msgtag,
726 int flags1,
727 void *buffer, ulong buflen, void *ansbuf, ulong anslen);
728
729/*
730 * Name: iucv_send2way_array
731 * Purpose: This function transmits data to another application.
732 * The contents of buffer is the address of the array of
733 * addresses and lengths of discontiguous buffers that hold
734 * the message text. The receiver of the send is expected to
735 * reply to the message and a buffer is provided into which
736 * IUCV moves the reply to this message.
737 * Input: pathid - Path identification number.
738 * trgcls - Specifies target class.
739 * srccls - Specifies the source message class.
740 * msgtag - Specifies a tag to be associated with the message.
741 * flags1 - Option for path.
742 * IPPRTY- 0x20 Specifies if you want to send priority message.
743 * buffer - Address of array of send buffers.
744 * buflen - Total length of send buffers.
745 * ansbuf - Address of array of buffers into which IUCV moves the reply
746 * of this message.
747 * anslen - Total length of the reply buffers.
748 * Output: msgid - Specifies the message ID.
749 * Return: Return code from CP IUCV call.
750 * (-EINVAL) - Buffer address is NULL.
751*/
752int iucv_send2way_array (u16 pathid,
753 u32 * msgid,
754 u32 trgcls,
755 u32 srccls,
756 u32 msgtag,
757 int flags1,
758 iucv_array_t * buffer,
759 ulong buflen, iucv_array_t * ansbuf, ulong anslen);
760
761/*
762 * Name: iucv_send2way_prmmsg
763 * Purpose: This function transmits data to another application.
764 * Prmmsg specifies that the 8-bytes of data are to be moved
765 * into the parameter list. This is a two-way message and the
766 * receiver of the message is expected to reply. A buffer
767 * is provided into which IUCV moves the reply to this
768 * message.
769 * Input: pathid - Path identification number.
770 * trgcls - Specifies target class.
771 * srccls - Specifies the source message class.
772 * msgtag - Specifies a tag to be associated with the message.
773 * flags1 - Option for path.
774 * IPPRTY- 0x20 Specifies if you want to send priority message.
775 * prmmsg - 8-bytes of data to be placed in parameter list.
776 * ansbuf - Address of buffer into which IUCV moves the reply of
777 * this message.
778 * anslen - Length of the reply buffer.
779 * Output: msgid - Specifies the message ID.
780 * Return: Return code from CP IUCV call.
781 * (-EINVAL) - Buffer address is NULL.
782*/
783int iucv_send2way_prmmsg (u16 pathid,
784 u32 * msgid,
785 u32 trgcls,
786 u32 srccls,
787 u32 msgtag,
788 ulong flags1,
789 uchar prmmsg[8], void *ansbuf, ulong anslen);
790
791/*
792 * Name: iucv_send2way_prmmsg_array
793 * Purpose: This function transmits data to another application.
794 * Prmmsg specifies that the 8-bytes of data are to be moved
795 * into the parameter list. This is a two-way message and the
796 * receiver of the message is expected to reply. A buffer
797 * is provided into which IUCV moves the reply to this
798 * message. The contents of ansbuf is the address of the
799 * array of addresses and lengths of discontiguous buffers
800 * that contain the reply.
801 * Input: pathid - Path identification number.
802 * trgcls - Specifies target class.
803 * srccls - Specifies the source message class.
804 * msgtag - Specifies a tag to be associated with the message.
805 * flags1 - Option for path.
806 * IPPRTY- 0x20 specifies if you want to send priority message.
807 * prmmsg - 8-bytes of data to be placed into the parameter list.
808 * ansbuf - Address of array of buffers into which IUCV moves the reply
809 * of this message.
810 * anslen - Total length of the reply buffers.
811 * Output: msgid - Specifies the message ID.
812 * Return: Return code from CP IUCV call.
813 * (-EINVAL) - Ansbuf address is NULL.
814*/
815int iucv_send2way_prmmsg_array (u16 pathid,
816 u32 * msgid,
817 u32 trgcls,
818 u32 srccls,
819 u32 msgtag,
820 int flags1,
821 uchar prmmsg[8],
822 iucv_array_t * ansbuf, ulong anslen);
823
824/*
825 * Name: iucv_setmask
826 * Purpose: This function enables or disables the following IUCV
827 * external interruptions: Nonpriority and priority message
828 * interrupts, nonpriority and priority reply interrupts.
829 * Input: SetMaskFlag - options for interrupts
830 * 0x80 - Nonpriority_MessagePendingInterruptsFlag
831 * 0x40 - Priority_MessagePendingInterruptsFlag
832 * 0x20 - Nonpriority_MessageCompletionInterruptsFlag
833 * 0x10 - Priority_MessageCompletionInterruptsFlag
834 * 0x08 - IUCVControlInterruptsFlag
835 * Output: NA
836 * Return: Return code from CP IUCV call.
837*/
838int iucv_setmask (int SetMaskFlag);
839
840/*
841 * Name: iucv_sever
842 * Purpose: This function terminates an IUCV path.
843 * Input: pathid - Path identification number.
844 * user_data - 16-bytes of user data.
845 * Output: NA
846 * Return: Return code from CP IUCV call.
847 * (-EINVAL) - Internal error, wild pointer.
848*/
849int iucv_sever (u16 pathid, uchar user_data[16]);
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 3346088f47e0..6387b483f2bf 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * IUCV network driver 2 * IUCV network driver
3 * 3 *
4 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation 4 * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) 5 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
6 * 6 *
7 * Sysfs integration and all bugs therein by Cornelia Huck 7 * Sysfs integration and all bugs therein by Cornelia Huck
@@ -58,13 +58,94 @@
58#include <asm/io.h> 58#include <asm/io.h>
59#include <asm/uaccess.h> 59#include <asm/uaccess.h>
60 60
61#include "iucv.h" 61#include <net/iucv/iucv.h>
62#include "fsm.h" 62#include "fsm.h"
63 63
64MODULE_AUTHOR 64MODULE_AUTHOR
65 ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)"); 65 ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
66MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver"); 66MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
67 67
68/**
69 * Debug Facility stuff
70 */
71#define IUCV_DBF_SETUP_NAME "iucv_setup"
72#define IUCV_DBF_SETUP_LEN 32
73#define IUCV_DBF_SETUP_PAGES 2
74#define IUCV_DBF_SETUP_NR_AREAS 1
75#define IUCV_DBF_SETUP_LEVEL 3
76
77#define IUCV_DBF_DATA_NAME "iucv_data"
78#define IUCV_DBF_DATA_LEN 128
79#define IUCV_DBF_DATA_PAGES 2
80#define IUCV_DBF_DATA_NR_AREAS 1
81#define IUCV_DBF_DATA_LEVEL 2
82
83#define IUCV_DBF_TRACE_NAME "iucv_trace"
84#define IUCV_DBF_TRACE_LEN 16
85#define IUCV_DBF_TRACE_PAGES 4
86#define IUCV_DBF_TRACE_NR_AREAS 1
87#define IUCV_DBF_TRACE_LEVEL 3
88
89#define IUCV_DBF_TEXT(name,level,text) \
90 do { \
91 debug_text_event(iucv_dbf_##name,level,text); \
92 } while (0)
93
94#define IUCV_DBF_HEX(name,level,addr,len) \
95 do { \
96 debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
97 } while (0)
98
99DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
100
101#define IUCV_DBF_TEXT_(name,level,text...) \
102 do { \
103 char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \
104 sprintf(iucv_dbf_txt_buf, text); \
105 debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \
106 put_cpu_var(iucv_dbf_txt_buf); \
107 } while (0)
108
109#define IUCV_DBF_SPRINTF(name,level,text...) \
110 do { \
111 debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
112 debug_sprintf_event(iucv_dbf_trace, level, text ); \
113 } while (0)
114
115/**
116 * some more debug stuff
117 */
118#define IUCV_HEXDUMP16(importance,header,ptr) \
119PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
120 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
121 *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
122 *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
123 *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
124 *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
125 *(((char*)ptr)+12),*(((char*)ptr)+13), \
126 *(((char*)ptr)+14),*(((char*)ptr)+15)); \
127PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
128 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
129 *(((char*)ptr)+16),*(((char*)ptr)+17), \
130 *(((char*)ptr)+18),*(((char*)ptr)+19), \
131 *(((char*)ptr)+20),*(((char*)ptr)+21), \
132 *(((char*)ptr)+22),*(((char*)ptr)+23), \
133 *(((char*)ptr)+24),*(((char*)ptr)+25), \
134 *(((char*)ptr)+26),*(((char*)ptr)+27), \
135 *(((char*)ptr)+28),*(((char*)ptr)+29), \
136 *(((char*)ptr)+30),*(((char*)ptr)+31));
137
138static inline void iucv_hex_dump(unsigned char *buf, size_t len)
139{
140 size_t i;
141
142 for (i = 0; i < len; i++) {
143 if (i && !(i % 16))
144 printk("\n");
145 printk("%02x ", *(buf + i));
146 }
147 printk("\n");
148}
68 149
69#define PRINTK_HEADER " iucv: " /* for debugging */ 150#define PRINTK_HEADER " iucv: " /* for debugging */
70 151
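Since the driver no longer includes the old drivers/s390/net/iucv.h, the DBF helper macros above are now defined locally. They sit on top of the generic s390 debug facility; a minimal sketch of how such a debug area is created and fed, assuming the <asm/debug.h> API of this era (the area name, sizes and message text below are illustrative only, mirroring the IUCV_DBF_SETUP_* values):

#include <linux/module.h>
#include <linux/errno.h>
#include <asm/debug.h>

static debug_info_t *my_dbf;	/* hypothetical debug area */

static int __init my_dbf_init(void)
{
	/* 2 pages, 1 area, 32-byte entries -- same shape as IUCV_DBF_SETUP_* */
	my_dbf = debug_register("my_setup", 2, 1, 32);
	if (!my_dbf)
		return -ENOMEM;
	debug_register_view(my_dbf, &debug_hex_ascii_view);
	debug_set_level(my_dbf, 3);
	debug_text_event(my_dbf, 3, "init ok");
	return 0;
}

static void __exit my_dbf_exit(void)
{
	debug_unregister(my_dbf);
}

The IUCV_DBF_TEXT_() macro above adds one twist to this pattern: sprintf-style formatting goes through a per-CPU 256-byte buffer (get_cpu_var/put_cpu_var) so concurrent callers do not trample each other's text.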
@@ -73,6 +154,25 @@ static struct device_driver netiucv_driver = {
73 .bus = &iucv_bus, 154 .bus = &iucv_bus,
74}; 155};
75 156
157static int netiucv_callback_connreq(struct iucv_path *,
158 u8 ipvmid[8], u8 ipuser[16]);
159static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
160static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
161static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
162static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
163static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
164static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
165
166static struct iucv_handler netiucv_handler = {
167 .path_pending = netiucv_callback_connreq,
168 .path_complete = netiucv_callback_connack,
169 .path_severed = netiucv_callback_connrej,
170 .path_quiesced = netiucv_callback_connsusp,
171 .path_resumed = netiucv_callback_connres,
172 .message_pending = netiucv_callback_rx,
173 .message_complete = netiucv_callback_txdone
174};
175
76/** 176/**
77 * Per connection profiling data 177 * Per connection profiling data
78 */ 178 */
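The handler table above is the heart of the conversion: instead of one iucv_register_program() per connection, the driver now supplies a single struct iucv_handler whose callbacks receive the struct iucv_path, with the owning connection hung off path->private. A rough sketch of that registration pattern against the rewritten base in net/iucv/iucv.h; the handler and callback bodies here are placeholders, not the driver's:

#include <linux/module.h>
#include <linux/errno.h>
#include <net/iucv/iucv.h>

static int my_path_pending(struct iucv_path *path, u8 ipvmid[8], u8 ipuser[16])
{
	/* incoming connection request; nonzero: this handler does not take it */
	return -EINVAL;
}

static void my_message_pending(struct iucv_path *path, struct iucv_message *msg)
{
	/* data has arrived on an established path */
}

static struct iucv_handler my_handler = {
	.path_pending	 = my_path_pending,
	.message_pending = my_message_pending,
	/* callbacks left NULL are simply never invoked */
};

static int __init my_init(void)
{
	return iucv_register(&my_handler, 1);	/* second arg: per-cpu flag; netiucv passes 1 */
}

static void __exit my_exit(void)
{
	iucv_unregister(&my_handler, 1);
}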
@@ -92,9 +192,8 @@ struct connection_profile {
92 * Representation of one iucv connection 192 * Representation of one iucv connection
93 */ 193 */
94struct iucv_connection { 194struct iucv_connection {
95 struct iucv_connection *next; 195 struct list_head list;
96 iucv_handle_t handle; 196 struct iucv_path *path;
97 __u16 pathid;
98 struct sk_buff *rx_buff; 197 struct sk_buff *rx_buff;
99 struct sk_buff *tx_buff; 198 struct sk_buff *tx_buff;
100 struct sk_buff_head collect_queue; 199 struct sk_buff_head collect_queue;
@@ -112,12 +211,9 @@ struct iucv_connection {
112/** 211/**
113 * Linked list of all connection structs. 212 * Linked list of all connection structs.
114 */ 213 */
115struct iucv_connection_struct { 214static struct list_head iucv_connection_list =
116 struct iucv_connection *iucv_connections; 215 LIST_HEAD_INIT(iucv_connection_list);
117 rwlock_t iucv_rwlock; 216static rwlock_t iucv_connection_rwlock = RW_LOCK_UNLOCKED;
118};
119
120static struct iucv_connection_struct iucv_conns;
121 217
122/** 218/**
123 * Representation of event-data for the 219 * Representation of event-data for the
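With the hand-rolled singly linked connection list replaced by a list_head plus rwlock, lookups become the standard list_for_each_entry() idiom, as the later hunks show. A generic sketch of that pattern (type and field names shortened for illustration):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct conn {
	struct list_head list;
	char userid[9];
};

static LIST_HEAD(conn_list);
static DEFINE_RWLOCK(conn_lock);

/* Look up a connection by user id under the read lock. The entry is
 * returned with the lock dropped, so the caller must otherwise ensure
 * it cannot disappear underneath it. */
static struct conn *conn_find(const char *userid)
{
	struct conn *cp, *found = NULL;

	read_lock_bh(&conn_lock);
	list_for_each_entry(cp, &conn_list, list)
		if (!strncmp(cp->userid, userid, 8)) {
			found = cp;
			break;
		}
	read_unlock_bh(&conn_lock);
	return found;
}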
@@ -142,11 +238,11 @@ struct netiucv_priv {
142/** 238/**
143 * Link level header for a packet. 239 * Link level header for a packet.
144 */ 240 */
145typedef struct ll_header_t { 241struct ll_header {
146 __u16 next; 242 u16 next;
147} ll_header; 243};
148 244
149#define NETIUCV_HDRLEN (sizeof(ll_header)) 245#define NETIUCV_HDRLEN (sizeof(struct ll_header))
150#define NETIUCV_BUFSIZE_MAX 32768 246#define NETIUCV_BUFSIZE_MAX 32768
151#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX 247#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
152#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN) 248#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
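NETIUCV packs several network packets into one IUCV buffer: each packet is preceded by a struct ll_header whose 16-bit next field holds the offset of the following header, and a header with next == 0 terminates the chain (netiucv_unpack_skb() below walks it on receive). A self-contained user-space sketch of that framing on a plain byte buffer; the payload strings are made up:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ll_header { uint16_t next; };	/* offset of next header, 0 = end */

int main(void)
{
	unsigned char buf[64];
	const char *pkts[] = { "first", "second" };
	uint16_t off = 0;

	/* pack: header + payload per packet, then a terminating header */
	for (int i = 0; i < 2; i++) {
		struct ll_header h;
		uint16_t len = (uint16_t)strlen(pkts[i]);

		h.next = off + sizeof(h) + len;
		memcpy(buf + off, &h, sizeof(h));
		memcpy(buf + off + sizeof(h), pkts[i], len);
		off = h.next;
	}
	struct ll_header end = { 0 };
	memcpy(buf + off, &end, sizeof(end));

	/* unpack: walk the chain until next == 0 */
	off = 0;
	for (;;) {
		struct ll_header h;

		memcpy(&h, buf + off, sizeof(h));
		if (!h.next)
			break;
		printf("packet of %u bytes\n",
		       (unsigned)(h.next - off - sizeof(h)));
		off = h.next;
	}
	return 0;
}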
@@ -158,36 +254,26 @@ typedef struct ll_header_t {
158 * Compatibility macros for busy handling 254 * Compatibility macros for busy handling
159 * of network devices. 255 * of network devices.
160 */ 256 */
161static __inline__ void netiucv_clear_busy(struct net_device *dev) 257static inline void netiucv_clear_busy(struct net_device *dev)
162{ 258{
163 clear_bit(0, &(((struct netiucv_priv *)dev->priv)->tbusy)); 259 struct netiucv_priv *priv = netdev_priv(dev);
260 clear_bit(0, &priv->tbusy);
164 netif_wake_queue(dev); 261 netif_wake_queue(dev);
165} 262}
166 263
167static __inline__ int netiucv_test_and_set_busy(struct net_device *dev) 264static inline int netiucv_test_and_set_busy(struct net_device *dev)
168{ 265{
266 struct netiucv_priv *priv = netdev_priv(dev);
169 netif_stop_queue(dev); 267 netif_stop_queue(dev);
170 return test_and_set_bit(0, &((struct netiucv_priv *)dev->priv)->tbusy); 268 return test_and_set_bit(0, &priv->tbusy);
171} 269}
172 270
173static __u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; 271static u8 iucvMagic[16] = {
174static __u8 iucvMagic[16] = {
175 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 272 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
176 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 273 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
177}; 274};
178 275
179/** 276/**
180 * This mask means the 16-byte IUCV "magic" and the origin userid must
181 * match exactly as specified in order to give connection_pending()
182 * control.
183 */
184static __u8 netiucv_mask[] = {
185 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
186 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
187 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
188};
189
190/**
191 * Convert an iucv userId to its printable 277 * Convert an iucv userId to its printable
192 * form (strip whitespace at end). 278 * form (strip whitespace at end).
193 * 279 *
@@ -195,8 +281,7 @@ static __u8 netiucv_mask[] = {
195 * 281 *
196 * @returns The printable string (static data!!) 282 * @returns The printable string (static data!!)
197 */ 283 */
198static __inline__ char * 284static inline char *netiucv_printname(char *name)
199netiucv_printname(char *name)
200{ 285{
201 static char tmp[9]; 286 static char tmp[9];
202 char *p = tmp; 287 char *p = tmp;
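iucvMagic above is the blank-padded application name presented to IUCV, and netiucv_printname() (reformatted in this hunk) strips the trailing blanks of an 8-character, blank-padded z/VM user id for display. A small stand-alone illustration of that padding and stripping convention, using ordinary ASCII strings purely for illustration:

#include <stdio.h>
#include <string.h>

/* pad a user id to 8 characters with blanks, as IUCV expects */
static void pad_userid(const char *in, char out[9])
{
	size_t len = strlen(in);

	memset(out, ' ', 8);
	memcpy(out, in, len < 8 ? len : 8);
	out[8] = '\0';
}

/* strip the trailing blanks again for printing */
static void print_userid(const char name[9])
{
	char tmp[9];
	size_t i = 8;

	memcpy(tmp, name, 9);
	while (i && tmp[i - 1] == ' ')
		tmp[--i] = '\0';
	printf("'%s'\n", tmp);
}

int main(void)
{
	char id[9];

	pad_userid("LNXUSER", id);	/* hypothetical peer user id */
	print_userid(id);		/* prints 'LNXUSER' */
	return 0;
}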
@@ -379,8 +464,7 @@ static debug_info_t *iucv_dbf_trace = NULL;
379 464
380DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf); 465DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
381 466
382static void 467static void iucv_unregister_dbf_views(void)
383iucv_unregister_dbf_views(void)
384{ 468{
385 if (iucv_dbf_setup) 469 if (iucv_dbf_setup)
386 debug_unregister(iucv_dbf_setup); 470 debug_unregister(iucv_dbf_setup);
@@ -389,8 +473,7 @@ iucv_unregister_dbf_views(void)
389 if (iucv_dbf_trace) 473 if (iucv_dbf_trace)
390 debug_unregister(iucv_dbf_trace); 474 debug_unregister(iucv_dbf_trace);
391} 475}
392static int 476static int iucv_register_dbf_views(void)
393iucv_register_dbf_views(void)
394{ 477{
395 iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME, 478 iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
396 IUCV_DBF_SETUP_PAGES, 479 IUCV_DBF_SETUP_PAGES,
@@ -422,125 +505,111 @@ iucv_register_dbf_views(void)
422 return 0; 505 return 0;
423} 506}
424 507
425/** 508/*
426 * Callback-wrappers, called from lowlevel iucv layer. 509 * Callback-wrappers, called from lowlevel iucv layer.
427 *****************************************************************************/ 510 */
428 511
429static void 512static void netiucv_callback_rx(struct iucv_path *path,
430netiucv_callback_rx(iucv_MessagePending *eib, void *pgm_data) 513 struct iucv_message *msg)
431{ 514{
432 struct iucv_connection *conn = (struct iucv_connection *)pgm_data; 515 struct iucv_connection *conn = path->private;
433 struct iucv_event ev; 516 struct iucv_event ev;
434 517
435 ev.conn = conn; 518 ev.conn = conn;
436 ev.data = (void *)eib; 519 ev.data = msg;
437
438 fsm_event(conn->fsm, CONN_EVENT_RX, &ev); 520 fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
439} 521}
440 522
441static void 523static void netiucv_callback_txdone(struct iucv_path *path,
442netiucv_callback_txdone(iucv_MessageComplete *eib, void *pgm_data) 524 struct iucv_message *msg)
443{ 525{
444 struct iucv_connection *conn = (struct iucv_connection *)pgm_data; 526 struct iucv_connection *conn = path->private;
445 struct iucv_event ev; 527 struct iucv_event ev;
446 528
447 ev.conn = conn; 529 ev.conn = conn;
448 ev.data = (void *)eib; 530 ev.data = msg;
449 fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev); 531 fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
450} 532}
451 533
452static void 534static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
453netiucv_callback_connack(iucv_ConnectionComplete *eib, void *pgm_data)
454{ 535{
455 struct iucv_connection *conn = (struct iucv_connection *)pgm_data; 536 struct iucv_connection *conn = path->private;
456 struct iucv_event ev;
457 537
458 ev.conn = conn; 538 fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
459 ev.data = (void *)eib;
460 fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, &ev);
461} 539}
462 540
463static void 541static int netiucv_callback_connreq(struct iucv_path *path,
464netiucv_callback_connreq(iucv_ConnectionPending *eib, void *pgm_data) 542 u8 ipvmid[8], u8 ipuser[16])
465{ 543{
466 struct iucv_connection *conn = (struct iucv_connection *)pgm_data; 544 struct iucv_connection *conn = path->private;
467 struct iucv_event ev; 545 struct iucv_event ev;
546 int rc;
468 547
469 ev.conn = conn; 548 if (memcmp(iucvMagic, ipuser, sizeof(ipuser)))
470 ev.data = (void *)eib; 549 /* ipuser must match iucvMagic. */
471 fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev); 550 return -EINVAL;
551 rc = -EINVAL;
552 read_lock_bh(&iucv_connection_rwlock);
553 list_for_each_entry(conn, &iucv_connection_list, list) {
554 if (strncmp(ipvmid, conn->userid, 8))
555 continue;
556 /* Found a matching connection for this path. */
557 conn->path = path;
558 ev.conn = conn;
559 ev.data = path;
560 fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
561 rc = 0;
562 }
563 read_unlock_bh(&iucv_connection_rwlock);
564 return rc;
472} 565}
473 566
474static void 567static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
475netiucv_callback_connrej(iucv_ConnectionSevered *eib, void *pgm_data)
476{ 568{
477 struct iucv_connection *conn = (struct iucv_connection *)pgm_data; 569 struct iucv_connection *conn = path->private;
478 struct iucv_event ev;
479 570
480 ev.conn = conn; 571 fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
481 ev.data = (void *)eib;
482 fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, &ev);
483} 572}
484 573
485static void 574static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
486netiucv_callback_connsusp(iucv_ConnectionQuiesced *eib, void *pgm_data)
487{ 575{
488 struct iucv_connection *conn = (struct iucv_connection *)pgm_data; 576 struct iucv_connection *conn = path->private;
489 struct iucv_event ev;
490 577
491 ev.conn = conn; 578 fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
492 ev.data = (void *)eib;
493 fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, &ev);
494} 579}
495 580
496static void 581static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
497netiucv_callback_connres(iucv_ConnectionResumed *eib, void *pgm_data)
498{ 582{
499 struct iucv_connection *conn = (struct iucv_connection *)pgm_data; 583 struct iucv_connection *conn = path->private;
500 struct iucv_event ev;
501 584
502 ev.conn = conn; 585 fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
503 ev.data = (void *)eib; 586}
504 fsm_event(conn->fsm, CONN_EVENT_CONN_RES, &ev);
505}
506
507static iucv_interrupt_ops_t netiucv_ops = {
508 .ConnectionPending = netiucv_callback_connreq,
509 .ConnectionComplete = netiucv_callback_connack,
510 .ConnectionSevered = netiucv_callback_connrej,
511 .ConnectionQuiesced = netiucv_callback_connsusp,
512 .ConnectionResumed = netiucv_callback_connres,
513 .MessagePending = netiucv_callback_rx,
514 .MessageComplete = netiucv_callback_txdone
515};
516 587
517/** 588/**
518 * Dummy NOP action for all statemachines 589 * Dummy NOP action for all statemachines
519 */ 590 */
520static void 591static void fsm_action_nop(fsm_instance *fi, int event, void *arg)
521fsm_action_nop(fsm_instance *fi, int event, void *arg)
522{ 592{
523} 593}
524 594
525/** 595/*
526 * Actions of the connection statemachine 596 * Actions of the connection statemachine
527 *****************************************************************************/ 597 */
528 598
529/** 599/**
530 * Helper function for conn_action_rx() 600 * netiucv_unpack_skb
531 * Unpack a just received skb and hand it over to 601 * @conn: The connection where this skb has been received.
532 * upper layers. 602 * @pskb: The received skb.
533 * 603 *
534 * @param conn The connection where this skb has been received. 604 * Unpack a just received skb and hand it over to upper layers.
535 * @param pskb The received skb. 605 * Helper function for conn_action_rx.
536 */ 606 */
537//static __inline__ void 607static void netiucv_unpack_skb(struct iucv_connection *conn,
538static void 608 struct sk_buff *pskb)
539netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb)
540{ 609{
541 struct net_device *dev = conn->netdev; 610 struct net_device *dev = conn->netdev;
542 struct netiucv_priv *privptr = dev->priv; 611 struct netiucv_priv *privptr = netdev_priv(dev);
543 __u16 offset = 0; 612 u16 offset = 0;
544 613
545 skb_put(pskb, NETIUCV_HDRLEN); 614 skb_put(pskb, NETIUCV_HDRLEN);
546 pskb->dev = dev; 615 pskb->dev = dev;
@@ -549,7 +618,7 @@ netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb)
549 618
550 while (1) { 619 while (1) {
551 struct sk_buff *skb; 620 struct sk_buff *skb;
552 ll_header *header = (ll_header *)pskb->data; 621 struct ll_header *header = (struct ll_header *) pskb->data;
553 622
554 if (!header->next) 623 if (!header->next)
555 break; 624 break;
@@ -595,40 +664,37 @@ netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb)
595 } 664 }
596} 665}
597 666
598static void 667static void conn_action_rx(fsm_instance *fi, int event, void *arg)
599conn_action_rx(fsm_instance *fi, int event, void *arg)
600{ 668{
601 struct iucv_event *ev = (struct iucv_event *)arg; 669 struct iucv_event *ev = arg;
602 struct iucv_connection *conn = ev->conn; 670 struct iucv_connection *conn = ev->conn;
603 iucv_MessagePending *eib = (iucv_MessagePending *)ev->data; 671 struct iucv_message *msg = ev->data;
604 struct netiucv_priv *privptr =(struct netiucv_priv *)conn->netdev->priv; 672 struct netiucv_priv *privptr = netdev_priv(conn->netdev);
605
606 __u32 msglen = eib->ln1msg2.ipbfln1f;
607 int rc; 673 int rc;
608 674
609 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 675 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
610 676
611 if (!conn->netdev) { 677 if (!conn->netdev) {
612 /* FRITZ: How to tell iucv LL to drop the msg? */ 678 iucv_message_reject(conn->path, msg);
613 PRINT_WARN("Received data for unlinked connection\n"); 679 PRINT_WARN("Received data for unlinked connection\n");
614 IUCV_DBF_TEXT(data, 2, 680 IUCV_DBF_TEXT(data, 2,
615 "Received data for unlinked connection\n"); 681 "Received data for unlinked connection\n");
616 return; 682 return;
617 } 683 }
618 if (msglen > conn->max_buffsize) { 684 if (msg->length > conn->max_buffsize) {
619 /* FRITZ: How to tell iucv LL to drop the msg? */ 685 iucv_message_reject(conn->path, msg);
620 privptr->stats.rx_dropped++; 686 privptr->stats.rx_dropped++;
621 PRINT_WARN("msglen %d > max_buffsize %d\n", 687 PRINT_WARN("msglen %d > max_buffsize %d\n",
622 msglen, conn->max_buffsize); 688 msg->length, conn->max_buffsize);
623 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n", 689 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
624 msglen, conn->max_buffsize); 690 msg->length, conn->max_buffsize);
625 return; 691 return;
626 } 692 }
627 conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head; 693 conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head;
628 conn->rx_buff->len = 0; 694 conn->rx_buff->len = 0;
629 rc = iucv_receive(conn->pathid, eib->ipmsgid, eib->iptrgcls, 695 rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
630 conn->rx_buff->data, msglen, NULL, NULL, NULL); 696 msg->length, NULL);
631 if (rc || msglen < 5) { 697 if (rc || msg->length < 5) {
632 privptr->stats.rx_errors++; 698 privptr->stats.rx_errors++;
633 PRINT_WARN("iucv_receive returned %08x\n", rc); 699 PRINT_WARN("iucv_receive returned %08x\n", rc);
634 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc); 700 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
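The receive path above now rejects messages it cannot take (unlinked connection, message larger than the buffer) with iucv_message_reject() and pulls acceptable ones into the preallocated rx_buff with iucv_message_receive(). A condensed sketch of that pattern with a plain buffer instead of the driver's sk_buff handling; buf and buflen are illustrative:

#include <net/iucv/iucv.h>

/* called from a message_pending callback */
static int my_receive(struct iucv_path *path, struct iucv_message *msg,
		      void *buf, size_t buflen)
{
	int rc;

	if (msg->length > buflen)
		return iucv_message_reject(path, msg);

	rc = iucv_message_receive(path, msg, 0, buf, msg->length, NULL);
	if (rc)
		return rc;	/* the driver just counts this as an rx error */
	/* ... hand buf (msg->length bytes) to the upper layer ... */
	return 0;
}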
@@ -637,26 +703,26 @@ conn_action_rx(fsm_instance *fi, int event, void *arg)
637 netiucv_unpack_skb(conn, conn->rx_buff); 703 netiucv_unpack_skb(conn, conn->rx_buff);
638} 704}
639 705
640static void 706static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
641conn_action_txdone(fsm_instance *fi, int event, void *arg)
642{ 707{
643 struct iucv_event *ev = (struct iucv_event *)arg; 708 struct iucv_event *ev = arg;
644 struct iucv_connection *conn = ev->conn; 709 struct iucv_connection *conn = ev->conn;
645 iucv_MessageComplete *eib = (iucv_MessageComplete *)ev->data; 710 struct iucv_message *msg = ev->data;
711 struct iucv_message txmsg;
646 struct netiucv_priv *privptr = NULL; 712 struct netiucv_priv *privptr = NULL;
647 /* Shut up, gcc! skb is always below 2G. */ 713 u32 single_flag = msg->tag;
648 __u32 single_flag = eib->ipmsgtag; 714 u32 txbytes = 0;
649 __u32 txbytes = 0; 715 u32 txpackets = 0;
650 __u32 txpackets = 0; 716 u32 stat_maxcq = 0;
651 __u32 stat_maxcq = 0;
652 struct sk_buff *skb; 717 struct sk_buff *skb;
653 unsigned long saveflags; 718 unsigned long saveflags;
654 ll_header header; 719 struct ll_header header;
720 int rc;
655 721
656 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 722 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
657 723
658 if (conn && conn->netdev && conn->netdev->priv) 724 if (conn && conn->netdev)
659 privptr = (struct netiucv_priv *)conn->netdev->priv; 725 privptr = netdev_priv(conn->netdev);
660 conn->prof.tx_pending--; 726 conn->prof.tx_pending--;
661 if (single_flag) { 727 if (single_flag) {
662 if ((skb = skb_dequeue(&conn->commit_queue))) { 728 if ((skb = skb_dequeue(&conn->commit_queue))) {
@@ -688,56 +754,55 @@ conn_action_txdone(fsm_instance *fi, int event, void *arg)
688 conn->prof.maxmulti = conn->collect_len; 754 conn->prof.maxmulti = conn->collect_len;
689 conn->collect_len = 0; 755 conn->collect_len = 0;
690 spin_unlock_irqrestore(&conn->collect_lock, saveflags); 756 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
691 if (conn->tx_buff->len) { 757 if (conn->tx_buff->len == 0) {
692 int rc; 758 fsm_newstate(fi, CONN_STATE_IDLE);
693 759 return;
694 header.next = 0; 760 }
695 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
696 NETIUCV_HDRLEN);
697 761
698 conn->prof.send_stamp = xtime; 762 header.next = 0;
699 rc = iucv_send(conn->pathid, NULL, 0, 0, 0, 0, 763 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
764 conn->prof.send_stamp = xtime;
765 txmsg.class = 0;
766 txmsg.tag = 0;
767 rc = iucv_message_send(conn->path, &txmsg, 0, 0,
700 conn->tx_buff->data, conn->tx_buff->len); 768 conn->tx_buff->data, conn->tx_buff->len);
701 conn->prof.doios_multi++; 769 conn->prof.doios_multi++;
702 conn->prof.txlen += conn->tx_buff->len; 770 conn->prof.txlen += conn->tx_buff->len;
703 conn->prof.tx_pending++; 771 conn->prof.tx_pending++;
704 if (conn->prof.tx_pending > conn->prof.tx_max_pending) 772 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
705 conn->prof.tx_max_pending = conn->prof.tx_pending; 773 conn->prof.tx_max_pending = conn->prof.tx_pending;
706 if (rc) { 774 if (rc) {
707 conn->prof.tx_pending--; 775 conn->prof.tx_pending--;
708 fsm_newstate(fi, CONN_STATE_IDLE);
709 if (privptr)
710 privptr->stats.tx_errors += txpackets;
711 PRINT_WARN("iucv_send returned %08x\n", rc);
712 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
713 } else {
714 if (privptr) {
715 privptr->stats.tx_packets += txpackets;
716 privptr->stats.tx_bytes += txbytes;
717 }
718 if (stat_maxcq > conn->prof.maxcqueue)
719 conn->prof.maxcqueue = stat_maxcq;
720 }
721 } else
722 fsm_newstate(fi, CONN_STATE_IDLE); 776 fsm_newstate(fi, CONN_STATE_IDLE);
777 if (privptr)
778 privptr->stats.tx_errors += txpackets;
779 PRINT_WARN("iucv_send returned %08x\n", rc);
780 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
781 } else {
782 if (privptr) {
783 privptr->stats.tx_packets += txpackets;
784 privptr->stats.tx_bytes += txbytes;
785 }
786 if (stat_maxcq > conn->prof.maxcqueue)
787 conn->prof.maxcqueue = stat_maxcq;
788 }
723} 789}
724 790
725static void 791static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
726conn_action_connaccept(fsm_instance *fi, int event, void *arg)
727{ 792{
728 struct iucv_event *ev = (struct iucv_event *)arg; 793 struct iucv_event *ev = arg;
729 struct iucv_connection *conn = ev->conn; 794 struct iucv_connection *conn = ev->conn;
730 iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data; 795 struct iucv_path *path = ev->data;
731 struct net_device *netdev = conn->netdev; 796 struct net_device *netdev = conn->netdev;
732 struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv; 797 struct netiucv_priv *privptr = netdev_priv(netdev);
733 int rc; 798 int rc;
734 __u16 msglimit;
735 __u8 udata[16];
736 799
737 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 800 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
738 801
739 rc = iucv_accept(eib->ippathid, NETIUCV_QUEUELEN_DEFAULT, udata, 0, 802 conn->path = path;
740 conn->handle, conn, NULL, &msglimit); 803 path->msglim = NETIUCV_QUEUELEN_DEFAULT;
804 path->flags = 0;
805 rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
741 if (rc) { 806 if (rc) {
742 PRINT_WARN("%s: IUCV accept failed with error %d\n", 807 PRINT_WARN("%s: IUCV accept failed with error %d\n",
743 netdev->name, rc); 808 netdev->name, rc);
@@ -745,183 +810,126 @@ conn_action_connaccept(fsm_instance *fi, int event, void *arg)
745 return; 810 return;
746 } 811 }
747 fsm_newstate(fi, CONN_STATE_IDLE); 812 fsm_newstate(fi, CONN_STATE_IDLE);
748 conn->pathid = eib->ippathid; 813 netdev->tx_queue_len = conn->path->msglim;
749 netdev->tx_queue_len = msglimit;
750 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); 814 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
751} 815}
752 816
753static void 817static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
754conn_action_connreject(fsm_instance *fi, int event, void *arg)
755{ 818{
756 struct iucv_event *ev = (struct iucv_event *)arg; 819 struct iucv_event *ev = arg;
757 struct iucv_connection *conn = ev->conn; 820 struct iucv_path *path = ev->data;
758 struct net_device *netdev = conn->netdev;
759 iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data;
760 __u8 udata[16];
761 821
762 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 822 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
763 823 iucv_path_sever(path, NULL);
764 iucv_sever(eib->ippathid, udata);
765 if (eib->ippathid != conn->pathid) {
766 PRINT_INFO("%s: IR Connection Pending; "
767 "pathid %d does not match original pathid %d\n",
768 netdev->name, eib->ippathid, conn->pathid);
769 IUCV_DBF_TEXT_(data, 2,
770 "connreject: IR pathid %d, conn. pathid %d\n",
771 eib->ippathid, conn->pathid);
772 iucv_sever(conn->pathid, udata);
773 }
774} 824}
775 825
776static void 826static void conn_action_connack(fsm_instance *fi, int event, void *arg)
777conn_action_connack(fsm_instance *fi, int event, void *arg)
778{ 827{
779 struct iucv_event *ev = (struct iucv_event *)arg; 828 struct iucv_connection *conn = arg;
780 struct iucv_connection *conn = ev->conn;
781 iucv_ConnectionComplete *eib = (iucv_ConnectionComplete *)ev->data;
782 struct net_device *netdev = conn->netdev; 829 struct net_device *netdev = conn->netdev;
783 struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv; 830 struct netiucv_priv *privptr = netdev_priv(netdev);
784 831
785 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 832 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
786
787 fsm_deltimer(&conn->timer); 833 fsm_deltimer(&conn->timer);
788 fsm_newstate(fi, CONN_STATE_IDLE); 834 fsm_newstate(fi, CONN_STATE_IDLE);
789 if (eib->ippathid != conn->pathid) { 835 netdev->tx_queue_len = conn->path->msglim;
790 PRINT_INFO("%s: IR Connection Complete; "
791 "pathid %d does not match original pathid %d\n",
792 netdev->name, eib->ippathid, conn->pathid);
793 IUCV_DBF_TEXT_(data, 2,
794 "connack: IR pathid %d, conn. pathid %d\n",
795 eib->ippathid, conn->pathid);
796 conn->pathid = eib->ippathid;
797 }
798 netdev->tx_queue_len = eib->ipmsglim;
799 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); 836 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
800} 837}
801 838
802static void 839static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
803conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
804{ 840{
805 struct iucv_connection *conn = (struct iucv_connection *)arg; 841 struct iucv_connection *conn = arg;
806 __u8 udata[16];
807 842
808 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 843 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
809
810 fsm_deltimer(&conn->timer); 844 fsm_deltimer(&conn->timer);
811 iucv_sever(conn->pathid, udata); 845 iucv_path_sever(conn->path, NULL);
812 fsm_newstate(fi, CONN_STATE_STARTWAIT); 846 fsm_newstate(fi, CONN_STATE_STARTWAIT);
813} 847}
814 848
815static void 849static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
816conn_action_connsever(fsm_instance *fi, int event, void *arg)
817{ 850{
818 struct iucv_event *ev = (struct iucv_event *)arg; 851 struct iucv_connection *conn = arg;
819 struct iucv_connection *conn = ev->conn;
820 struct net_device *netdev = conn->netdev; 852 struct net_device *netdev = conn->netdev;
821 struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv; 853 struct netiucv_priv *privptr = netdev_priv(netdev);
822 __u8 udata[16];
823 854
824 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 855 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
825 856
826 fsm_deltimer(&conn->timer); 857 fsm_deltimer(&conn->timer);
827 iucv_sever(conn->pathid, udata); 858 iucv_path_sever(conn->path, NULL);
828 PRINT_INFO("%s: Remote dropped connection\n", netdev->name); 859 PRINT_INFO("%s: Remote dropped connection\n", netdev->name);
829 IUCV_DBF_TEXT(data, 2, 860 IUCV_DBF_TEXT(data, 2,
830 "conn_action_connsever: Remote dropped connection\n"); 861 "conn_action_connsever: Remote dropped connection\n");
831 fsm_newstate(fi, CONN_STATE_STARTWAIT); 862 fsm_newstate(fi, CONN_STATE_STARTWAIT);
832 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); 863 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
833} 864}
834 865
835static void 866static void conn_action_start(fsm_instance *fi, int event, void *arg)
836conn_action_start(fsm_instance *fi, int event, void *arg)
837{ 867{
838 struct iucv_event *ev = (struct iucv_event *)arg; 868 struct iucv_connection *conn = arg;
839 struct iucv_connection *conn = ev->conn;
840 __u16 msglimit;
841 int rc; 869 int rc;
842 870
843 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 871 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
844 872
845 if (!conn->handle) { 873 fsm_newstate(fi, CONN_STATE_STARTWAIT);
846 IUCV_DBF_TEXT(trace, 5, "calling iucv_register_program\n");
847 conn->handle =
848 iucv_register_program(iucvMagic, conn->userid,
849 netiucv_mask,
850 &netiucv_ops, conn);
851 fsm_newstate(fi, CONN_STATE_STARTWAIT);
852 if (!conn->handle) {
853 fsm_newstate(fi, CONN_STATE_REGERR);
854 conn->handle = NULL;
855 IUCV_DBF_TEXT(setup, 2,
856 "NULL from iucv_register_program\n");
857 return;
858 }
859
860 PRINT_DEBUG("%s('%s'): registered successfully\n",
861 conn->netdev->name, conn->userid);
862 }
863
864 PRINT_DEBUG("%s('%s'): connecting ...\n", 874 PRINT_DEBUG("%s('%s'): connecting ...\n",
865 conn->netdev->name, conn->userid); 875 conn->netdev->name, conn->userid);
866 876
867 /* We must set the state before calling iucv_connect because the callback 877 /*
868 * handler could be called at any point after the connection request is 878 * We must set the state before calling iucv_connect because the
869 * sent */ 879 * callback handler could be called at any point after the connection
880 * request is sent
881 */
870 882
871 fsm_newstate(fi, CONN_STATE_SETUPWAIT); 883 fsm_newstate(fi, CONN_STATE_SETUPWAIT);
872 rc = iucv_connect(&(conn->pathid), NETIUCV_QUEUELEN_DEFAULT, iucvMagic, 884 conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
873 conn->userid, iucv_host, 0, NULL, &msglimit, 885 rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
874 conn->handle, conn); 886 NULL, iucvMagic, conn);
875 switch (rc) { 887 switch (rc) {
876 case 0: 888 case 0:
877 conn->netdev->tx_queue_len = msglimit; 889 conn->netdev->tx_queue_len = conn->path->msglim;
878 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC, 890 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
879 CONN_EVENT_TIMER, conn); 891 CONN_EVENT_TIMER, conn);
880 return; 892 return;
881 case 11: 893 case 11:
882 PRINT_INFO("%s: User %s is currently not available.\n", 894 PRINT_INFO("%s: User %s is currently not available.\n",
883 conn->netdev->name, 895 conn->netdev->name,
884 netiucv_printname(conn->userid)); 896 netiucv_printname(conn->userid));
885 fsm_newstate(fi, CONN_STATE_STARTWAIT); 897 fsm_newstate(fi, CONN_STATE_STARTWAIT);
886 return; 898 break;
887 case 12: 899 case 12:
888 PRINT_INFO("%s: User %s is currently not ready.\n", 900 PRINT_INFO("%s: User %s is currently not ready.\n",
889 conn->netdev->name, 901 conn->netdev->name,
890 netiucv_printname(conn->userid)); 902 netiucv_printname(conn->userid));
891 fsm_newstate(fi, CONN_STATE_STARTWAIT); 903 fsm_newstate(fi, CONN_STATE_STARTWAIT);
892 return; 904 break;
893 case 13: 905 case 13:
894 PRINT_WARN("%s: Too many IUCV connections.\n", 906 PRINT_WARN("%s: Too many IUCV connections.\n",
895 conn->netdev->name); 907 conn->netdev->name);
896 fsm_newstate(fi, CONN_STATE_CONNERR); 908 fsm_newstate(fi, CONN_STATE_CONNERR);
897 break; 909 break;
898 case 14: 910 case 14:
899 PRINT_WARN( 911 PRINT_WARN("%s: User %s has too many IUCV connections.\n",
900 "%s: User %s has too many IUCV connections.\n", 912 conn->netdev->name,
901 conn->netdev->name, 913 netiucv_printname(conn->userid));
902 netiucv_printname(conn->userid)); 914 fsm_newstate(fi, CONN_STATE_CONNERR);
903 fsm_newstate(fi, CONN_STATE_CONNERR); 915 break;
904 break; 916 case 15:
905 case 15: 917 PRINT_WARN("%s: No IUCV authorization in CP directory.\n",
906 PRINT_WARN( 918 conn->netdev->name);
907 "%s: No IUCV authorization in CP directory.\n", 919 fsm_newstate(fi, CONN_STATE_CONNERR);
908 conn->netdev->name); 920 break;
909 fsm_newstate(fi, CONN_STATE_CONNERR); 921 default:
910 break; 922 PRINT_WARN("%s: iucv_connect returned error %d\n",
911 default: 923 conn->netdev->name, rc);
912 PRINT_WARN("%s: iucv_connect returned error %d\n", 924 fsm_newstate(fi, CONN_STATE_CONNERR);
913 conn->netdev->name, rc); 925 break;
914 fsm_newstate(fi, CONN_STATE_CONNERR);
915 break;
916 } 926 }
917 IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc); 927 IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
918 IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n"); 928 kfree(conn->path);
919 iucv_unregister_program(conn->handle); 929 conn->path = NULL;
920 conn->handle = NULL;
921} 930}
922 931
923static void 932static void netiucv_purge_skb_queue(struct sk_buff_head *q)
924netiucv_purge_skb_queue(struct sk_buff_head *q)
925{ 933{
926 struct sk_buff *skb; 934 struct sk_buff *skb;
927 935
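conn_action_start() above shows the new connect sequence: allocate a path with the desired message limit, then iucv_path_connect() with the handler, the peer user id and the 16-byte user data; the message limit negotiated by z/VM comes back in path->msglim. A stripped-down sketch of that sequence followed by a single send (peer id, tag values and error handling are illustrative, not the driver's):

#include <linux/errno.h>
#include <linux/slab.h>
#include <net/iucv/iucv.h>

static struct iucv_handler my_handler;	/* callbacks registered elsewhere */

static int my_connect_and_send(u8 userid[8], u8 userdata[16],
			       void *data, size_t len, void *private)
{
	struct iucv_path *path;
	struct iucv_message msg;
	int rc;

	path = iucv_path_alloc(16, 0, GFP_KERNEL);	/* msglim 16, no flags */
	if (!path)
		return -ENOMEM;

	rc = iucv_path_connect(path, &my_handler, userid, NULL,
			       userdata, private);
	if (rc) {
		kfree(path);
		return rc;	/* e.g. 11..15 are CP/IUCV return codes */
	}

	msg.class = 0;
	msg.tag = 0;
	return iucv_message_send(path, &msg, 0, 0, data, len);
}

Sending straight after the connect call is a simplification: the driver itself only transmits once the path_complete callback has confirmed the connection, which is what the 5-second timer and CONN_STATE_SETUPWAIT above are guarding.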
@@ -931,36 +939,34 @@ netiucv_purge_skb_queue(struct sk_buff_head *q)
931 } 939 }
932} 940}
933 941
934static void 942static void conn_action_stop(fsm_instance *fi, int event, void *arg)
935conn_action_stop(fsm_instance *fi, int event, void *arg)
936{ 943{
937 struct iucv_event *ev = (struct iucv_event *)arg; 944 struct iucv_event *ev = arg;
938 struct iucv_connection *conn = ev->conn; 945 struct iucv_connection *conn = ev->conn;
939 struct net_device *netdev = conn->netdev; 946 struct net_device *netdev = conn->netdev;
940 struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv; 947 struct netiucv_priv *privptr = netdev_priv(netdev);
941 948
942 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 949 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
943 950
944 fsm_deltimer(&conn->timer); 951 fsm_deltimer(&conn->timer);
945 fsm_newstate(fi, CONN_STATE_STOPPED); 952 fsm_newstate(fi, CONN_STATE_STOPPED);
946 netiucv_purge_skb_queue(&conn->collect_queue); 953 netiucv_purge_skb_queue(&conn->collect_queue);
947 if (conn->handle) 954 if (conn->path) {
948 IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n"); 955 IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
949 iucv_unregister_program(conn->handle); 956 iucv_path_sever(conn->path, iucvMagic);
950 conn->handle = NULL; 957 kfree(conn->path);
958 conn->path = NULL;
959 }
951 netiucv_purge_skb_queue(&conn->commit_queue); 960 netiucv_purge_skb_queue(&conn->commit_queue);
952 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); 961 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
953} 962}
954 963
955static void 964static void conn_action_inval(fsm_instance *fi, int event, void *arg)
956conn_action_inval(fsm_instance *fi, int event, void *arg)
957{ 965{
958 struct iucv_event *ev = (struct iucv_event *)arg; 966 struct iucv_connection *conn = arg;
959 struct iucv_connection *conn = ev->conn;
960 struct net_device *netdev = conn->netdev; 967 struct net_device *netdev = conn->netdev;
961 968
962 PRINT_WARN("%s: Cannot connect without username\n", 969 PRINT_WARN("%s: Cannot connect without username\n", netdev->name);
963 netdev->name);
964 IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n"); 970 IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n");
965} 971}
966 972
@@ -999,29 +1005,27 @@ static const fsm_node conn_fsm[] = {
999static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node); 1005static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
1000 1006
1001 1007
1002/** 1008/*
1003 * Actions for interface - statemachine. 1009 * Actions for interface - statemachine.
1004 *****************************************************************************/ 1010 */
1005 1011
1006/** 1012/**
1007 * Startup connection by sending CONN_EVENT_START to it. 1013 * dev_action_start
1014 * @fi: An instance of an interface statemachine.
1015 * @event: The event, just happened.
1016 * @arg: Generic pointer, casted from struct net_device * upon call.
1008 * 1017 *
1009 * @param fi An instance of an interface statemachine. 1018 * Startup connection by sending CONN_EVENT_START to it.
1010 * @param event The event, just happened.
1011 * @param arg Generic pointer, casted from struct net_device * upon call.
1012 */ 1019 */
1013static void 1020static void dev_action_start(fsm_instance *fi, int event, void *arg)
1014dev_action_start(fsm_instance *fi, int event, void *arg)
1015{ 1021{
1016 struct net_device *dev = (struct net_device *)arg; 1022 struct net_device *dev = arg;
1017 struct netiucv_priv *privptr = dev->priv; 1023 struct netiucv_priv *privptr = netdev_priv(dev);
1018 struct iucv_event ev;
1019 1024
1020 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1025 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1021 1026
1022 ev.conn = privptr->conn;
1023 fsm_newstate(fi, DEV_STATE_STARTWAIT); 1027 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1024 fsm_event(privptr->conn->fsm, CONN_EVENT_START, &ev); 1028 fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1025} 1029}
1026 1030
1027/** 1031/**
@@ -1034,8 +1038,8 @@ dev_action_start(fsm_instance *fi, int event, void *arg)
1034static void 1038static void
1035dev_action_stop(fsm_instance *fi, int event, void *arg) 1039dev_action_stop(fsm_instance *fi, int event, void *arg)
1036{ 1040{
1037 struct net_device *dev = (struct net_device *)arg; 1041 struct net_device *dev = arg;
1038 struct netiucv_priv *privptr = dev->priv; 1042 struct netiucv_priv *privptr = netdev_priv(dev);
1039 struct iucv_event ev; 1043 struct iucv_event ev;
1040 1044
1041 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1045 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
@@ -1057,8 +1061,8 @@ dev_action_stop(fsm_instance *fi, int event, void *arg)
1057static void 1061static void
1058dev_action_connup(fsm_instance *fi, int event, void *arg) 1062dev_action_connup(fsm_instance *fi, int event, void *arg)
1059{ 1063{
1060 struct net_device *dev = (struct net_device *)arg; 1064 struct net_device *dev = arg;
1061 struct netiucv_priv *privptr = dev->priv; 1065 struct netiucv_priv *privptr = netdev_priv(dev);
1062 1066
1063 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1067 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1064 1068
@@ -1131,11 +1135,13 @@ static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1131 * 1135 *
1132 * @return 0 on success, -ERRNO on failure. (Never fails.) 1136 * @return 0 on success, -ERRNO on failure. (Never fails.)
1133 */ 1137 */
1134static int 1138static int netiucv_transmit_skb(struct iucv_connection *conn,
1135netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { 1139 struct sk_buff *skb)
1140{
1141 struct iucv_message msg;
1136 unsigned long saveflags; 1142 unsigned long saveflags;
1137 ll_header header; 1143 struct ll_header header;
1138 int rc = 0; 1144 int rc;
1139 1145
1140 if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) { 1146 if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1141 int l = skb->len + NETIUCV_HDRLEN; 1147 int l = skb->len + NETIUCV_HDRLEN;
@@ -1145,11 +1151,12 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
1145 (conn->max_buffsize - NETIUCV_HDRLEN)) { 1151 (conn->max_buffsize - NETIUCV_HDRLEN)) {
1146 rc = -EBUSY; 1152 rc = -EBUSY;
1147 IUCV_DBF_TEXT(data, 2, 1153 IUCV_DBF_TEXT(data, 2,
1148 "EBUSY from netiucv_transmit_skb\n"); 1154 "EBUSY from netiucv_transmit_skb\n");
1149 } else { 1155 } else {
1150 atomic_inc(&skb->users); 1156 atomic_inc(&skb->users);
1151 skb_queue_tail(&conn->collect_queue, skb); 1157 skb_queue_tail(&conn->collect_queue, skb);
1152 conn->collect_len += l; 1158 conn->collect_len += l;
1159 rc = 0;
1153 } 1160 }
1154 spin_unlock_irqrestore(&conn->collect_lock, saveflags); 1161 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1155 } else { 1162 } else {
@@ -1188,9 +1195,10 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
1188 fsm_newstate(conn->fsm, CONN_STATE_TX); 1195 fsm_newstate(conn->fsm, CONN_STATE_TX);
1189 conn->prof.send_stamp = xtime; 1196 conn->prof.send_stamp = xtime;
1190 1197
1191 rc = iucv_send(conn->pathid, NULL, 0, 0, 1 /* single_flag */, 1198 msg.tag = 1;
1192 0, nskb->data, nskb->len); 1199 msg.class = 0;
1193 /* Shut up, gcc! nskb is always below 2G. */ 1200 rc = iucv_message_send(conn->path, &msg, 0, 0,
1201 nskb->data, nskb->len);
1194 conn->prof.doios_single++; 1202 conn->prof.doios_single++;
1195 conn->prof.txlen += skb->len; 1203 conn->prof.txlen += skb->len;
1196 conn->prof.tx_pending++; 1204 conn->prof.tx_pending++;
@@ -1200,7 +1208,7 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
1200 struct netiucv_priv *privptr; 1208 struct netiucv_priv *privptr;
1201 fsm_newstate(conn->fsm, CONN_STATE_IDLE); 1209 fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1202 conn->prof.tx_pending--; 1210 conn->prof.tx_pending--;
1203 privptr = (struct netiucv_priv *)conn->netdev->priv; 1211 privptr = netdev_priv(conn->netdev);
1204 if (privptr) 1212 if (privptr)
1205 privptr->stats.tx_errors++; 1213 privptr->stats.tx_errors++;
1206 if (copied) 1214 if (copied)
@@ -1226,9 +1234,9 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
1226 return rc; 1234 return rc;
1227} 1235}
1228 1236
1229/** 1237/*
1230 * Interface API for upper network layers 1238 * Interface API for upper network layers
1231 *****************************************************************************/ 1239 */
1232 1240
1233/** 1241/**
1234 * Open an interface. 1242 * Open an interface.
@@ -1238,9 +1246,11 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
1238 * 1246 *
1239 * @return 0 on success, -ERRNO on failure. (Never fails.) 1247 * @return 0 on success, -ERRNO on failure. (Never fails.)
1240 */ 1248 */
1241static int 1249static int netiucv_open(struct net_device *dev)
1242netiucv_open(struct net_device *dev) { 1250{
1243 fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_START,dev); 1251 struct netiucv_priv *priv = netdev_priv(dev);
1252
1253 fsm_event(priv->fsm, DEV_EVENT_START, dev);
1244 return 0; 1254 return 0;
1245} 1255}
1246 1256
@@ -1252,9 +1262,11 @@ netiucv_open(struct net_device *dev) {
1252 * 1262 *
1253 * @return 0 on success, -ERRNO on failure. (Never fails.) 1263 * @return 0 on success, -ERRNO on failure. (Never fails.)
1254 */ 1264 */
1255static int 1265static int netiucv_close(struct net_device *dev)
1256netiucv_close(struct net_device *dev) { 1266{
1257 fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_STOP, dev); 1267 struct netiucv_priv *priv = netdev_priv(dev);
1268
1269 fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1258 return 0; 1270 return 0;
1259} 1271}
1260 1272
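Throughout the patch, casts of dev->priv are replaced by netdev_priv(dev), which returns the private area co-allocated with the net_device. A brief sketch of that allocation and access pattern as it existed around this kernel version; the structure, interface name template and defaults are placeholders:

#include <linux/netdevice.h>

struct my_priv {
	struct net_device_stats stats;
	/* ... driver state ... */
};

static void my_setup(struct net_device *dev)
{
	dev->mtu = 1500;	/* illustrative default */
}

static struct net_device *my_alloc(void)
{
	struct net_device *dev;
	struct my_priv *priv;

	/* private area is allocated together with the net_device */
	dev = alloc_netdev(sizeof(struct my_priv), "myif%d", my_setup);
	if (!dev)
		return NULL;
	priv = netdev_priv(dev);	/* valid for the lifetime of dev */
	priv->stats.rx_packets = 0;	/* initialize driver state here */
	return dev;
}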
@@ -1271,8 +1283,8 @@ netiucv_close(struct net_device *dev) {
1271 */ 1283 */
1272static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) 1284static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1273{ 1285{
1274 int rc = 0; 1286 struct netiucv_priv *privptr = netdev_priv(dev);
1275 struct netiucv_priv *privptr = dev->priv; 1287 int rc;
1276 1288
1277 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1289 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1278 /** 1290 /**
@@ -1312,40 +1324,41 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1312 return -EBUSY; 1324 return -EBUSY;
1313 } 1325 }
1314 dev->trans_start = jiffies; 1326 dev->trans_start = jiffies;
1315 if (netiucv_transmit_skb(privptr->conn, skb)) 1327 rc = netiucv_transmit_skb(privptr->conn, skb) != 0;
1316 rc = 1;
1317 netiucv_clear_busy(dev); 1328 netiucv_clear_busy(dev);
1318 return rc; 1329 return rc;
1319} 1330}
1320 1331
1321/** 1332/**
1322 * Returns interface statistics of a device. 1333 * netiucv_stats
1334 * @dev: Pointer to interface struct.
1323 * 1335 *
1324 * @param dev Pointer to interface struct. 1336 * Returns interface statistics of a device.
1325 * 1337 *
1326 * @return Pointer to stats struct of this interface. 1338 * Returns pointer to stats struct of this interface.
1327 */ 1339 */
1328static struct net_device_stats * 1340static struct net_device_stats *netiucv_stats (struct net_device * dev)
1329netiucv_stats (struct net_device * dev)
1330{ 1341{
1342 struct netiucv_priv *priv = netdev_priv(dev);
1343
1331 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1344 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1332 return &((struct netiucv_priv *)dev->priv)->stats; 1345 return &priv->stats;
1333} 1346}
1334 1347
1335/** 1348/**
1336 * Sets MTU of an interface. 1349 * netiucv_change_mtu
1350 * @dev: Pointer to interface struct.
1351 * @new_mtu: The new MTU to use for this interface.
1337 * 1352 *
1338 * @param dev Pointer to interface struct. 1353 * Sets MTU of an interface.
1339 * @param new_mtu The new MTU to use for this interface.
1340 * 1354 *
1341 * @return 0 on success, -EINVAL if MTU is out of valid range. 1355 * Returns 0 on success, -EINVAL if MTU is out of valid range.
1342 * (valid range is 576 .. NETIUCV_MTU_MAX). 1356 * (valid range is 576 .. NETIUCV_MTU_MAX).
1343 */ 1357 */
1344static int 1358static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1345netiucv_change_mtu (struct net_device * dev, int new_mtu)
1346{ 1359{
1347 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1360 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1348 if ((new_mtu < 576) || (new_mtu > NETIUCV_MTU_MAX)) { 1361 if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1349 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n"); 1362 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1350 return -EINVAL; 1363 return -EINVAL;
1351 } 1364 }
@@ -1353,12 +1366,12 @@ netiucv_change_mtu (struct net_device * dev, int new_mtu)
1353 return 0; 1366 return 0;
1354} 1367}
1355 1368
1356/** 1369/*
1357 * attributes in sysfs 1370 * attributes in sysfs
1358 *****************************************************************************/ 1371 */
1359 1372
1360static ssize_t 1373static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1361user_show (struct device *dev, struct device_attribute *attr, char *buf) 1374 char *buf)
1362{ 1375{
1363 struct netiucv_priv *priv = dev->driver_data; 1376 struct netiucv_priv *priv = dev->driver_data;
1364 1377
@@ -1366,8 +1379,8 @@ user_show (struct device *dev, struct device_attribute *attr, char *buf)
1366 return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid)); 1379 return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
1367} 1380}
1368 1381
1369static ssize_t 1382static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1370user_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1383 const char *buf, size_t count)
1371{ 1384{
1372 struct netiucv_priv *priv = dev->driver_data; 1385 struct netiucv_priv *priv = dev->driver_data;
1373 struct net_device *ndev = priv->conn->netdev; 1386 struct net_device *ndev = priv->conn->netdev;
@@ -1375,80 +1388,70 @@ user_write (struct device *dev, struct device_attribute *attr, const char *buf,
1375 char *tmp; 1388 char *tmp;
1376 char username[9]; 1389 char username[9];
1377 int i; 1390 int i;
1378 struct iucv_connection **clist = &iucv_conns.iucv_connections; 1391 struct iucv_connection *cp;
1379 unsigned long flags;
1380 1392
1381 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1393 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1382 if (count>9) { 1394 if (count > 9) {
1383 PRINT_WARN("netiucv: username too long (%d)!\n", (int)count); 1395 PRINT_WARN("netiucv: username too long (%d)!\n", (int) count);
1384 IUCV_DBF_TEXT_(setup, 2, 1396 IUCV_DBF_TEXT_(setup, 2,
1385 "%d is length of username\n", (int)count); 1397 "%d is length of username\n", (int) count);
1386 return -EINVAL; 1398 return -EINVAL;
1387 } 1399 }
1388 1400
1389 tmp = strsep((char **) &buf, "\n"); 1401 tmp = strsep((char **) &buf, "\n");
1390 for (i=0, p=tmp; i<8 && *p; i++, p++) { 1402 for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
1391 if (isalnum(*p) || (*p == '$')) 1403 if (isalnum(*p) || (*p == '$')) {
1392 username[i]= toupper(*p); 1404 username[i]= toupper(*p);
1393 else if (*p == '\n') { 1405 continue;
1406 }
1407 if (*p == '\n') {
1394 /* trailing lf, grr */ 1408 /* trailing lf, grr */
1395 break; 1409 break;
1396 } else {
1397 PRINT_WARN("netiucv: Invalid char %c in username!\n",
1398 *p);
1399 IUCV_DBF_TEXT_(setup, 2,
1400 "username: invalid character %c\n",
1401 *p);
1402 return -EINVAL;
1403 } 1410 }
1411 PRINT_WARN("netiucv: Invalid char %c in username!\n", *p);
1412 IUCV_DBF_TEXT_(setup, 2,
1413 "username: invalid character %c\n", *p);
1414 return -EINVAL;
1404 } 1415 }
1405 while (i<8) 1416 while (i < 8)
1406 username[i++] = ' '; 1417 username[i++] = ' ';
1407 username[8] = '\0'; 1418 username[8] = '\0';
1408 1419
1409 if (memcmp(username, priv->conn->userid, 9)) { 1420 if (memcmp(username, priv->conn->userid, 9) &&
1410 /* username changed */ 1421 (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1411 if (ndev->flags & (IFF_UP | IFF_RUNNING)) { 1422 /* username changed while the interface is active. */
1412 PRINT_WARN( 1423 PRINT_WARN("netiucv: device %s active, connected to %s\n",
1413 "netiucv: device %s active, connected to %s\n", 1424 dev->bus_id, priv->conn->userid);
1414 dev->bus_id, priv->conn->userid); 1425 PRINT_WARN("netiucv: user cannot be updated\n");
1415 PRINT_WARN("netiucv: user cannot be updated\n"); 1426 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1416 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n"); 1427 return -EBUSY;
1417 return -EBUSY; 1428 }
1429 read_lock_bh(&iucv_connection_rwlock);
1430 list_for_each_entry(cp, &iucv_connection_list, list) {
1431 if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
1432 read_unlock_bh(&iucv_connection_rwlock);
1433 PRINT_WARN("netiucv: Connection to %s already "
1434 "exists\n", username);
1435 return -EEXIST;
1418 } 1436 }
1419 } 1437 }
1420 read_lock_irqsave(&iucv_conns.iucv_rwlock, flags); 1438 read_unlock_bh(&iucv_connection_rwlock);
1421 while (*clist) {
1422 if (!strncmp(username, (*clist)->userid, 9) ||
1423 ((*clist)->netdev != ndev))
1424 break;
1425 clist = &((*clist)->next);
1426 }
1427 read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
1428 if (*clist) {
1429 PRINT_WARN("netiucv: Connection to %s already exists\n",
1430 username);
1431 return -EEXIST;
1432 }
1433 memcpy(priv->conn->userid, username, 9); 1439 memcpy(priv->conn->userid, username, 9);
1434
1435 return count; 1440 return count;
1436
1437} 1441}
1438 1442
1439static DEVICE_ATTR(user, 0644, user_show, user_write); 1443static DEVICE_ATTR(user, 0644, user_show, user_write);
1440 1444
1441static ssize_t 1445static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1442buffer_show (struct device *dev, struct device_attribute *attr, char *buf) 1446 char *buf)
1443{ 1447{ struct netiucv_priv *priv = dev->driver_data;
1444 struct netiucv_priv *priv = dev->driver_data;
1445 1448
1446 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1449 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1447 return sprintf(buf, "%d\n", priv->conn->max_buffsize); 1450 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1448} 1451}
1449 1452
1450static ssize_t 1453static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1451buffer_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1454 const char *buf, size_t count)
1452{ 1455{
1453 struct netiucv_priv *priv = dev->driver_data; 1456 struct netiucv_priv *priv = dev->driver_data;
1454 struct net_device *ndev = priv->conn->netdev; 1457 struct net_device *ndev = priv->conn->netdev;
@@ -1502,8 +1505,8 @@ buffer_write (struct device *dev, struct device_attribute *attr, const char *buf
1502 1505
1503static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write); 1506static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1504 1507
1505static ssize_t 1508static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1506dev_fsm_show (struct device *dev, struct device_attribute *attr, char *buf) 1509 char *buf)
1507{ 1510{
1508 struct netiucv_priv *priv = dev->driver_data; 1511 struct netiucv_priv *priv = dev->driver_data;
1509 1512
@@ -1513,8 +1516,8 @@ dev_fsm_show (struct device *dev, struct device_attribute *attr, char *buf)
1513 1516
1514static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL); 1517static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1515 1518
1516static ssize_t 1519static ssize_t conn_fsm_show (struct device *dev,
1517conn_fsm_show (struct device *dev, struct device_attribute *attr, char *buf) 1520 struct device_attribute *attr, char *buf)
1518{ 1521{
1519 struct netiucv_priv *priv = dev->driver_data; 1522 struct netiucv_priv *priv = dev->driver_data;
1520 1523
@@ -1524,8 +1527,8 @@ conn_fsm_show (struct device *dev, struct device_attribute *attr, char *buf)
1524 1527
1525static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL); 1528static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1526 1529
1527static ssize_t 1530static ssize_t maxmulti_show (struct device *dev,
1528maxmulti_show (struct device *dev, struct device_attribute *attr, char *buf) 1531 struct device_attribute *attr, char *buf)
1529{ 1532{
1530 struct netiucv_priv *priv = dev->driver_data; 1533 struct netiucv_priv *priv = dev->driver_data;
1531 1534
@@ -1533,8 +1536,9 @@ maxmulti_show (struct device *dev, struct device_attribute *attr, char *buf)
1533 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti); 1536 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1534} 1537}
1535 1538
1536static ssize_t 1539static ssize_t maxmulti_write (struct device *dev,
1537maxmulti_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1540 struct device_attribute *attr,
1541 const char *buf, size_t count)
1538{ 1542{
1539 struct netiucv_priv *priv = dev->driver_data; 1543 struct netiucv_priv *priv = dev->driver_data;
1540 1544
@@ -1545,8 +1549,8 @@ maxmulti_write (struct device *dev, struct device_attribute *attr, const char *b
1545 1549
1546static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write); 1550static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1547 1551
1548static ssize_t 1552static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1549maxcq_show (struct device *dev, struct device_attribute *attr, char *buf) 1553 char *buf)
1550{ 1554{
1551 struct netiucv_priv *priv = dev->driver_data; 1555 struct netiucv_priv *priv = dev->driver_data;
1552 1556
@@ -1554,8 +1558,8 @@ maxcq_show (struct device *dev, struct device_attribute *attr, char *buf)
1554 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue); 1558 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1555} 1559}
1556 1560
1557static ssize_t 1561static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1558maxcq_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1562 const char *buf, size_t count)
1559{ 1563{
1560 struct netiucv_priv *priv = dev->driver_data; 1564 struct netiucv_priv *priv = dev->driver_data;
1561 1565
@@ -1566,8 +1570,8 @@ maxcq_write (struct device *dev, struct device_attribute *attr, const char *buf,
1566 1570
1567static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write); 1571static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1568 1572
1569static ssize_t 1573static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1570sdoio_show (struct device *dev, struct device_attribute *attr, char *buf) 1574 char *buf)
1571{ 1575{
1572 struct netiucv_priv *priv = dev->driver_data; 1576 struct netiucv_priv *priv = dev->driver_data;
1573 1577
@@ -1575,8 +1579,8 @@ sdoio_show (struct device *dev, struct device_attribute *attr, char *buf)
1575 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single); 1579 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1576} 1580}
1577 1581
1578static ssize_t 1582static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1579sdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1583 const char *buf, size_t count)
1580{ 1584{
1581 struct netiucv_priv *priv = dev->driver_data; 1585 struct netiucv_priv *priv = dev->driver_data;
1582 1586
@@ -1587,8 +1591,8 @@ sdoio_write (struct device *dev, struct device_attribute *attr, const char *buf,
1587 1591
1588static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write); 1592static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1589 1593
1590static ssize_t 1594static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1591mdoio_show (struct device *dev, struct device_attribute *attr, char *buf) 1595 char *buf)
1592{ 1596{
1593 struct netiucv_priv *priv = dev->driver_data; 1597 struct netiucv_priv *priv = dev->driver_data;
1594 1598
@@ -1596,8 +1600,8 @@ mdoio_show (struct device *dev, struct device_attribute *attr, char *buf)
1596 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi); 1600 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1597} 1601}
1598 1602
1599static ssize_t 1603static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1600mdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1604 const char *buf, size_t count)
1601{ 1605{
1602 struct netiucv_priv *priv = dev->driver_data; 1606 struct netiucv_priv *priv = dev->driver_data;
1603 1607
@@ -1608,8 +1612,8 @@ mdoio_write (struct device *dev, struct device_attribute *attr, const char *buf,
1608 1612
1609static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write); 1613static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1610 1614
1611static ssize_t 1615static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1612txlen_show (struct device *dev, struct device_attribute *attr, char *buf) 1616 char *buf)
1613{ 1617{
1614 struct netiucv_priv *priv = dev->driver_data; 1618 struct netiucv_priv *priv = dev->driver_data;
1615 1619
@@ -1617,8 +1621,8 @@ txlen_show (struct device *dev, struct device_attribute *attr, char *buf)
1617 return sprintf(buf, "%ld\n", priv->conn->prof.txlen); 1621 return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1618} 1622}
1619 1623
1620static ssize_t 1624static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1621txlen_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1625 const char *buf, size_t count)
1622{ 1626{
1623 struct netiucv_priv *priv = dev->driver_data; 1627 struct netiucv_priv *priv = dev->driver_data;
1624 1628
@@ -1629,8 +1633,8 @@ txlen_write (struct device *dev, struct device_attribute *attr, const char *buf,
1629 1633
1630static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write); 1634static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1631 1635
1632static ssize_t 1636static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1633txtime_show (struct device *dev, struct device_attribute *attr, char *buf) 1637 char *buf)
1634{ 1638{
1635 struct netiucv_priv *priv = dev->driver_data; 1639 struct netiucv_priv *priv = dev->driver_data;
1636 1640
@@ -1638,8 +1642,8 @@ txtime_show (struct device *dev, struct device_attribute *attr, char *buf)
1638 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time); 1642 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1639} 1643}
1640 1644
1641static ssize_t 1645static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1642txtime_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1646 const char *buf, size_t count)
1643{ 1647{
1644 struct netiucv_priv *priv = dev->driver_data; 1648 struct netiucv_priv *priv = dev->driver_data;
1645 1649
@@ -1650,8 +1654,8 @@ txtime_write (struct device *dev, struct device_attribute *attr, const char *buf
1650 1654
1651static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write); 1655static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1652 1656
1653static ssize_t 1657static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1654txpend_show (struct device *dev, struct device_attribute *attr, char *buf) 1658 char *buf)
1655{ 1659{
1656 struct netiucv_priv *priv = dev->driver_data; 1660 struct netiucv_priv *priv = dev->driver_data;
1657 1661
@@ -1659,8 +1663,8 @@ txpend_show (struct device *dev, struct device_attribute *attr, char *buf)
1659 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending); 1663 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1660} 1664}
1661 1665
1662static ssize_t 1666static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1663txpend_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1667 const char *buf, size_t count)
1664{ 1668{
1665 struct netiucv_priv *priv = dev->driver_data; 1669 struct netiucv_priv *priv = dev->driver_data;
1666 1670
@@ -1671,8 +1675,8 @@ txpend_write (struct device *dev, struct device_attribute *attr, const char *buf
1671 1675
1672static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write); 1676static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1673 1677
1674static ssize_t 1678static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1675txmpnd_show (struct device *dev, struct device_attribute *attr, char *buf) 1679 char *buf)
1676{ 1680{
1677 struct netiucv_priv *priv = dev->driver_data; 1681 struct netiucv_priv *priv = dev->driver_data;
1678 1682
@@ -1680,8 +1684,8 @@ txmpnd_show (struct device *dev, struct device_attribute *attr, char *buf)
1680 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending); 1684 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1681} 1685}
1682 1686
1683static ssize_t 1687static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1684txmpnd_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1688 const char *buf, size_t count)
1685{ 1689{
1686 struct netiucv_priv *priv = dev->driver_data; 1690 struct netiucv_priv *priv = dev->driver_data;
1687 1691
@@ -1721,8 +1725,7 @@ static struct attribute_group netiucv_stat_attr_group = {
1721 .attrs = netiucv_stat_attrs, 1725 .attrs = netiucv_stat_attrs,
1722}; 1726};
1723 1727
1724static inline int 1728static inline int netiucv_add_files(struct device *dev)
1725netiucv_add_files(struct device *dev)
1726{ 1729{
1727 int ret; 1730 int ret;
1728 1731
@@ -1736,18 +1739,16 @@ netiucv_add_files(struct device *dev)
1736 return ret; 1739 return ret;
1737} 1740}
1738 1741
1739static inline void 1742static inline void netiucv_remove_files(struct device *dev)
1740netiucv_remove_files(struct device *dev)
1741{ 1743{
1742 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1744 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1743 sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group); 1745 sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
1744 sysfs_remove_group(&dev->kobj, &netiucv_attr_group); 1746 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1745} 1747}
1746 1748
1747static int 1749static int netiucv_register_device(struct net_device *ndev)
1748netiucv_register_device(struct net_device *ndev)
1749{ 1750{
1750 struct netiucv_priv *priv = ndev->priv; 1751 struct netiucv_priv *priv = netdev_priv(ndev);
1751 struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL); 1752 struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1752 int ret; 1753 int ret;
1753 1754
@@ -1786,8 +1787,7 @@ out_unreg:
1786 return ret; 1787 return ret;
1787} 1788}
1788 1789
1789static void 1790static void netiucv_unregister_device(struct device *dev)
1790netiucv_unregister_device(struct device *dev)
1791{ 1791{
1792 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1792 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1793 netiucv_remove_files(dev); 1793 netiucv_remove_files(dev);
@@ -1798,107 +1798,89 @@ netiucv_unregister_device(struct device *dev)
1798 * Allocate and initialize a new connection structure. 1798 * Allocate and initialize a new connection structure.
1799 * Add it to the list of netiucv connections; 1799 * Add it to the list of netiucv connections;
1800 */ 1800 */
1801static struct iucv_connection * 1801static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1802netiucv_new_connection(struct net_device *dev, char *username) 1802 char *username)
1803{ 1803{
1804 unsigned long flags; 1804 struct iucv_connection *conn;
1805 struct iucv_connection **clist = &iucv_conns.iucv_connections;
1806 struct iucv_connection *conn =
1807 kzalloc(sizeof(struct iucv_connection), GFP_KERNEL);
1808
1809 if (conn) {
1810 skb_queue_head_init(&conn->collect_queue);
1811 skb_queue_head_init(&conn->commit_queue);
1812 spin_lock_init(&conn->collect_lock);
1813 conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1814 conn->netdev = dev;
1815
1816 conn->rx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT,
1817 GFP_KERNEL | GFP_DMA);
1818 if (!conn->rx_buff) {
1819 kfree(conn);
1820 return NULL;
1821 }
1822 conn->tx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT,
1823 GFP_KERNEL | GFP_DMA);
1824 if (!conn->tx_buff) {
1825 kfree_skb(conn->rx_buff);
1826 kfree(conn);
1827 return NULL;
1828 }
1829 conn->fsm = init_fsm("netiucvconn", conn_state_names,
1830 conn_event_names, NR_CONN_STATES,
1831 NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1832 GFP_KERNEL);
1833 if (!conn->fsm) {
1834 kfree_skb(conn->tx_buff);
1835 kfree_skb(conn->rx_buff);
1836 kfree(conn);
1837 return NULL;
1838 }
1839 fsm_settimer(conn->fsm, &conn->timer);
1840 fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1841
1842 if (username) {
1843 memcpy(conn->userid, username, 9);
1844 fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1845 }
1846 1805
1847 write_lock_irqsave(&iucv_conns.iucv_rwlock, flags); 1806 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1848 conn->next = *clist; 1807 if (!conn)
1849 *clist = conn; 1808 goto out;
1850 write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); 1809 skb_queue_head_init(&conn->collect_queue);
1810 skb_queue_head_init(&conn->commit_queue);
1811 spin_lock_init(&conn->collect_lock);
1812 conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1813 conn->netdev = dev;
1814
1815 conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1816 if (!conn->rx_buff)
1817 goto out_conn;
1818 conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1819 if (!conn->tx_buff)
1820 goto out_rx;
1821 conn->fsm = init_fsm("netiucvconn", conn_state_names,
1822 conn_event_names, NR_CONN_STATES,
1823 NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1824 GFP_KERNEL);
1825 if (!conn->fsm)
1826 goto out_tx;
1827
1828 fsm_settimer(conn->fsm, &conn->timer);
1829 fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1830
1831 if (username) {
1832 memcpy(conn->userid, username, 9);
1833 fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1851 } 1834 }
1835
1836 write_lock_bh(&iucv_connection_rwlock);
1837 list_add_tail(&conn->list, &iucv_connection_list);
1838 write_unlock_bh(&iucv_connection_rwlock);
1852 return conn; 1839 return conn;
1840
1841out_tx:
1842 kfree_skb(conn->tx_buff);
1843out_rx:
1844 kfree_skb(conn->rx_buff);
1845out_conn:
1846 kfree(conn);
1847out:
1848 return NULL;
1853} 1849}
1854 1850
1855/** 1851/**
1856 * Release a connection structure and remove it from the 1852 * Release a connection structure and remove it from the
1857 * list of netiucv connections. 1853 * list of netiucv connections.
1858 */ 1854 */
1859static void 1855static void netiucv_remove_connection(struct iucv_connection *conn)
1860netiucv_remove_connection(struct iucv_connection *conn)
1861{ 1856{
1862 struct iucv_connection **clist = &iucv_conns.iucv_connections;
1863 unsigned long flags;
1864
1865 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1857 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1866 if (conn == NULL) 1858 write_lock_bh(&iucv_connection_rwlock);
1867 return; 1859 list_del_init(&conn->list);
1868 write_lock_irqsave(&iucv_conns.iucv_rwlock, flags); 1860 write_unlock_bh(&iucv_connection_rwlock);
1869 while (*clist) { 1861 if (conn->path) {
1870 if (*clist == conn) { 1862 iucv_path_sever(conn->path, iucvMagic);
1871 *clist = conn->next; 1863 kfree(conn->path);
1872 write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); 1864 conn->path = NULL;
1873 if (conn->handle) {
1874 iucv_unregister_program(conn->handle);
1875 conn->handle = NULL;
1876 }
1877 fsm_deltimer(&conn->timer);
1878 kfree_fsm(conn->fsm);
1879 kfree_skb(conn->rx_buff);
1880 kfree_skb(conn->tx_buff);
1881 return;
1882 }
1883 clist = &((*clist)->next);
1884 } 1865 }
1885 write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); 1866 fsm_deltimer(&conn->timer);
1867 kfree_fsm(conn->fsm);
1868 kfree_skb(conn->rx_buff);
1869 kfree_skb(conn->tx_buff);
1886} 1870}
1887 1871
1888/** 1872/**
1889 * Release everything of a net device. 1873 * Release everything of a net device.
1890 */ 1874 */
1891static void 1875static void netiucv_free_netdevice(struct net_device *dev)
1892netiucv_free_netdevice(struct net_device *dev)
1893{ 1876{
1894 struct netiucv_priv *privptr; 1877 struct netiucv_priv *privptr = netdev_priv(dev);
1895 1878
1896 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1879 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1897 1880
1898 if (!dev) 1881 if (!dev)
1899 return; 1882 return;
1900 1883
1901 privptr = (struct netiucv_priv *)dev->priv;
1902 if (privptr) { 1884 if (privptr) {
1903 if (privptr->conn) 1885 if (privptr->conn)
1904 netiucv_remove_connection(privptr->conn); 1886 netiucv_remove_connection(privptr->conn);
@@ -1913,11 +1895,8 @@ netiucv_free_netdevice(struct net_device *dev)
1913/** 1895/**
1914 * Initialize a net device. (Called from kernel in alloc_netdev()) 1896 * Initialize a net device. (Called from kernel in alloc_netdev())
1915 */ 1897 */
1916static void 1898static void netiucv_setup_netdevice(struct net_device *dev)
1917netiucv_setup_netdevice(struct net_device *dev)
1918{ 1899{
1919 memset(dev->priv, 0, sizeof(struct netiucv_priv));
1920
1921 dev->mtu = NETIUCV_MTU_DEFAULT; 1900 dev->mtu = NETIUCV_MTU_DEFAULT;
1922 dev->hard_start_xmit = netiucv_tx; 1901 dev->hard_start_xmit = netiucv_tx;
1923 dev->open = netiucv_open; 1902 dev->open = netiucv_open;
@@ -1936,8 +1915,7 @@ netiucv_setup_netdevice(struct net_device *dev)
1936/** 1915/**
1937 * Allocate and initialize everything of a net device. 1916 * Allocate and initialize everything of a net device.
1938 */ 1917 */
1939static struct net_device * 1918static struct net_device *netiucv_init_netdevice(char *username)
1940netiucv_init_netdevice(char *username)
1941{ 1919{
1942 struct netiucv_priv *privptr; 1920 struct netiucv_priv *privptr;
1943 struct net_device *dev; 1921 struct net_device *dev;
@@ -1946,40 +1924,40 @@ netiucv_init_netdevice(char *username)
1946 netiucv_setup_netdevice); 1924 netiucv_setup_netdevice);
1947 if (!dev) 1925 if (!dev)
1948 return NULL; 1926 return NULL;
1949 if (dev_alloc_name(dev, dev->name) < 0) { 1927 if (dev_alloc_name(dev, dev->name) < 0)
1950 free_netdev(dev); 1928 goto out_netdev;
1951 return NULL;
1952 }
1953 1929
1954 privptr = (struct netiucv_priv *)dev->priv; 1930 privptr = netdev_priv(dev);
1955 privptr->fsm = init_fsm("netiucvdev", dev_state_names, 1931 privptr->fsm = init_fsm("netiucvdev", dev_state_names,
1956 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS, 1932 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
1957 dev_fsm, DEV_FSM_LEN, GFP_KERNEL); 1933 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
1958 if (!privptr->fsm) { 1934 if (!privptr->fsm)
1959 free_netdev(dev); 1935 goto out_netdev;
1960 return NULL; 1936
1961 }
1962 privptr->conn = netiucv_new_connection(dev, username); 1937 privptr->conn = netiucv_new_connection(dev, username);
1963 if (!privptr->conn) { 1938 if (!privptr->conn) {
1964 kfree_fsm(privptr->fsm);
1965 free_netdev(dev);
1966 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n"); 1939 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
1967 return NULL; 1940 goto out_fsm;
1968 } 1941 }
1969 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED); 1942 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
1970
1971 return dev; 1943 return dev;
1944
1945out_fsm:
1946 kfree_fsm(privptr->fsm);
1947out_netdev:
1948 free_netdev(dev);
1949 return NULL;
1972} 1950}
1973 1951
1974static ssize_t 1952static ssize_t conn_write(struct device_driver *drv,
1975conn_write(struct device_driver *drv, const char *buf, size_t count) 1953 const char *buf, size_t count)
1976{ 1954{
1977 char *p; 1955 const char *p;
1978 char username[9]; 1956 char username[9];
1979 int i, ret; 1957 int i, rc;
1980 struct net_device *dev; 1958 struct net_device *dev;
1981 struct iucv_connection **clist = &iucv_conns.iucv_connections; 1959 struct netiucv_priv *priv;
1982 unsigned long flags; 1960 struct iucv_connection *cp;
1983 1961
1984 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1962 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1985 if (count>9) { 1963 if (count>9) {
@@ -1988,83 +1966,82 @@ conn_write(struct device_driver *drv, const char *buf, size_t count)
1988 return -EINVAL; 1966 return -EINVAL;
1989 } 1967 }
1990 1968
1991 for (i=0, p=(char *)buf; i<8 && *p; i++, p++) { 1969 for (i = 0, p = buf; i < 8 && *p; i++, p++) {
1992 if (isalnum(*p) || (*p == '$')) 1970 if (isalnum(*p) || *p == '$') {
1993 username[i]= toupper(*p); 1971 username[i] = toupper(*p);
1994 else if (*p == '\n') { 1972 continue;
1973 }
1974 if (*p == '\n')
1995 /* trailing lf, grr */ 1975 /* trailing lf, grr */
1996 break; 1976 break;
1997 } else { 1977 PRINT_WARN("netiucv: Invalid character in username!\n");
1998 PRINT_WARN("netiucv: Invalid character in username!\n"); 1978 IUCV_DBF_TEXT_(setup, 2,
1999 IUCV_DBF_TEXT_(setup, 2, 1979 "conn_write: invalid character %c\n", *p);
2000 "conn_write: invalid character %c\n", *p); 1980 return -EINVAL;
2001 return -EINVAL;
2002 }
2003 } 1981 }
2004 while (i<8) 1982 while (i < 8)
2005 username[i++] = ' '; 1983 username[i++] = ' ';
2006 username[8] = '\0'; 1984 username[8] = '\0';
2007 1985
2008 read_lock_irqsave(&iucv_conns.iucv_rwlock, flags); 1986 read_lock_bh(&iucv_connection_rwlock);
2009 while (*clist) { 1987 list_for_each_entry(cp, &iucv_connection_list, list) {
2010 if (!strncmp(username, (*clist)->userid, 9)) 1988 if (!strncmp(username, cp->userid, 9)) {
2011 break; 1989 read_unlock_bh(&iucv_connection_rwlock);
2012 clist = &((*clist)->next); 1990 PRINT_WARN("netiucv: Connection to %s already "
2013 } 1991 "exists\n", username);
2014 read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); 1992 return -EEXIST;
2015 if (*clist) { 1993 }
2016 PRINT_WARN("netiucv: Connection to %s already exists\n",
2017 username);
2018 return -EEXIST;
2019 } 1994 }
1995 read_unlock_bh(&iucv_connection_rwlock);
1996
2020 dev = netiucv_init_netdevice(username); 1997 dev = netiucv_init_netdevice(username);
2021 if (!dev) { 1998 if (!dev) {
2022 PRINT_WARN( 1999 PRINT_WARN("netiucv: Could not allocate network device "
2023 "netiucv: Could not allocate network device structure " 2000 "structure for user '%s'\n",
2024 "for user '%s'\n", netiucv_printname(username)); 2001 netiucv_printname(username));
2025 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); 2002 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
2026 return -ENODEV; 2003 return -ENODEV;
2027 } 2004 }
2028 2005
2029 if ((ret = netiucv_register_device(dev))) { 2006 rc = netiucv_register_device(dev);
2007 if (rc) {
2030 IUCV_DBF_TEXT_(setup, 2, 2008 IUCV_DBF_TEXT_(setup, 2,
2031 "ret %d from netiucv_register_device\n", ret); 2009 "ret %d from netiucv_register_device\n", rc);
2032 goto out_free_ndev; 2010 goto out_free_ndev;
2033 } 2011 }
2034 2012
2035 /* sysfs magic */ 2013 /* sysfs magic */
2036 SET_NETDEV_DEV(dev, 2014 priv = netdev_priv(dev);
2037 (struct device*)((struct netiucv_priv*)dev->priv)->dev); 2015 SET_NETDEV_DEV(dev, priv->dev);
2038 2016
2039 if ((ret = register_netdev(dev))) { 2017 rc = register_netdev(dev);
2040 netiucv_unregister_device((struct device*) 2018 if (rc)
2041 ((struct netiucv_priv*)dev->priv)->dev); 2019 goto out_unreg;
2042 goto out_free_ndev;
2043 }
2044 2020
2045 PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username)); 2021 PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));
2046 2022
2047 return count; 2023 return count;
2048 2024
2025out_unreg:
2026 netiucv_unregister_device(priv->dev);
2049out_free_ndev: 2027out_free_ndev:
2050 PRINT_WARN("netiucv: Could not register '%s'\n", dev->name); 2028 PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
2051 IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n"); 2029 IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
2052 netiucv_free_netdevice(dev); 2030 netiucv_free_netdevice(dev);
2053 return ret; 2031 return rc;
2054} 2032}
2055 2033
2056static DRIVER_ATTR(connection, 0200, NULL, conn_write); 2034static DRIVER_ATTR(connection, 0200, NULL, conn_write);
2057 2035
2058static ssize_t 2036static ssize_t remove_write (struct device_driver *drv,
2059remove_write (struct device_driver *drv, const char *buf, size_t count) 2037 const char *buf, size_t count)
2060{ 2038{
2061 struct iucv_connection **clist = &iucv_conns.iucv_connections; 2039 struct iucv_connection *cp;
2062 unsigned long flags;
2063 struct net_device *ndev; 2040 struct net_device *ndev;
2064 struct netiucv_priv *priv; 2041 struct netiucv_priv *priv;
2065 struct device *dev; 2042 struct device *dev;
2066 char name[IFNAMSIZ]; 2043 char name[IFNAMSIZ];
2067 char *p; 2044 const char *p;
2068 int i; 2045 int i;
2069 2046
2070 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 2047 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
@@ -2072,33 +2049,27 @@ remove_write (struct device_driver *drv, const char *buf, size_t count)
2072 if (count >= IFNAMSIZ) 2049 if (count >= IFNAMSIZ)
2073 count = IFNAMSIZ - 1;; 2050 count = IFNAMSIZ - 1;;
2074 2051
2075 for (i=0, p=(char *)buf; i<count && *p; i++, p++) { 2052 for (i = 0, p = buf; i < count && *p; i++, p++) {
2076 if ((*p == '\n') || (*p == ' ')) { 2053 if (*p == '\n' || *p == ' ')
2077 /* trailing lf, grr */ 2054 /* trailing lf, grr */
2078 break; 2055 break;
2079 } else { 2056 name[i] = *p;
2080 name[i]=*p;
2081 }
2082 } 2057 }
2083 name[i] = '\0'; 2058 name[i] = '\0';
2084 2059
2085 read_lock_irqsave(&iucv_conns.iucv_rwlock, flags); 2060 read_lock_bh(&iucv_connection_rwlock);
2086 while (*clist) { 2061 list_for_each_entry(cp, &iucv_connection_list, list) {
2087 ndev = (*clist)->netdev; 2062 ndev = cp->netdev;
2088 priv = (struct netiucv_priv*)ndev->priv; 2063 priv = netdev_priv(ndev);
2089 dev = priv->dev; 2064 dev = priv->dev;
2090 2065 if (strncmp(name, ndev->name, count))
2091 if (strncmp(name, ndev->name, count)) { 2066 continue;
2092 clist = &((*clist)->next); 2067 read_unlock_bh(&iucv_connection_rwlock);
2093 continue;
2094 }
2095 read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
2096 if (ndev->flags & (IFF_UP | IFF_RUNNING)) { 2068 if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2097 PRINT_WARN( 2069 PRINT_WARN("netiucv: net device %s active with peer "
2098 "netiucv: net device %s active with peer %s\n", 2070 "%s\n", ndev->name, priv->conn->userid);
2099 ndev->name, priv->conn->userid);
2100 PRINT_WARN("netiucv: %s cannot be removed\n", 2071 PRINT_WARN("netiucv: %s cannot be removed\n",
2101 ndev->name); 2072 ndev->name);
2102 IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); 2073 IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2103 return -EBUSY; 2074 return -EBUSY;
2104 } 2075 }
@@ -2106,7 +2077,7 @@ remove_write (struct device_driver *drv, const char *buf, size_t count)
2106 netiucv_unregister_device(dev); 2077 netiucv_unregister_device(dev);
2107 return count; 2078 return count;
2108 } 2079 }
2109 read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); 2080 read_unlock_bh(&iucv_connection_rwlock);
2110 PRINT_WARN("netiucv: net device %s unknown\n", name); 2081 PRINT_WARN("netiucv: net device %s unknown\n", name);
2111 IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); 2082 IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2112 return -EINVAL; 2083 return -EINVAL;
@@ -2114,67 +2085,86 @@ remove_write (struct device_driver *drv, const char *buf, size_t count)
2114 2085
2115static DRIVER_ATTR(remove, 0200, NULL, remove_write); 2086static DRIVER_ATTR(remove, 0200, NULL, remove_write);
2116 2087
2117static void 2088static struct attribute * netiucv_drv_attrs[] = {
2118netiucv_banner(void) 2089 &driver_attr_connection.attr,
2090 &driver_attr_remove.attr,
2091 NULL,
2092};
2093
2094static struct attribute_group netiucv_drv_attr_group = {
2095 .attrs = netiucv_drv_attrs,
2096};
2097
2098static void netiucv_banner(void)
2119{ 2099{
2120 PRINT_INFO("NETIUCV driver initialized\n"); 2100 PRINT_INFO("NETIUCV driver initialized\n");
2121} 2101}
2122 2102
2123static void __exit 2103static void __exit netiucv_exit(void)
2124netiucv_exit(void)
2125{ 2104{
2105 struct iucv_connection *cp;
2106 struct net_device *ndev;
2107 struct netiucv_priv *priv;
2108 struct device *dev;
2109
2126 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 2110 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2127 while (iucv_conns.iucv_connections) { 2111 while (!list_empty(&iucv_connection_list)) {
2128 struct net_device *ndev = iucv_conns.iucv_connections->netdev; 2112 cp = list_entry(iucv_connection_list.next,
2129 struct netiucv_priv *priv = (struct netiucv_priv*)ndev->priv; 2113 struct iucv_connection, list);
2130 struct device *dev = priv->dev; 2114 list_del(&cp->list);
2115 ndev = cp->netdev;
2116 priv = netdev_priv(ndev);
2117 dev = priv->dev;
2131 2118
2132 unregister_netdev(ndev); 2119 unregister_netdev(ndev);
2133 netiucv_unregister_device(dev); 2120 netiucv_unregister_device(dev);
2134 } 2121 }
2135 2122
2136 driver_remove_file(&netiucv_driver, &driver_attr_connection); 2123 sysfs_remove_group(&netiucv_driver.kobj, &netiucv_drv_attr_group);
2137 driver_remove_file(&netiucv_driver, &driver_attr_remove);
2138 driver_unregister(&netiucv_driver); 2124 driver_unregister(&netiucv_driver);
2125 iucv_unregister(&netiucv_handler, 1);
2139 iucv_unregister_dbf_views(); 2126 iucv_unregister_dbf_views();
2140 2127
2141 PRINT_INFO("NETIUCV driver unloaded\n"); 2128 PRINT_INFO("NETIUCV driver unloaded\n");
2142 return; 2129 return;
2143} 2130}
2144 2131
2145static int __init 2132static int __init netiucv_init(void)
2146netiucv_init(void)
2147{ 2133{
2148 int ret; 2134 int rc;
2149 2135
2150 ret = iucv_register_dbf_views(); 2136 rc = iucv_register_dbf_views();
2151 if (ret) { 2137 if (rc)
2152 PRINT_WARN("netiucv_init failed, " 2138 goto out;
2153 "iucv_register_dbf_views rc = %d\n", ret); 2139 rc = iucv_register(&netiucv_handler, 1);
2154 return ret; 2140 if (rc)
2155 } 2141 goto out_dbf;
2156 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 2142 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2157 ret = driver_register(&netiucv_driver); 2143 rc = driver_register(&netiucv_driver);
2158 if (ret) { 2144 if (rc) {
2159 PRINT_ERR("NETIUCV: failed to register driver.\n"); 2145 PRINT_ERR("NETIUCV: failed to register driver.\n");
2160 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", ret); 2146 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2161 iucv_unregister_dbf_views(); 2147 goto out_iucv;
2162 return ret;
2163 } 2148 }
2164 2149
2165 /* Add entry for specifying connections. */ 2150 rc = sysfs_create_group(&netiucv_driver.kobj, &netiucv_drv_attr_group);
2166 ret = driver_create_file(&netiucv_driver, &driver_attr_connection); 2151 if (rc) {
2167 if (!ret) { 2152 PRINT_ERR("NETIUCV: failed to add driver attributes.\n");
2168 ret = driver_create_file(&netiucv_driver, &driver_attr_remove); 2153 IUCV_DBF_TEXT_(setup, 2,
2169 netiucv_banner(); 2154 "ret %d - netiucv_drv_attr_group\n", rc);
2170 rwlock_init(&iucv_conns.iucv_rwlock); 2155 goto out_driver;
2171 } else {
2172 PRINT_ERR("NETIUCV: failed to add driver attribute.\n");
2173 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_create_file\n", ret);
2174 driver_unregister(&netiucv_driver);
2175 iucv_unregister_dbf_views();
2176 } 2156 }
2177 return ret; 2157 netiucv_banner();
2158 return rc;
2159
2160out_driver:
2161 driver_unregister(&netiucv_driver);
2162out_iucv:
2163 iucv_unregister(&netiucv_handler, 1);
2164out_dbf:
2165 iucv_unregister_dbf_views();
2166out:
2167 return rc;
2178} 2168}
2179 2169
2180module_init(netiucv_init); 2170module_init(netiucv_init);
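Annotation on the netiucv connection bookkeeping above: the hand-rolled singly linked list (iucv_conns.iucv_connections walked via a clist pointer chain) is replaced by a standard struct list_head guarded by a rwlock taken with the _bh variants, as seen in conn_write(), remove_write() and netiucv_exit(). A minimal sketch of that idiom follows; it assumes the struct iucv_connection from this patch (with its .list and .userid members), and the demo_find_connection() helper plus the LIST_HEAD/DEFINE_RWLOCK initializers are added here only for illustration.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/string.h>

/* Assumed initializers; the patch defines these near the top of netiucv.c. */
static LIST_HEAD(iucv_connection_list);
static DEFINE_RWLOCK(iucv_connection_rwlock);

/* Hypothetical lookup helper, not part of the patch, showing the
 * read_lock_bh() + list_for_each_entry() pattern the new code uses. */
static struct iucv_connection *demo_find_connection(const char *userid)
{
	struct iucv_connection *cp;

	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(userid, cp->userid, 9)) {
			read_unlock_bh(&iucv_connection_rwlock);
			return cp;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);
	return NULL;
}

Writers (netiucv_new_connection, netiucv_remove_connection) take the same lock with write_lock_bh() around list_add_tail()/list_del_init(), which is what lets the old next-pointer surgery go away.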
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index b8179c27ceb6..3ccca5871fdf 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * IUCV special message driver 2 * IUCV special message driver
3 * 3 *
4 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation 4 * Copyright 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) 5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -23,10 +23,10 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/device.h> 25#include <linux/device.h>
26#include <net/iucv/iucv.h>
26#include <asm/cpcmd.h> 27#include <asm/cpcmd.h>
27#include <asm/ebcdic.h> 28#include <asm/ebcdic.h>
28 29#include "smsgiucv.h"
29#include "iucv.h"
30 30
31struct smsg_callback { 31struct smsg_callback {
32 struct list_head list; 32 struct list_head list;
@@ -39,38 +39,46 @@ MODULE_AUTHOR
39 ("(C) 2003 IBM Corporation by Martin Schwidefsky (schwidefsky@de.ibm.com)"); 39 ("(C) 2003 IBM Corporation by Martin Schwidefsky (schwidefsky@de.ibm.com)");
40MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver"); 40MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver");
41 41
42static iucv_handle_t smsg_handle; 42static struct iucv_path *smsg_path;
43static unsigned short smsg_pathid; 43
44static DEFINE_SPINLOCK(smsg_list_lock); 44static DEFINE_SPINLOCK(smsg_list_lock);
45static struct list_head smsg_list = LIST_HEAD_INIT(smsg_list); 45static struct list_head smsg_list = LIST_HEAD_INIT(smsg_list);
46 46
47static void 47static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
48smsg_connection_complete(iucv_ConnectionComplete *eib, void *pgm_data) 48static void smsg_message_pending(struct iucv_path *, struct iucv_message *);
49
50static struct iucv_handler smsg_handler = {
51 .path_pending = smsg_path_pending,
52 .message_pending = smsg_message_pending,
53};
54
55static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8],
56 u8 ipuser[16])
49{ 57{
58 if (strncmp(ipvmid, "*MSG ", sizeof(ipvmid)) != 0)
59 return -EINVAL;
60 /* Path pending from *MSG. */
61 return iucv_path_accept(path, &smsg_handler, "SMSGIUCV ", NULL);
50} 62}
51 63
52 64static void smsg_message_pending(struct iucv_path *path,
53static void 65 struct iucv_message *msg)
54smsg_message_pending(iucv_MessagePending *eib, void *pgm_data)
55{ 66{
56 struct smsg_callback *cb; 67 struct smsg_callback *cb;
57 unsigned char *msg; 68 unsigned char *buffer;
58 unsigned char sender[9]; 69 unsigned char sender[9];
59 unsigned short len;
60 int rc, i; 70 int rc, i;
61 71
62 len = eib->ln1msg2.ipbfln1f; 72 buffer = kmalloc(msg->length + 1, GFP_ATOMIC | GFP_DMA);
63 msg = kmalloc(len + 1, GFP_ATOMIC|GFP_DMA); 73 if (!buffer) {
64 if (!msg) { 74 iucv_message_reject(path, msg);
65 iucv_reject(eib->ippathid, eib->ipmsgid, eib->iptrgcls);
66 return; 75 return;
67 } 76 }
68 rc = iucv_receive(eib->ippathid, eib->ipmsgid, eib->iptrgcls, 77 rc = iucv_message_receive(path, msg, 0, buffer, msg->length, NULL);
69 msg, len, NULL, NULL, NULL);
70 if (rc == 0) { 78 if (rc == 0) {
71 msg[len] = 0; 79 buffer[msg->length] = 0;
72 EBCASC(msg, len); 80 EBCASC(buffer, msg->length);
73 memcpy(sender, msg, 8); 81 memcpy(sender, buffer, 8);
74 sender[8] = 0; 82 sender[8] = 0;
75 /* Remove trailing whitespace from the sender name. */ 83 /* Remove trailing whitespace from the sender name. */
76 for (i = 7; i >= 0; i--) { 84 for (i = 7; i >= 0; i--) {
@@ -80,27 +88,17 @@ smsg_message_pending(iucv_MessagePending *eib, void *pgm_data)
80 } 88 }
81 spin_lock(&smsg_list_lock); 89 spin_lock(&smsg_list_lock);
82 list_for_each_entry(cb, &smsg_list, list) 90 list_for_each_entry(cb, &smsg_list, list)
83 if (strncmp(msg + 8, cb->prefix, cb->len) == 0) { 91 if (strncmp(buffer + 8, cb->prefix, cb->len) == 0) {
84 cb->callback(sender, msg + 8); 92 cb->callback(sender, buffer + 8);
85 break; 93 break;
86 } 94 }
87 spin_unlock(&smsg_list_lock); 95 spin_unlock(&smsg_list_lock);
88 } 96 }
89 kfree(msg); 97 kfree(buffer);
90} 98}
91 99
92static iucv_interrupt_ops_t smsg_ops = { 100int smsg_register_callback(char *prefix,
93 .ConnectionComplete = smsg_connection_complete, 101 void (*callback)(char *from, char *str))
94 .MessagePending = smsg_message_pending,
95};
96
97static struct device_driver smsg_driver = {
98 .name = "SMSGIUCV",
99 .bus = &iucv_bus,
100};
101
102int
103smsg_register_callback(char *prefix, void (*callback)(char *from, char *str))
104{ 102{
105 struct smsg_callback *cb; 103 struct smsg_callback *cb;
106 104
@@ -110,18 +108,18 @@ smsg_register_callback(char *prefix, void (*callback)(char *from, char *str))
110 cb->prefix = prefix; 108 cb->prefix = prefix;
111 cb->len = strlen(prefix); 109 cb->len = strlen(prefix);
112 cb->callback = callback; 110 cb->callback = callback;
113 spin_lock(&smsg_list_lock); 111 spin_lock_bh(&smsg_list_lock);
114 list_add_tail(&cb->list, &smsg_list); 112 list_add_tail(&cb->list, &smsg_list);
115 spin_unlock(&smsg_list_lock); 113 spin_unlock_bh(&smsg_list_lock);
116 return 0; 114 return 0;
117} 115}
118 116
119void 117void smsg_unregister_callback(char *prefix,
120smsg_unregister_callback(char *prefix, void (*callback)(char *from, char *str)) 118 void (*callback)(char *from, char *str))
121{ 119{
122 struct smsg_callback *cb, *tmp; 120 struct smsg_callback *cb, *tmp;
123 121
124 spin_lock(&smsg_list_lock); 122 spin_lock_bh(&smsg_list_lock);
125 cb = NULL; 123 cb = NULL;
126 list_for_each_entry(tmp, &smsg_list, list) 124 list_for_each_entry(tmp, &smsg_list, list)
127 if (tmp->callback == callback && 125 if (tmp->callback == callback &&
@@ -130,55 +128,58 @@ smsg_unregister_callback(char *prefix, void (*callback)(char *from, char *str))
130 list_del(&cb->list); 128 list_del(&cb->list);
131 break; 129 break;
132 } 130 }
133 spin_unlock(&smsg_list_lock); 131 spin_unlock_bh(&smsg_list_lock);
134 kfree(cb); 132 kfree(cb);
135} 133}
136 134
137static void __exit 135static struct device_driver smsg_driver = {
138smsg_exit(void) 136 .name = "SMSGIUCV",
137 .bus = &iucv_bus,
138};
139
140static void __exit smsg_exit(void)
139{ 141{
140 if (smsg_handle > 0) { 142 cpcmd("SET SMSG IUCV", NULL, 0, NULL);
141 cpcmd("SET SMSG OFF", NULL, 0, NULL); 143 iucv_unregister(&smsg_handler, 1);
142 iucv_sever(smsg_pathid, NULL); 144 driver_unregister(&smsg_driver);
143 iucv_unregister_program(smsg_handle);
144 driver_unregister(&smsg_driver);
145 }
146 return;
147} 145}
148 146
149static int __init 147static int __init smsg_init(void)
150smsg_init(void)
151{ 148{
152 static unsigned char pgmmask[24] = {
153 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
154 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
155 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
156 };
157 int rc; 149 int rc;
158 150
159 rc = driver_register(&smsg_driver); 151 rc = driver_register(&smsg_driver);
160 if (rc != 0) { 152 if (rc != 0)
161 printk(KERN_ERR "SMSGIUCV: failed to register driver.\n"); 153 goto out;
162 return rc; 154 rc = iucv_register(&smsg_handler, 1);
163 } 155 if (rc) {
164 smsg_handle = iucv_register_program("SMSGIUCV ", "*MSG ",
165 pgmmask, &smsg_ops, NULL);
166 if (!smsg_handle) {
167 printk(KERN_ERR "SMSGIUCV: failed to register to iucv"); 156 printk(KERN_ERR "SMSGIUCV: failed to register to iucv");
168 driver_unregister(&smsg_driver); 157 rc = -EIO; /* better errno ? */
169 return -EIO; /* better errno ? */ 158 goto out_driver;
159 }
160 smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL);
161 if (!smsg_path) {
162 rc = -ENOMEM;
163 goto out_register;
170 } 164 }
171 rc = iucv_connect (&smsg_pathid, 255, NULL, "*MSG ", NULL, 0, 165 rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ",
172 NULL, NULL, smsg_handle, NULL); 166 NULL, NULL, NULL);
173 if (rc) { 167 if (rc) {
174 printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG"); 168 printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG");
175 iucv_unregister_program(smsg_handle); 169 rc = -EIO; /* better errno ? */
176 driver_unregister(&smsg_driver); 170 goto out_free;
177 smsg_handle = NULL;
178 return -EIO;
179 } 171 }
180 cpcmd("SET SMSG IUCV", NULL, 0, NULL); 172 cpcmd("SET SMSG IUCV", NULL, 0, NULL);
181 return 0; 173 return 0;
174
175out_free:
176 iucv_path_free(smsg_path);
177out_register:
178 iucv_unregister(&smsg_handler, 1);
179out_driver:
180 driver_unregister(&smsg_driver);
181out:
182 return rc;
182} 183}
183 184
184module_init(smsg_init); 185module_init(smsg_init);
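Annotation: the smsgiucv conversion above is the most compact summary of the rewritten IUCV kernel API. A driver fills in a struct iucv_handler with callbacks, registers it with iucv_register(), and then deals in iucv_path_* and iucv_message_* objects instead of pathids and handles. A condensed skeleton of that lifecycle, using only calls that appear in the hunks above; the demo_* names and the terse error handling are illustrative and not part of the patch.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <net/iucv/iucv.h>

static int demo_path_pending(struct iucv_path *path, u8 ipvmid[8],
			     u8 ipuser[16])
{
	/* Refuse unsolicited connections in this sketch. */
	return -EINVAL;
}

static void demo_message_pending(struct iucv_path *path,
				 struct iucv_message *msg)
{
	/* A real driver would iucv_message_receive() into a buffer here;
	 * the sketch simply drops the message. */
	iucv_message_reject(path, msg);
}

static struct iucv_handler demo_handler = {
	.path_pending	 = demo_path_pending,
	.message_pending = demo_message_pending,
};

static struct iucv_path *demo_path;

static int demo_connect(void)
{
	int rc;

	rc = iucv_register(&demo_handler, 1);
	if (rc)
		return rc;
	demo_path = iucv_path_alloc(255, 0, GFP_KERNEL);
	if (!demo_path) {
		iucv_unregister(&demo_handler, 1);
		return -ENOMEM;
	}
	rc = iucv_path_connect(demo_path, &demo_handler, "*MSG    ",
			       NULL, NULL, NULL);
	if (rc) {
		iucv_path_free(demo_path);
		iucv_unregister(&demo_handler, 1);
	}
	return rc;
}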
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 7196f50fe152..a86a55ccf874 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -828,9 +828,7 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat)
828 mutex_unlock(&crypt_stat->cs_tfm_mutex); 828 mutex_unlock(&crypt_stat->cs_tfm_mutex);
829 goto out; 829 goto out;
830 } 830 }
831 crypto_blkcipher_set_flags(crypt_stat->tfm, 831 crypto_blkcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
832 (ECRYPTFS_DEFAULT_CHAINING_MODE
833 | CRYPTO_TFM_REQ_WEAK_KEY));
834 mutex_unlock(&crypt_stat->cs_tfm_mutex); 832 mutex_unlock(&crypt_stat->cs_tfm_mutex);
835 rc = 0; 833 rc = 0;
836out: 834out:
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index afb64bdbe6ad..0f897109759b 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -176,7 +176,6 @@ ecryptfs_get_key_payload_data(struct key *key)
176#define ECRYPTFS_FILE_SIZE_BYTES 8 176#define ECRYPTFS_FILE_SIZE_BYTES 8
177#define ECRYPTFS_DEFAULT_CIPHER "aes" 177#define ECRYPTFS_DEFAULT_CIPHER "aes"
178#define ECRYPTFS_DEFAULT_KEY_BYTES 16 178#define ECRYPTFS_DEFAULT_KEY_BYTES 16
179#define ECRYPTFS_DEFAULT_CHAINING_MODE CRYPTO_TFM_MODE_CBC
180#define ECRYPTFS_DEFAULT_HASH "md5" 179#define ECRYPTFS_DEFAULT_HASH "md5"
181#define ECRYPTFS_TAG_3_PACKET_TYPE 0x8C 180#define ECRYPTFS_TAG_3_PACKET_TYPE 0x8C
182#define ECRYPTFS_TAG_11_PACKET_TYPE 0xED 181#define ECRYPTFS_TAG_11_PACKET_TYPE 0xED
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 5748aecdb414..4e05e93ff681 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -18,8 +18,8 @@ struct module;
18struct seq_file; 18struct seq_file;
19 19
20struct crypto_type { 20struct crypto_type {
21 unsigned int (*ctxsize)(struct crypto_alg *alg); 21 unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
22 int (*init)(struct crypto_tfm *tfm); 22 int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
23 void (*exit)(struct crypto_tfm *tfm); 23 void (*exit)(struct crypto_tfm *tfm);
24 void (*show)(struct seq_file *m, struct crypto_alg *alg); 24 void (*show)(struct seq_file *m, struct crypto_alg *alg);
25}; 25};
@@ -93,7 +93,8 @@ struct crypto_template *crypto_lookup_template(const char *name);
93int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, 93int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
94 struct crypto_instance *inst); 94 struct crypto_instance *inst);
95void crypto_drop_spawn(struct crypto_spawn *spawn); 95void crypto_drop_spawn(struct crypto_spawn *spawn);
96struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn); 96struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
97 u32 mask);
97 98
98struct crypto_alg *crypto_get_attr_alg(void *param, unsigned int len, 99struct crypto_alg *crypto_get_attr_alg(void *param, unsigned int len,
99 u32 type, u32 mask); 100 u32 type, u32 mask);
@@ -132,11 +133,28 @@ static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
132 return crypto_tfm_ctx_aligned(&tfm->base); 133 return crypto_tfm_ctx_aligned(&tfm->base);
133} 134}
134 135
136static inline struct crypto_cipher *crypto_spawn_cipher(
137 struct crypto_spawn *spawn)
138{
139 u32 type = CRYPTO_ALG_TYPE_CIPHER;
140 u32 mask = CRYPTO_ALG_TYPE_MASK;
141
142 return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
143}
144
135static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm) 145static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
136{ 146{
137 return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher; 147 return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
138} 148}
139 149
150static inline struct crypto_hash *crypto_spawn_hash(struct crypto_spawn *spawn)
151{
152 u32 type = CRYPTO_ALG_TYPE_HASH;
153 u32 mask = CRYPTO_ALG_TYPE_HASH_MASK;
154
155 return __crypto_hash_cast(crypto_spawn_tfm(spawn, type, mask));
156}
157
140static inline void *crypto_hash_ctx_aligned(struct crypto_hash *tfm) 158static inline void *crypto_hash_ctx_aligned(struct crypto_hash *tfm)
141{ 159{
142 return crypto_tfm_ctx_aligned(&tfm->base); 160 return crypto_tfm_ctx_aligned(&tfm->base);
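Annotation on crypto_spawn_cipher()/crypto_spawn_hash() added above: they exist so that mode templates (cbc, ecb, lrw, pcbc and friends) can ask their spawn for a properly typed handle now that crypto_spawn_tfm() takes an explicit type/mask. A hedged sketch of how a template's init hook might use the cipher variant; demo_ctx and demo_init_tfm are invented names, the cast and error handling follow the usual template pattern and are assumptions here, and only crypto_spawn_cipher() itself comes from the hunk above.

#include <linux/err.h>
#include <crypto/algapi.h>

struct demo_ctx {
	struct crypto_cipher *child;
};

static int demo_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct demo_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_cipher *cipher;

	/* The spawn remembers the underlying algorithm; the new helper
	 * requests it as a cipher tfm with the matching type/mask. */
	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	return 0;
}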
diff --git a/include/linux/atmarp.h b/include/linux/atmarp.h
index ee108f9e9cb7..231f4bdec730 100644
--- a/include/linux/atmarp.h
+++ b/include/linux/atmarp.h
@@ -6,9 +6,7 @@
6#ifndef _LINUX_ATMARP_H 6#ifndef _LINUX_ATMARP_H
7#define _LINUX_ATMARP_H 7#define _LINUX_ATMARP_H
8 8
9#ifdef __KERNEL__
10#include <linux/types.h> 9#include <linux/types.h>
11#endif
12#include <linux/atmapi.h> 10#include <linux/atmapi.h>
13#include <linux/atmioc.h> 11#include <linux/atmioc.h>
14 12
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 4aa9046601da..779aa78ee643 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -51,15 +51,9 @@
51/* 51/*
52 * Transform masks and values (for crt_flags). 52 * Transform masks and values (for crt_flags).
53 */ 53 */
54#define CRYPTO_TFM_MODE_MASK 0x000000ff
55#define CRYPTO_TFM_REQ_MASK 0x000fff00 54#define CRYPTO_TFM_REQ_MASK 0x000fff00
56#define CRYPTO_TFM_RES_MASK 0xfff00000 55#define CRYPTO_TFM_RES_MASK 0xfff00000
57 56
58#define CRYPTO_TFM_MODE_ECB 0x00000001
59#define CRYPTO_TFM_MODE_CBC 0x00000002
60#define CRYPTO_TFM_MODE_CFB 0x00000004
61#define CRYPTO_TFM_MODE_CTR 0x00000008
62
63#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100 57#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100
64#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200 58#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200
65#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000 59#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
@@ -71,12 +65,8 @@
71/* 65/*
72 * Miscellaneous stuff. 66 * Miscellaneous stuff.
73 */ 67 */
74#define CRYPTO_UNSPEC 0
75#define CRYPTO_MAX_ALG_NAME 64 68#define CRYPTO_MAX_ALG_NAME 64
76 69
77#define CRYPTO_DIR_ENCRYPT 1
78#define CRYPTO_DIR_DECRYPT 0
79
80/* 70/*
81 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual 71 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
82 * declaration) is used to ensure that the crypto_tfm context structure is 72 * declaration) is used to ensure that the crypto_tfm context structure is
@@ -148,19 +138,6 @@ struct cipher_alg {
148 unsigned int keylen); 138 unsigned int keylen);
149 void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 139 void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
150 void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 140 void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
151
152 unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc,
153 u8 *dst, const u8 *src,
154 unsigned int nbytes) __deprecated;
155 unsigned int (*cia_decrypt_ecb)(const struct cipher_desc *desc,
156 u8 *dst, const u8 *src,
157 unsigned int nbytes) __deprecated;
158 unsigned int (*cia_encrypt_cbc)(const struct cipher_desc *desc,
159 u8 *dst, const u8 *src,
160 unsigned int nbytes) __deprecated;
161 unsigned int (*cia_decrypt_cbc)(const struct cipher_desc *desc,
162 u8 *dst, const u8 *src,
163 unsigned int nbytes) __deprecated;
164}; 141};
165 142
166struct digest_alg { 143struct digest_alg {
@@ -243,11 +220,6 @@ int crypto_unregister_alg(struct crypto_alg *alg);
243#ifdef CONFIG_CRYPTO 220#ifdef CONFIG_CRYPTO
244int crypto_has_alg(const char *name, u32 type, u32 mask); 221int crypto_has_alg(const char *name, u32 type, u32 mask);
245#else 222#else
246static inline int crypto_alg_available(const char *name, u32 flags)
247{
248 return 0;
249}
250
251static inline int crypto_has_alg(const char *name, u32 type, u32 mask) 223static inline int crypto_has_alg(const char *name, u32 type, u32 mask)
252{ 224{
253 return 0; 225 return 0;
@@ -339,13 +311,18 @@ struct crypto_tfm {
339 void *__crt_ctx[] CRYPTO_MINALIGN_ATTR; 311 void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
340}; 312};
341 313
342#define crypto_cipher crypto_tfm
343#define crypto_comp crypto_tfm
344
345struct crypto_blkcipher { 314struct crypto_blkcipher {
346 struct crypto_tfm base; 315 struct crypto_tfm base;
347}; 316};
348 317
318struct crypto_cipher {
319 struct crypto_tfm base;
320};
321
322struct crypto_comp {
323 struct crypto_tfm base;
324};
325
349struct crypto_hash { 326struct crypto_hash {
350 struct crypto_tfm base; 327 struct crypto_tfm base;
351}; 328};
@@ -395,40 +372,11 @@ static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
395 return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK; 372 return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
396} 373}
397 374
398static unsigned int crypto_tfm_alg_min_keysize(struct crypto_tfm *tfm)
399 __deprecated;
400static inline unsigned int crypto_tfm_alg_min_keysize(struct crypto_tfm *tfm)
401{
402 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
403 return tfm->__crt_alg->cra_cipher.cia_min_keysize;
404}
405
406static unsigned int crypto_tfm_alg_max_keysize(struct crypto_tfm *tfm)
407 __deprecated;
408static inline unsigned int crypto_tfm_alg_max_keysize(struct crypto_tfm *tfm)
409{
410 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
411 return tfm->__crt_alg->cra_cipher.cia_max_keysize;
412}
413
414static unsigned int crypto_tfm_alg_ivsize(struct crypto_tfm *tfm) __deprecated;
415static inline unsigned int crypto_tfm_alg_ivsize(struct crypto_tfm *tfm)
416{
417 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
418 return tfm->crt_cipher.cit_ivsize;
419}
420
421static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm) 375static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
422{ 376{
423 return tfm->__crt_alg->cra_blocksize; 377 return tfm->__crt_alg->cra_blocksize;
424} 378}
425 379
426static inline unsigned int crypto_tfm_alg_digestsize(struct crypto_tfm *tfm)
427{
428 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
429 return tfm->__crt_alg->cra_digest.dia_digestsize;
430}
431
432static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm) 380static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
433{ 381{
434 return tfm->__crt_alg->cra_alignmask; 382 return tfm->__crt_alg->cra_alignmask;
@@ -633,7 +581,7 @@ static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
633 581
634static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm) 582static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
635{ 583{
636 return tfm; 584 return &tfm->base;
637} 585}
638 586
639static inline void crypto_free_cipher(struct crypto_cipher *tfm) 587static inline void crypto_free_cipher(struct crypto_cipher *tfm)
@@ -809,76 +757,6 @@ static inline int crypto_hash_setkey(struct crypto_hash *hash,
809 return crypto_hash_crt(hash)->setkey(hash, key, keylen); 757 return crypto_hash_crt(hash)->setkey(hash, key, keylen);
810} 758}
811 759
812static int crypto_cipher_encrypt(struct crypto_tfm *tfm,
813 struct scatterlist *dst,
814 struct scatterlist *src,
815 unsigned int nbytes) __deprecated;
816static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm,
817 struct scatterlist *dst,
818 struct scatterlist *src,
819 unsigned int nbytes)
820{
821 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
822 return tfm->crt_cipher.cit_encrypt(tfm, dst, src, nbytes);
823}
824
825static int crypto_cipher_encrypt_iv(struct crypto_tfm *tfm,
826 struct scatterlist *dst,
827 struct scatterlist *src,
828 unsigned int nbytes, u8 *iv) __deprecated;
829static inline int crypto_cipher_encrypt_iv(struct crypto_tfm *tfm,
830 struct scatterlist *dst,
831 struct scatterlist *src,
832 unsigned int nbytes, u8 *iv)
833{
834 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
835 return tfm->crt_cipher.cit_encrypt_iv(tfm, dst, src, nbytes, iv);
836}
837
838static int crypto_cipher_decrypt(struct crypto_tfm *tfm,
839 struct scatterlist *dst,
840 struct scatterlist *src,
841 unsigned int nbytes) __deprecated;
842static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm,
843 struct scatterlist *dst,
844 struct scatterlist *src,
845 unsigned int nbytes)
846{
847 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
848 return tfm->crt_cipher.cit_decrypt(tfm, dst, src, nbytes);
849}
850
851static int crypto_cipher_decrypt_iv(struct crypto_tfm *tfm,
852 struct scatterlist *dst,
853 struct scatterlist *src,
854 unsigned int nbytes, u8 *iv) __deprecated;
855static inline int crypto_cipher_decrypt_iv(struct crypto_tfm *tfm,
856 struct scatterlist *dst,
857 struct scatterlist *src,
858 unsigned int nbytes, u8 *iv)
859{
860 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
861 return tfm->crt_cipher.cit_decrypt_iv(tfm, dst, src, nbytes, iv);
862}
863
864static void crypto_cipher_set_iv(struct crypto_tfm *tfm,
865 const u8 *src, unsigned int len) __deprecated;
866static inline void crypto_cipher_set_iv(struct crypto_tfm *tfm,
867 const u8 *src, unsigned int len)
868{
869 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
870 memcpy(tfm->crt_cipher.cit_iv, src, len);
871}
872
873static void crypto_cipher_get_iv(struct crypto_tfm *tfm,
874 u8 *dst, unsigned int len) __deprecated;
875static inline void crypto_cipher_get_iv(struct crypto_tfm *tfm,
876 u8 *dst, unsigned int len)
877{
878 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
879 memcpy(dst, tfm->crt_cipher.cit_iv, len);
880}
881
882static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm) 760static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
883{ 761{
884 return (struct crypto_comp *)tfm; 762 return (struct crypto_comp *)tfm;
@@ -903,7 +781,7 @@ static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
903 781
904static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm) 782static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
905{ 783{
906 return tfm; 784 return &tfm->base;
907} 785}
908 786
909static inline void crypto_free_comp(struct crypto_comp *tfm) 787static inline void crypto_free_comp(struct crypto_comp *tfm)
@@ -934,14 +812,16 @@ static inline int crypto_comp_compress(struct crypto_comp *tfm,
934 const u8 *src, unsigned int slen, 812 const u8 *src, unsigned int slen,
935 u8 *dst, unsigned int *dlen) 813 u8 *dst, unsigned int *dlen)
936{ 814{
937 return crypto_comp_crt(tfm)->cot_compress(tfm, src, slen, dst, dlen); 815 return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
816 src, slen, dst, dlen);
938} 817}
939 818
940static inline int crypto_comp_decompress(struct crypto_comp *tfm, 819static inline int crypto_comp_decompress(struct crypto_comp *tfm,
941 const u8 *src, unsigned int slen, 820 const u8 *src, unsigned int slen,
942 u8 *dst, unsigned int *dlen) 821 u8 *dst, unsigned int *dlen)
943{ 822{
944 return crypto_comp_crt(tfm)->cot_decompress(tfm, src, slen, dst, dlen); 823 return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
824 src, slen, dst, dlen);
945} 825}
946 826
947#endif /* _LINUX_CRYPTO_H */ 827#endif /* _LINUX_CRYPTO_H */
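Annotation: the CRYPTO_TFM_MODE_* flags and the scatterlist-based cit_encrypt/cit_decrypt helpers removed above have no direct replacement; callers are expected to encode the chaining mode in the algorithm name and use the blkcipher interface, which is also why the ecryptfs hunk earlier drops ECRYPTFS_DEFAULT_CHAINING_MODE. A minimal sketch, assuming the existing crypto_alloc_blkcipher() allocator; the demo_ function name is made up.

#include <linux/crypto.h>
#include <linux/err.h>

/* "cbc(aes)" selects the mode by name instead of CRYPTO_TFM_MODE_CBC. */
static struct crypto_blkcipher *demo_alloc_cbc_aes(void)
{
	struct crypto_blkcipher *tfm;

	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return NULL;
	return tfm;
}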
diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h
index 99393ef3af39..f3de05c30678 100644
--- a/include/linux/if_packet.h
+++ b/include/linux/if_packet.h
@@ -41,6 +41,7 @@ struct sockaddr_ll
41#define PACKET_RX_RING 5 41#define PACKET_RX_RING 5
42#define PACKET_STATISTICS 6 42#define PACKET_STATISTICS 6
43#define PACKET_COPY_THRESH 7 43#define PACKET_COPY_THRESH 7
44#define PACKET_AUXDATA 8
44 45
45struct tpacket_stats 46struct tpacket_stats
46{ 47{
@@ -48,6 +49,15 @@ struct tpacket_stats
48 unsigned int tp_drops; 49 unsigned int tp_drops;
49}; 50};
50 51
52struct tpacket_auxdata
53{
54 __u32 tp_status;
55 __u32 tp_len;
56 __u32 tp_snaplen;
57 __u16 tp_mac;
58 __u16 tp_net;
59};
60
51struct tpacket_hdr 61struct tpacket_hdr
52{ 62{
53 unsigned long tp_status; 63 unsigned long tp_status;
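Annotation: PACKET_AUXDATA and struct tpacket_auxdata added above give non-mmap packet sockets per-packet metadata. A hedged userspace sketch of consuming it, assuming the kernel delivers the struct as a SOL_PACKET/PACKET_AUXDATA control message on recvmsg(); everything outside the names defined in this hunk is ordinary cmsg plumbing and the demo_ function is invented.

#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_packet.h>
#include <stdio.h>
#include <string.h>

static void demo_read_auxdata(int fd)
{
	int one = 1;
	char frame[2048];
	char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
	struct iovec iov = { .iov_base = frame, .iov_len = sizeof(frame) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;

	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));
	if (recvmsg(fd, &msg, 0) < 0)
		return;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_PACKET &&
		    cmsg->cmsg_type == PACKET_AUXDATA) {
			struct tpacket_auxdata aux;

			memcpy(&aux, CMSG_DATA(cmsg), sizeof(aux));
			printf("snaplen %u len %u\n",
			       aux.tp_snaplen, aux.tp_len);
		}
	}
}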
diff --git a/include/linux/net.h b/include/linux/net.h
index f28d8a2e2c91..4db21e63d8d2 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -24,7 +24,7 @@
24struct poll_table_struct; 24struct poll_table_struct;
25struct inode; 25struct inode;
26 26
27#define NPROTO 32 /* should be enough for now.. */ 27#define NPROTO 33 /* should be enough for now.. */
28 28
29#define SYS_SOCKET 1 /* sys_socket(2) */ 29#define SYS_SOCKET 1 /* sys_socket(2) */
30#define SYS_BIND 2 /* sys_bind(2) */ 30#define SYS_BIND 2 /* sys_bind(2) */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 2e37f5012788..1a528548cd1d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -589,7 +589,7 @@ extern int dev_open(struct net_device *dev);
589extern int dev_close(struct net_device *dev); 589extern int dev_close(struct net_device *dev);
590extern int dev_queue_xmit(struct sk_buff *skb); 590extern int dev_queue_xmit(struct sk_buff *skb);
591extern int register_netdevice(struct net_device *dev); 591extern int register_netdevice(struct net_device *dev);
592extern int unregister_netdevice(struct net_device *dev); 592extern void unregister_netdevice(struct net_device *dev);
593extern void free_netdev(struct net_device *dev); 593extern void free_netdev(struct net_device *dev);
594extern void synchronize_net(void); 594extern void synchronize_net(void);
595extern int register_netdevice_notifier(struct notifier_block *nb); 595extern int register_netdevice_notifier(struct notifier_block *nb);
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
index 6328175a1c3a..43397a414cd6 100644
--- a/include/linux/netfilter/Kbuild
+++ b/include/linux/netfilter/Kbuild
@@ -33,6 +33,7 @@ header-y += xt_tcpmss.h
33header-y += xt_tcpudp.h 33header-y += xt_tcpudp.h
34header-y += xt_SECMARK.h 34header-y += xt_SECMARK.h
35header-y += xt_CONNSECMARK.h 35header-y += xt_CONNSECMARK.h
36header-y += xt_TCPMSS.h
36 37
37unifdef-y += nf_conntrack_common.h 38unifdef-y += nf_conntrack_common.h
38unifdef-y += nf_conntrack_ftp.h 39unifdef-y += nf_conntrack_ftp.h
diff --git a/include/linux/netfilter/nf_conntrack_sane.h b/include/linux/netfilter/nf_conntrack_sane.h
new file mode 100644
index 000000000000..4767d6e23e97
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_sane.h
@@ -0,0 +1,21 @@
1#ifndef _NF_CONNTRACK_SANE_H
2#define _NF_CONNTRACK_SANE_H
3/* SANE tracking. */
4
5#ifdef __KERNEL__
6
7#define SANE_PORT 6566
8
9enum sane_state {
10 SANE_STATE_NORMAL,
11 SANE_STATE_START_REQUESTED,
12};
13
14/* This structure exists only once per master */
15struct nf_ct_sane_master {
16 enum sane_state state;
17};
18
19#endif /* __KERNEL__ */
20
21#endif /* _NF_CONNTRACK_SANE_H */
diff --git a/include/linux/netfilter/nf_conntrack_tcp.h b/include/linux/netfilter/nf_conntrack_tcp.h
index 2f4e98b90cc0..007af4c2770b 100644
--- a/include/linux/netfilter/nf_conntrack_tcp.h
+++ b/include/linux/netfilter/nf_conntrack_tcp.h
@@ -27,6 +27,9 @@ enum tcp_conntrack {
27/* This sender sent FIN first */ 27/* This sender sent FIN first */
28#define IP_CT_TCP_FLAG_CLOSE_INIT 0x04 28#define IP_CT_TCP_FLAG_CLOSE_INIT 0x04
29 29
30/* Be liberal in window checking */
31#define IP_CT_TCP_FLAG_BE_LIBERAL 0x08
32
30#ifdef __KERNEL__ 33#ifdef __KERNEL__
31 34
32struct ip_ct_tcp_state { 35struct ip_ct_tcp_state {
@@ -34,7 +37,6 @@ struct ip_ct_tcp_state {
34 u_int32_t td_maxend; /* max of ack + max(win, 1) */ 37 u_int32_t td_maxend; /* max of ack + max(win, 1) */
35 u_int32_t td_maxwin; /* max(win) */ 38 u_int32_t td_maxwin; /* max(win) */
36 u_int8_t td_scale; /* window scale factor */ 39 u_int8_t td_scale; /* window scale factor */
37 u_int8_t loose; /* used when connection picked up from the middle */
38 u_int8_t flags; /* per direction options */ 40 u_int8_t flags; /* per direction options */
39}; 41};
40 42
diff --git a/include/linux/netfilter/xt_TCPMSS.h b/include/linux/netfilter/xt_TCPMSS.h
new file mode 100644
index 000000000000..53a292cd47f3
--- /dev/null
+++ b/include/linux/netfilter/xt_TCPMSS.h
@@ -0,0 +1,10 @@
1#ifndef _XT_TCPMSS_H
2#define _XT_TCPMSS_H
3
4struct xt_tcpmss_info {
5 u_int16_t mss;
6};
7
8#define XT_TCPMSS_CLAMP_PMTU 0xffff
9
10#endif /* _XT_TCPMSS_H */
diff --git a/include/linux/netfilter_ipv4/ip_nat.h b/include/linux/netfilter_ipv4/ip_nat.h
index bdf553620ca1..bbca89aab813 100644
--- a/include/linux/netfilter_ipv4/ip_nat.h
+++ b/include/linux/netfilter_ipv4/ip_nat.h
@@ -16,6 +16,7 @@ enum ip_nat_manip_type
16 16
17#define IP_NAT_RANGE_MAP_IPS 1 17#define IP_NAT_RANGE_MAP_IPS 1
18#define IP_NAT_RANGE_PROTO_SPECIFIED 2 18#define IP_NAT_RANGE_PROTO_SPECIFIED 2
19#define IP_NAT_RANGE_PROTO_RANDOM 4 /* add randomness to "port" selection */
19 20
20/* NAT sequence number modifications */ 21/* NAT sequence number modifications */
21struct ip_nat_seq { 22struct ip_nat_seq {
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index 98d566c5e32a..9527296595cd 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -272,25 +272,9 @@ ipt_get_target(struct ipt_entry *e)
272#include <linux/init.h> 272#include <linux/init.h>
273extern void ipt_init(void) __init; 273extern void ipt_init(void) __init;
274 274
275#define ipt_register_target(tgt) \ 275extern int ipt_register_table(struct xt_table *table,
276({ (tgt)->family = AF_INET; \
277 xt_register_target(tgt); })
278#define ipt_unregister_target(tgt) xt_unregister_target(tgt)
279
280#define ipt_register_match(mtch) \
281({ (mtch)->family = AF_INET; \
282 xt_register_match(mtch); })
283#define ipt_unregister_match(mtch) xt_unregister_match(mtch)
284
285//#define ipt_register_table(tbl, repl) xt_register_table(AF_INET, tbl, repl)
286//#define ipt_unregister_table(tbl) xt_unregister_table(AF_INET, tbl)
287
288extern int ipt_register_table(struct ipt_table *table,
289 const struct ipt_replace *repl); 276 const struct ipt_replace *repl);
290extern void ipt_unregister_table(struct ipt_table *table); 277extern void ipt_unregister_table(struct xt_table *table);
291
292/* net/sched/ipt.c: Gimme access to your targets! Gets target->me. */
293extern struct ipt_target *ipt_find_target(const char *name, u8 revision);
294 278
295/* Standard entry. */ 279/* Standard entry. */
296struct ipt_standard 280struct ipt_standard
@@ -315,7 +299,7 @@ extern unsigned int ipt_do_table(struct sk_buff **pskb,
315 unsigned int hook, 299 unsigned int hook,
316 const struct net_device *in, 300 const struct net_device *in,
317 const struct net_device *out, 301 const struct net_device *out,
318 struct ipt_table *table); 302 struct xt_table *table);
319 303
320#define IPT_ALIGN(s) XT_ALIGN(s) 304#define IPT_ALIGN(s) XT_ALIGN(s)
321 305
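Annotation: with the ipt_register_target()/ipt_register_match() wrapper macros removed above, an IPv4 target or match sets .family itself and calls the xt_* registration functions directly, exactly what the deleted macros used to do on its behalf. A hedged sketch of the registration half only; the .name/.family/.me fields and the single-argument xt_register_target()/xt_unregister_target() calls follow from the removed macros, while the struct name "DEMO" and the omitted .target/.checkentry hooks are assumptions for illustration.

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netfilter/x_tables.h>

static struct xt_target demo_target = {
	.name   = "DEMO",
	.family = AF_INET,
	.me     = THIS_MODULE,
	/* .target and .checkentry hooks omitted in this sketch */
};

static int __init demo_init(void)
{
	return xt_register_target(&demo_target);
}

static void __exit demo_exit(void)
{
	xt_unregister_target(&demo_target);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");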
diff --git a/include/linux/netfilter_ipv4/ipt_TCPMSS.h b/include/linux/netfilter_ipv4/ipt_TCPMSS.h
index aadb39580cd3..7a850f945824 100644
--- a/include/linux/netfilter_ipv4/ipt_TCPMSS.h
+++ b/include/linux/netfilter_ipv4/ipt_TCPMSS.h
@@ -1,10 +1,9 @@
1#ifndef _IPT_TCPMSS_H 1#ifndef _IPT_TCPMSS_H
2#define _IPT_TCPMSS_H 2#define _IPT_TCPMSS_H
3 3
4struct ipt_tcpmss_info { 4#include <linux/netfilter/xt_TCPMSS.h>
5 u_int16_t mss;
6};
7 5
8#define IPT_TCPMSS_CLAMP_PMTU 0xffff 6#define ipt_tcpmss_info xt_tcpmss_info
7#define IPT_TCPMSS_CLAMP_PMTU XT_TCPMSS_CLAMP_PMTU
9 8
10#endif /*_IPT_TCPMSS_H*/ 9#endif /*_IPT_TCPMSS_H*/
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index 4aed340401db..61aa10412fc8 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -104,6 +104,25 @@ struct ip6t_entry
104 unsigned char elems[0]; 104 unsigned char elems[0];
105}; 105};
106 106
107/* Standard entry */
108struct ip6t_standard
109{
110 struct ip6t_entry entry;
111 struct ip6t_standard_target target;
112};
113
114struct ip6t_error_target
115{
116 struct ip6t_entry_target target;
117 char errorname[IP6T_FUNCTION_MAXNAMELEN];
118};
119
120struct ip6t_error
121{
122 struct ip6t_entry entry;
123 struct ip6t_error_target target;
124};
125
107/* 126/*
108 * New IP firewall options for [gs]etsockopt at the RAW IP level. 127 * New IP firewall options for [gs]etsockopt at the RAW IP level.
109 * Unlike BSD Linux inherits IP options so you don't have to use 128 * Unlike BSD Linux inherits IP options so you don't have to use
@@ -286,24 +305,14 @@ ip6t_get_target(struct ip6t_entry *e)
286#include <linux/init.h> 305#include <linux/init.h>
287extern void ip6t_init(void) __init; 306extern void ip6t_init(void) __init;
288 307
289#define ip6t_register_target(tgt) \ 308extern int ip6t_register_table(struct xt_table *table,
290({ (tgt)->family = AF_INET6; \
291 xt_register_target(tgt); })
292#define ip6t_unregister_target(tgt) xt_unregister_target(tgt)
293
294#define ip6t_register_match(match) \
295({ (match)->family = AF_INET6; \
296 xt_register_match(match); })
297#define ip6t_unregister_match(match) xt_unregister_match(match)
298
299extern int ip6t_register_table(struct ip6t_table *table,
300 const struct ip6t_replace *repl); 309 const struct ip6t_replace *repl);
301extern void ip6t_unregister_table(struct ip6t_table *table); 310extern void ip6t_unregister_table(struct xt_table *table);
302extern unsigned int ip6t_do_table(struct sk_buff **pskb, 311extern unsigned int ip6t_do_table(struct sk_buff **pskb,
303 unsigned int hook, 312 unsigned int hook,
304 const struct net_device *in, 313 const struct net_device *in,
305 const struct net_device *out, 314 const struct net_device *out,
306 struct ip6t_table *table); 315 struct xt_table *table);
307 316
308/* Check for an extension */ 317/* Check for an extension */
309extern int ip6t_ext_hdr(u8 nexthdr); 318extern int ip6t_ext_hdr(u8 nexthdr);
diff --git a/include/linux/netfilter_ipv6/ip6t_mh.h b/include/linux/netfilter_ipv6/ip6t_mh.h
new file mode 100644
index 000000000000..b9ca9a5f74d0
--- /dev/null
+++ b/include/linux/netfilter_ipv6/ip6t_mh.h
@@ -0,0 +1,15 @@
1#ifndef _IP6T_MH_H
2#define _IP6T_MH_H
3
4/* MH matching stuff */
5struct ip6t_mh
6{
7 u_int8_t types[2]; /* MH type range */
8 u_int8_t invflags; /* Inverse flags */
9};
10
11/* Values for "invflags" field in struct ip6t_mh. */
12#define IP6T_MH_INV_TYPE 0x01 /* Invert the sense of type. */
13#define IP6T_MH_INV_MASK 0x01 /* All possible flags. */
14
15#endif /*_IP6T_MH_H*/
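
A sketch of the intended semantics of the new match data (not the ip6t_mh match module itself): a Mobility Header type matches when it falls inside the inclusive range types[0]..types[1], with IP6T_MH_INV_TYPE inverting the result. The helper name is hypothetical.

#include <linux/netfilter_ipv6/ip6t_mh.h>

/* Hypothetical helper mirroring how the match data is meant to be used. */
static inline int mh_type_match(const struct ip6t_mh *mhinfo, u_int8_t type)
{
	int in_range = type >= mhinfo->types[0] && type <= mhinfo->types[1];

	return in_range ^ !!(mhinfo->invflags & IP6T_MH_INV_TYPE);
}
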
diff --git a/include/linux/pfkeyv2.h b/include/linux/pfkeyv2.h
index 265bafab6494..d9db5f62ee48 100644
--- a/include/linux/pfkeyv2.h
+++ b/include/linux/pfkeyv2.h
@@ -251,7 +251,8 @@ struct sadb_x_sec_ctx {
251#define SADB_X_SPDEXPIRE 21 251#define SADB_X_SPDEXPIRE 21
252#define SADB_X_SPDDELETE2 22 252#define SADB_X_SPDDELETE2 22
253#define SADB_X_NAT_T_NEW_MAPPING 23 253#define SADB_X_NAT_T_NEW_MAPPING 23
254#define SADB_MAX 23 254#define SADB_X_MIGRATE 24
255#define SADB_MAX 24
255 256
256/* Security Association flags */ 257/* Security Association flags */
257#define SADB_SAFLAGS_PFS 1 258#define SADB_SAFLAGS_PFS 1
@@ -297,6 +298,7 @@ struct sadb_x_sec_ctx {
297#define SADB_X_EALG_BLOWFISHCBC 7 298#define SADB_X_EALG_BLOWFISHCBC 7
298#define SADB_EALG_NULL 11 299#define SADB_EALG_NULL 11
299#define SADB_X_EALG_AESCBC 12 300#define SADB_X_EALG_AESCBC 12
301#define SADB_X_EALG_CAMELLIACBC 22
300#define SADB_EALG_MAX 253 /* last EALG */ 302#define SADB_EALG_MAX 253 /* last EALG */
301/* private allocations should use 249-255 (RFC2407) */ 303/* private allocations should use 249-255 (RFC2407) */
302#define SADB_X_EALG_SERPENTCBC 252 /* draft-ietf-ipsec-ciph-aes-cbc-00 */ 304#define SADB_X_EALG_SERPENTCBC 252 /* draft-ietf-ipsec-ciph-aes-cbc-00 */
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 92cd38efad7f..fcd35a210e7f 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -187,7 +187,8 @@ struct ucred {
187#define AF_LLC 26 /* Linux LLC */ 187#define AF_LLC 26 /* Linux LLC */
188#define AF_TIPC 30 /* TIPC sockets */ 188#define AF_TIPC 30 /* TIPC sockets */
189#define AF_BLUETOOTH 31 /* Bluetooth sockets */ 189#define AF_BLUETOOTH 31 /* Bluetooth sockets */
190#define AF_MAX 32 /* For now.. */ 190#define AF_IUCV 32 /* IUCV sockets */
191#define AF_MAX 33 /* For now.. */
191 192
192/* Protocol families, same as address families. */ 193/* Protocol families, same as address families. */
193#define PF_UNSPEC AF_UNSPEC 194#define PF_UNSPEC AF_UNSPEC
@@ -220,6 +221,7 @@ struct ucred {
220#define PF_LLC AF_LLC 221#define PF_LLC AF_LLC
221#define PF_TIPC AF_TIPC 222#define PF_TIPC AF_TIPC
222#define PF_BLUETOOTH AF_BLUETOOTH 223#define PF_BLUETOOTH AF_BLUETOOTH
224#define PF_IUCV AF_IUCV
223#define PF_MAX AF_MAX 225#define PF_MAX AF_MAX
224 226
225/* Maximum queue length specifiable by listen. */ 227/* Maximum queue length specifiable by listen. */
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 81480e613467..665412c4f4b9 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -699,7 +699,8 @@ enum {
699 NET_X25_CALL_REQUEST_TIMEOUT=2, 699 NET_X25_CALL_REQUEST_TIMEOUT=2,
700 NET_X25_RESET_REQUEST_TIMEOUT=3, 700 NET_X25_RESET_REQUEST_TIMEOUT=3,
701 NET_X25_CLEAR_REQUEST_TIMEOUT=4, 701 NET_X25_CLEAR_REQUEST_TIMEOUT=4,
702 NET_X25_ACK_HOLD_BACK_TIMEOUT=5 702 NET_X25_ACK_HOLD_BACK_TIMEOUT=5,
703 NET_X25_FORWARD=6
703}; 704};
704 705
705/* /proc/sys/net/token-ring */ 706/* /proc/sys/net/token-ring */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 3cc70d1a3504..29d3089038ab 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -316,7 +316,7 @@ struct tcp_sock {
316 struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */ 316 struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
317 struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/ 317 struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/
318 318
319 struct tcp_sack_block recv_sack_cache[4]; 319 struct tcp_sack_block_wire recv_sack_cache[4];
320 320
321 /* from STCP, retrans queue hinting */ 321 /* from STCP, retrans queue hinting */
322 struct sk_buff* lost_skb_hint; 322 struct sk_buff* lost_skb_hint;
diff --git a/include/linux/wanrouter.h b/include/linux/wanrouter.h
index 2cd05013edfc..3add87465b1f 100644
--- a/include/linux/wanrouter.h
+++ b/include/linux/wanrouter.h
@@ -516,9 +516,6 @@ struct wan_device {
516/* Public functions available for device drivers */ 516/* Public functions available for device drivers */
517extern int register_wan_device(struct wan_device *wandev); 517extern int register_wan_device(struct wan_device *wandev);
518extern int unregister_wan_device(char *name); 518extern int unregister_wan_device(char *name);
519__be16 wanrouter_type_trans(struct sk_buff *skb, struct net_device *dev);
520int wanrouter_encapsulate(struct sk_buff *skb, struct net_device *dev,
521 unsigned short type);
522 519
523/* Proc interface functions. These must not be called by the drivers! */ 520/* Proc interface functions. These must not be called by the drivers! */
524extern int wanrouter_proc_init(void); 521extern int wanrouter_proc_init(void);
@@ -527,11 +524,6 @@ extern int wanrouter_proc_add(struct wan_device *wandev);
527extern int wanrouter_proc_delete(struct wan_device *wandev); 524extern int wanrouter_proc_delete(struct wan_device *wandev);
528extern int wanrouter_ioctl( struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); 525extern int wanrouter_ioctl( struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg);
529 526
530extern void lock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags);
531extern void unlock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags);
532
533
534
535/* Public Data */ 527/* Public Data */
536/* list of registered devices */ 528/* list of registered devices */
537extern struct wan_device *wanrouter_router_devlist; 529extern struct wan_device *wanrouter_router_devlist;
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index 9529ea1ae392..15ca89e9961b 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -178,6 +178,9 @@ enum {
178 XFRM_MSG_REPORT, 178 XFRM_MSG_REPORT,
179#define XFRM_MSG_REPORT XFRM_MSG_REPORT 179#define XFRM_MSG_REPORT XFRM_MSG_REPORT
180 180
181 XFRM_MSG_MIGRATE,
182#define XFRM_MSG_MIGRATE XFRM_MSG_MIGRATE
183
181 __XFRM_MSG_MAX 184 __XFRM_MSG_MAX
182}; 185};
183#define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1) 186#define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1)
@@ -256,6 +259,7 @@ enum xfrm_attr_type_t {
256 XFRMA_COADDR, /* xfrm_address_t */ 259 XFRMA_COADDR, /* xfrm_address_t */
257 XFRMA_LASTUSED, 260 XFRMA_LASTUSED,
258 XFRMA_POLICY_TYPE, /* struct xfrm_userpolicy_type */ 261 XFRMA_POLICY_TYPE, /* struct xfrm_userpolicy_type */
262 XFRMA_MIGRATE,
259 __XFRMA_MAX 263 __XFRMA_MAX
260 264
261#define XFRMA_MAX (__XFRMA_MAX - 1) 265#define XFRMA_MAX (__XFRMA_MAX - 1)
@@ -351,6 +355,19 @@ struct xfrm_user_report {
351 struct xfrm_selector sel; 355 struct xfrm_selector sel;
352}; 356};
353 357
358struct xfrm_user_migrate {
359 xfrm_address_t old_daddr;
360 xfrm_address_t old_saddr;
361 xfrm_address_t new_daddr;
362 xfrm_address_t new_saddr;
363 __u8 proto;
364 __u8 mode;
365 __u16 reserved;
366 __u32 reqid;
367 __u16 old_family;
368 __u16 new_family;
369};
370
354#ifndef __KERNEL__ 371#ifndef __KERNEL__
355/* backwards compatibility for userspace */ 372/* backwards compatibility for userspace */
356#define XFRMGRP_ACQUIRE 1 373#define XFRMGRP_ACQUIRE 1
@@ -375,6 +392,8 @@ enum xfrm_nlgroups {
375#define XFRMNLGRP_AEVENTS XFRMNLGRP_AEVENTS 392#define XFRMNLGRP_AEVENTS XFRMNLGRP_AEVENTS
376 XFRMNLGRP_REPORT, 393 XFRMNLGRP_REPORT,
377#define XFRMNLGRP_REPORT XFRMNLGRP_REPORT 394#define XFRMNLGRP_REPORT XFRMNLGRP_REPORT
395 XFRMNLGRP_MIGRATE,
396#define XFRMNLGRP_MIGRATE XFRMNLGRP_MIGRATE
378 __XFRMNLGRP_MAX 397 __XFRMNLGRP_MAX
379}; 398};
380#define XFRMNLGRP_MAX (__XFRMNLGRP_MAX - 1) 399#define XFRMNLGRP_MAX (__XFRMNLGRP_MAX - 1)
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 34cc76e3ddb4..d27ee8c0da3f 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -34,12 +34,13 @@
34#include <asm/byteorder.h> 34#include <asm/byteorder.h>
35 35
36/* This is for all connections with a full identity, no wildcards. 36/* This is for all connections with a full identity, no wildcards.
37 * New scheme, half the table is for TIME_WAIT, the other half is 37 * One chain is dedicated to TIME_WAIT sockets.
38 * for the rest. I'll experiment with dynamic table growth later. 38 * I'll experiment with dynamic table growth later.
39 */ 39 */
40struct inet_ehash_bucket { 40struct inet_ehash_bucket {
41 rwlock_t lock; 41 rwlock_t lock;
42 struct hlist_head chain; 42 struct hlist_head chain;
43 struct hlist_head twchain;
43}; 44};
44 45
45/* There are a few simple rules, which allow for local port reuse by 46/* There are a few simple rules, which allow for local port reuse by
@@ -97,8 +98,7 @@ struct inet_hashinfo {
97 * 98 *
98 * TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE 99 * TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
99 * 100 *
100 * First half of the table is for sockets not in TIME_WAIT, second half 101 * TIME_WAIT sockets use a separate chain (twchain).
101 * is for TIME_WAIT sockets only.
102 */ 102 */
103 struct inet_ehash_bucket *ehash; 103 struct inet_ehash_bucket *ehash;
104 104
@@ -369,7 +369,7 @@ static inline struct sock *
369 } 369 }
370 370
371 /* Must check for a TIME_WAIT'er before going to listener hash. */ 371 /* Must check for a TIME_WAIT'er before going to listener hash. */
372 sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) { 372 sk_for_each(sk, node, &head->twchain) {
373 if (INET_TW_MATCH(sk, hash, acookie, saddr, daddr, ports, dif)) 373 if (INET_TW_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
374 goto hit; 374 goto hit;
375 } 375 }
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
new file mode 100644
index 000000000000..04d1abb72d25
--- /dev/null
+++ b/include/net/iucv/af_iucv.h
@@ -0,0 +1,106 @@
1/*
2 * Copyright 2006 IBM Corporation
3 * IUCV protocol stack for Linux on zSeries
4 * Version 1.0
5 * Author(s): Jennifer Hunt <jenhunt@us.ibm.com>
6 *
7 */
8
9#ifndef __AFIUCV_H
10#define __AFIUCV_H
11
12#include <asm/types.h>
13#include <asm/byteorder.h>
14#include <linux/list.h>
15#include <linux/poll.h>
16#include <linux/socket.h>
17
18#ifndef AF_IUCV
19#define AF_IUCV 32
20#define PF_IUCV AF_IUCV
21#endif
22
23/* Connection and socket states */
24enum {
25 IUCV_CONNECTED = 1,
26 IUCV_OPEN,
27 IUCV_BOUND,
28 IUCV_LISTEN,
29 IUCV_SEVERED,
30 IUCV_DISCONN,
31 IUCV_CLOSED
32};
33
34#define IUCV_QUEUELEN_DEFAULT 65535
35#define IUCV_CONN_TIMEOUT (HZ * 40)
36#define IUCV_DISCONN_TIMEOUT (HZ * 2)
37#define IUCV_CONN_IDLE_TIMEOUT (HZ * 60)
38#define IUCV_BUFSIZE_DEFAULT 32768
39
40/* IUCV socket address */
41struct sockaddr_iucv {
42 sa_family_t siucv_family;
43 unsigned short siucv_port; /* Reserved */
44 unsigned int siucv_addr; /* Reserved */
45 char siucv_nodeid[8]; /* Reserved */
46 char siucv_user_id[8]; /* Guest User Id */
47 char siucv_name[8]; /* Application Name */
48};
49
50
51/* Common socket structures and functions */
52
53#define iucv_sk(__sk) ((struct iucv_sock *) __sk)
54
55struct iucv_sock {
56 struct sock sk;
57 char src_user_id[8];
58 char src_name[8];
59 char dst_user_id[8];
60 char dst_name[8];
61 struct list_head accept_q;
62 struct sock *parent;
63 struct iucv_path *path;
64 struct sk_buff_head send_skb_q;
65 unsigned int send_tag;
66};
67
68struct iucv_sock_list {
69 struct hlist_head head;
70 rwlock_t lock;
71 atomic_t autobind_name;
72};
73
74static void iucv_sock_destruct(struct sock *sk);
75static void iucv_sock_cleanup_listen(struct sock *parent);
76static void iucv_sock_kill(struct sock *sk);
77static void iucv_sock_close(struct sock *sk);
78static int iucv_sock_create(struct socket *sock, int proto);
79static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
80 int addr_len);
81static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
82 int alen, int flags);
83static int iucv_sock_listen(struct socket *sock, int backlog);
84static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
85 int flags);
86static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
87 int *len, int peer);
88static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
89 struct msghdr *msg, size_t len);
90static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
91 struct msghdr *msg, size_t len, int flags);
92unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
93 poll_table *wait);
94static int iucv_sock_release(struct socket *sock);
95static int iucv_sock_shutdown(struct socket *sock, int how);
96
97void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
98void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
99int iucv_sock_wait_state(struct sock *sk, int state, int state2,
100 unsigned long timeo);
101int iucv_sock_wait_cnt(struct sock *sk, unsigned long timeo);
102void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
103void iucv_accept_unlink(struct sock *sk);
104struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock);
105
106#endif /* __AFIUCV_H */
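
A hedged userspace sketch of what the new address family enables, assuming the struct sockaddr_iucv layout above is visible to the program; the peer user ID and application name are made up and are blank-padded to eight characters:

#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_IUCV
#define AF_IUCV 32
#endif

/* Connect an AF_IUCV stream socket to a hypothetical peer. */
int iucv_connect_example(void)
{
	struct sockaddr_iucv addr;
	int fd = socket(AF_IUCV, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.siucv_family = AF_IUCV;
	memcpy(addr.siucv_user_id, "LNXPEER ", 8);	/* target z/VM guest */
	memcpy(addr.siucv_name,    "APPSRV  ", 8);	/* target application */

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
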
diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h
new file mode 100644
index 000000000000..746e7416261e
--- /dev/null
+++ b/include/net/iucv/iucv.h
@@ -0,0 +1,415 @@
1/*
 2 * include/net/iucv/iucv.h
3 * IUCV base support.
4 *
5 * S390 version
6 * Copyright 2000, 2006 IBM Corporation
7 * Author(s):Alan Altmark (Alan_Altmark@us.ibm.com)
8 * Xenia Tkatschow (xenia@us.ibm.com)
9 * Rewritten for af_iucv:
10 * Martin Schwidefsky <schwidefsky@de.ibm.com>
11 *
12 *
13 * Functionality:
 14 * To use any of the IUCV functions, a program must first register
 15 * using iucv_register(). Once the program has successfully
 16 * completed registration, it can use the other functions.
 17 * For further reference on all IUCV functionality, refer to the
 18 * CP Programming Services book, also available on the web through
19 * www.ibm.com/s390/vm/pubs, manual # SC24-5760
20 *
21 * Definition of Return Codes
22 * - All positive return codes including zero are reflected back
 23 * from CP. The definition of each return code can be found in the
24 * CP Programming Services book.
25 * - Return Code of:
26 * -EINVAL: Invalid value
27 * -ENOMEM: storage allocation failed
28 */
29
30#include <linux/types.h>
31#include <asm/debug.h>
32
33/*
34 * IUCV option flags usable by device drivers:
35 *
36 * IUCV_IPRMDATA Indicates that your program can handle a message in the
37 * parameter list / a message is sent in the parameter list.
38 * Used for iucv_path_accept, iucv_path_connect,
39 * iucv_message_reply, iucv_message_send, iucv_message_send2way.
40 * IUCV_IPQUSCE Indicates that you do not want to receive messages on this
41 * path until an iucv_path_resume is issued.
42 * Used for iucv_path_accept, iucv_path_connect.
43 * IUCV_IPBUFLST Indicates that an address list is used for the message data.
44 * Used for iucv_message_receive, iucv_message_send,
45 * iucv_message_send2way.
46 * IUCV_IPPRTY Specifies that you want to send priority messages.
47 * Used for iucv_path_accept, iucv_path_connect,
48 * iucv_message_reply, iucv_message_send, iucv_message_send2way.
49 * IUCV_IPSYNC Indicates a synchronous send request.
50 * Used for iucv_message_send, iucv_message_send2way.
51 * IUCV_IPANSLST Indicates that an address list is used for the reply data.
52 * Used for iucv_message_reply, iucv_message_send2way.
53 * IUCV_IPLOCAL Specifies that the communication partner has to be on the
54 * local system. If local is specified no target class can be
55 * specified.
56 * Used for iucv_path_connect.
57 *
58 * All flags are defined in the input field IPFLAGS1 of each function
59 * and can be found in CP Programming Services.
60 */
61#define IUCV_IPRMDATA 0x80
62#define IUCV_IPQUSCE 0x40
63#define IUCV_IPBUFLST 0x40
64#define IUCV_IPPRTY 0x20
65#define IUCV_IPANSLST 0x08
66#define IUCV_IPSYNC 0x04
67#define IUCV_IPLOCAL 0x01
68
69/*
70 * iucv_array : Defines buffer array.
 71 * Inside the array may be 31-bit addresses and 31-bit lengths.
72 * Use a pointer to an iucv_array as the buffer, reply or answer
73 * parameter on iucv_message_send, iucv_message_send2way, iucv_message_receive
74 * and iucv_message_reply if IUCV_IPBUFLST or IUCV_IPANSLST are used.
75 */
76struct iucv_array {
77 u32 address;
78 u32 length;
79} __attribute__ ((aligned (8)));
80
81extern struct bus_type iucv_bus;
82extern struct device *iucv_root;
83
84/*
85 * struct iucv_path
86 * pathid: 16 bit path identification
87 * msglim: 16 bit message limit
88 * flags: properties of the path: IPRMDATA, IPQUSCE, IPPRTY
89 * handler: address of iucv handler structure
90 * private: private information of the handler associated with the path
91 * list: list_head for the iucv_handler path list.
92 */
93struct iucv_path {
94 u16 pathid;
95 u16 msglim;
96 u8 flags;
97 void *private;
98 struct iucv_handler *handler;
99 struct list_head list;
100};
101
102/*
103 * struct iucv_message
104 * id: 32 bit message id
105 * audit: 32 bit error information of purged or replied messages
106 * class: 32 bit target class of a message (source class for replies)
107 * tag: 32 bit tag to be associated with the message
108 * length: 32 bit length of the message / reply
109 * reply_size: 32 bit maximum allowed length of the reply
110 * rmmsg: 8 byte inline message
111 * flags: message properties (IUCV_IPPRTY)
112 */
113struct iucv_message {
114 u32 id;
115 u32 audit;
116 u32 class;
117 u32 tag;
118 u32 length;
119 u32 reply_size;
120 u8 rmmsg[8];
121 u8 flags;
122};
123
124/*
125 * struct iucv_handler
126 *
127 * A vector of functions that handle IUCV interrupts. Each function gets
128 * a parameter area as defined by the CP Programming Services and a private
129 * pointer that is provided by the user of the interface.
130 */
131struct iucv_handler {
132 /*
133 * The path_pending function is called after an iucv interrupt
134 * type 0x01 has been received. The base code allocates a path
135 * structure and "asks" the handler if this path belongs to the
136 * handler. To accept the path, the path_pending function needs
137 * to call iucv_path_accept and return 0. If the callback returns
138 * a value != 0, the iucv base code will continue with the next
139 * handler. The order in which the path_pending functions are
140 * called is the order of the registration of the iucv handlers
141 * to the base code.
142 */
143 int (*path_pending)(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
144 /*
145 * The path_complete function is called after an iucv interrupt
146 * type 0x02 has been received for a path that has been established
147 * for this handler with iucv_path_connect and got accepted by the
148 * peer with iucv_path_accept.
149 */
150 void (*path_complete)(struct iucv_path *, u8 ipuser[16]);
151 /*
152 * The path_severed function is called after an iucv interrupt
153 * type 0x03 has been received. The communication peer shut down
154 * its end of the communication path. The path still exists and
155 * remaining messages can be received until an iucv_path_sever
156 * shuts down the other end of the path as well.
157 */
158 void (*path_severed)(struct iucv_path *, u8 ipuser[16]);
159 /*
160 * The path_quiesced function is called after an iucv interrupt
161 * type 0x04 has been received. The communication peer has quiesced
162 * the path. Delivery of messages is stopped until iucv_path_resume
163 * has been called.
164 */
165 void (*path_quiesced)(struct iucv_path *, u8 ipuser[16]);
166 /*
167 * The path_resumed function is called after an iucv interrupt
168 * type 0x05 has been received. The communication peer has resumed
169 * the path.
170 */
171 void (*path_resumed)(struct iucv_path *, u8 ipuser[16]);
172 /*
173 * The message_pending function is called after an iucv interrupt
174 * type 0x06 or type 0x07 has been received. A new message is
175 * available and can be received with iucv_message_receive.
176 */
177 void (*message_pending)(struct iucv_path *, struct iucv_message *);
178 /*
179 * The message_complete function is called after an iucv interrupt
180 * type 0x08 or type 0x09 has been received. A message sent with
181 * iucv_message_send2way has been replied to. The reply can be
182 * received with iucv_message_receive.
183 */
184 void (*message_complete)(struct iucv_path *, struct iucv_message *);
185
186 struct list_head list;
187 struct list_head paths;
188};
189
190/**
191 * iucv_register:
192 * @handler: address of iucv handler structure
193 * @smp: != 0 indicates that the handler can deal with out of order messages
194 *
195 * Registers a driver with IUCV.
196 *
197 * Returns 0 on success, -ENOMEM if the memory allocation for the pathid
198 * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus.
199 */
200int iucv_register(struct iucv_handler *handler, int smp);
201
202/**
203 * iucv_unregister
204 * @handler: address of iucv handler structure
205 * @smp: != 0 indicates that the handler can deal with out of order messages
206 *
207 * Unregister driver from IUCV.
208 */
209void iucv_unregister(struct iucv_handler *handle, int smp);
210
211/**
212 * iucv_path_alloc
213 * @msglim: initial message limit
214 * @flags: initial flags
215 * @gfp: kmalloc allocation flag
216 *
217 * Allocate a new path structure for use with iucv_connect.
218 *
219 * Returns NULL if the memory allocation failed or a pointer to the
220 * path structure.
221 */
222static inline struct iucv_path *iucv_path_alloc(u16 msglim, u8 flags, gfp_t gfp)
223{
224 struct iucv_path *path;
225
226 path = kzalloc(sizeof(struct iucv_path), gfp);
227 if (path) {
228 path->msglim = msglim;
229 path->flags = flags;
230 }
231 return path;
232}
233
234/**
235 * iucv_path_free
236 * @path: address of iucv path structure
237 *
238 * Frees a path structure.
239 */
240static inline void iucv_path_free(struct iucv_path *path)
241{
242 kfree(path);
243}
244
245/**
246 * iucv_path_accept
247 * @path: address of iucv path structure
248 * @handler: address of iucv handler structure
249 * @userdata: 16 bytes of data reflected to the communication partner
250 * @private: private data passed to interrupt handlers for this path
251 *
252 * This function is issued after the user received a connection pending
253 * external interrupt and now wishes to complete the IUCV communication path.
254 *
255 * Returns the result of the CP IUCV call.
256 */
257int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
258 u8 userdata[16], void *private);
259
260/**
261 * iucv_path_connect
262 * @path: address of iucv path structure
263 * @handler: address of iucv handler structure
264 * @userid: 8-byte user identification
265 * @system: 8-byte target system identification
266 * @userdata: 16 bytes of data reflected to the communication partner
267 * @private: private data passed to interrupt handlers for this path
268 *
269 * This function establishes an IUCV path. Although the connect may complete
270 * successfully, you are not able to use the path until you receive an IUCV
271 * Connection Complete external interrupt.
272 *
273 * Returns the result of the CP IUCV call.
274 */
275int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
276 u8 userid[8], u8 system[8], u8 userdata[16],
277 void *private);
278
279/**
280 * iucv_path_quiesce:
281 * @path: address of iucv path structure
282 * @userdata: 16 bytes of data reflected to the communication partner
283 *
284 * This function temporarily suspends incoming messages on an IUCV path.
285 * You can later reactivate the path by invoking the iucv_resume function.
286 *
287 * Returns the result from the CP IUCV call.
288 */
289int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16]);
290
291/**
292 * iucv_path_resume:
293 * @path: address of iucv path structure
294 * @userdata: 16 bytes of data reflected to the communication partner
295 *
296 * This function resumes incoming messages on an IUCV path that has
297 * been stopped with iucv_path_quiesce.
298 *
299 * Returns the result from the CP IUCV call.
300 */
301int iucv_path_resume(struct iucv_path *path, u8 userdata[16]);
302
303/**
304 * iucv_path_sever
305 * @path: address of iucv path structure
306 * @userdata: 16 bytes of data reflected to the communication partner
307 *
308 * This function terminates an IUCV path.
309 *
310 * Returns the result from the CP IUCV call.
311 */
312int iucv_path_sever(struct iucv_path *path, u8 userdata[16]);
313
314/**
315 * iucv_message_purge
316 * @path: address of iucv path structure
317 * @msg: address of iucv msg structure
318 * @srccls: source class of message
319 *
320 * Cancels a message you have sent.
321 *
322 * Returns the result from the CP IUCV call.
323 */
324int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
325 u32 srccls);
326
327/**
328 * iucv_message_receive
329 * @path: address of iucv path structure
330 * @msg: address of iucv msg structure
331 * @flags: flags that affect how the message is received (IUCV_IPBUFLST)
332 * @buffer: address of data buffer or address of struct iucv_array
333 * @size: length of data buffer
334 * @residual:
335 *
336 * This function receives messages that are being sent to you over
337 * established paths. This function will deal with RMDATA messages
338 * embedded in struct iucv_message as well.
339 *
340 * Returns the result from the CP IUCV call.
341 */
342int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
343 u8 flags, void *buffer, size_t size, size_t *residual);
344
345/**
346 * iucv_message_reject
347 * @path: address of iucv path structure
348 * @msg: address of iucv msg structure
349 *
350 * The reject function refuses a specified message. Between the time you
351 * are notified of a message and the time that you complete the message,
352 * the message may be rejected.
353 *
354 * Returns the result from the CP IUCV call.
355 */
356int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg);
357
358/**
359 * iucv_message_reply
360 * @path: address of iucv path structure
361 * @msg: address of iucv msg structure
362 * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
363 * @reply: address of data buffer or address of struct iucv_array
364 * @size: length of reply data buffer
365 *
366 * This function responds to the two-way messages that you receive. You
367 * must completely identify the message to which you wish to reply, i.e.,
368 * pathid, msgid, and trgcls. Prmmsg signifies the data is moved into
369 * the parameter list.
370 *
371 * Returns the result from the CP IUCV call.
372 */
373int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
374 u8 flags, void *reply, size_t size);
375
376/**
377 * iucv_message_send
378 * @path: address of iucv path structure
379 * @msg: address of iucv msg structure
380 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
381 * @srccls: source class of message
382 * @buffer: address of data buffer or address of struct iucv_array
383 * @size: length of send buffer
384 *
385 * This function transmits data to another application. Data to be
386 * transmitted is in a buffer. This is a one-way message; the
387 * receiver will not reply to it.
388 *
389 * Returns the result from the CP IUCV call.
390 */
391int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
392 u8 flags, u32 srccls, void *buffer, size_t size);
393
394/**
395 * iucv_message_send2way
396 * @path: address of iucv path structure
397 * @msg: address of iucv msg structure
398 * @flags: how the message is sent and the reply is received
399 * (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_IPANSLST)
400 * @srccls: source class of message
401 * @buffer: address of data buffer or address of struct iucv_array
402 * @size: length of send buffer
403 * @answer: address of answer buffer or address of struct iucv_array
404 * @asize: size of reply buffer
405 *
406 * This function transmits data to another application. Data to be
407 * transmitted is in a buffer. The receiver of the send is expected to
408 * reply to the message and a buffer is provided into which IUCV moves
409 * the reply to this message.
410 *
411 * Returns the result from the CP IUCV call.
412 */
413int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
414 u8 flags, u32 srccls, void *buffer, size_t size,
415 void *answer, size_t asize, size_t *residual);
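
Putting the pieces above together, a hedged sketch of a kernel user of the new base API: register a handler, allocate and connect a path, and send only once the path_complete callback has signalled that the path is usable. The function names, the peer user ID and the message limit are made up, and error handling is trimmed to the essentials.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <net/iucv/iucv.h>

static void example_path_complete(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_message msg;
	static char greeting[] = "hello";

	/* The path may only be used after this callback has fired. */
	memset(&msg, 0, sizeof(msg));
	if (iucv_message_send(path, &msg, 0, 0, greeting, sizeof(greeting)))
		iucv_path_sever(path, NULL);
}

static struct iucv_handler example_handler = {
	.path_complete = example_path_complete,
};

static int example_iucv_connect(void)
{
	struct iucv_path *path;
	u8 userid[8] = "PEERGST ";	/* hypothetical z/VM peer, blank padded */
	int rc;

	rc = iucv_register(&example_handler, 0);
	if (rc)
		return rc;

	path = iucv_path_alloc(16, IUCV_IPRMDATA, GFP_KERNEL);
	if (!path) {
		iucv_unregister(&example_handler, 0);
		return -ENOMEM;
	}

	rc = iucv_path_connect(path, &example_handler, userid, NULL, NULL, NULL);
	if (rc) {
		iucv_path_free(path);
		iucv_unregister(&example_handler, 0);
	}
	return rc;
}
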
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index bd01b4633ee2..68ec27490c20 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -45,6 +45,7 @@ union nf_conntrack_expect_proto {
45#include <linux/netfilter/nf_conntrack_ftp.h> 45#include <linux/netfilter/nf_conntrack_ftp.h>
46#include <linux/netfilter/nf_conntrack_pptp.h> 46#include <linux/netfilter/nf_conntrack_pptp.h>
47#include <linux/netfilter/nf_conntrack_h323.h> 47#include <linux/netfilter/nf_conntrack_h323.h>
48#include <linux/netfilter/nf_conntrack_sane.h>
48 49
49/* per conntrack: application helper private data */ 50/* per conntrack: application helper private data */
50union nf_conntrack_help { 51union nf_conntrack_help {
@@ -52,6 +53,7 @@ union nf_conntrack_help {
52 struct nf_ct_ftp_master ct_ftp_info; 53 struct nf_ct_ftp_master ct_ftp_info;
53 struct nf_ct_pptp_master ct_pptp_info; 54 struct nf_ct_pptp_master ct_pptp_info;
54 struct nf_ct_h323_master ct_h323_info; 55 struct nf_ct_h323_master ct_h323_info;
56 struct nf_ct_sane_master ct_sane_info;
55}; 57};
56 58
57#include <linux/types.h> 59#include <linux/types.h>
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index 61c62068ca6b..bc57dd7b9b5c 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -16,6 +16,7 @@ enum nf_nat_manip_type
16 16
17#define IP_NAT_RANGE_MAP_IPS 1 17#define IP_NAT_RANGE_MAP_IPS 1
18#define IP_NAT_RANGE_PROTO_SPECIFIED 2 18#define IP_NAT_RANGE_PROTO_SPECIFIED 2
19#define IP_NAT_RANGE_PROTO_RANDOM 4
19 20
20/* NAT sequence number modifications */ 21/* NAT sequence number modifications */
21struct nf_nat_seq { 22struct nf_nat_seq {
diff --git a/include/net/route.h b/include/net/route.h
index 486e37aff06c..1440bdb5a27d 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -146,7 +146,8 @@ static inline char rt_tos2priority(u8 tos)
146 146
147static inline int ip_route_connect(struct rtable **rp, __be32 dst, 147static inline int ip_route_connect(struct rtable **rp, __be32 dst,
148 __be32 src, u32 tos, int oif, u8 protocol, 148 __be32 src, u32 tos, int oif, u8 protocol,
149 __be16 sport, __be16 dport, struct sock *sk) 149 __be16 sport, __be16 dport, struct sock *sk,
150 int flags)
150{ 151{
151 struct flowi fl = { .oif = oif, 152 struct flowi fl = { .oif = oif,
152 .nl_u = { .ip4_u = { .daddr = dst, 153 .nl_u = { .ip4_u = { .daddr = dst,
@@ -168,7 +169,7 @@ static inline int ip_route_connect(struct rtable **rp, __be32 dst,
168 *rp = NULL; 169 *rp = NULL;
169 } 170 }
170 security_sk_classify_flow(sk, &fl); 171 security_sk_classify_flow(sk, &fl);
171 return ip_route_output_flow(rp, &fl, sk, 0); 172 return ip_route_output_flow(rp, &fl, sk, flags);
172} 173}
173 174
174static inline int ip_route_newports(struct rtable **rp, u8 protocol, 175static inline int ip_route_newports(struct rtable **rp, u8 protocol,
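
Callers of ip_route_connect() now supply an extra flags argument that is handed straight through to ip_route_output_flow(). A hedged sketch of a caller with the new signature; the function name is hypothetical and the flag value shown is illustrative only:

#include <linux/in.h>
#include <net/route.h>
#include <net/sock.h>

static int example_connect_route(struct sock *sk, __be32 daddr, __be32 saddr,
				 __be16 sport, __be16 dport)
{
	struct rtable *rt;

	/* On success, rt holds a referenced route for the caller to use. */
	return ip_route_connect(&rt, daddr, saddr, RT_CONN_FLAGS(sk),
				sk->sk_bound_dev_if, IPPROTO_TCP,
				sport, dport, sk, 1);
}
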
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cd8fa0c858ae..5c472f255b77 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -802,9 +802,8 @@ static inline void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
802/* 802/*
803 * Calculate(/check) TCP checksum 803 * Calculate(/check) TCP checksum
804 */ 804 */
805static inline __sum16 tcp_v4_check(struct tcphdr *th, int len, 805static inline __sum16 tcp_v4_check(int len, __be32 saddr,
806 __be32 saddr, __be32 daddr, 806 __be32 daddr, __wsum base)
807 __wsum base)
808{ 807{
809 return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base); 808 return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
810} 809}
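
tcp_v4_check() loses its unused struct tcphdr argument; callers now pass only the segment length, the addresses and the partial checksum. A hedged caller sketch (the csum_partial() usage is an assumption, not taken from this diff):

#include <net/checksum.h>
#include <net/tcp.h>

static void example_fill_tcp_check(struct tcphdr *th, int len,
				   __be32 saddr, __be32 daddr)
{
	th->check = 0;
	th->check = tcp_v4_check(len, saddr, daddr,
				 csum_partial((char *)th, len, 0));
}
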
diff --git a/include/net/x25.h b/include/net/x25.h
index e47fe440d9d7..fc3f03d976f8 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -161,6 +161,14 @@ struct x25_sock {
161 unsigned long vc_facil_mask; /* inc_call facilities mask */ 161 unsigned long vc_facil_mask; /* inc_call facilities mask */
162}; 162};
163 163
164struct x25_forward {
165 struct list_head node;
166 unsigned int lci;
167 struct net_device *dev1;
168 struct net_device *dev2;
169 atomic_t refcnt;
170};
171
164static inline struct x25_sock *x25_sk(const struct sock *sk) 172static inline struct x25_sock *x25_sk(const struct sock *sk)
165{ 173{
166 return (struct x25_sock *)sk; 174 return (struct x25_sock *)sk;
@@ -172,6 +180,7 @@ extern int sysctl_x25_call_request_timeout;
172extern int sysctl_x25_reset_request_timeout; 180extern int sysctl_x25_reset_request_timeout;
173extern int sysctl_x25_clear_request_timeout; 181extern int sysctl_x25_clear_request_timeout;
174extern int sysctl_x25_ack_holdback_timeout; 182extern int sysctl_x25_ack_holdback_timeout;
183extern int sysctl_x25_forward;
175 184
176extern int x25_addr_ntoa(unsigned char *, struct x25_address *, 185extern int x25_addr_ntoa(unsigned char *, struct x25_address *,
177 struct x25_address *); 186 struct x25_address *);
@@ -198,6 +207,13 @@ extern int x25_negotiate_facilities(struct sk_buff *, struct sock *,
198 struct x25_dte_facilities *); 207 struct x25_dte_facilities *);
199extern void x25_limit_facilities(struct x25_facilities *, struct x25_neigh *); 208extern void x25_limit_facilities(struct x25_facilities *, struct x25_neigh *);
200 209
210/* x25_forward.c */
211extern void x25_clear_forward_by_lci(unsigned int lci);
212extern void x25_clear_forward_by_dev(struct net_device *);
213extern int x25_forward_data(int, struct x25_neigh *, struct sk_buff *);
214extern int x25_forward_call(struct x25_address *, struct x25_neigh *,
215 struct sk_buff *, int);
216
201/* x25_in.c */ 217/* x25_in.c */
202extern int x25_process_rx_frame(struct sock *, struct sk_buff *); 218extern int x25_process_rx_frame(struct sock *, struct sk_buff *);
203extern int x25_backlog_rcv(struct sock *, struct sk_buff *); 219extern int x25_backlog_rcv(struct sock *, struct sk_buff *);
@@ -282,6 +298,8 @@ extern struct hlist_head x25_list;
282extern rwlock_t x25_list_lock; 298extern rwlock_t x25_list_lock;
283extern struct list_head x25_route_list; 299extern struct list_head x25_route_list;
284extern rwlock_t x25_route_list_lock; 300extern rwlock_t x25_route_list_lock;
301extern struct list_head x25_forward_list;
302extern rwlock_t x25_forward_list_lock;
285 303
286extern int x25_proc_init(void); 304extern int x25_proc_init(void);
287extern void x25_proc_exit(void); 305extern void x25_proc_exit(void);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index e4765413cf80..16924cb772c9 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -252,10 +252,13 @@ struct xfrm_state_afinfo {
252 xfrm_address_t *daddr, xfrm_address_t *saddr); 252 xfrm_address_t *daddr, xfrm_address_t *saddr);
253 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); 253 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
254 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); 254 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
255 int (*output)(struct sk_buff *skb);
255}; 256};
256 257
257extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo); 258extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
258extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo); 259extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
260extern struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
261extern void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
259 262
260extern void xfrm_state_delete_tunnel(struct xfrm_state *x); 263extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
261 264
@@ -359,6 +362,19 @@ struct xfrm_policy
359 struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH]; 362 struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
360}; 363};
361 364
365struct xfrm_migrate {
366 xfrm_address_t old_daddr;
367 xfrm_address_t old_saddr;
368 xfrm_address_t new_daddr;
369 xfrm_address_t new_saddr;
370 u8 proto;
371 u8 mode;
372 u16 reserved;
373 u32 reqid;
374 u16 old_family;
375 u16 new_family;
376};
377
362#define XFRM_KM_TIMEOUT 30 378#define XFRM_KM_TIMEOUT 30
363/* which seqno */ 379/* which seqno */
364#define XFRM_REPLAY_SEQ 1 380#define XFRM_REPLAY_SEQ 1
@@ -385,6 +401,7 @@ struct xfrm_mgr
385 int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport); 401 int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
386 int (*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c); 402 int (*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c);
387 int (*report)(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr); 403 int (*report)(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
404 int (*migrate)(struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_bundles);
388}; 405};
389 406
390extern int xfrm_register_km(struct xfrm_mgr *km); 407extern int xfrm_register_km(struct xfrm_mgr *km);
@@ -985,6 +1002,16 @@ extern int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *xdst,
985 struct flowi *fl, int family, int strict); 1002 struct flowi *fl, int family, int strict);
986extern void xfrm_init_pmtu(struct dst_entry *dst); 1003extern void xfrm_init_pmtu(struct dst_entry *dst);
987 1004
1005#ifdef CONFIG_XFRM_MIGRATE
1006extern int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1007 struct xfrm_migrate *m, int num_bundles);
1008extern struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m);
1009extern struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1010 struct xfrm_migrate *m);
1011extern int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1012 struct xfrm_migrate *m, int num_bundles);
1013#endif
1014
988extern wait_queue_head_t km_waitq; 1015extern wait_queue_head_t km_waitq;
989extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport); 1016extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
990extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid); 1017extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid);
@@ -1050,5 +1077,25 @@ static inline void xfrm_aevent_doreplay(struct xfrm_state *x)
1050 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); 1077 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
1051} 1078}
1052 1079
1080#ifdef CONFIG_XFRM_MIGRATE
1081static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
1082{
1083 return (struct xfrm_algo *)kmemdup(orig, sizeof(*orig) + orig->alg_key_len, GFP_KERNEL);
1084}
1085
1086static inline void xfrm_states_put(struct xfrm_state **states, int n)
1087{
1088 int i;
1089 for (i = 0; i < n; i++)
1090 xfrm_state_put(*(states + i));
1091}
1092
1093static inline void xfrm_states_delete(struct xfrm_state **states, int n)
1094{
1095 int i;
1096 for (i = 0; i < n; i++)
1097 xfrm_state_delete(*(states + i));
1098}
1099#endif
1053 1100
1054#endif /* _NET_XFRM_H */ 1101#endif /* _NET_XFRM_H */
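
A hedged sketch of how a key manager might drive the new interface: fill a struct xfrm_migrate with the old and new endpoints and hand it to xfrm_migrate() for the matching bundle. The direction, policy-type, mode and protocol constants are standard xfrm values not introduced by this patch; the function name and field values are illustrative only.

#include <linux/in.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <net/xfrm.h>

#ifdef CONFIG_XFRM_MIGRATE
static int example_migrate_one(struct xfrm_selector *sel,
			       const xfrm_address_t *old_daddr,
			       const xfrm_address_t *old_saddr,
			       const xfrm_address_t *new_daddr,
			       const xfrm_address_t *new_saddr)
{
	struct xfrm_migrate m;

	memset(&m, 0, sizeof(m));
	m.old_daddr  = *old_daddr;
	m.old_saddr  = *old_saddr;
	m.new_daddr  = *new_daddr;
	m.new_saddr  = *new_saddr;
	m.proto      = IPPROTO_ESP;
	m.mode       = XFRM_MODE_TUNNEL;
	m.old_family = AF_INET;
	m.new_family = AF_INET;

	/* One migrate entry for an outbound, main-type policy. */
	return xfrm_migrate(sel, XFRM_POLICY_OUT, XFRM_POLICY_TYPE_MAIN, &m, 1);
}
#endif /* CONFIG_XFRM_MIGRATE */
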
diff --git a/net/Kconfig b/net/Kconfig
index 7dfc94920697..915657832d94 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -37,6 +37,7 @@ config NETDEBUG
37source "net/packet/Kconfig" 37source "net/packet/Kconfig"
38source "net/unix/Kconfig" 38source "net/unix/Kconfig"
39source "net/xfrm/Kconfig" 39source "net/xfrm/Kconfig"
40source "net/iucv/Kconfig"
40 41
41config INET 42config INET
42 bool "TCP/IP networking" 43 bool "TCP/IP networking"
diff --git a/net/Makefile b/net/Makefile
index ad4d14f4bb29..4854ac506313 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -47,6 +47,7 @@ obj-$(CONFIG_IP_SCTP) += sctp/
47obj-$(CONFIG_IEEE80211) += ieee80211/ 47obj-$(CONFIG_IEEE80211) += ieee80211/
48obj-$(CONFIG_TIPC) += tipc/ 48obj-$(CONFIG_TIPC) += tipc/
49obj-$(CONFIG_NETLABEL) += netlabel/ 49obj-$(CONFIG_NETLABEL) += netlabel/
50obj-$(CONFIG_IUCV) += iucv/
50 51
51ifeq ($(CONFIG_NET),y) 52ifeq ($(CONFIG_NET),y)
52obj-$(CONFIG_SYSCTL) += sysctl_net.o 53obj-$(CONFIG_SYSCTL) += sysctl_net.o
diff --git a/net/atm/common.c b/net/atm/common.c
index fbabff494468..a2878e92c3ab 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -816,7 +816,8 @@ static void __exit atm_exit(void)
816 proto_unregister(&vcc_proto); 816 proto_unregister(&vcc_proto);
817} 817}
818 818
819module_init(atm_init); 819subsys_initcall(atm_init);
820
820module_exit(atm_exit); 821module_exit(atm_exit);
821 822
822MODULE_LICENSE("GPL"); 823MODULE_LICENSE("GPL");
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index ea3337ad0edc..a25fa8cb5284 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -949,44 +949,29 @@ static ctl_table brnf_net_table[] = {
949}; 949};
950#endif 950#endif
951 951
952int br_netfilter_init(void) 952int __init br_netfilter_init(void)
953{ 953{
954 int i; 954 int ret;
955
956 for (i = 0; i < ARRAY_SIZE(br_nf_ops); i++) {
957 int ret;
958
959 if ((ret = nf_register_hook(&br_nf_ops[i])) >= 0)
960 continue;
961
962 while (i--)
963 nf_unregister_hook(&br_nf_ops[i]);
964 955
956 ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
957 if (ret < 0)
965 return ret; 958 return ret;
966 }
967
968#ifdef CONFIG_SYSCTL 959#ifdef CONFIG_SYSCTL
969 brnf_sysctl_header = register_sysctl_table(brnf_net_table, 0); 960 brnf_sysctl_header = register_sysctl_table(brnf_net_table, 0);
970 if (brnf_sysctl_header == NULL) { 961 if (brnf_sysctl_header == NULL) {
971 printk(KERN_WARNING 962 printk(KERN_WARNING
972 "br_netfilter: can't register to sysctl.\n"); 963 "br_netfilter: can't register to sysctl.\n");
973 for (i = 0; i < ARRAY_SIZE(br_nf_ops); i++) 964 nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
974 nf_unregister_hook(&br_nf_ops[i]); 965 return -ENOMEM;
975 return -EFAULT;
976 } 966 }
977#endif 967#endif
978
979 printk(KERN_NOTICE "Bridge firewalling registered\n"); 968 printk(KERN_NOTICE "Bridge firewalling registered\n");
980
981 return 0; 969 return 0;
982} 970}
983 971
984void br_netfilter_fini(void) 972void br_netfilter_fini(void)
985{ 973{
986 int i; 974 nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
987
988 for (i = ARRAY_SIZE(br_nf_ops) - 1; i >= 0; i--)
989 nf_unregister_hook(&br_nf_ops[i]);
990#ifdef CONFIG_SYSCTL 975#ifdef CONFIG_SYSCTL
991 unregister_sysctl_table(brnf_sysctl_header); 976 unregister_sysctl_table(brnf_sysctl_header);
992#endif 977#endif
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index a9139682c49b..7d68b24b5654 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -45,7 +45,7 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por
45 45
46 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags); 46 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
47 if (nlh == NULL) 47 if (nlh == NULL)
48 return -ENOBUFS; 48 return -EMSGSIZE;
49 49
50 hdr = nlmsg_data(nlh); 50 hdr = nlmsg_data(nlh);
51 hdr->ifi_family = AF_BRIDGE; 51 hdr->ifi_family = AF_BRIDGE;
@@ -72,7 +72,8 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por
72 return nlmsg_end(skb, nlh); 72 return nlmsg_end(skb, nlh);
73 73
74nla_put_failure: 74nla_put_failure:
75 return nlmsg_cancel(skb, nlh); 75 nlmsg_cancel(skb, nlh);
76 return -EMSGSIZE;
76} 77}
77 78
78/* 79/*
@@ -89,9 +90,12 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port)
89 goto errout; 90 goto errout;
90 91
91 err = br_fill_ifinfo(skb, port, 0, 0, event, 0); 92 err = br_fill_ifinfo(skb, port, 0, 0, event, 0);
92 /* failure implies BUG in br_nlmsg_size() */ 93 if (err < 0) {
93 BUG_ON(err < 0); 94 /* -EMSGSIZE implies BUG in br_nlmsg_size() */
94 95 WARN_ON(err == -EMSGSIZE);
96 kfree_skb(skb);
97 goto errout;
98 }
95 err = rtnl_notify(skb, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); 99 err = rtnl_notify(skb, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
96errout: 100errout:
97 if (err < 0) 101 if (err < 0)
diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c
index e4c642448e1b..6afa4d017d4a 100644
--- a/net/bridge/netfilter/ebt_ip.c
+++ b/net/bridge/netfilter/ebt_ip.c
@@ -93,6 +93,7 @@ static int ebt_ip_check(const char *tablename, unsigned int hookmask,
93 return -EINVAL; 93 return -EINVAL;
94 if (info->protocol != IPPROTO_TCP && 94 if (info->protocol != IPPROTO_TCP &&
95 info->protocol != IPPROTO_UDP && 95 info->protocol != IPPROTO_UDP &&
96 info->protocol != IPPROTO_UDPLITE &&
96 info->protocol != IPPROTO_SCTP && 97 info->protocol != IPPROTO_SCTP &&
97 info->protocol != IPPROTO_DCCP) 98 info->protocol != IPPROTO_DCCP)
98 return -EINVAL; 99 return -EINVAL;
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index a184f879f253..985df82e427b 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -96,6 +96,7 @@ ebt_log_packet(unsigned int pf, unsigned int hooknum,
96 NIPQUAD(ih->daddr), ih->tos, ih->protocol); 96 NIPQUAD(ih->daddr), ih->tos, ih->protocol);
97 if (ih->protocol == IPPROTO_TCP || 97 if (ih->protocol == IPPROTO_TCP ||
98 ih->protocol == IPPROTO_UDP || 98 ih->protocol == IPPROTO_UDP ||
99 ih->protocol == IPPROTO_UDPLITE ||
99 ih->protocol == IPPROTO_SCTP || 100 ih->protocol == IPPROTO_SCTP ||
100 ih->protocol == IPPROTO_DCCP) { 101 ih->protocol == IPPROTO_DCCP) {
101 struct tcpudphdr _ports, *pptr; 102 struct tcpudphdr _ports, *pptr;
diff --git a/net/core/dev.c b/net/core/dev.c
index 455d589683e8..1e94a1b9a0f4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3247,7 +3247,7 @@ void synchronize_net(void)
3247 * unregister_netdev() instead of this. 3247 * unregister_netdev() instead of this.
3248 */ 3248 */
3249 3249
3250int unregister_netdevice(struct net_device *dev) 3250void unregister_netdevice(struct net_device *dev)
3251{ 3251{
3252 struct net_device *d, **dp; 3252 struct net_device *d, **dp;
3253 3253
@@ -3258,7 +3258,9 @@ int unregister_netdevice(struct net_device *dev)
3258 if (dev->reg_state == NETREG_UNINITIALIZED) { 3258 if (dev->reg_state == NETREG_UNINITIALIZED) {
3259 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never " 3259 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3260 "was registered\n", dev->name, dev); 3260 "was registered\n", dev->name, dev);
3261 return -ENODEV; 3261
3262 WARN_ON(1);
3263 return;
3262 } 3264 }
3263 3265
3264 BUG_ON(dev->reg_state != NETREG_REGISTERED); 3266 BUG_ON(dev->reg_state != NETREG_REGISTERED);
@@ -3280,11 +3282,7 @@ int unregister_netdevice(struct net_device *dev)
3280 break; 3282 break;
3281 } 3283 }
3282 } 3284 }
3283 if (!d) { 3285 BUG_ON(!d);
3284 printk(KERN_ERR "unregister net_device: '%s' not found\n",
3285 dev->name);
3286 return -ENODEV;
3287 }
3288 3286
3289 dev->reg_state = NETREG_UNREGISTERING; 3287 dev->reg_state = NETREG_UNREGISTERING;
3290 3288
@@ -3316,7 +3314,6 @@ int unregister_netdevice(struct net_device *dev)
3316 synchronize_net(); 3314 synchronize_net();
3317 3315
3318 dev_put(dev); 3316 dev_put(dev);
3319 return 0;
3320} 3317}
3321 3318
3322/** 3319/**
diff --git a/net/core/dst.c b/net/core/dst.c
index 836ec6606925..1a53fb39b7e0 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -99,7 +99,14 @@ static void dst_run_gc(unsigned long dummy)
99 printk("dst_total: %d/%d %ld\n", 99 printk("dst_total: %d/%d %ld\n",
100 atomic_read(&dst_total), delayed, dst_gc_timer_expires); 100 atomic_read(&dst_total), delayed, dst_gc_timer_expires);
101#endif 101#endif
102 mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires); 102 /* if the next desired timer is more than 4 seconds in the future
103 * then round the timer to whole seconds
104 */
105 if (dst_gc_timer_expires > 4*HZ)
106 mod_timer(&dst_gc_timer,
107 round_jiffies(jiffies + dst_gc_timer_expires));
108 else
109 mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires);
103 110
104out: 111out:
105 spin_unlock(&dst_lock); 112 spin_unlock(&dst_lock);
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 1df6cd4568d3..215f1bff048f 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -331,7 +331,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
331 331
332 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags); 332 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
333 if (nlh == NULL) 333 if (nlh == NULL)
334 return -1; 334 return -EMSGSIZE;
335 335
336 frh = nlmsg_data(nlh); 336 frh = nlmsg_data(nlh);
337 frh->table = rule->table; 337 frh->table = rule->table;
@@ -359,7 +359,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
359 return nlmsg_end(skb, nlh); 359 return nlmsg_end(skb, nlh);
360 360
361nla_put_failure: 361nla_put_failure:
362 return nlmsg_cancel(skb, nlh); 362 nlmsg_cancel(skb, nlh);
363 return -EMSGSIZE;
363} 364}
364 365
365int fib_rules_dump(struct sk_buff *skb, struct netlink_callback *cb, int family) 366int fib_rules_dump(struct sk_buff *skb, struct netlink_callback *cb, int family)
@@ -405,9 +406,12 @@ static void notify_rule_change(int event, struct fib_rule *rule,
405 goto errout; 406 goto errout;
406 407
407 err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops); 408 err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
408 /* failure implies BUG in fib_rule_nlmsg_size() */ 409 if (err < 0) {
409 BUG_ON(err < 0); 410 /* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
410 411 WARN_ON(err == -EMSGSIZE);
412 kfree_skb(skb);
413 goto errout;
414 }
411 err = rtnl_notify(skb, pid, ops->nlgroup, nlh, GFP_KERNEL); 415 err = rtnl_notify(skb, pid, ops->nlgroup, nlh, GFP_KERNEL);
412errout: 416errout:
413 if (err < 0) 417 if (err < 0)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e7300b6b4079..054d46493d2b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -696,7 +696,10 @@ next_elt:
696 if (!expire) 696 if (!expire)
697 expire = 1; 697 expire = 1;
698 698
699 mod_timer(&tbl->gc_timer, now + expire); 699 if (expire>HZ)
700 mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
701 else
702 mod_timer(&tbl->gc_timer, now + expire);
700 703
701 write_unlock(&tbl->lock); 704 write_unlock(&tbl->lock);
702} 705}
@@ -1637,7 +1640,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1637 1640
1638 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags); 1641 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1639 if (nlh == NULL) 1642 if (nlh == NULL)
1640 return -ENOBUFS; 1643 return -EMSGSIZE;
1641 1644
1642 ndtmsg = nlmsg_data(nlh); 1645 ndtmsg = nlmsg_data(nlh);
1643 1646
@@ -1706,7 +1709,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1706 1709
1707nla_put_failure: 1710nla_put_failure:
1708 read_unlock_bh(&tbl->lock); 1711 read_unlock_bh(&tbl->lock);
1709 return nlmsg_cancel(skb, nlh); 1712 nlmsg_cancel(skb, nlh);
1713 return -EMSGSIZE;
1710} 1714}
1711 1715
1712static int neightbl_fill_param_info(struct sk_buff *skb, 1716static int neightbl_fill_param_info(struct sk_buff *skb,
@@ -1720,7 +1724,7 @@ static int neightbl_fill_param_info(struct sk_buff *skb,
1720 1724
1721 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags); 1725 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1722 if (nlh == NULL) 1726 if (nlh == NULL)
1723 return -ENOBUFS; 1727 return -EMSGSIZE;
1724 1728
1725 ndtmsg = nlmsg_data(nlh); 1729 ndtmsg = nlmsg_data(nlh);
1726 1730
@@ -1737,7 +1741,8 @@ static int neightbl_fill_param_info(struct sk_buff *skb,
1737 return nlmsg_end(skb, nlh); 1741 return nlmsg_end(skb, nlh);
1738errout: 1742errout:
1739 read_unlock_bh(&tbl->lock); 1743 read_unlock_bh(&tbl->lock);
1740 return nlmsg_cancel(skb, nlh); 1744 nlmsg_cancel(skb, nlh);
1745 return -EMSGSIZE;
1741} 1746}
1742 1747
1743static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl, 1748static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
@@ -1955,7 +1960,7 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
1955 1960
1956 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags); 1961 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
1957 if (nlh == NULL) 1962 if (nlh == NULL)
1958 return -ENOBUFS; 1963 return -EMSGSIZE;
1959 1964
1960 ndm = nlmsg_data(nlh); 1965 ndm = nlmsg_data(nlh);
1961 ndm->ndm_family = neigh->ops->family; 1966 ndm->ndm_family = neigh->ops->family;
@@ -1987,7 +1992,8 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
1987 return nlmsg_end(skb, nlh); 1992 return nlmsg_end(skb, nlh);
1988 1993
1989nla_put_failure: 1994nla_put_failure:
1990 return nlmsg_cancel(skb, nlh); 1995 nlmsg_cancel(skb, nlh);
1996 return -EMSGSIZE;
1991} 1997}
1992 1998
1993 1999
@@ -2429,9 +2435,12 @@ static void __neigh_notify(struct neighbour *n, int type, int flags)
2429 goto errout; 2435 goto errout;
2430 2436
2431 err = neigh_fill_info(skb, n, 0, 0, type, flags); 2437 err = neigh_fill_info(skb, n, 0, 0, type, flags);
2432 /* failure implies BUG in neigh_nlmsg_size() */ 2438 if (err < 0) {
2433 BUG_ON(err < 0); 2439 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2434 2440 WARN_ON(err == -EMSGSIZE);
2441 kfree_skb(skb);
2442 goto errout;
2443 }
2435 err = rtnl_notify(skb, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); 2444 err = rtnl_notify(skb, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2436errout: 2445errout:
2437 if (err < 0) 2446 if (err < 0)
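Many of the remaining hunks in this merge apply one recurring convention to netlink fill routines: nlmsg_put() failure now reports -EMSGSIZE instead of -ENOBUFS, and the nla_put_failure path cancels the partial message and returns -EMSGSIZE explicitly rather than whatever nlmsg_cancel() returns. A rough standalone sketch of that shape (the function name and attribute type are placeholders, not code from the patch):

#include <linux/types.h>
#include <net/netlink.h>

/* Sketch of the fill-function error convention adopted by this series. */
static int example_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
			     int type, u32 value)
{
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, 0, 0);
	if (nlh == NULL)
		return -EMSGSIZE;	/* previously -ENOBUFS */

	NLA_PUT_U32(skb, 1, value);	/* attribute type 1 is arbitrary here */

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);		/* drop the half-built message */
	return -EMSGSIZE;		/* report the real reason to the caller */
}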
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index e76539a5eb5e..9bf9ae05f157 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -320,7 +320,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
320 320
321 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); 321 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
322 if (nlh == NULL) 322 if (nlh == NULL)
323 return -ENOBUFS; 323 return -EMSGSIZE;
324 324
325 ifm = nlmsg_data(nlh); 325 ifm = nlmsg_data(nlh);
326 ifm->ifi_family = AF_UNSPEC; 326 ifm->ifi_family = AF_UNSPEC;
@@ -384,7 +384,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
384 return nlmsg_end(skb, nlh); 384 return nlmsg_end(skb, nlh);
385 385
386nla_put_failure: 386nla_put_failure:
387 return nlmsg_cancel(skb, nlh); 387 nlmsg_cancel(skb, nlh);
388 return -EMSGSIZE;
388} 389}
389 390
390static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 391static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
@@ -633,9 +634,12 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
633 634
634 err = rtnl_fill_ifinfo(nskb, dev, iw, iw_buf_len, RTM_NEWLINK, 635 err = rtnl_fill_ifinfo(nskb, dev, iw, iw_buf_len, RTM_NEWLINK,
635 NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0, 0); 636 NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0, 0);
 636 /* failure implies BUG in if_nlmsg_size or wireless_rtnetlink_get */ 637 if (err < 0) {
637 BUG_ON(err < 0); 638 /* -EMSGSIZE implies BUG in if_nlmsg_size */
638 639 WARN_ON(err == -EMSGSIZE);
640 kfree_skb(nskb);
641 goto errout;
642 }
639 err = rtnl_unicast(nskb, NETLINK_CB(skb).pid); 643 err = rtnl_unicast(nskb, NETLINK_CB(skb).pid);
640errout: 644errout:
641 kfree(iw_buf); 645 kfree(iw_buf);
@@ -678,9 +682,12 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
678 goto errout; 682 goto errout;
679 683
680 err = rtnl_fill_ifinfo(skb, dev, NULL, 0, type, 0, 0, change, 0); 684 err = rtnl_fill_ifinfo(skb, dev, NULL, 0, type, 0, 0, change, 0);
681 /* failure implies BUG in if_nlmsg_size() */ 685 if (err < 0) {
682 BUG_ON(err < 0); 686 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
683 687 WARN_ON(err == -EMSGSIZE);
688 kfree_skb(skb);
689 goto errout;
690 }
684 err = rtnl_notify(skb, 0, RTNLGRP_LINK, NULL, GFP_KERNEL); 691 err = rtnl_notify(skb, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
685errout: 692errout:
686 if (err < 0) 693 if (err < 0)
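On the calling side the old BUG_ON(err < 0) becomes recoverable: any negative return frees the skb and bails out, and only -EMSGSIZE (which points at a bug in the matching *_nlmsg_size() estimate) still warns. A sketch of that caller, reusing the placeholder fill function from the previous example:

#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

/* Sketch: notifier-side handling that replaces BUG_ON(err < 0). */
static void example_notify(struct sk_buff *skb, int type, u32 value)
{
	int err;

	err = example_fill_info(skb, 0, 0, type, value);
	if (err < 0) {
		/* -EMSGSIZE means the size estimate used to allocate skb
		 * was wrong; anything else is simply dropped. */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		return;
	}
	rtnl_notify(skb, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
}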
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 40402c59506a..5c452a3ec4d1 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -479,7 +479,8 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
479 479
480 ccid3_pr_debug("%s(%p), s=%u, w_init=%llu, " 480 ccid3_pr_debug("%s(%p), s=%u, w_init=%llu, "
481 "R_sample=%dus, X=%u\n", dccp_role(sk), 481 "R_sample=%dus, X=%u\n", dccp_role(sk),
482 sk, hctx->ccid3hctx_s, w_init, 482 sk, hctx->ccid3hctx_s,
483 (unsigned long long)w_init,
483 (int)r_sample, 484 (int)r_sample,
484 (unsigned)(hctx->ccid3hctx_x >> 6)); 485 (unsigned)(hctx->ccid3hctx_x >> 6));
485 486
@@ -1005,7 +1006,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
1005 DCCP_BUG_ON(r_sample < 0); 1006 DCCP_BUG_ON(r_sample < 0);
1006 if (unlikely(r_sample <= t_elapsed)) 1007 if (unlikely(r_sample <= t_elapsed))
1007 DCCP_WARN("r_sample=%ldus, t_elapsed=%ldus\n", 1008 DCCP_WARN("r_sample=%ldus, t_elapsed=%ldus\n",
1008 r_sample, t_elapsed); 1009 (long)r_sample, (long)t_elapsed);
1009 else 1010 else
1010 r_sample -= t_elapsed; 1011 r_sample -= t_elapsed;
1011 CCID3_RTT_SANITY_CHECK(r_sample); 1012 CCID3_RTT_SANITY_CHECK(r_sample);
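Both ccid3 hunks are printk format fixes: a u64 is only portable through %llu after a cast to unsigned long long, and typedef'd time values need an explicit cast to match %ld on every architecture. In isolation:

#include <linux/kernel.h>
#include <linux/types.h>

/* Sketch: explicit casts keep the format string and its arguments in sync
 * on both 32-bit and 64-bit builds. */
static void print_rtt_sample(u64 w_init, suseconds_t r_sample)
{
	printk(KERN_DEBUG "w_init=%llu\n", (unsigned long long)w_init);
	printk(KERN_DEBUG "r_sample=%ldus\n", (long)r_sample);
}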
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 90c74b4adb73..fa2c982d4309 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -72,7 +72,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
72 tmp = ip_route_connect(&rt, nexthop, inet->saddr, 72 tmp = ip_route_connect(&rt, nexthop, inet->saddr,
73 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, 73 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
74 IPPROTO_DCCP, 74 IPPROTO_DCCP,
75 inet->sport, usin->sin_port, sk); 75 inet->sport, usin->sin_port, sk, 1);
76 if (tmp < 0) 76 if (tmp < 0)
77 return tmp; 77 return tmp;
78 78
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 6b91a9dd0411..79140b3e592e 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -1041,7 +1041,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1041 if (final_p) 1041 if (final_p)
1042 ipv6_addr_copy(&fl.fl6_dst, final_p); 1042 ipv6_addr_copy(&fl.fl6_dst, final_p);
1043 1043
1044 err = xfrm_lookup(&dst, &fl, sk, 0); 1044 err = xfrm_lookup(&dst, &fl, sk, 1);
1045 if (err < 0) 1045 if (err < 0)
1046 goto failure; 1046 goto failure;
1047 1047
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 63b3fa20e14b..48438565d70f 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1024,7 +1024,6 @@ static int __init dccp_init(void)
1024 do { 1024 do {
1025 dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE / 1025 dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
1026 sizeof(struct inet_ehash_bucket); 1026 sizeof(struct inet_ehash_bucket);
1027 dccp_hashinfo.ehash_size >>= 1;
1028 while (dccp_hashinfo.ehash_size & 1027 while (dccp_hashinfo.ehash_size &
1029 (dccp_hashinfo.ehash_size - 1)) 1028 (dccp_hashinfo.ehash_size - 1))
1030 dccp_hashinfo.ehash_size--; 1029 dccp_hashinfo.ehash_size--;
@@ -1037,9 +1036,10 @@ static int __init dccp_init(void)
1037 goto out_free_bind_bucket_cachep; 1036 goto out_free_bind_bucket_cachep;
1038 } 1037 }
1039 1038
1040 for (i = 0; i < (dccp_hashinfo.ehash_size << 1); i++) { 1039 for (i = 0; i < dccp_hashinfo.ehash_size; i++) {
1041 rwlock_init(&dccp_hashinfo.ehash[i].lock); 1040 rwlock_init(&dccp_hashinfo.ehash[i].lock);
1042 INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain); 1041 INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
1042 INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].twchain);
1043 } 1043 }
1044 1044
1045 bhash_order = ehash_order; 1045 bhash_order = ehash_order;
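The DCCP hunks follow a new established-hash layout used elsewhere in this merge: the table is no longer allocated at twice its nominal size with TIME-WAIT sockets parked in the upper half; each bucket now carries its own TIME-WAIT chain. Roughly, with a field layout inferred from these hunks rather than quoted from the header:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Inferred bucket shape, named _sketch to make clear it is illustrative. */
struct ehash_bucket_sketch {
	rwlock_t          lock;
	struct hlist_head chain;	/* established sockets */
	struct hlist_head twchain;	/* TIME-WAIT sockets of the same slot */
};

/* With a per-bucket twchain, initialisation walks ehash_size buckets once;
 * the old "ehash_size << 1" sizing trick disappears. */
static void init_ehash_sketch(struct ehash_bucket_sketch *ehash,
			      unsigned int ehash_size)
{
	unsigned int i;

	for (i = 0; i < ehash_size; i++) {
		rwlock_init(&ehash[i].lock);
		INIT_HLIST_HEAD(&ehash[i].chain);
		INIT_HLIST_HEAD(&ehash[i].twchain);
	}
}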
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index ed083ab455b7..90b3dfd72b49 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -749,7 +749,7 @@ static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
749 749
750 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags); 750 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags);
751 if (nlh == NULL) 751 if (nlh == NULL)
752 return -ENOBUFS; 752 return -EMSGSIZE;
753 753
754 ifm = nlmsg_data(nlh); 754 ifm = nlmsg_data(nlh);
755 ifm->ifa_family = AF_DECnet; 755 ifm->ifa_family = AF_DECnet;
@@ -768,7 +768,8 @@ static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
768 return nlmsg_end(skb, nlh); 768 return nlmsg_end(skb, nlh);
769 769
770nla_put_failure: 770nla_put_failure:
771 return nlmsg_cancel(skb, nlh); 771 nlmsg_cancel(skb, nlh);
772 return -EMSGSIZE;
772} 773}
773 774
774static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa) 775static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa)
@@ -781,9 +782,12 @@ static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa)
781 goto errout; 782 goto errout;
782 783
783 err = dn_nl_fill_ifaddr(skb, ifa, 0, 0, event, 0); 784 err = dn_nl_fill_ifaddr(skb, ifa, 0, 0, event, 0);
784 /* failure implies BUG in dn_ifaddr_nlmsg_size() */ 785 if (err < 0) {
785 BUG_ON(err < 0); 786 /* -EMSGSIZE implies BUG in dn_ifaddr_nlmsg_size() */
786 787 WARN_ON(err == -EMSGSIZE);
788 kfree_skb(skb);
789 goto errout;
790 }
787 err = rtnl_notify(skb, 0, RTNLGRP_DECnet_IFADDR, NULL, GFP_KERNEL); 791 err = rtnl_notify(skb, 0, RTNLGRP_DECnet_IFADDR, NULL, GFP_KERNEL);
788errout: 792errout:
789 if (err < 0) 793 if (err < 0)
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 13b2421991ba..c1f0cc1b1c60 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -350,7 +350,7 @@ static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
350nlmsg_failure: 350nlmsg_failure:
351rtattr_failure: 351rtattr_failure:
352 skb_trim(skb, b - skb->data); 352 skb_trim(skb, b - skb->data);
353 return -1; 353 return -EMSGSIZE;
354} 354}
355 355
356 356
@@ -368,9 +368,12 @@ static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, u32 tb_id,
368 err = dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id, 368 err = dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id,
369 f->fn_type, f->fn_scope, &f->fn_key, z, 369 f->fn_type, f->fn_scope, &f->fn_key, z,
370 DN_FIB_INFO(f), 0); 370 DN_FIB_INFO(f), 0);
371 /* failure implies BUG in dn_fib_nlmsg_size() */ 371 if (err < 0) {
372 BUG_ON(err < 0); 372 /* -EMSGSIZE implies BUG in dn_fib_nlmsg_size() */
373 373 WARN_ON(err == -EMSGSIZE);
374 kfree_skb(skb);
375 goto errout;
376 }
374 err = rtnl_notify(skb, pid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL); 377 err = rtnl_notify(skb, pid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL);
375errout: 378errout:
376 if (err < 0) 379 if (err < 0)
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 864009643675..5750a2b2a0d6 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1007,7 +1007,7 @@ static int inet_sk_reselect_saddr(struct sock *sk)
1007 RT_CONN_FLAGS(sk), 1007 RT_CONN_FLAGS(sk),
1008 sk->sk_bound_dev_if, 1008 sk->sk_bound_dev_if,
1009 sk->sk_protocol, 1009 sk->sk_protocol,
1010 inet->sport, inet->dport, sk); 1010 inet->sport, inet->dport, sk, 0);
1011 if (err) 1011 if (err)
1012 return err; 1012 return err;
1013 1013
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 7b068a891953..0072d79f0c2a 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -49,7 +49,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
49 err = ip_route_connect(&rt, usin->sin_addr.s_addr, saddr, 49 err = ip_route_connect(&rt, usin->sin_addr.s_addr, saddr,
50 RT_CONN_FLAGS(sk), oif, 50 RT_CONN_FLAGS(sk), oif,
51 sk->sk_protocol, 51 sk->sk_protocol,
52 inet->sport, usin->sin_port, sk); 52 inet->sport, usin->sin_port, sk, 1);
53 if (err) 53 if (err)
54 return err; 54 return err;
55 if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) { 55 if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) {
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 480ace9819f6..c40203640966 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1140,7 +1140,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1140 1140
1141 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags); 1141 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags);
1142 if (nlh == NULL) 1142 if (nlh == NULL)
1143 return -ENOBUFS; 1143 return -EMSGSIZE;
1144 1144
1145 ifm = nlmsg_data(nlh); 1145 ifm = nlmsg_data(nlh);
1146 ifm->ifa_family = AF_INET; 1146 ifm->ifa_family = AF_INET;
@@ -1167,7 +1167,8 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1167 return nlmsg_end(skb, nlh); 1167 return nlmsg_end(skb, nlh);
1168 1168
1169nla_put_failure: 1169nla_put_failure:
1170 return nlmsg_cancel(skb, nlh); 1170 nlmsg_cancel(skb, nlh);
1171 return -EMSGSIZE;
1171} 1172}
1172 1173
1173static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) 1174static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
@@ -1225,9 +1226,12 @@ static void rtmsg_ifa(int event, struct in_ifaddr* ifa, struct nlmsghdr *nlh,
1225 goto errout; 1226 goto errout;
1226 1227
1227 err = inet_fill_ifaddr(skb, ifa, pid, seq, event, 0); 1228 err = inet_fill_ifaddr(skb, ifa, pid, seq, event, 0);
1228 /* failure implies BUG in inet_nlmsg_size() */ 1229 if (err < 0) {
1229 BUG_ON(err < 0); 1230 /* -EMSGSIZE implies BUG in inet_nlmsg_size() */
1230 1231 WARN_ON(err == -EMSGSIZE);
1232 kfree_skb(skb);
1233 goto errout;
1234 }
1231 err = rtnl_notify(skb, pid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL); 1235 err = rtnl_notify(skb, pid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
1232errout: 1236errout:
1233 if (err < 0) 1237 if (err < 0)
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index e63b8a98fb4d..be1028c9933e 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -314,9 +314,12 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
314 err = fib_dump_info(skb, info->pid, seq, event, tb_id, 314 err = fib_dump_info(skb, info->pid, seq, event, tb_id,
315 fa->fa_type, fa->fa_scope, key, dst_len, 315 fa->fa_type, fa->fa_scope, key, dst_len,
316 fa->fa_tos, fa->fa_info, 0); 316 fa->fa_tos, fa->fa_info, 0);
317 /* failure implies BUG in fib_nlmsg_size() */ 317 if (err < 0) {
318 BUG_ON(err < 0); 318 /* -EMSGSIZE implies BUG in fib_nlmsg_size() */
319 319 WARN_ON(err == -EMSGSIZE);
320 kfree_skb(skb);
321 goto errout;
322 }
320 err = rtnl_notify(skb, info->pid, RTNLGRP_IPV4_ROUTE, 323 err = rtnl_notify(skb, info->pid, RTNLGRP_IPV4_ROUTE,
321 info->nlh, GFP_KERNEL); 324 info->nlh, GFP_KERNEL);
322errout: 325errout:
@@ -960,7 +963,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
960 963
961 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags); 964 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);
962 if (nlh == NULL) 965 if (nlh == NULL)
963 return -ENOBUFS; 966 return -EMSGSIZE;
964 967
965 rtm = nlmsg_data(nlh); 968 rtm = nlmsg_data(nlh);
966 rtm->rtm_family = AF_INET; 969 rtm->rtm_family = AF_INET;
@@ -1031,7 +1034,8 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
1031 return nlmsg_end(skb, nlh); 1034 return nlmsg_end(skb, nlh);
1032 1035
1033nla_put_failure: 1036nla_put_failure:
1034 return nlmsg_cancel(skb, nlh); 1037 nlmsg_cancel(skb, nlh);
1038 return -EMSGSIZE;
1035} 1039}
1036 1040
1037/* 1041/*
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 0017ccb01d6d..024ae56cab25 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -455,6 +455,8 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
455 skb = add_grhead(skb, pmc, type, &pgr); 455 skb = add_grhead(skb, pmc, type, &pgr);
456 first = 0; 456 first = 0;
457 } 457 }
458 if (!skb)
459 return NULL;
458 psrc = (__be32 *)skb_put(skb, sizeof(__be32)); 460 psrc = (__be32 *)skb_put(skb, sizeof(__be32));
459 *psrc = psf->sf_inaddr; 461 *psrc = psf->sf_inaddr;
460 scount++; stotal++; 462 scount++; stotal++;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 77761ac4f7bb..8aa7d51e6881 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -153,7 +153,7 @@ static int inet_csk_diag_fill(struct sock *sk,
153rtattr_failure: 153rtattr_failure:
154nlmsg_failure: 154nlmsg_failure:
155 skb_trim(skb, b - skb->data); 155 skb_trim(skb, b - skb->data);
156 return -1; 156 return -EMSGSIZE;
157} 157}
158 158
159static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, 159static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
@@ -209,7 +209,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
209 return skb->len; 209 return skb->len;
210nlmsg_failure: 210nlmsg_failure:
211 skb_trim(skb, previous_tail - skb->data); 211 skb_trim(skb, previous_tail - skb->data);
212 return -1; 212 return -EMSGSIZE;
213} 213}
214 214
215static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, 215static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
@@ -274,11 +274,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
274 if (!rep) 274 if (!rep)
275 goto out; 275 goto out;
276 276
277 if (sk_diag_fill(sk, rep, req->idiag_ext, 277 err = sk_diag_fill(sk, rep, req->idiag_ext,
278 NETLINK_CB(in_skb).pid, 278 NETLINK_CB(in_skb).pid,
279 nlh->nlmsg_seq, 0, nlh) <= 0) 279 nlh->nlmsg_seq, 0, nlh);
280 BUG(); 280 if (err < 0) {
281 281 WARN_ON(err == -EMSGSIZE);
282 kfree_skb(rep);
283 goto out;
284 }
282 err = netlink_unicast(idiagnl, rep, NETLINK_CB(in_skb).pid, 285 err = netlink_unicast(idiagnl, rep, NETLINK_CB(in_skb).pid,
283 MSG_DONTWAIT); 286 MSG_DONTWAIT);
284 if (err > 0) 287 if (err > 0)
@@ -775,7 +778,7 @@ next_normal:
775 struct inet_timewait_sock *tw; 778 struct inet_timewait_sock *tw;
776 779
777 inet_twsk_for_each(tw, node, 780 inet_twsk_for_each(tw, node,
778 &hashinfo->ehash[i + hashinfo->ehash_size].chain) { 781 &head->twchain) {
779 782
780 if (num < s_num) 783 if (num < s_num)
781 goto next_dying; 784 goto next_dying;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 8c79c8a4ea5c..150ace18dc75 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -212,7 +212,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
212 write_lock(&head->lock); 212 write_lock(&head->lock);
213 213
214 /* Check TIME-WAIT sockets first. */ 214 /* Check TIME-WAIT sockets first. */
215 sk_for_each(sk2, node, &(head + hinfo->ehash_size)->chain) { 215 sk_for_each(sk2, node, &head->twchain) {
216 tw = inet_twsk(sk2); 216 tw = inet_twsk(sk2);
217 217
218 if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) { 218 if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) {
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 9f414e35c488..a73cf93cee36 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -78,8 +78,8 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
78 if (__sk_del_node_init(sk)) 78 if (__sk_del_node_init(sk))
79 sock_prot_dec_use(sk->sk_prot); 79 sock_prot_dec_use(sk->sk_prot);
80 80
81 /* Step 3: Hash TW into TIMEWAIT half of established hash table. */ 81 /* Step 3: Hash TW into TIMEWAIT chain. */
82 inet_twsk_add_node(tw, &(ehead + hashinfo->ehash_size)->chain); 82 inet_twsk_add_node(tw, &ehead->twchain);
83 atomic_inc(&tw->tw_refcnt); 83 atomic_inc(&tw->tw_refcnt);
84 84
85 write_unlock(&ehead->lock); 85 write_unlock(&ehead->lock);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 476cb6084c75..51c83500790f 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1008,7 +1008,8 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
1008 goto done; 1008 goto done;
1009 dev = t->dev; 1009 dev = t->dev;
1010 } 1010 }
1011 err = unregister_netdevice(dev); 1011 unregister_netdevice(dev);
1012 err = 0;
1012 break; 1013 break;
1013 1014
1014 default: 1015 default:
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 9d719d664e5b..da8bbd20c7ed 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -754,7 +754,8 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
754 goto done; 754 goto done;
755 dev = t->dev; 755 dev = t->dev;
756 } 756 }
757 err = unregister_netdevice(dev); 757 unregister_netdevice(dev);
758 err = 0;
758 break; 759 break;
759 760
760 default: 761 default:
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 47bd3ad18b71..9b08e7ad71bc 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -361,32 +361,6 @@ config IP_NF_TARGET_ULOG
361 361
362 To compile it as a module, choose M here. If unsure, say N. 362 To compile it as a module, choose M here. If unsure, say N.
363 363
364config IP_NF_TARGET_TCPMSS
365 tristate "TCPMSS target support"
366 depends on IP_NF_IPTABLES
367 ---help---
368 This option adds a `TCPMSS' target, which allows you to alter the
369 MSS value of TCP SYN packets, to control the maximum size for that
370 connection (usually limiting it to your outgoing interface's MTU
371 minus 40).
372
373 This is used to overcome criminally braindead ISPs or servers which
374 block ICMP Fragmentation Needed packets. The symptoms of this
375 problem are that everything works fine from your Linux
376 firewall/router, but machines behind it can never exchange large
377 packets:
378 1) Web browsers connect, then hang with no data received.
379 2) Small mail works fine, but large emails hang.
380 3) ssh works fine, but scp hangs after initial handshaking.
381
382 Workaround: activate this option and add a rule to your firewall
383 configuration like:
384
385 iptables -A FORWARD -p tcp --tcp-flags SYN,RST SYN \
386 -j TCPMSS --clamp-mss-to-pmtu
387
388 To compile it as a module, choose M here. If unsure, say N.
389
390# NAT + specific targets: ip_conntrack 364# NAT + specific targets: ip_conntrack
391config IP_NF_NAT 365config IP_NF_NAT
392 tristate "Full NAT" 366 tristate "Full NAT"
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 16d177b71bf8..6625ec68180c 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -103,7 +103,6 @@ obj-$(CONFIG_IP_NF_TARGET_SAME) += ipt_SAME.o
103obj-$(CONFIG_IP_NF_NAT_SNMP_BASIC) += ip_nat_snmp_basic.o 103obj-$(CONFIG_IP_NF_NAT_SNMP_BASIC) += ip_nat_snmp_basic.o
104obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o 104obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
105obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o 105obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
106obj-$(CONFIG_IP_NF_TARGET_TCPMSS) += ipt_TCPMSS.o
107obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o 106obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
108obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o 107obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
109 108
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
index 06e4e8a6dd9f..c34f48fe5478 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
@@ -50,12 +50,9 @@ static DEFINE_RWLOCK(tcp_lock);
50 If it's non-zero, we mark only out of window RST segments as INVALID. */ 50 If it's non-zero, we mark only out of window RST segments as INVALID. */
51int ip_ct_tcp_be_liberal __read_mostly = 0; 51int ip_ct_tcp_be_liberal __read_mostly = 0;
52 52
53/* When connection is picked up from the middle, how many packets are required 53/* If it is set to zero, we disable picking up already established
54 to pass in each direction when we assume we are in sync - if any side uses
55 window scaling, we lost the game.
56 If it is set to zero, we disable picking up already established
57 connections. */ 54 connections. */
58int ip_ct_tcp_loose __read_mostly = 3; 55int ip_ct_tcp_loose __read_mostly = 1;
59 56
60/* Max number of the retransmitted packets without receiving an (acceptable) 57/* Max number of the retransmitted packets without receiving an (acceptable)
61 ACK from the destination. If this number is reached, a shorter timer 58 ACK from the destination. If this number is reached, a shorter timer
@@ -694,11 +691,10 @@ static int tcp_in_window(struct ip_ct_tcp *state,
694 before(sack, receiver->td_end + 1), 691 before(sack, receiver->td_end + 1),
695 after(ack, receiver->td_end - MAXACKWINDOW(sender))); 692 after(ack, receiver->td_end - MAXACKWINDOW(sender)));
696 693
697 if (sender->loose || receiver->loose || 694 if (before(seq, sender->td_maxend + 1) &&
698 (before(seq, sender->td_maxend + 1) && 695 after(end, sender->td_end - receiver->td_maxwin - 1) &&
699 after(end, sender->td_end - receiver->td_maxwin - 1) && 696 before(sack, receiver->td_end + 1) &&
700 before(sack, receiver->td_end + 1) && 697 after(ack, receiver->td_end - MAXACKWINDOW(sender))) {
701 after(ack, receiver->td_end - MAXACKWINDOW(sender)))) {
702 /* 698 /*
703 * Take into account window scaling (RFC 1323). 699 * Take into account window scaling (RFC 1323).
704 */ 700 */
@@ -743,15 +739,13 @@ static int tcp_in_window(struct ip_ct_tcp *state,
743 state->retrans = 0; 739 state->retrans = 0;
744 } 740 }
745 } 741 }
746 /*
747 * Close the window of disabled window tracking :-)
748 */
749 if (sender->loose)
750 sender->loose--;
751
752 res = 1; 742 res = 1;
753 } else { 743 } else {
754 if (LOG_INVALID(IPPROTO_TCP)) 744 res = 0;
745 if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
746 ip_ct_tcp_be_liberal)
747 res = 1;
748 if (!res && LOG_INVALID(IPPROTO_TCP))
755 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, 749 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
756 "ip_ct_tcp: %s ", 750 "ip_ct_tcp: %s ",
757 before(seq, sender->td_maxend + 1) ? 751 before(seq, sender->td_maxend + 1) ?
@@ -762,8 +756,6 @@ static int tcp_in_window(struct ip_ct_tcp *state,
762 : "ACK is over the upper bound (ACKed data not seen yet)" 756 : "ACK is over the upper bound (ACKed data not seen yet)"
763 : "SEQ is under the lower bound (already ACKed data retransmitted)" 757 : "SEQ is under the lower bound (already ACKed data retransmitted)"
764 : "SEQ is over the upper bound (over the window of the receiver)"); 758 : "SEQ is over the upper bound (over the window of the receiver)");
765
766 res = ip_ct_tcp_be_liberal;
767 } 759 }
768 760
769 DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u " 761 DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u "
@@ -1105,8 +1097,6 @@ static int tcp_new(struct ip_conntrack *conntrack,
1105 1097
1106 tcp_options(skb, iph, th, &conntrack->proto.tcp.seen[0]); 1098 tcp_options(skb, iph, th, &conntrack->proto.tcp.seen[0]);
1107 conntrack->proto.tcp.seen[1].flags = 0; 1099 conntrack->proto.tcp.seen[1].flags = 0;
1108 conntrack->proto.tcp.seen[0].loose =
1109 conntrack->proto.tcp.seen[1].loose = 0;
1110 } else if (ip_ct_tcp_loose == 0) { 1100 } else if (ip_ct_tcp_loose == 0) {
1111 /* Don't try to pick up connections. */ 1101 /* Don't try to pick up connections. */
1112 return 0; 1102 return 0;
@@ -1127,11 +1117,11 @@ static int tcp_new(struct ip_conntrack *conntrack,
1127 conntrack->proto.tcp.seen[0].td_maxwin; 1117 conntrack->proto.tcp.seen[0].td_maxwin;
1128 conntrack->proto.tcp.seen[0].td_scale = 0; 1118 conntrack->proto.tcp.seen[0].td_scale = 0;
1129 1119
1130 /* We assume SACK. Should we assume window scaling too? */ 1120 /* We assume SACK and liberal window checking to handle
1121 * window scaling */
1131 conntrack->proto.tcp.seen[0].flags = 1122 conntrack->proto.tcp.seen[0].flags =
1132 conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM; 1123 conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
1133 conntrack->proto.tcp.seen[0].loose = 1124 IP_CT_TCP_FLAG_BE_LIBERAL;
1134 conntrack->proto.tcp.seen[1].loose = ip_ct_tcp_loose;
1135 } 1125 }
1136 1126
1137 conntrack->proto.tcp.seen[1].td_end = 0; 1127 conntrack->proto.tcp.seen[1].td_end = 0;
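With the per-direction "loose" counters gone, an out-of-window packet is accepted only when the connection was picked up mid-stream (and therefore flagged liberal in tcp_new()) or when the global be_liberal sysctl is set. The decision reduces to roughly the following; the flag value and the state struct are placeholders for the sketch, not the conntrack definitions:

/* Placeholder bit for this sketch; the real definition lives in the
 * conntrack TCP header. */
#define IP_CT_TCP_FLAG_BE_LIBERAL 0x08

struct tcp_dir_state_sketch {
	unsigned char flags;
};

/* Sketch of the simplified accept/reject decision. */
static int window_result(int in_window,
			 const struct tcp_dir_state_sketch *sender,
			 int sysctl_be_liberal)
{
	if (in_window)
		return 1;
	if ((sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL) || sysctl_be_liberal)
		return 1;
	return 0;	/* genuinely out of window: may be logged, treated as invalid */
}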
diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c
index 9d1a5175dcd4..5e08c2bf887d 100644
--- a/net/ipv4/netfilter/ip_nat_core.c
+++ b/net/ipv4/netfilter/ip_nat_core.c
@@ -246,8 +246,9 @@ get_unique_tuple(struct ip_conntrack_tuple *tuple,
246 if (maniptype == IP_NAT_MANIP_SRC) { 246 if (maniptype == IP_NAT_MANIP_SRC) {
247 if (find_appropriate_src(orig_tuple, tuple, range)) { 247 if (find_appropriate_src(orig_tuple, tuple, range)) {
248 DEBUGP("get_unique_tuple: Found current src map\n"); 248 DEBUGP("get_unique_tuple: Found current src map\n");
249 if (!ip_nat_used_tuple(tuple, conntrack)) 249 if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
250 return; 250 if (!ip_nat_used_tuple(tuple, conntrack))
251 return;
251 } 252 }
252 } 253 }
253 254
@@ -261,6 +262,13 @@ get_unique_tuple(struct ip_conntrack_tuple *tuple,
261 262
262 proto = ip_nat_proto_find_get(orig_tuple->dst.protonum); 263 proto = ip_nat_proto_find_get(orig_tuple->dst.protonum);
263 264
265 /* Change protocol info to have some randomization */
266 if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
267 proto->unique_tuple(tuple, range, maniptype, conntrack);
268 ip_nat_proto_put(proto);
269 return;
270 }
271
264 /* Only bother mapping if it's not already in range and unique */ 272 /* Only bother mapping if it's not already in range and unique */
265 if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) 273 if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)
266 || proto->in_range(tuple, maniptype, &range->min, &range->max)) 274 || proto->in_range(tuple, maniptype, &range->min, &range->max))
diff --git a/net/ipv4/netfilter/ip_nat_helper.c b/net/ipv4/netfilter/ip_nat_helper.c
index ee80feb4b2a9..2e5c4bc52a60 100644
--- a/net/ipv4/netfilter/ip_nat_helper.c
+++ b/net/ipv4/netfilter/ip_nat_helper.c
@@ -183,7 +183,7 @@ ip_nat_mangle_tcp_packet(struct sk_buff **pskb,
183 datalen = (*pskb)->len - iph->ihl*4; 183 datalen = (*pskb)->len - iph->ihl*4;
184 if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { 184 if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
185 tcph->check = 0; 185 tcph->check = 0;
186 tcph->check = tcp_v4_check(tcph, datalen, 186 tcph->check = tcp_v4_check(datalen,
187 iph->saddr, iph->daddr, 187 iph->saddr, iph->daddr,
188 csum_partial((char *)tcph, 188 csum_partial((char *)tcph,
189 datalen, 0)); 189 datalen, 0));
diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c
index b586d18b3fb3..14ff24f53a7a 100644
--- a/net/ipv4/netfilter/ip_nat_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/random.h>
11#include <linux/netfilter.h> 12#include <linux/netfilter.h>
12#include <linux/ip.h> 13#include <linux/ip.h>
13#include <linux/tcp.h> 14#include <linux/tcp.h>
@@ -75,6 +76,10 @@ tcp_unique_tuple(struct ip_conntrack_tuple *tuple,
75 range_size = ntohs(range->max.tcp.port) - min + 1; 76 range_size = ntohs(range->max.tcp.port) - min + 1;
76 } 77 }
77 78
79 /* Start from random port to avoid prediction */
80 if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
81 port = net_random();
82
78 for (i = 0; i < range_size; i++, port++) { 83 for (i = 0; i < range_size; i++, port++) {
79 *portptr = htons(min + port % range_size); 84 *portptr = htons(min + port % range_size);
80 if (!ip_nat_used_tuple(tuple, conntrack)) { 85 if (!ip_nat_used_tuple(tuple, conntrack)) {
diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c
index 5ced0877b32f..dfd521672891 100644
--- a/net/ipv4/netfilter/ip_nat_proto_udp.c
+++ b/net/ipv4/netfilter/ip_nat_proto_udp.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/random.h>
11#include <linux/netfilter.h> 12#include <linux/netfilter.h>
12#include <linux/ip.h> 13#include <linux/ip.h>
13#include <linux/udp.h> 14#include <linux/udp.h>
@@ -74,6 +75,10 @@ udp_unique_tuple(struct ip_conntrack_tuple *tuple,
74 range_size = ntohs(range->max.udp.port) - min + 1; 75 range_size = ntohs(range->max.udp.port) - min + 1;
75 } 76 }
76 77
78 /* Start from random port to avoid prediction */
79 if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
80 port = net_random();
81
77 for (i = 0; i < range_size; i++, port++) { 82 for (i = 0; i < range_size; i++, port++) {
78 *portptr = htons(min + port % range_size); 83 *portptr = htons(min + port % range_size);
79 if (!ip_nat_used_tuple(tuple, conntrack)) 84 if (!ip_nat_used_tuple(tuple, conntrack))
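The TCP and UDP NAT protocol helpers gain the same behaviour: when IP_NAT_RANGE_PROTO_RANDOM is set on the range, the search for a free source port starts at a random offset instead of the bottom of the range, which makes NATed source ports harder to predict. A standalone sketch of that loop (is_used() stands in for ip_nat_used_tuple()):

#include <linux/types.h>
#include <linux/net.h>		/* net_random() */

/* Sketch: pick a free port from [min, min + range_size), optionally
 * starting at a random offset.  Assumes range_size > 0. */
static int pick_port_sketch(u16 min, u16 range_size, int randomize,
			    int (*is_used)(u16 port))
{
	unsigned int i, port = 0;

	if (randomize)
		port = net_random();	/* random starting point, same walk */

	for (i = 0; i < range_size; i++, port++) {
		u16 candidate = min + port % range_size;

		if (!is_used(candidate))
			return candidate;
	}
	return -1;	/* every port in the range is taken */
}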
diff --git a/net/ipv4/netfilter/ip_nat_rule.c b/net/ipv4/netfilter/ip_nat_rule.c
index a176aa3031e0..e1c8a05f3dc6 100644
--- a/net/ipv4/netfilter/ip_nat_rule.c
+++ b/net/ipv4/netfilter/ip_nat_rule.c
@@ -86,7 +86,7 @@ static struct
86 } 86 }
87}; 87};
88 88
89static struct ipt_table nat_table = { 89static struct xt_table nat_table = {
90 .name = "nat", 90 .name = "nat",
91 .valid_hooks = NAT_VALID_HOOKS, 91 .valid_hooks = NAT_VALID_HOOKS,
92 .lock = RW_LOCK_UNLOCKED, 92 .lock = RW_LOCK_UNLOCKED,
@@ -99,7 +99,7 @@ static unsigned int ipt_snat_target(struct sk_buff **pskb,
99 const struct net_device *in, 99 const struct net_device *in,
100 const struct net_device *out, 100 const struct net_device *out,
101 unsigned int hooknum, 101 unsigned int hooknum,
102 const struct ipt_target *target, 102 const struct xt_target *target,
103 const void *targinfo) 103 const void *targinfo)
104{ 104{
105 struct ip_conntrack *ct; 105 struct ip_conntrack *ct;
@@ -141,7 +141,7 @@ static unsigned int ipt_dnat_target(struct sk_buff **pskb,
141 const struct net_device *in, 141 const struct net_device *in,
142 const struct net_device *out, 142 const struct net_device *out,
143 unsigned int hooknum, 143 unsigned int hooknum,
144 const struct ipt_target *target, 144 const struct xt_target *target,
145 const void *targinfo) 145 const void *targinfo)
146{ 146{
147 struct ip_conntrack *ct; 147 struct ip_conntrack *ct;
@@ -166,7 +166,7 @@ static unsigned int ipt_dnat_target(struct sk_buff **pskb,
166 166
167static int ipt_snat_checkentry(const char *tablename, 167static int ipt_snat_checkentry(const char *tablename,
168 const void *entry, 168 const void *entry,
169 const struct ipt_target *target, 169 const struct xt_target *target,
170 void *targinfo, 170 void *targinfo,
171 unsigned int hook_mask) 171 unsigned int hook_mask)
172{ 172{
@@ -182,7 +182,7 @@ static int ipt_snat_checkentry(const char *tablename,
182 182
183static int ipt_dnat_checkentry(const char *tablename, 183static int ipt_dnat_checkentry(const char *tablename,
184 const void *entry, 184 const void *entry,
185 const struct ipt_target *target, 185 const struct xt_target *target,
186 void *targinfo, 186 void *targinfo,
187 unsigned int hook_mask) 187 unsigned int hook_mask)
188{ 188{
@@ -193,6 +193,10 @@ static int ipt_dnat_checkentry(const char *tablename,
193 printk("DNAT: multiple ranges no longer supported\n"); 193 printk("DNAT: multiple ranges no longer supported\n");
194 return 0; 194 return 0;
195 } 195 }
196 if (mr->range[0].flags & IP_NAT_RANGE_PROTO_RANDOM) {
197 printk("DNAT: port randomization not supported\n");
198 return 0;
199 }
196 return 1; 200 return 1;
197} 201}
198 202
@@ -257,8 +261,9 @@ int ip_nat_rule_find(struct sk_buff **pskb,
257 return ret; 261 return ret;
258} 262}
259 263
260static struct ipt_target ipt_snat_reg = { 264static struct xt_target ipt_snat_reg = {
261 .name = "SNAT", 265 .name = "SNAT",
266 .family = AF_INET,
262 .target = ipt_snat_target, 267 .target = ipt_snat_target,
263 .targetsize = sizeof(struct ip_nat_multi_range_compat), 268 .targetsize = sizeof(struct ip_nat_multi_range_compat),
264 .table = "nat", 269 .table = "nat",
@@ -266,8 +271,9 @@ static struct ipt_target ipt_snat_reg = {
266 .checkentry = ipt_snat_checkentry, 271 .checkentry = ipt_snat_checkentry,
267}; 272};
268 273
269static struct ipt_target ipt_dnat_reg = { 274static struct xt_target ipt_dnat_reg = {
270 .name = "DNAT", 275 .name = "DNAT",
276 .family = AF_INET,
271 .target = ipt_dnat_target, 277 .target = ipt_dnat_target,
272 .targetsize = sizeof(struct ip_nat_multi_range_compat), 278 .targetsize = sizeof(struct ip_nat_multi_range_compat),
273 .table = "nat", 279 .table = "nat",
@@ -282,27 +288,27 @@ int __init ip_nat_rule_init(void)
282 ret = ipt_register_table(&nat_table, &nat_initial_table.repl); 288 ret = ipt_register_table(&nat_table, &nat_initial_table.repl);
283 if (ret != 0) 289 if (ret != 0)
284 return ret; 290 return ret;
285 ret = ipt_register_target(&ipt_snat_reg); 291 ret = xt_register_target(&ipt_snat_reg);
286 if (ret != 0) 292 if (ret != 0)
287 goto unregister_table; 293 goto unregister_table;
288 294
289 ret = ipt_register_target(&ipt_dnat_reg); 295 ret = xt_register_target(&ipt_dnat_reg);
290 if (ret != 0) 296 if (ret != 0)
291 goto unregister_snat; 297 goto unregister_snat;
292 298
293 return ret; 299 return ret;
294 300
295 unregister_snat: 301 unregister_snat:
296 ipt_unregister_target(&ipt_snat_reg); 302 xt_unregister_target(&ipt_snat_reg);
297 unregister_table: 303 unregister_table:
298 ipt_unregister_table(&nat_table); 304 xt_unregister_table(&nat_table);
299 305
300 return ret; 306 return ret;
301} 307}
302 308
303void ip_nat_rule_cleanup(void) 309void ip_nat_rule_cleanup(void)
304{ 310{
305 ipt_unregister_target(&ipt_dnat_reg); 311 xt_unregister_target(&ipt_dnat_reg);
306 ipt_unregister_target(&ipt_snat_reg); 312 xt_unregister_target(&ipt_snat_reg);
307 ipt_unregister_table(&nat_table); 313 ipt_unregister_table(&nat_table);
308} 314}
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index fc1f153c86ba..5a7b3a341389 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -216,7 +216,7 @@ ipt_do_table(struct sk_buff **pskb,
216 unsigned int hook, 216 unsigned int hook,
217 const struct net_device *in, 217 const struct net_device *in,
218 const struct net_device *out, 218 const struct net_device *out,
219 struct ipt_table *table) 219 struct xt_table *table)
220{ 220{
221 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); 221 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
222 u_int16_t offset; 222 u_int16_t offset;
@@ -507,7 +507,7 @@ check_entry(struct ipt_entry *e, const char *name)
507static inline int check_match(struct ipt_entry_match *m, const char *name, 507static inline int check_match(struct ipt_entry_match *m, const char *name,
508 const struct ipt_ip *ip, unsigned int hookmask) 508 const struct ipt_ip *ip, unsigned int hookmask)
509{ 509{
510 struct ipt_match *match; 510 struct xt_match *match;
511 int ret; 511 int ret;
512 512
513 match = m->u.kernel.match; 513 match = m->u.kernel.match;
@@ -531,7 +531,7 @@ find_check_match(struct ipt_entry_match *m,
531 unsigned int hookmask, 531 unsigned int hookmask,
532 unsigned int *i) 532 unsigned int *i)
533{ 533{
534 struct ipt_match *match; 534 struct xt_match *match;
535 int ret; 535 int ret;
536 536
537 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name, 537 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
@@ -557,7 +557,7 @@ err:
557static inline int check_target(struct ipt_entry *e, const char *name) 557static inline int check_target(struct ipt_entry *e, const char *name)
558{ 558{
559 struct ipt_entry_target *t; 559 struct ipt_entry_target *t;
560 struct ipt_target *target; 560 struct xt_target *target;
561 int ret; 561 int ret;
562 562
563 t = ipt_get_target(e); 563 t = ipt_get_target(e);
@@ -580,7 +580,7 @@ find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
580 unsigned int *i) 580 unsigned int *i)
581{ 581{
582 struct ipt_entry_target *t; 582 struct ipt_entry_target *t;
583 struct ipt_target *target; 583 struct xt_target *target;
584 int ret; 584 int ret;
585 unsigned int j; 585 unsigned int j;
586 586
@@ -818,7 +818,7 @@ get_counters(const struct xt_table_info *t,
818 } 818 }
819} 819}
820 820
821static inline struct xt_counters * alloc_counters(struct ipt_table *table) 821static inline struct xt_counters * alloc_counters(struct xt_table *table)
822{ 822{
823 unsigned int countersize; 823 unsigned int countersize;
824 struct xt_counters *counters; 824 struct xt_counters *counters;
@@ -843,7 +843,7 @@ static inline struct xt_counters * alloc_counters(struct ipt_table *table)
843 843
844static int 844static int
845copy_entries_to_user(unsigned int total_size, 845copy_entries_to_user(unsigned int total_size,
846 struct ipt_table *table, 846 struct xt_table *table,
847 void __user *userptr) 847 void __user *userptr)
848{ 848{
849 unsigned int off, num; 849 unsigned int off, num;
@@ -1046,7 +1046,7 @@ static int compat_table_info(struct xt_table_info *info,
1046static int get_info(void __user *user, int *len, int compat) 1046static int get_info(void __user *user, int *len, int compat)
1047{ 1047{
1048 char name[IPT_TABLE_MAXNAMELEN]; 1048 char name[IPT_TABLE_MAXNAMELEN];
1049 struct ipt_table *t; 1049 struct xt_table *t;
1050 int ret; 1050 int ret;
1051 1051
1052 if (*len != sizeof(struct ipt_getinfo)) { 1052 if (*len != sizeof(struct ipt_getinfo)) {
@@ -1107,7 +1107,7 @@ get_entries(struct ipt_get_entries __user *uptr, int *len)
1107{ 1107{
1108 int ret; 1108 int ret;
1109 struct ipt_get_entries get; 1109 struct ipt_get_entries get;
1110 struct ipt_table *t; 1110 struct xt_table *t;
1111 1111
1112 if (*len < sizeof(get)) { 1112 if (*len < sizeof(get)) {
1113 duprintf("get_entries: %u < %d\n", *len, 1113 duprintf("get_entries: %u < %d\n", *len,
@@ -1151,7 +1151,7 @@ __do_replace(const char *name, unsigned int valid_hooks,
1151 void __user *counters_ptr) 1151 void __user *counters_ptr)
1152{ 1152{
1153 int ret; 1153 int ret;
1154 struct ipt_table *t; 1154 struct xt_table *t;
1155 struct xt_table_info *oldinfo; 1155 struct xt_table_info *oldinfo;
1156 struct xt_counters *counters; 1156 struct xt_counters *counters;
1157 void *loc_cpu_old_entry; 1157 void *loc_cpu_old_entry;
@@ -1302,7 +1302,7 @@ do_add_counters(void __user *user, unsigned int len, int compat)
1302 char *name; 1302 char *name;
1303 int size; 1303 int size;
1304 void *ptmp; 1304 void *ptmp;
1305 struct ipt_table *t; 1305 struct xt_table *t;
1306 struct xt_table_info *private; 1306 struct xt_table_info *private;
1307 int ret = 0; 1307 int ret = 0;
1308 void *loc_cpu_entry; 1308 void *loc_cpu_entry;
@@ -1437,7 +1437,7 @@ compat_check_calc_match(struct ipt_entry_match *m,
1437 unsigned int hookmask, 1437 unsigned int hookmask,
1438 int *size, int *i) 1438 int *size, int *i)
1439{ 1439{
1440 struct ipt_match *match; 1440 struct xt_match *match;
1441 1441
1442 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name, 1442 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1443 m->u.user.revision), 1443 m->u.user.revision),
@@ -1466,7 +1466,7 @@ check_compat_entry_size_and_hooks(struct ipt_entry *e,
1466 const char *name) 1466 const char *name)
1467{ 1467{
1468 struct ipt_entry_target *t; 1468 struct ipt_entry_target *t;
1469 struct ipt_target *target; 1469 struct xt_target *target;
1470 unsigned int entry_offset; 1470 unsigned int entry_offset;
1471 int ret, off, h, j; 1471 int ret, off, h, j;
1472 1472
@@ -1550,7 +1550,7 @@ static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
1550 struct xt_table_info *newinfo, unsigned char *base) 1550 struct xt_table_info *newinfo, unsigned char *base)
1551{ 1551{
1552 struct ipt_entry_target *t; 1552 struct ipt_entry_target *t;
1553 struct ipt_target *target; 1553 struct xt_target *target;
1554 struct ipt_entry *de; 1554 struct ipt_entry *de;
1555 unsigned int origsize; 1555 unsigned int origsize;
1556 int ret, h; 1556 int ret, h;
@@ -1795,7 +1795,7 @@ struct compat_ipt_get_entries
1795}; 1795};
1796 1796
1797static int compat_copy_entries_to_user(unsigned int total_size, 1797static int compat_copy_entries_to_user(unsigned int total_size,
1798 struct ipt_table *table, void __user *userptr) 1798 struct xt_table *table, void __user *userptr)
1799{ 1799{
1800 unsigned int off, num; 1800 unsigned int off, num;
1801 struct compat_ipt_entry e; 1801 struct compat_ipt_entry e;
@@ -1869,7 +1869,7 @@ compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1869{ 1869{
1870 int ret; 1870 int ret;
1871 struct compat_ipt_get_entries get; 1871 struct compat_ipt_get_entries get;
1872 struct ipt_table *t; 1872 struct xt_table *t;
1873 1873
1874 1874
1875 if (*len < sizeof(get)) { 1875 if (*len < sizeof(get)) {
@@ -2052,7 +2052,7 @@ int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
2052 return 0; 2052 return 0;
2053} 2053}
2054 2054
2055void ipt_unregister_table(struct ipt_table *table) 2055void ipt_unregister_table(struct xt_table *table)
2056{ 2056{
2057 struct xt_table_info *private; 2057 struct xt_table_info *private;
2058 void *loc_cpu_entry; 2058 void *loc_cpu_entry;
@@ -2124,7 +2124,7 @@ icmp_checkentry(const char *tablename,
2124} 2124}
2125 2125
2126/* The built-in targets: standard (NULL) and error. */ 2126/* The built-in targets: standard (NULL) and error. */
2127static struct ipt_target ipt_standard_target = { 2127static struct xt_target ipt_standard_target = {
2128 .name = IPT_STANDARD_TARGET, 2128 .name = IPT_STANDARD_TARGET,
2129 .targetsize = sizeof(int), 2129 .targetsize = sizeof(int),
2130 .family = AF_INET, 2130 .family = AF_INET,
@@ -2135,7 +2135,7 @@ static struct ipt_target ipt_standard_target = {
2135#endif 2135#endif
2136}; 2136};
2137 2137
2138static struct ipt_target ipt_error_target = { 2138static struct xt_target ipt_error_target = {
2139 .name = IPT_ERROR_TARGET, 2139 .name = IPT_ERROR_TARGET,
2140 .target = ipt_error, 2140 .target = ipt_error,
2141 .targetsize = IPT_FUNCTION_MAXNAMELEN, 2141 .targetsize = IPT_FUNCTION_MAXNAMELEN,
@@ -2158,7 +2158,7 @@ static struct nf_sockopt_ops ipt_sockopts = {
2158#endif 2158#endif
2159}; 2159};
2160 2160
2161static struct ipt_match icmp_matchstruct = { 2161static struct xt_match icmp_matchstruct = {
2162 .name = "icmp", 2162 .name = "icmp",
2163 .match = icmp_match, 2163 .match = icmp_match,
2164 .matchsize = sizeof(struct ipt_icmp), 2164 .matchsize = sizeof(struct ipt_icmp),
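From here on the iptables target modules are converted the same way: the registration struct becomes struct xt_target, it names its address family explicitly, and registration goes through the x_tables core. A minimal template under those assumptions (EXAMPLE and the example_* names are placeholders, not a real target):

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/netfilter/x_tables.h>

struct example_info {
	u8 mode;			/* placeholder target data */
};

static unsigned int example_target(struct sk_buff **pskb,
				   const struct net_device *in,
				   const struct net_device *out,
				   unsigned int hooknum,
				   const struct xt_target *target,
				   const void *targinfo)
{
	return XT_CONTINUE;		/* was IPT_CONTINUE before the conversion */
}

static struct xt_target example_reg = {
	.name       = "EXAMPLE",
	.family     = AF_INET,		/* new: x_tables is address-family aware */
	.target     = example_target,
	.targetsize = sizeof(struct example_info),
	.table      = "mangle",
	.me         = THIS_MODULE,
};

static int __init example_init(void)
{
	return xt_register_target(&example_reg);	/* was ipt_register_target() */
}

static void __exit example_fini(void)
{
	xt_unregister_target(&example_reg);
}

module_init(example_init);
module_exit(example_fini);
MODULE_LICENSE("GPL");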
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index b1c11160b9de..343c2abdc1a0 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -26,6 +26,7 @@
26 26
27#include <linux/netfilter_arp.h> 27#include <linux/netfilter_arp.h>
28 28
29#include <linux/netfilter/x_tables.h>
29#include <linux/netfilter_ipv4/ip_tables.h> 30#include <linux/netfilter_ipv4/ip_tables.h>
30#include <linux/netfilter_ipv4/ipt_CLUSTERIP.h> 31#include <linux/netfilter_ipv4/ipt_CLUSTERIP.h>
31#include <net/netfilter/nf_conntrack_compat.h> 32#include <net/netfilter/nf_conntrack_compat.h>
@@ -247,6 +248,7 @@ clusterip_hashfn(struct sk_buff *skb, struct clusterip_config *config)
247 switch (iph->protocol) { 248 switch (iph->protocol) {
248 case IPPROTO_TCP: 249 case IPPROTO_TCP:
249 case IPPROTO_UDP: 250 case IPPROTO_UDP:
251 case IPPROTO_UDPLITE:
250 case IPPROTO_SCTP: 252 case IPPROTO_SCTP:
251 case IPPROTO_DCCP: 253 case IPPROTO_DCCP:
252 case IPPROTO_ICMP: 254 case IPPROTO_ICMP:
@@ -329,7 +331,7 @@ target(struct sk_buff **pskb,
329 if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP 331 if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP
330 && (ctinfo == IP_CT_RELATED 332 && (ctinfo == IP_CT_RELATED
331 || ctinfo == IP_CT_RELATED+IP_CT_IS_REPLY)) 333 || ctinfo == IP_CT_RELATED+IP_CT_IS_REPLY))
332 return IPT_CONTINUE; 334 return XT_CONTINUE;
333 335
334 /* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO, 336 /* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO,
335 * TIMESTAMP, INFO_REQUEST or ADDRESS type icmp packets from here 337 * TIMESTAMP, INFO_REQUEST or ADDRESS type icmp packets from here
@@ -367,7 +369,7 @@ target(struct sk_buff **pskb,
367 * actually a unicast IP packet. TCP doesn't like PACKET_MULTICAST */ 369 * actually a unicast IP packet. TCP doesn't like PACKET_MULTICAST */
368 (*pskb)->pkt_type = PACKET_HOST; 370 (*pskb)->pkt_type = PACKET_HOST;
369 371
370 return IPT_CONTINUE; 372 return XT_CONTINUE;
371} 373}
372 374
373static int 375static int
@@ -470,8 +472,9 @@ static void destroy(const struct xt_target *target, void *targinfo)
470 nf_ct_l3proto_module_put(target->family); 472 nf_ct_l3proto_module_put(target->family);
471} 473}
472 474
473static struct ipt_target clusterip_tgt = { 475static struct xt_target clusterip_tgt = {
474 .name = "CLUSTERIP", 476 .name = "CLUSTERIP",
477 .family = AF_INET,
475 .target = target, 478 .target = target,
476 .targetsize = sizeof(struct ipt_clusterip_tgt_info), 479 .targetsize = sizeof(struct ipt_clusterip_tgt_info),
477 .checkentry = checkentry, 480 .checkentry = checkentry,
@@ -727,7 +730,7 @@ static int __init ipt_clusterip_init(void)
727{ 730{
728 int ret; 731 int ret;
729 732
730 ret = ipt_register_target(&clusterip_tgt); 733 ret = xt_register_target(&clusterip_tgt);
731 if (ret < 0) 734 if (ret < 0)
732 return ret; 735 return ret;
733 736
@@ -753,7 +756,7 @@ cleanup_hook:
753 nf_unregister_hook(&cip_arp_ops); 756 nf_unregister_hook(&cip_arp_ops);
754#endif /* CONFIG_PROC_FS */ 757#endif /* CONFIG_PROC_FS */
755cleanup_target: 758cleanup_target:
756 ipt_unregister_target(&clusterip_tgt); 759 xt_unregister_target(&clusterip_tgt);
757 return ret; 760 return ret;
758} 761}
759 762
@@ -765,7 +768,7 @@ static void __exit ipt_clusterip_fini(void)
765 remove_proc_entry(clusterip_procdir->name, clusterip_procdir->parent); 768 remove_proc_entry(clusterip_procdir->name, clusterip_procdir->parent);
766#endif 769#endif
767 nf_unregister_hook(&cip_arp_ops); 770 nf_unregister_hook(&cip_arp_ops);
768 ipt_unregister_target(&clusterip_tgt); 771 xt_unregister_target(&clusterip_tgt);
769} 772}
770 773
771module_init(ipt_clusterip_init); 774module_init(ipt_clusterip_init);
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index b55d670a24df..b5ca5938d1fe 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -9,12 +9,14 @@
9 * ipt_ECN.c,v 1.5 2002/08/18 19:36:51 laforge Exp 9 * ipt_ECN.c,v 1.5 2002/08/18 19:36:51 laforge Exp
10*/ 10*/
11 11
12#include <linux/in.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/skbuff.h> 14#include <linux/skbuff.h>
14#include <linux/ip.h> 15#include <linux/ip.h>
15#include <linux/tcp.h> 16#include <linux/tcp.h>
16#include <net/checksum.h> 17#include <net/checksum.h>
17 18
19#include <linux/netfilter/x_tables.h>
18#include <linux/netfilter_ipv4/ip_tables.h> 20#include <linux/netfilter_ipv4/ip_tables.h>
19#include <linux/netfilter_ipv4/ipt_ECN.h> 21#include <linux/netfilter_ipv4/ipt_ECN.h>
20 22
@@ -95,7 +97,7 @@ target(struct sk_buff **pskb,
95 if (!set_ect_tcp(pskb, einfo)) 97 if (!set_ect_tcp(pskb, einfo))
96 return NF_DROP; 98 return NF_DROP;
97 99
98 return IPT_CONTINUE; 100 return XT_CONTINUE;
99} 101}
100 102
101static int 103static int
@@ -119,7 +121,7 @@ checkentry(const char *tablename,
119 return 0; 121 return 0;
120 } 122 }
121 if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR)) 123 if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR))
122 && (e->ip.proto != IPPROTO_TCP || (e->ip.invflags & IPT_INV_PROTO))) { 124 && (e->ip.proto != IPPROTO_TCP || (e->ip.invflags & XT_INV_PROTO))) {
123 printk(KERN_WARNING "ECN: cannot use TCP operations on a " 125 printk(KERN_WARNING "ECN: cannot use TCP operations on a "
124 "non-tcp rule\n"); 126 "non-tcp rule\n");
125 return 0; 127 return 0;
@@ -127,8 +129,9 @@ checkentry(const char *tablename,
127 return 1; 129 return 1;
128} 130}
129 131
130static struct ipt_target ipt_ecn_reg = { 132static struct xt_target ipt_ecn_reg = {
131 .name = "ECN", 133 .name = "ECN",
134 .family = AF_INET,
132 .target = target, 135 .target = target,
133 .targetsize = sizeof(struct ipt_ECN_info), 136 .targetsize = sizeof(struct ipt_ECN_info),
134 .table = "mangle", 137 .table = "mangle",
@@ -138,12 +141,12 @@ static struct ipt_target ipt_ecn_reg = {
138 141
139static int __init ipt_ecn_init(void) 142static int __init ipt_ecn_init(void)
140{ 143{
141 return ipt_register_target(&ipt_ecn_reg); 144 return xt_register_target(&ipt_ecn_reg);
142} 145}
143 146
144static void __exit ipt_ecn_fini(void) 147static void __exit ipt_ecn_fini(void)
145{ 148{
146 ipt_unregister_target(&ipt_ecn_reg); 149 xt_unregister_target(&ipt_ecn_reg);
147} 150}
148 151
149module_init(ipt_ecn_init); 152module_init(ipt_ecn_init);
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index c96de16fefae..f68370ffb43f 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -20,7 +20,7 @@
20#include <net/route.h> 20#include <net/route.h>
21 21
22#include <linux/netfilter.h> 22#include <linux/netfilter.h>
23#include <linux/netfilter_ipv4/ip_tables.h> 23#include <linux/netfilter/x_tables.h>
24#include <linux/netfilter_ipv4/ipt_LOG.h> 24#include <linux/netfilter_ipv4/ipt_LOG.h>
25 25
26MODULE_LICENSE("GPL"); 26MODULE_LICENSE("GPL");
@@ -432,7 +432,7 @@ ipt_log_target(struct sk_buff **pskb,
432 432
433 ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li, 433 ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
434 loginfo->prefix); 434 loginfo->prefix);
435 return IPT_CONTINUE; 435 return XT_CONTINUE;
436} 436}
437 437
438static int ipt_log_checkentry(const char *tablename, 438static int ipt_log_checkentry(const char *tablename,
@@ -455,8 +455,9 @@ static int ipt_log_checkentry(const char *tablename,
455 return 1; 455 return 1;
456} 456}
457 457
458static struct ipt_target ipt_log_reg = { 458static struct xt_target ipt_log_reg = {
459 .name = "LOG", 459 .name = "LOG",
460 .family = AF_INET,
460 .target = ipt_log_target, 461 .target = ipt_log_target,
461 .targetsize = sizeof(struct ipt_log_info), 462 .targetsize = sizeof(struct ipt_log_info),
462 .checkentry = ipt_log_checkentry, 463 .checkentry = ipt_log_checkentry,
@@ -471,8 +472,11 @@ static struct nf_logger ipt_log_logger ={
471 472
472static int __init ipt_log_init(void) 473static int __init ipt_log_init(void)
473{ 474{
474 if (ipt_register_target(&ipt_log_reg)) 475 int ret;
475 return -EINVAL; 476
477 ret = xt_register_target(&ipt_log_reg);
478 if (ret < 0)
479 return ret;
476 if (nf_log_register(PF_INET, &ipt_log_logger) < 0) { 480 if (nf_log_register(PF_INET, &ipt_log_logger) < 0) {
477 printk(KERN_WARNING "ipt_LOG: not logging via system console " 481 printk(KERN_WARNING "ipt_LOG: not logging via system console "
478 "since somebody else already registered for PF_INET\n"); 482 "since somebody else already registered for PF_INET\n");
@@ -486,7 +490,7 @@ static int __init ipt_log_init(void)
486static void __exit ipt_log_fini(void) 490static void __exit ipt_log_fini(void)
487{ 491{
488 nf_log_unregister_logger(&ipt_log_logger); 492 nf_log_unregister_logger(&ipt_log_logger);
489 ipt_unregister_target(&ipt_log_reg); 493 xt_unregister_target(&ipt_log_reg);
490} 494}
491 495
492module_init(ipt_log_init); 496module_init(ipt_log_init);
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index d669685afd04..91c42efcd533 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -25,7 +25,7 @@
25#else 25#else
26#include <linux/netfilter_ipv4/ip_nat_rule.h> 26#include <linux/netfilter_ipv4/ip_nat_rule.h>
27#endif 27#endif
28#include <linux/netfilter_ipv4/ip_tables.h> 28#include <linux/netfilter/x_tables.h>
29 29
30MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
31MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 31MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -190,8 +190,9 @@ static struct notifier_block masq_inet_notifier = {
190 .notifier_call = masq_inet_event, 190 .notifier_call = masq_inet_event,
191}; 191};
192 192
193static struct ipt_target masquerade = { 193static struct xt_target masquerade = {
194 .name = "MASQUERADE", 194 .name = "MASQUERADE",
195 .family = AF_INET,
195 .target = masquerade_target, 196 .target = masquerade_target,
196 .targetsize = sizeof(struct ip_nat_multi_range_compat), 197 .targetsize = sizeof(struct ip_nat_multi_range_compat),
197 .table = "nat", 198 .table = "nat",
@@ -204,7 +205,7 @@ static int __init ipt_masquerade_init(void)
204{ 205{
205 int ret; 206 int ret;
206 207
207 ret = ipt_register_target(&masquerade); 208 ret = xt_register_target(&masquerade);
208 209
209 if (ret == 0) { 210 if (ret == 0) {
210 /* Register for device down reports */ 211 /* Register for device down reports */
@@ -218,7 +219,7 @@ static int __init ipt_masquerade_init(void)
218 219
219static void __exit ipt_masquerade_fini(void) 220static void __exit ipt_masquerade_fini(void)
220{ 221{
221 ipt_unregister_target(&masquerade); 222 xt_unregister_target(&masquerade);
222 unregister_netdevice_notifier(&masq_dev_notifier); 223 unregister_netdevice_notifier(&masq_dev_notifier);
223 unregister_inetaddr_notifier(&masq_inet_notifier); 224 unregister_inetaddr_notifier(&masq_inet_notifier);
224} 225}
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c
index 9390e90f2b25..b4acc241d898 100644
--- a/net/ipv4/netfilter/ipt_NETMAP.c
+++ b/net/ipv4/netfilter/ipt_NETMAP.c
@@ -15,6 +15,7 @@
15#include <linux/netdevice.h> 15#include <linux/netdevice.h>
16#include <linux/netfilter.h> 16#include <linux/netfilter.h>
17#include <linux/netfilter_ipv4.h> 17#include <linux/netfilter_ipv4.h>
18#include <linux/netfilter/x_tables.h>
18#ifdef CONFIG_NF_NAT_NEEDED 19#ifdef CONFIG_NF_NAT_NEEDED
19#include <net/netfilter/nf_nat_rule.h> 20#include <net/netfilter/nf_nat_rule.h>
20#else 21#else
@@ -88,8 +89,9 @@ target(struct sk_buff **pskb,
88 return ip_nat_setup_info(ct, &newrange, hooknum); 89 return ip_nat_setup_info(ct, &newrange, hooknum);
89} 90}
90 91
91static struct ipt_target target_module = { 92static struct xt_target target_module = {
92 .name = MODULENAME, 93 .name = MODULENAME,
94 .family = AF_INET,
93 .target = target, 95 .target = target,
94 .targetsize = sizeof(struct ip_nat_multi_range_compat), 96 .targetsize = sizeof(struct ip_nat_multi_range_compat),
95 .table = "nat", 97 .table = "nat",
@@ -101,12 +103,12 @@ static struct ipt_target target_module = {
101 103
102static int __init ipt_netmap_init(void) 104static int __init ipt_netmap_init(void)
103{ 105{
104 return ipt_register_target(&target_module); 106 return xt_register_target(&target_module);
105} 107}
106 108
107static void __exit ipt_netmap_fini(void) 109static void __exit ipt_netmap_fini(void)
108{ 110{
109 ipt_unregister_target(&target_module); 111 xt_unregister_target(&target_module);
110} 112}
111 113
112module_init(ipt_netmap_init); 114module_init(ipt_netmap_init);
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c
index 462eceb3a1b1..54cd021aa5a8 100644
--- a/net/ipv4/netfilter/ipt_REDIRECT.c
+++ b/net/ipv4/netfilter/ipt_REDIRECT.c
@@ -18,6 +18,7 @@
18#include <net/protocol.h> 18#include <net/protocol.h>
19#include <net/checksum.h> 19#include <net/checksum.h>
20#include <linux/netfilter_ipv4.h> 20#include <linux/netfilter_ipv4.h>
21#include <linux/netfilter/x_tables.h>
21#ifdef CONFIG_NF_NAT_NEEDED 22#ifdef CONFIG_NF_NAT_NEEDED
22#include <net/netfilter/nf_nat_rule.h> 23#include <net/netfilter/nf_nat_rule.h>
23#else 24#else
@@ -104,8 +105,9 @@ redirect_target(struct sk_buff **pskb,
104 return ip_nat_setup_info(ct, &newrange, hooknum); 105 return ip_nat_setup_info(ct, &newrange, hooknum);
105} 106}
106 107
107static struct ipt_target redirect_reg = { 108static struct xt_target redirect_reg = {
108 .name = "REDIRECT", 109 .name = "REDIRECT",
110 .family = AF_INET,
109 .target = redirect_target, 111 .target = redirect_target,
110 .targetsize = sizeof(struct ip_nat_multi_range_compat), 112 .targetsize = sizeof(struct ip_nat_multi_range_compat),
111 .table = "nat", 113 .table = "nat",
@@ -116,12 +118,12 @@ static struct ipt_target redirect_reg = {
116 118
117static int __init ipt_redirect_init(void) 119static int __init ipt_redirect_init(void)
118{ 120{
119 return ipt_register_target(&redirect_reg); 121 return xt_register_target(&redirect_reg);
120} 122}
121 123
122static void __exit ipt_redirect_fini(void) 124static void __exit ipt_redirect_fini(void)
123{ 125{
124 ipt_unregister_target(&redirect_reg); 126 xt_unregister_target(&redirect_reg);
125} 127}
126 128
127module_init(ipt_redirect_init); 129module_init(ipt_redirect_init);
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index f0319e5ee437..e4a1ddb386a7 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -22,6 +22,7 @@
22#include <net/tcp.h> 22#include <net/tcp.h>
23#include <net/route.h> 23#include <net/route.h>
24#include <net/dst.h> 24#include <net/dst.h>
25#include <linux/netfilter/x_tables.h>
25#include <linux/netfilter_ipv4/ip_tables.h> 26#include <linux/netfilter_ipv4/ip_tables.h>
26#include <linux/netfilter_ipv4/ipt_REJECT.h> 27#include <linux/netfilter_ipv4/ipt_REJECT.h>
27#ifdef CONFIG_BRIDGE_NETFILTER 28#ifdef CONFIG_BRIDGE_NETFILTER
@@ -116,7 +117,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
116 117
117 /* Adjust TCP checksum */ 118 /* Adjust TCP checksum */
118 tcph->check = 0; 119 tcph->check = 0;
119 tcph->check = tcp_v4_check(tcph, sizeof(struct tcphdr), 120 tcph->check = tcp_v4_check(sizeof(struct tcphdr),
120 nskb->nh.iph->saddr, 121 nskb->nh.iph->saddr,
121 nskb->nh.iph->daddr, 122 nskb->nh.iph->daddr,
122 csum_partial((char *)tcph, 123 csum_partial((char *)tcph,
@@ -230,7 +231,7 @@ static int check(const char *tablename,
230 } else if (rejinfo->with == IPT_TCP_RESET) { 231 } else if (rejinfo->with == IPT_TCP_RESET) {
231 /* Must specify that it's a TCP packet */ 232 /* Must specify that it's a TCP packet */
232 if (e->ip.proto != IPPROTO_TCP 233 if (e->ip.proto != IPPROTO_TCP
233 || (e->ip.invflags & IPT_INV_PROTO)) { 234 || (e->ip.invflags & XT_INV_PROTO)) {
234 DEBUGP("REJECT: TCP_RESET invalid for non-tcp\n"); 235 DEBUGP("REJECT: TCP_RESET invalid for non-tcp\n");
235 return 0; 236 return 0;
236 } 237 }
@@ -238,8 +239,9 @@ static int check(const char *tablename,
238 return 1; 239 return 1;
239} 240}
240 241
241static struct ipt_target ipt_reject_reg = { 242static struct xt_target ipt_reject_reg = {
242 .name = "REJECT", 243 .name = "REJECT",
244 .family = AF_INET,
243 .target = reject, 245 .target = reject,
244 .targetsize = sizeof(struct ipt_reject_info), 246 .targetsize = sizeof(struct ipt_reject_info),
245 .table = "filter", 247 .table = "filter",
@@ -251,12 +253,12 @@ static struct ipt_target ipt_reject_reg = {
251 253
252static int __init ipt_reject_init(void) 254static int __init ipt_reject_init(void)
253{ 255{
254 return ipt_register_target(&ipt_reject_reg); 256 return xt_register_target(&ipt_reject_reg);
255} 257}
256 258
257static void __exit ipt_reject_fini(void) 259static void __exit ipt_reject_fini(void)
258{ 260{
259 ipt_unregister_target(&ipt_reject_reg); 261 xt_unregister_target(&ipt_reject_reg);
260} 262}
261 263
262module_init(ipt_reject_init); 264module_init(ipt_reject_init);
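
Besides the x_tables conversion, send_reset() above picks up a tree-wide API change: tcp_v4_check() loses its leading struct tcphdr * argument, which the helper never used. The definition after this series presumably reduces to the plain IPv4 pseudo-header checksum (a hedged reconstruction, since include/net/tcp.h is not part of this section):

/* in include/net/tcp.h, presumably: */
static inline __sum16 tcp_v4_check(int len, __be32 saddr, __be32 daddr,
                                   __wsum base)
{
        /* pseudo-header checksum folded with the partial sum of the
         * TCP header and payload already accumulated in base */
        return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
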
diff --git a/net/ipv4/netfilter/ipt_SAME.c b/net/ipv4/netfilter/ipt_SAME.c
index 3dcf29411337..a1cdd1262de2 100644
--- a/net/ipv4/netfilter/ipt_SAME.c
+++ b/net/ipv4/netfilter/ipt_SAME.c
@@ -34,6 +34,7 @@
34#include <net/protocol.h> 34#include <net/protocol.h>
35#include <net/checksum.h> 35#include <net/checksum.h>
36#include <linux/netfilter_ipv4.h> 36#include <linux/netfilter_ipv4.h>
37#include <linux/netfilter/x_tables.h>
37#ifdef CONFIG_NF_NAT_NEEDED 38#ifdef CONFIG_NF_NAT_NEEDED
38#include <net/netfilter/nf_nat_rule.h> 39#include <net/netfilter/nf_nat_rule.h>
39#else 40#else
@@ -186,8 +187,9 @@ same_target(struct sk_buff **pskb,
186 return ip_nat_setup_info(ct, &newrange, hooknum); 187 return ip_nat_setup_info(ct, &newrange, hooknum);
187} 188}
188 189
189static struct ipt_target same_reg = { 190static struct xt_target same_reg = {
190 .name = "SAME", 191 .name = "SAME",
192 .family = AF_INET,
191 .target = same_target, 193 .target = same_target,
192 .targetsize = sizeof(struct ipt_same_info), 194 .targetsize = sizeof(struct ipt_same_info),
193 .table = "nat", 195 .table = "nat",
@@ -199,12 +201,12 @@ static struct ipt_target same_reg = {
199 201
200static int __init ipt_same_init(void) 202static int __init ipt_same_init(void)
201{ 203{
202 return ipt_register_target(&same_reg); 204 return xt_register_target(&same_reg);
203} 205}
204 206
205static void __exit ipt_same_fini(void) 207static void __exit ipt_same_fini(void)
206{ 208{
207 ipt_unregister_target(&same_reg); 209 xt_unregister_target(&same_reg);
208} 210}
209 211
210module_init(ipt_same_init); 212module_init(ipt_same_init);
diff --git a/net/ipv4/netfilter/ipt_TCPMSS.c b/net/ipv4/netfilter/ipt_TCPMSS.c
deleted file mode 100644
index 93eb5c3c1884..000000000000
--- a/net/ipv4/netfilter/ipt_TCPMSS.c
+++ /dev/null
@@ -1,207 +0,0 @@
1/*
2 * This is a module which is used for setting the MSS option in TCP packets.
3 *
4 * Copyright (C) 2000 Marc Boucher <marc@mbsi.ca>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/skbuff.h>
13
14#include <linux/ip.h>
15#include <net/tcp.h>
16
17#include <linux/netfilter_ipv4/ip_tables.h>
18#include <linux/netfilter_ipv4/ipt_TCPMSS.h>
19
20MODULE_LICENSE("GPL");
21MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
22MODULE_DESCRIPTION("iptables TCP MSS modification module");
23
24static inline unsigned int
25optlen(const u_int8_t *opt, unsigned int offset)
26{
27 /* Beware zero-length options: make finite progress */
28 if (opt[offset] <= TCPOPT_NOP || opt[offset+1] == 0)
29 return 1;
30 else
31 return opt[offset+1];
32}
33
34static unsigned int
35ipt_tcpmss_target(struct sk_buff **pskb,
36 const struct net_device *in,
37 const struct net_device *out,
38 unsigned int hooknum,
39 const struct xt_target *target,
40 const void *targinfo)
41{
42 const struct ipt_tcpmss_info *tcpmssinfo = targinfo;
43 struct tcphdr *tcph;
44 struct iphdr *iph;
45 u_int16_t tcplen, newmss;
46 __be16 newtotlen, oldval;
47 unsigned int i;
48 u_int8_t *opt;
49
50 if (!skb_make_writable(pskb, (*pskb)->len))
51 return NF_DROP;
52
53 iph = (*pskb)->nh.iph;
54 tcplen = (*pskb)->len - iph->ihl*4;
55 tcph = (void *)iph + iph->ihl*4;
56
57 /* Since it passed flags test in tcp match, we know it is is
58 not a fragment, and has data >= tcp header length. SYN
59 packets should not contain data: if they did, then we risk
60 running over MTU, sending Frag Needed and breaking things
61 badly. --RR */
62 if (tcplen != tcph->doff*4) {
63 if (net_ratelimit())
64 printk(KERN_ERR
65 "ipt_tcpmss_target: bad length (%d bytes)\n",
66 (*pskb)->len);
67 return NF_DROP;
68 }
69
70 if (tcpmssinfo->mss == IPT_TCPMSS_CLAMP_PMTU) {
71 if (dst_mtu((*pskb)->dst) <= sizeof(struct iphdr) +
72 sizeof(struct tcphdr)) {
73 if (net_ratelimit())
74 printk(KERN_ERR "ipt_tcpmss_target: "
75 "unknown or invalid path-MTU (%d)\n",
76 dst_mtu((*pskb)->dst));
77 return NF_DROP; /* or IPT_CONTINUE ?? */
78 }
79
80 newmss = dst_mtu((*pskb)->dst) - sizeof(struct iphdr) -
81 sizeof(struct tcphdr);
82 } else
83 newmss = tcpmssinfo->mss;
84
85 opt = (u_int8_t *)tcph;
86 for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
87 if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
88 opt[i+1] == TCPOLEN_MSS) {
89 u_int16_t oldmss;
90
91 oldmss = (opt[i+2] << 8) | opt[i+3];
92
93 if (tcpmssinfo->mss == IPT_TCPMSS_CLAMP_PMTU &&
94 oldmss <= newmss)
95 return IPT_CONTINUE;
96
97 opt[i+2] = (newmss & 0xff00) >> 8;
98 opt[i+3] = (newmss & 0x00ff);
99
100 nf_proto_csum_replace2(&tcph->check, *pskb,
101 htons(oldmss), htons(newmss), 0);
102 return IPT_CONTINUE;
103 }
104 }
105
106 /*
107 * MSS Option not found ?! add it..
108 */
109 if (skb_tailroom((*pskb)) < TCPOLEN_MSS) {
110 struct sk_buff *newskb;
111
112 newskb = skb_copy_expand(*pskb, skb_headroom(*pskb),
113 TCPOLEN_MSS, GFP_ATOMIC);
114 if (!newskb)
115 return NF_DROP;
116 kfree_skb(*pskb);
117 *pskb = newskb;
118 iph = (*pskb)->nh.iph;
119 tcph = (void *)iph + iph->ihl*4;
120 }
121
122 skb_put((*pskb), TCPOLEN_MSS);
123
124 opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
125 memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
126
127 nf_proto_csum_replace2(&tcph->check, *pskb,
128 htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
129 opt[0] = TCPOPT_MSS;
130 opt[1] = TCPOLEN_MSS;
131 opt[2] = (newmss & 0xff00) >> 8;
132 opt[3] = (newmss & 0x00ff);
133
134 nf_proto_csum_replace4(&tcph->check, *pskb, 0, *((__be32 *)opt), 0);
135
136 oldval = ((__be16 *)tcph)[6];
137 tcph->doff += TCPOLEN_MSS/4;
138 nf_proto_csum_replace2(&tcph->check, *pskb,
139 oldval, ((__be16 *)tcph)[6], 0);
140
141 newtotlen = htons(ntohs(iph->tot_len) + TCPOLEN_MSS);
142 nf_csum_replace2(&iph->check, iph->tot_len, newtotlen);
143 iph->tot_len = newtotlen;
144 return IPT_CONTINUE;
145}
146
147#define TH_SYN 0x02
148
149static inline int find_syn_match(const struct ipt_entry_match *m)
150{
151 const struct ipt_tcp *tcpinfo = (const struct ipt_tcp *)m->data;
152
153 if (strcmp(m->u.kernel.match->name, "tcp") == 0 &&
154 tcpinfo->flg_cmp & TH_SYN &&
155 !(tcpinfo->invflags & IPT_TCP_INV_FLAGS))
156 return 1;
157
158 return 0;
159}
160
161/* Must specify -p tcp --syn/--tcp-flags SYN */
162static int
163ipt_tcpmss_checkentry(const char *tablename,
164 const void *e_void,
165 const struct xt_target *target,
166 void *targinfo,
167 unsigned int hook_mask)
168{
169 const struct ipt_tcpmss_info *tcpmssinfo = targinfo;
170 const struct ipt_entry *e = e_void;
171
172 if (tcpmssinfo->mss == IPT_TCPMSS_CLAMP_PMTU &&
173 (hook_mask & ~((1 << NF_IP_FORWARD) |
174 (1 << NF_IP_LOCAL_OUT) |
175 (1 << NF_IP_POST_ROUTING))) != 0) {
176 printk("TCPMSS: path-MTU clamping only supported in "
177 "FORWARD, OUTPUT and POSTROUTING hooks\n");
178 return 0;
179 }
180
181 if (IPT_MATCH_ITERATE(e, find_syn_match))
182 return 1;
183 printk("TCPMSS: Only works on TCP SYN packets\n");
184 return 0;
185}
186
187static struct ipt_target ipt_tcpmss_reg = {
188 .name = "TCPMSS",
189 .target = ipt_tcpmss_target,
190 .targetsize = sizeof(struct ipt_tcpmss_info),
191 .proto = IPPROTO_TCP,
192 .checkentry = ipt_tcpmss_checkentry,
193 .me = THIS_MODULE,
194};
195
196static int __init ipt_tcpmss_init(void)
197{
198 return ipt_register_target(&ipt_tcpmss_reg);
199}
200
201static void __exit ipt_tcpmss_fini(void)
202{
203 ipt_unregister_target(&ipt_tcpmss_reg);
204}
205
206module_init(ipt_tcpmss_init);
207module_exit(ipt_tcpmss_fini);
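
ipt_TCPMSS.c is deleted outright rather than converted; the likely reason (an assumption here, since the replacement file is outside this section) is that the target was reimplemented as a protocol-independent xt_TCPMSS module shared by IPv4 and IPv6. For reference, the heart of the deleted code is the clamp-to-PMTU arithmetic; a small self-contained restatement:

#include <linux/ip.h>
#include <linux/tcp.h>

/* Advertised MSS when clamping to the path MTU: the MTU minus the minimal
 * IPv4 and TCP headers, e.g. 1500 - 20 - 20 = 1460 on plain Ethernet. */
static u16 clamped_mss(unsigned int path_mtu)
{
        if (path_mtu <= sizeof(struct iphdr) + sizeof(struct tcphdr))
                return 0;       /* the deleted target drops such packets instead */
        return path_mtu - sizeof(struct iphdr) - sizeof(struct tcphdr);
}
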
diff --git a/net/ipv4/netfilter/ipt_TOS.c b/net/ipv4/netfilter/ipt_TOS.c
index 18e74ac4d425..29b05a6bd108 100644
--- a/net/ipv4/netfilter/ipt_TOS.c
+++ b/net/ipv4/netfilter/ipt_TOS.c
@@ -13,7 +13,7 @@
13#include <linux/ip.h> 13#include <linux/ip.h>
14#include <net/checksum.h> 14#include <net/checksum.h>
15 15
16#include <linux/netfilter_ipv4/ip_tables.h> 16#include <linux/netfilter/x_tables.h>
17#include <linux/netfilter_ipv4/ipt_TOS.h> 17#include <linux/netfilter_ipv4/ipt_TOS.h>
18 18
19MODULE_LICENSE("GPL"); 19MODULE_LICENSE("GPL");
@@ -40,7 +40,7 @@ target(struct sk_buff **pskb,
40 iph->tos = (iph->tos & IPTOS_PREC_MASK) | tosinfo->tos; 40 iph->tos = (iph->tos & IPTOS_PREC_MASK) | tosinfo->tos;
41 nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos)); 41 nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos));
42 } 42 }
43 return IPT_CONTINUE; 43 return XT_CONTINUE;
44} 44}
45 45
46static int 46static int
@@ -63,8 +63,9 @@ checkentry(const char *tablename,
63 return 1; 63 return 1;
64} 64}
65 65
66static struct ipt_target ipt_tos_reg = { 66static struct xt_target ipt_tos_reg = {
67 .name = "TOS", 67 .name = "TOS",
68 .family = AF_INET,
68 .target = target, 69 .target = target,
69 .targetsize = sizeof(struct ipt_tos_target_info), 70 .targetsize = sizeof(struct ipt_tos_target_info),
70 .table = "mangle", 71 .table = "mangle",
@@ -74,12 +75,12 @@ static struct ipt_target ipt_tos_reg = {
74 75
75static int __init ipt_tos_init(void) 76static int __init ipt_tos_init(void)
76{ 77{
77 return ipt_register_target(&ipt_tos_reg); 78 return xt_register_target(&ipt_tos_reg);
78} 79}
79 80
80static void __exit ipt_tos_fini(void) 81static void __exit ipt_tos_fini(void)
81{ 82{
82 ipt_unregister_target(&ipt_tos_reg); 83 xt_unregister_target(&ipt_tos_reg);
83} 84}
84 85
85module_init(ipt_tos_init); 86module_init(ipt_tos_init);
diff --git a/net/ipv4/netfilter/ipt_TTL.c b/net/ipv4/netfilter/ipt_TTL.c
index fffe5ca82e91..d2b6fa3f9dcd 100644
--- a/net/ipv4/netfilter/ipt_TTL.c
+++ b/net/ipv4/netfilter/ipt_TTL.c
@@ -12,7 +12,7 @@
12#include <linux/ip.h> 12#include <linux/ip.h>
13#include <net/checksum.h> 13#include <net/checksum.h>
14 14
15#include <linux/netfilter_ipv4/ip_tables.h> 15#include <linux/netfilter/x_tables.h>
16#include <linux/netfilter_ipv4/ipt_TTL.h> 16#include <linux/netfilter_ipv4/ipt_TTL.h>
17 17
18MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); 18MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
@@ -59,7 +59,7 @@ ipt_ttl_target(struct sk_buff **pskb,
59 iph->ttl = new_ttl; 59 iph->ttl = new_ttl;
60 } 60 }
61 61
62 return IPT_CONTINUE; 62 return XT_CONTINUE;
63} 63}
64 64
65static int ipt_ttl_checkentry(const char *tablename, 65static int ipt_ttl_checkentry(const char *tablename,
@@ -80,8 +80,9 @@ static int ipt_ttl_checkentry(const char *tablename,
80 return 1; 80 return 1;
81} 81}
82 82
83static struct ipt_target ipt_TTL = { 83static struct xt_target ipt_TTL = {
84 .name = "TTL", 84 .name = "TTL",
85 .family = AF_INET,
85 .target = ipt_ttl_target, 86 .target = ipt_ttl_target,
86 .targetsize = sizeof(struct ipt_TTL_info), 87 .targetsize = sizeof(struct ipt_TTL_info),
87 .table = "mangle", 88 .table = "mangle",
@@ -91,12 +92,12 @@ static struct ipt_target ipt_TTL = {
91 92
92static int __init ipt_ttl_init(void) 93static int __init ipt_ttl_init(void)
93{ 94{
94 return ipt_register_target(&ipt_TTL); 95 return xt_register_target(&ipt_TTL);
95} 96}
96 97
97static void __exit ipt_ttl_fini(void) 98static void __exit ipt_ttl_fini(void)
98{ 99{
99 ipt_unregister_target(&ipt_TTL); 100 xt_unregister_target(&ipt_TTL);
100} 101}
101 102
102module_init(ipt_ttl_init); 103module_init(ipt_ttl_init);
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index dbd34783a64d..7af57a3a1f36 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -57,7 +57,7 @@
57#include <linux/mm.h> 57#include <linux/mm.h>
58#include <linux/moduleparam.h> 58#include <linux/moduleparam.h>
59#include <linux/netfilter.h> 59#include <linux/netfilter.h>
60#include <linux/netfilter_ipv4/ip_tables.h> 60#include <linux/netfilter/x_tables.h>
61#include <linux/netfilter_ipv4/ipt_ULOG.h> 61#include <linux/netfilter_ipv4/ipt_ULOG.h>
62#include <net/sock.h> 62#include <net/sock.h>
63#include <linux/bitops.h> 63#include <linux/bitops.h>
@@ -132,7 +132,6 @@ static void ulog_send(unsigned int nlgroupnum)
132 ub->qlen = 0; 132 ub->qlen = 0;
133 ub->skb = NULL; 133 ub->skb = NULL;
134 ub->lastnlh = NULL; 134 ub->lastnlh = NULL;
135
136} 135}
137 136
138 137
@@ -314,7 +313,7 @@ static unsigned int ipt_ulog_target(struct sk_buff **pskb,
314 313
315 ipt_ulog_packet(hooknum, *pskb, in, out, loginfo, NULL); 314 ipt_ulog_packet(hooknum, *pskb, in, out, loginfo, NULL);
316 315
317 return IPT_CONTINUE; 316 return XT_CONTINUE;
318} 317}
319 318
320static void ipt_logfn(unsigned int pf, 319static void ipt_logfn(unsigned int pf,
@@ -363,8 +362,9 @@ static int ipt_ulog_checkentry(const char *tablename,
363 return 1; 362 return 1;
364} 363}
365 364
366static struct ipt_target ipt_ulog_reg = { 365static struct xt_target ipt_ulog_reg = {
367 .name = "ULOG", 366 .name = "ULOG",
367 .family = AF_INET,
368 .target = ipt_ulog_target, 368 .target = ipt_ulog_target,
369 .targetsize = sizeof(struct ipt_ulog_info), 369 .targetsize = sizeof(struct ipt_ulog_info),
370 .checkentry = ipt_ulog_checkentry, 370 .checkentry = ipt_ulog_checkentry,
@@ -379,7 +379,7 @@ static struct nf_logger ipt_ulog_logger = {
379 379
380static int __init ipt_ulog_init(void) 380static int __init ipt_ulog_init(void)
381{ 381{
382 int i; 382 int ret, i;
383 383
384 DEBUGP("ipt_ULOG: init module\n"); 384 DEBUGP("ipt_ULOG: init module\n");
385 385
@@ -400,9 +400,10 @@ static int __init ipt_ulog_init(void)
400 if (!nflognl) 400 if (!nflognl)
401 return -ENOMEM; 401 return -ENOMEM;
402 402
403 if (ipt_register_target(&ipt_ulog_reg) != 0) { 403 ret = xt_register_target(&ipt_ulog_reg);
404 if (ret < 0) {
404 sock_release(nflognl->sk_socket); 405 sock_release(nflognl->sk_socket);
405 return -EINVAL; 406 return ret;
406 } 407 }
407 if (nflog) 408 if (nflog)
408 nf_log_register(PF_INET, &ipt_ulog_logger); 409 nf_log_register(PF_INET, &ipt_ulog_logger);
@@ -419,7 +420,7 @@ static void __exit ipt_ulog_fini(void)
419 420
420 if (nflog) 421 if (nflog)
421 nf_log_unregister_logger(&ipt_ulog_logger); 422 nf_log_unregister_logger(&ipt_ulog_logger);
422 ipt_unregister_target(&ipt_ulog_reg); 423 xt_unregister_target(&ipt_ulog_reg);
423 sock_release(nflognl->sk_socket); 424 sock_release(nflognl->sk_socket);
424 425
425 /* remove pending timers and free allocated skb's */ 426 /* remove pending timers and free allocated skb's */
@@ -435,7 +436,6 @@ static void __exit ipt_ulog_fini(void)
435 ub->skb = NULL; 436 ub->skb = NULL;
436 } 437 }
437 } 438 }
438
439} 439}
440 440
441module_init(ipt_ulog_init); 441module_init(ipt_ulog_init);
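
Two things change in ipt_ULOG besides the conversion: a couple of stray blank lines go away, and ipt_ulog_init() now keeps the return value of xt_register_target() instead of collapsing every failure to -EINVAL. A hedged sketch of the resulting unwind-on-error shape, using the names from the hunk and eliding the unrelated setup:

static int __init ipt_ulog_init_sketch(void)
{
        int ret;

        /* ... per-group buffers and the nflognl netlink socket set up first ... */

        ret = xt_register_target(&ipt_ulog_reg);
        if (ret < 0) {
                sock_release(nflognl->sk_socket);       /* undo the socket */
                return ret;     /* propagate the real error, not a blanket -EINVAL */
        }
        return 0;
}
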
diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c
index 7b60eb74788b..648f555c4d16 100644
--- a/net/ipv4/netfilter/ipt_addrtype.c
+++ b/net/ipv4/netfilter/ipt_addrtype.c
@@ -16,7 +16,7 @@
16#include <net/route.h> 16#include <net/route.h>
17 17
18#include <linux/netfilter_ipv4/ipt_addrtype.h> 18#include <linux/netfilter_ipv4/ipt_addrtype.h>
19#include <linux/netfilter_ipv4/ip_tables.h> 19#include <linux/netfilter/x_tables.h>
20 20
21MODULE_LICENSE("GPL"); 21MODULE_LICENSE("GPL");
22MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); 22MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
@@ -44,8 +44,9 @@ static int match(const struct sk_buff *skb,
44 return ret; 44 return ret;
45} 45}
46 46
47static struct ipt_match addrtype_match = { 47static struct xt_match addrtype_match = {
48 .name = "addrtype", 48 .name = "addrtype",
49 .family = AF_INET,
49 .match = match, 50 .match = match,
50 .matchsize = sizeof(struct ipt_addrtype_info), 51 .matchsize = sizeof(struct ipt_addrtype_info),
51 .me = THIS_MODULE 52 .me = THIS_MODULE
@@ -53,12 +54,12 @@ static struct ipt_match addrtype_match = {
53 54
54static int __init ipt_addrtype_init(void) 55static int __init ipt_addrtype_init(void)
55{ 56{
56 return ipt_register_match(&addrtype_match); 57 return xt_register_match(&addrtype_match);
57} 58}
58 59
59static void __exit ipt_addrtype_fini(void) 60static void __exit ipt_addrtype_fini(void)
60{ 61{
61 ipt_unregister_match(&addrtype_match); 62 xt_unregister_match(&addrtype_match);
62} 63}
63 64
64module_init(ipt_addrtype_init); 65module_init(ipt_addrtype_init);
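
The match modules (addrtype above, ah, ecn, iprange, owner, recent, tos and ttl below) get the mirror-image treatment: struct xt_match plus .family = AF_INET, registered with xt_register_match()/xt_unregister_match() from module init/exit exactly as in the target sketch earlier. A minimal sketch with hypothetical names, assuming the 2.6.20-era match hook signature:

static int
example_match(const struct sk_buff *skb,
              const struct net_device *in,
              const struct net_device *out,
              const struct xt_match *match,
              const void *matchinfo,
              int offset,
              unsigned int protoff,
              int *hotdrop)
{
        return 1;               /* non-zero: the packet matches this rule */
}

static struct xt_match example_match_reg = {
        .name   = "example",
        .family = AF_INET,
        .match  = example_match,
        .me     = THIS_MODULE,
};
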
diff --git a/net/ipv4/netfilter/ipt_ah.c b/net/ipv4/netfilter/ipt_ah.c
index 1798f86bc534..42f41224a43a 100644
--- a/net/ipv4/netfilter/ipt_ah.c
+++ b/net/ipv4/netfilter/ipt_ah.c
@@ -6,12 +6,13 @@
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8 8
9#include <linux/in.h>
9#include <linux/module.h> 10#include <linux/module.h>
10#include <linux/skbuff.h> 11#include <linux/skbuff.h>
11#include <linux/ip.h> 12#include <linux/ip.h>
12 13
13#include <linux/netfilter_ipv4/ipt_ah.h> 14#include <linux/netfilter_ipv4/ipt_ah.h>
14#include <linux/netfilter_ipv4/ip_tables.h> 15#include <linux/netfilter/x_tables.h>
15 16
16MODULE_LICENSE("GPL"); 17MODULE_LICENSE("GPL");
17MODULE_AUTHOR("Yon Uriarte <yon@astaro.de>"); 18MODULE_AUTHOR("Yon Uriarte <yon@astaro.de>");
@@ -86,8 +87,9 @@ checkentry(const char *tablename,
86 return 1; 87 return 1;
87} 88}
88 89
89static struct ipt_match ah_match = { 90static struct xt_match ah_match = {
90 .name = "ah", 91 .name = "ah",
92 .family = AF_INET,
91 .match = match, 93 .match = match,
92 .matchsize = sizeof(struct ipt_ah), 94 .matchsize = sizeof(struct ipt_ah),
93 .proto = IPPROTO_AH, 95 .proto = IPPROTO_AH,
@@ -97,12 +99,12 @@ static struct ipt_match ah_match = {
97 99
98static int __init ipt_ah_init(void) 100static int __init ipt_ah_init(void)
99{ 101{
100 return ipt_register_match(&ah_match); 102 return xt_register_match(&ah_match);
101} 103}
102 104
103static void __exit ipt_ah_fini(void) 105static void __exit ipt_ah_fini(void)
104{ 106{
105 ipt_unregister_match(&ah_match); 107 xt_unregister_match(&ah_match);
106} 108}
107 109
108module_init(ipt_ah_init); 110module_init(ipt_ah_init);
diff --git a/net/ipv4/netfilter/ipt_ecn.c b/net/ipv4/netfilter/ipt_ecn.c
index dafbdec0efc0..37508b2cfea6 100644
--- a/net/ipv4/netfilter/ipt_ecn.c
+++ b/net/ipv4/netfilter/ipt_ecn.c
@@ -9,10 +9,13 @@
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/in.h>
13#include <linux/ip.h>
12#include <linux/module.h> 14#include <linux/module.h>
13#include <linux/skbuff.h> 15#include <linux/skbuff.h>
14#include <linux/tcp.h> 16#include <linux/tcp.h>
15 17
18#include <linux/netfilter/x_tables.h>
16#include <linux/netfilter_ipv4/ip_tables.h> 19#include <linux/netfilter_ipv4/ip_tables.h>
17#include <linux/netfilter_ipv4/ipt_ecn.h> 20#include <linux/netfilter_ipv4/ipt_ecn.h>
18 21
@@ -109,8 +112,9 @@ static int checkentry(const char *tablename, const void *ip_void,
109 return 1; 112 return 1;
110} 113}
111 114
112static struct ipt_match ecn_match = { 115static struct xt_match ecn_match = {
113 .name = "ecn", 116 .name = "ecn",
117 .family = AF_INET,
114 .match = match, 118 .match = match,
115 .matchsize = sizeof(struct ipt_ecn_info), 119 .matchsize = sizeof(struct ipt_ecn_info),
116 .checkentry = checkentry, 120 .checkentry = checkentry,
@@ -119,12 +123,12 @@ static struct ipt_match ecn_match = {
119 123
120static int __init ipt_ecn_init(void) 124static int __init ipt_ecn_init(void)
121{ 125{
122 return ipt_register_match(&ecn_match); 126 return xt_register_match(&ecn_match);
123} 127}
124 128
125static void __exit ipt_ecn_fini(void) 129static void __exit ipt_ecn_fini(void)
126{ 130{
127 ipt_unregister_match(&ecn_match); 131 xt_unregister_match(&ecn_match);
128} 132}
129 133
130module_init(ipt_ecn_init); 134module_init(ipt_ecn_init);
diff --git a/net/ipv4/netfilter/ipt_iprange.c b/net/ipv4/netfilter/ipt_iprange.c
index 5202edd8d333..05de593be94c 100644
--- a/net/ipv4/netfilter/ipt_iprange.c
+++ b/net/ipv4/netfilter/ipt_iprange.c
@@ -10,7 +10,7 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/skbuff.h> 11#include <linux/skbuff.h>
12#include <linux/ip.h> 12#include <linux/ip.h>
13#include <linux/netfilter_ipv4/ip_tables.h> 13#include <linux/netfilter/x_tables.h>
14#include <linux/netfilter_ipv4/ipt_iprange.h> 14#include <linux/netfilter_ipv4/ipt_iprange.h>
15 15
16MODULE_LICENSE("GPL"); 16MODULE_LICENSE("GPL");
@@ -63,22 +63,22 @@ match(const struct sk_buff *skb,
63 return 1; 63 return 1;
64} 64}
65 65
66static struct ipt_match iprange_match = { 66static struct xt_match iprange_match = {
67 .name = "iprange", 67 .name = "iprange",
68 .family = AF_INET,
68 .match = match, 69 .match = match,
69 .matchsize = sizeof(struct ipt_iprange_info), 70 .matchsize = sizeof(struct ipt_iprange_info),
70 .destroy = NULL,
71 .me = THIS_MODULE 71 .me = THIS_MODULE
72}; 72};
73 73
74static int __init ipt_iprange_init(void) 74static int __init ipt_iprange_init(void)
75{ 75{
76 return ipt_register_match(&iprange_match); 76 return xt_register_match(&iprange_match);
77} 77}
78 78
79static void __exit ipt_iprange_fini(void) 79static void __exit ipt_iprange_fini(void)
80{ 80{
81 ipt_unregister_match(&iprange_match); 81 xt_unregister_match(&iprange_match);
82} 82}
83 83
84module_init(ipt_iprange_init); 84module_init(ipt_iprange_init);
diff --git a/net/ipv4/netfilter/ipt_owner.c b/net/ipv4/netfilter/ipt_owner.c
index 78c336f12a9e..9f496ac834b5 100644
--- a/net/ipv4/netfilter/ipt_owner.c
+++ b/net/ipv4/netfilter/ipt_owner.c
@@ -15,7 +15,7 @@
15#include <net/sock.h> 15#include <net/sock.h>
16 16
17#include <linux/netfilter_ipv4/ipt_owner.h> 17#include <linux/netfilter_ipv4/ipt_owner.h>
18#include <linux/netfilter_ipv4/ip_tables.h> 18#include <linux/netfilter/x_tables.h>
19 19
20MODULE_LICENSE("GPL"); 20MODULE_LICENSE("GPL");
21MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>"); 21MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
@@ -68,8 +68,9 @@ checkentry(const char *tablename,
68 return 1; 68 return 1;
69} 69}
70 70
71static struct ipt_match owner_match = { 71static struct xt_match owner_match = {
72 .name = "owner", 72 .name = "owner",
73 .family = AF_INET,
73 .match = match, 74 .match = match,
74 .matchsize = sizeof(struct ipt_owner_info), 75 .matchsize = sizeof(struct ipt_owner_info),
75 .hooks = (1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_POST_ROUTING), 76 .hooks = (1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_POST_ROUTING),
@@ -79,12 +80,12 @@ static struct ipt_match owner_match = {
79 80
80static int __init ipt_owner_init(void) 81static int __init ipt_owner_init(void)
81{ 82{
82 return ipt_register_match(&owner_match); 83 return xt_register_match(&owner_match);
83} 84}
84 85
85static void __exit ipt_owner_fini(void) 86static void __exit ipt_owner_fini(void)
86{ 87{
87 ipt_unregister_match(&owner_match); 88 xt_unregister_match(&owner_match);
88} 89}
89 90
90module_init(ipt_owner_init); 91module_init(ipt_owner_init);
diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c
index 4db0e73c56f1..6b97b6796173 100644
--- a/net/ipv4/netfilter/ipt_recent.c
+++ b/net/ipv4/netfilter/ipt_recent.c
@@ -12,6 +12,7 @@
12 * Copyright 2002-2003, Stephen Frost, 2.5.x port by laforge@netfilter.org 12 * Copyright 2002-2003, Stephen Frost, 2.5.x port by laforge@netfilter.org
13 */ 13 */
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/ip.h>
15#include <linux/moduleparam.h> 16#include <linux/moduleparam.h>
16#include <linux/proc_fs.h> 17#include <linux/proc_fs.h>
17#include <linux/seq_file.h> 18#include <linux/seq_file.h>
@@ -24,7 +25,7 @@
24#include <linux/skbuff.h> 25#include <linux/skbuff.h>
25#include <linux/inet.h> 26#include <linux/inet.h>
26 27
27#include <linux/netfilter_ipv4/ip_tables.h> 28#include <linux/netfilter/x_tables.h>
28#include <linux/netfilter_ipv4/ipt_recent.h> 29#include <linux/netfilter_ipv4/ipt_recent.h>
29 30
30MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); 31MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
@@ -462,8 +463,9 @@ static struct file_operations recent_fops = {
462}; 463};
463#endif /* CONFIG_PROC_FS */ 464#endif /* CONFIG_PROC_FS */
464 465
465static struct ipt_match recent_match = { 466static struct xt_match recent_match = {
466 .name = "recent", 467 .name = "recent",
468 .family = AF_INET,
467 .match = ipt_recent_match, 469 .match = ipt_recent_match,
468 .matchsize = sizeof(struct ipt_recent_info), 470 .matchsize = sizeof(struct ipt_recent_info),
469 .checkentry = ipt_recent_checkentry, 471 .checkentry = ipt_recent_checkentry,
@@ -479,13 +481,13 @@ static int __init ipt_recent_init(void)
479 return -EINVAL; 481 return -EINVAL;
480 ip_list_hash_size = 1 << fls(ip_list_tot); 482 ip_list_hash_size = 1 << fls(ip_list_tot);
481 483
482 err = ipt_register_match(&recent_match); 484 err = xt_register_match(&recent_match);
483#ifdef CONFIG_PROC_FS 485#ifdef CONFIG_PROC_FS
484 if (err) 486 if (err)
485 return err; 487 return err;
486 proc_dir = proc_mkdir("ipt_recent", proc_net); 488 proc_dir = proc_mkdir("ipt_recent", proc_net);
487 if (proc_dir == NULL) { 489 if (proc_dir == NULL) {
488 ipt_unregister_match(&recent_match); 490 xt_unregister_match(&recent_match);
489 err = -ENOMEM; 491 err = -ENOMEM;
490 } 492 }
491#endif 493#endif
@@ -495,7 +497,7 @@ static int __init ipt_recent_init(void)
495static void __exit ipt_recent_exit(void) 497static void __exit ipt_recent_exit(void)
496{ 498{
497 BUG_ON(!list_empty(&tables)); 499 BUG_ON(!list_empty(&tables));
498 ipt_unregister_match(&recent_match); 500 xt_unregister_match(&recent_match);
499#ifdef CONFIG_PROC_FS 501#ifdef CONFIG_PROC_FS
500 remove_proc_entry("ipt_recent", proc_net); 502 remove_proc_entry("ipt_recent", proc_net);
501#endif 503#endif
diff --git a/net/ipv4/netfilter/ipt_tos.c b/net/ipv4/netfilter/ipt_tos.c
index 5549c39c7851..5d33b51d49d8 100644
--- a/net/ipv4/netfilter/ipt_tos.c
+++ b/net/ipv4/netfilter/ipt_tos.c
@@ -8,11 +8,12 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <linux/ip.h>
11#include <linux/module.h> 12#include <linux/module.h>
12#include <linux/skbuff.h> 13#include <linux/skbuff.h>
13 14
14#include <linux/netfilter_ipv4/ipt_tos.h> 15#include <linux/netfilter_ipv4/ipt_tos.h>
15#include <linux/netfilter_ipv4/ip_tables.h> 16#include <linux/netfilter/x_tables.h>
16 17
17MODULE_LICENSE("GPL"); 18MODULE_LICENSE("GPL");
18MODULE_DESCRIPTION("iptables TOS match module"); 19MODULE_DESCRIPTION("iptables TOS match module");
@@ -32,8 +33,9 @@ match(const struct sk_buff *skb,
32 return (skb->nh.iph->tos == info->tos) ^ info->invert; 33 return (skb->nh.iph->tos == info->tos) ^ info->invert;
33} 34}
34 35
35static struct ipt_match tos_match = { 36static struct xt_match tos_match = {
36 .name = "tos", 37 .name = "tos",
38 .family = AF_INET,
37 .match = match, 39 .match = match,
38 .matchsize = sizeof(struct ipt_tos_info), 40 .matchsize = sizeof(struct ipt_tos_info),
39 .me = THIS_MODULE, 41 .me = THIS_MODULE,
@@ -41,12 +43,12 @@ static struct ipt_match tos_match = {
41 43
42static int __init ipt_multiport_init(void) 44static int __init ipt_multiport_init(void)
43{ 45{
44 return ipt_register_match(&tos_match); 46 return xt_register_match(&tos_match);
45} 47}
46 48
47static void __exit ipt_multiport_fini(void) 49static void __exit ipt_multiport_fini(void)
48{ 50{
49 ipt_unregister_match(&tos_match); 51 xt_unregister_match(&tos_match);
50} 52}
51 53
52module_init(ipt_multiport_init); 54module_init(ipt_multiport_init);
diff --git a/net/ipv4/netfilter/ipt_ttl.c b/net/ipv4/netfilter/ipt_ttl.c
index a5243bdb87d7..d5cd984e5ed2 100644
--- a/net/ipv4/netfilter/ipt_ttl.c
+++ b/net/ipv4/netfilter/ipt_ttl.c
@@ -9,11 +9,12 @@
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/ip.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/skbuff.h> 14#include <linux/skbuff.h>
14 15
15#include <linux/netfilter_ipv4/ipt_ttl.h> 16#include <linux/netfilter_ipv4/ipt_ttl.h>
16#include <linux/netfilter_ipv4/ip_tables.h> 17#include <linux/netfilter/x_tables.h>
17 18
18MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); 19MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
19MODULE_DESCRIPTION("IP tables TTL matching module"); 20MODULE_DESCRIPTION("IP tables TTL matching module");
@@ -48,8 +49,9 @@ static int match(const struct sk_buff *skb,
48 return 0; 49 return 0;
49} 50}
50 51
51static struct ipt_match ttl_match = { 52static struct xt_match ttl_match = {
52 .name = "ttl", 53 .name = "ttl",
54 .family = AF_INET,
53 .match = match, 55 .match = match,
54 .matchsize = sizeof(struct ipt_ttl_info), 56 .matchsize = sizeof(struct ipt_ttl_info),
55 .me = THIS_MODULE, 57 .me = THIS_MODULE,
@@ -57,13 +59,12 @@ static struct ipt_match ttl_match = {
57 59
58static int __init ipt_ttl_init(void) 60static int __init ipt_ttl_init(void)
59{ 61{
60 return ipt_register_match(&ttl_match); 62 return xt_register_match(&ttl_match);
61} 63}
62 64
63static void __exit ipt_ttl_fini(void) 65static void __exit ipt_ttl_fini(void)
64{ 66{
65 ipt_unregister_match(&ttl_match); 67 xt_unregister_match(&ttl_match);
66
67} 68}
68 69
69module_init(ipt_ttl_init); 70module_init(ipt_ttl_init);
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index e2e7dd8d7903..51053cb42f43 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -74,7 +74,7 @@ static struct
74 } 74 }
75}; 75};
76 76
77static struct ipt_table packet_filter = { 77static struct xt_table packet_filter = {
78 .name = "filter", 78 .name = "filter",
79 .valid_hooks = FILTER_VALID_HOOKS, 79 .valid_hooks = FILTER_VALID_HOOKS,
80 .lock = RW_LOCK_UNLOCKED, 80 .lock = RW_LOCK_UNLOCKED,
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index af2939889444..a532e4d84332 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -103,7 +103,7 @@ static struct
103 } 103 }
104}; 104};
105 105
106static struct ipt_table packet_mangler = { 106static struct xt_table packet_mangler = {
107 .name = "mangle", 107 .name = "mangle",
108 .valid_hooks = MANGLE_VALID_HOOKS, 108 .valid_hooks = MANGLE_VALID_HOOKS,
109 .lock = RW_LOCK_UNLOCKED, 109 .lock = RW_LOCK_UNLOCKED,
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index bcbeb4aeacd9..5277550fa6b5 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -79,7 +79,7 @@ static struct
79 } 79 }
80}; 80};
81 81
82static struct ipt_table packet_raw = { 82static struct xt_table packet_raw = {
83 .name = "raw", 83 .name = "raw",
84 .valid_hooks = RAW_VALID_HOOKS, 84 .valid_hooks = RAW_VALID_HOOKS,
85 .lock = RW_LOCK_UNLOCKED, 85 .lock = RW_LOCK_UNLOCKED,
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 86a92272b053..998b2557692c 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -254,8 +254,9 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
254 if (maniptype == IP_NAT_MANIP_SRC) { 254 if (maniptype == IP_NAT_MANIP_SRC) {
255 if (find_appropriate_src(orig_tuple, tuple, range)) { 255 if (find_appropriate_src(orig_tuple, tuple, range)) {
256 DEBUGP("get_unique_tuple: Found current src map\n"); 256 DEBUGP("get_unique_tuple: Found current src map\n");
257 if (!nf_nat_used_tuple(tuple, ct)) 257 if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
258 return; 258 if (!nf_nat_used_tuple(tuple, ct))
259 return;
259 } 260 }
260 } 261 }
261 262
@@ -269,6 +270,13 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
269 270
270 proto = nf_nat_proto_find_get(orig_tuple->dst.protonum); 271 proto = nf_nat_proto_find_get(orig_tuple->dst.protonum);
271 272
273 /* Change protocol info to have some randomization */
274 if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
275 proto->unique_tuple(tuple, range, maniptype, ct);
276 nf_nat_proto_put(proto);
277 return;
278 }
279
272 /* Only bother mapping if it's not already in range and unique */ 280 /* Only bother mapping if it's not already in range and unique */
273 if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) || 281 if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
274 proto->in_range(tuple, maniptype, &range->min, &range->max)) && 282 proto->in_range(tuple, maniptype, &range->min, &range->max)) &&
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 98fbfc84d183..dc6738bdfab7 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -176,7 +176,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff **pskb,
176 datalen = (*pskb)->len - iph->ihl*4; 176 datalen = (*pskb)->len - iph->ihl*4;
177 if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { 177 if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
178 tcph->check = 0; 178 tcph->check = 0;
179 tcph->check = tcp_v4_check(tcph, datalen, 179 tcph->check = tcp_v4_check(datalen,
180 iph->saddr, iph->daddr, 180 iph->saddr, iph->daddr,
181 csum_partial((char *)tcph, 181 csum_partial((char *)tcph,
182 datalen, 0)); 182 datalen, 0));
diff --git a/net/ipv4/netfilter/nf_nat_proto_tcp.c b/net/ipv4/netfilter/nf_nat_proto_tcp.c
index 7e26a7e9bee1..439164c7a626 100644
--- a/net/ipv4/netfilter/nf_nat_proto_tcp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_tcp.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/random.h>
11#include <linux/ip.h> 12#include <linux/ip.h>
12#include <linux/tcp.h> 13#include <linux/tcp.h>
13 14
@@ -75,6 +76,9 @@ tcp_unique_tuple(struct nf_conntrack_tuple *tuple,
75 range_size = ntohs(range->max.tcp.port) - min + 1; 76 range_size = ntohs(range->max.tcp.port) - min + 1;
76 } 77 }
77 78
79 if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
80 port = net_random();
81
78 for (i = 0; i < range_size; i++, port++) { 82 for (i = 0; i < range_size; i++, port++) {
79 *portptr = htons(min + port % range_size); 83 *portptr = htons(min + port % range_size);
80 if (!nf_nat_used_tuple(tuple, ct)) 84 if (!nf_nat_used_tuple(tuple, ct))
diff --git a/net/ipv4/netfilter/nf_nat_proto_udp.c b/net/ipv4/netfilter/nf_nat_proto_udp.c
index ab0ce4c8699f..8cae6e063bb6 100644
--- a/net/ipv4/netfilter/nf_nat_proto_udp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_udp.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/random.h>
11#include <linux/ip.h> 12#include <linux/ip.h>
12#include <linux/udp.h> 13#include <linux/udp.h>
13 14
@@ -73,6 +74,9 @@ udp_unique_tuple(struct nf_conntrack_tuple *tuple,
73 range_size = ntohs(range->max.udp.port) - min + 1; 74 range_size = ntohs(range->max.udp.port) - min + 1;
74 } 75 }
75 76
77 if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
78 port = net_random();
79
76 for (i = 0; i < range_size; i++, port++) { 80 for (i = 0; i < range_size; i++, port++) {
77 *portptr = htons(min + port % range_size); 81 *portptr = htons(min + port % range_size);
78 if (!nf_nat_used_tuple(tuple, ct)) 82 if (!nf_nat_used_tuple(tuple, ct))
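
The nf_nat hunks above add source-port randomisation: a new IP_NAT_RANGE_PROTO_RANDOM range flag makes get_unique_tuple() skip the cached source mapping and hand the tuple straight to the L4 protocol helper, and the TCP/UDP helpers seed their port search with net_random() instead of always starting at the bottom of the range. A self-contained illustration of that search strategy (pick_port() and in_use() are invented for the sketch; net_random() and the wrap-around walk come from the hunks):

#include <linux/net.h>
#include <linux/random.h>
#include <linux/types.h>

/* Probe each port in [min, min + range_size) exactly once, starting at a
 * possibly random offset and wrapping around, until in_use() reports a free
 * port.  Mirrors the loop in tcp_unique_tuple()/udp_unique_tuple(). */
static int pick_port(u16 min, u32 range_size, int randomise,
                     int (*in_use)(u16 port), u16 *chosen)
{
        u32 i, port = 0;

        if (randomise)
                port = net_random();    /* random starting offset */

        for (i = 0; i < range_size; i++, port++) {
                u16 candidate = min + port % range_size;

                if (!in_use(candidate)) {
                        *chosen = candidate;
                        return 1;
                }
        }
        return 0;                       /* whole range exhausted */
}

Note that the nf_nat_rule.c hunk below only allows the flag for source NAT: the DNAT checkentry explicitly rejects IP_NAT_RANGE_PROTO_RANDOM.
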
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index b868ee0195d4..7f95b4e2eb31 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -119,7 +119,7 @@ static struct
119 } 119 }
120}; 120};
121 121
122static struct ipt_table nat_table = { 122static struct xt_table nat_table = {
123 .name = "nat", 123 .name = "nat",
124 .valid_hooks = NAT_VALID_HOOKS, 124 .valid_hooks = NAT_VALID_HOOKS,
125 .lock = RW_LOCK_UNLOCKED, 125 .lock = RW_LOCK_UNLOCKED,
@@ -226,6 +226,10 @@ static int ipt_dnat_checkentry(const char *tablename,
226 printk("DNAT: multiple ranges no longer supported\n"); 226 printk("DNAT: multiple ranges no longer supported\n");
227 return 0; 227 return 0;
228 } 228 }
229 if (mr->range[0].flags & IP_NAT_RANGE_PROTO_RANDOM) {
230 printk("DNAT: port randomization not supported\n");
231 return 0;
232 }
229 return 1; 233 return 1;
230} 234}
231 235
@@ -290,7 +294,7 @@ int nf_nat_rule_find(struct sk_buff **pskb,
290 return ret; 294 return ret;
291} 295}
292 296
293static struct ipt_target ipt_snat_reg = { 297static struct xt_target ipt_snat_reg = {
294 .name = "SNAT", 298 .name = "SNAT",
295 .target = ipt_snat_target, 299 .target = ipt_snat_target,
296 .targetsize = sizeof(struct nf_nat_multi_range_compat), 300 .targetsize = sizeof(struct nf_nat_multi_range_compat),
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index 00d6dea9f7f3..5a964a167c13 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -32,12 +32,6 @@
32#define DEBUGP(format, args...) 32#define DEBUGP(format, args...)
33#endif 33#endif
34 34
35#define HOOKNAME(hooknum) ((hooknum) == NF_IP_POST_ROUTING ? "POST_ROUTING" \
36 : ((hooknum) == NF_IP_PRE_ROUTING ? "PRE_ROUTING" \
37 : ((hooknum) == NF_IP_LOCAL_OUT ? "LOCAL_OUT" \
38 : ((hooknum) == NF_IP_LOCAL_IN ? "LOCAL_IN" \
39 : "*ERROR*")))
40
41#ifdef CONFIG_XFRM 35#ifdef CONFIG_XFRM
42static void nat_decode_session(struct sk_buff *skb, struct flowi *fl) 36static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
43{ 37{
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index a6c63bbd9ddb..fed6a1e7af9e 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -489,7 +489,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
489 } 489 }
490 490
491 security_sk_classify_flow(sk, &fl); 491 security_sk_classify_flow(sk, &fl);
492 err = ip_route_output_flow(&rt, &fl, sk, !(msg->msg_flags&MSG_DONTWAIT)); 492 err = ip_route_output_flow(&rt, &fl, sk, 1);
493 } 493 }
494 if (err) 494 if (err)
495 goto done; 495 goto done;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 2daa0dc19d33..baee304a3cb7 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2635,7 +2635,7 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
2635 2635
2636 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags); 2636 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2637 if (nlh == NULL) 2637 if (nlh == NULL)
2638 return -ENOBUFS; 2638 return -EMSGSIZE;
2639 2639
2640 r = nlmsg_data(nlh); 2640 r = nlmsg_data(nlh);
2641 r->rtm_family = AF_INET; 2641 r->rtm_family = AF_INET;
@@ -2718,7 +2718,8 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
2718 return nlmsg_end(skb, nlh); 2718 return nlmsg_end(skb, nlh);
2719 2719
2720nla_put_failure: 2720nla_put_failure:
2721 return nlmsg_cancel(skb, nlh); 2721 nlmsg_cancel(skb, nlh);
2722 return -EMSGSIZE;
2722} 2723}
2723 2724
2724int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg) 2725int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
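
rt_fill_info() switches its failure paths to -EMSGSIZE and now calls nlmsg_cancel() before returning, so the partially written header is trimmed back out of the skb; -EMSGSIZE is the conventional "message did not fit, retry with a bigger buffer" signal for netlink fill functions (treat that rationale as a hedged note). The resulting shape of such a function, with a stand-in attribute:

#include <net/netlink.h>
#include <linux/rtnetlink.h>

static int route_fill_sketch(struct sk_buff *skb, u32 pid, u32 seq, int event)
{
        struct nlmsghdr *nlh;

        nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct rtmsg), 0);
        if (nlh == NULL)
                return -EMSGSIZE;       /* not even room for the header */

        NLA_PUT_U32(skb, RTA_OIF, 0);   /* stand-in for the real attributes */

        return nlmsg_end(skb, nlh);

nla_put_failure:
        nlmsg_cancel(skb, nlh);         /* roll the partial message back */
        return -EMSGSIZE;
}
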
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b67e0dd743be..5bd43d7294fd 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2415,10 +2415,11 @@ void __init tcp_init(void)
2415 &tcp_hashinfo.ehash_size, 2415 &tcp_hashinfo.ehash_size,
2416 NULL, 2416 NULL,
2417 0); 2417 0);
2418 tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1; 2418 tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
2419 for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) { 2419 for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
2420 rwlock_init(&tcp_hashinfo.ehash[i].lock); 2420 rwlock_init(&tcp_hashinfo.ehash[i].lock);
2421 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain); 2421 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
2422 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
2422 } 2423 }
2423 2424
2424 tcp_hashinfo.bhash = 2425 tcp_hashinfo.bhash =
@@ -2475,7 +2476,7 @@ void __init tcp_init(void)
2475 2476
2476 printk(KERN_INFO "TCP: Hash tables configured " 2477 printk(KERN_INFO "TCP: Hash tables configured "
2477 "(established %d bind %d)\n", 2478 "(established %d bind %d)\n",
2478 tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size); 2479 tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);
2479 2480
2480 tcp_register_congestion_control(&tcp_reno); 2481 tcp_register_congestion_control(&tcp_reno);
2481} 2482}
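
The tcp_init() hunk reflects a layout change in the established hash: instead of parking TIME_WAIT sockets in the upper half of a double-sized table, every bucket now carries its own twchain list, so ehash_size is the real bucket count and each bucket initialises two list heads. The bucket presumably ends up looking roughly like this (an assumption, the struct itself is outside this diff):

struct inet_ehash_bucket {
        rwlock_t          lock;
        struct hlist_head chain;        /* established sockets */
        struct hlist_head twchain;      /* TIME_WAIT sockets, same bucket index */
};

The tcp_ipv4.c /proc hunks further down follow suit, walking ehash[bucket].twchain instead of the old ehash[bucket + ehash_size].chain.
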
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c26076fb890e..c6109895bb5e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -936,28 +936,58 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
936 struct tcp_sock *tp = tcp_sk(sk); 936 struct tcp_sock *tp = tcp_sk(sk);
937 unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked; 937 unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
938 struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2); 938 struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2);
939 struct sk_buff *cached_skb;
939 int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3; 940 int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
940 int reord = tp->packets_out; 941 int reord = tp->packets_out;
941 int prior_fackets; 942 int prior_fackets;
942 u32 lost_retrans = 0; 943 u32 lost_retrans = 0;
943 int flag = 0; 944 int flag = 0;
944 int dup_sack = 0; 945 int dup_sack = 0;
946 int cached_fack_count;
945 int i; 947 int i;
948 int first_sack_index;
946 949
947 if (!tp->sacked_out) 950 if (!tp->sacked_out)
948 tp->fackets_out = 0; 951 tp->fackets_out = 0;
949 prior_fackets = tp->fackets_out; 952 prior_fackets = tp->fackets_out;
950 953
954 /* Check for D-SACK. */
955 if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) {
956 dup_sack = 1;
957 tp->rx_opt.sack_ok |= 4;
958 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
959 } else if (num_sacks > 1 &&
960 !after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) &&
961 !before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) {
962 dup_sack = 1;
963 tp->rx_opt.sack_ok |= 4;
964 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
965 }
966
967 /* D-SACK for already forgotten data...
968 * Do dumb counting. */
969 if (dup_sack &&
970 !after(ntohl(sp[0].end_seq), prior_snd_una) &&
971 after(ntohl(sp[0].end_seq), tp->undo_marker))
972 tp->undo_retrans--;
973
974 /* Eliminate too old ACKs, but take into
975 * account more or less fresh ones, they can
976 * contain valid SACK info.
977 */
978 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
979 return 0;
980
951 /* SACK fastpath: 981 /* SACK fastpath:
952 * if the only SACK change is the increase of the end_seq of 982 * if the only SACK change is the increase of the end_seq of
953 * the first block then only apply that SACK block 983 * the first block then only apply that SACK block
954 * and use retrans queue hinting otherwise slowpath */ 984 * and use retrans queue hinting otherwise slowpath */
955 flag = 1; 985 flag = 1;
956 for (i = 0; i< num_sacks; i++) { 986 for (i = 0; i < num_sacks; i++) {
957 __u32 start_seq = ntohl(sp[i].start_seq); 987 __be32 start_seq = sp[i].start_seq;
958 __u32 end_seq = ntohl(sp[i].end_seq); 988 __be32 end_seq = sp[i].end_seq;
959 989
960 if (i == 0){ 990 if (i == 0) {
961 if (tp->recv_sack_cache[i].start_seq != start_seq) 991 if (tp->recv_sack_cache[i].start_seq != start_seq)
962 flag = 0; 992 flag = 0;
963 } else { 993 } else {
@@ -967,39 +997,14 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
967 } 997 }
968 tp->recv_sack_cache[i].start_seq = start_seq; 998 tp->recv_sack_cache[i].start_seq = start_seq;
969 tp->recv_sack_cache[i].end_seq = end_seq; 999 tp->recv_sack_cache[i].end_seq = end_seq;
970 1000 }
971 /* Check for D-SACK. */ 1001 /* Clear the rest of the cache sack blocks so they won't match mistakenly. */
972 if (i == 0) { 1002 for (; i < ARRAY_SIZE(tp->recv_sack_cache); i++) {
973 u32 ack = TCP_SKB_CB(ack_skb)->ack_seq; 1003 tp->recv_sack_cache[i].start_seq = 0;
974 1004 tp->recv_sack_cache[i].end_seq = 0;
975 if (before(start_seq, ack)) {
976 dup_sack = 1;
977 tp->rx_opt.sack_ok |= 4;
978 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
979 } else if (num_sacks > 1 &&
980 !after(end_seq, ntohl(sp[1].end_seq)) &&
981 !before(start_seq, ntohl(sp[1].start_seq))) {
982 dup_sack = 1;
983 tp->rx_opt.sack_ok |= 4;
984 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
985 }
986
987 /* D-SACK for already forgotten data...
988 * Do dumb counting. */
989 if (dup_sack &&
990 !after(end_seq, prior_snd_una) &&
991 after(end_seq, tp->undo_marker))
992 tp->undo_retrans--;
993
994 /* Eliminate too old ACKs, but take into
995 * account more or less fresh ones, they can
996 * contain valid SACK info.
997 */
998 if (before(ack, prior_snd_una - tp->max_window))
999 return 0;
1000 }
1001 } 1005 }
1002 1006
1007 first_sack_index = 0;
1003 if (flag) 1008 if (flag)
1004 num_sacks = 1; 1009 num_sacks = 1;
1005 else { 1010 else {
@@ -1016,6 +1021,10 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1016 tmp = sp[j]; 1021 tmp = sp[j];
1017 sp[j] = sp[j+1]; 1022 sp[j] = sp[j+1];
1018 sp[j+1] = tmp; 1023 sp[j+1] = tmp;
1024
1025 /* Track where the first SACK block goes to */
1026 if (j == first_sack_index)
1027 first_sack_index = j+1;
1019 } 1028 }
1020 1029
1021 } 1030 }
@@ -1025,20 +1034,22 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1025 /* clear flag as used for different purpose in following code */ 1034 /* clear flag as used for different purpose in following code */
1026 flag = 0; 1035 flag = 0;
1027 1036
1037 /* Use SACK fastpath hint if valid */
1038 cached_skb = tp->fastpath_skb_hint;
1039 cached_fack_count = tp->fastpath_cnt_hint;
1040 if (!cached_skb) {
1041 cached_skb = sk->sk_write_queue.next;
1042 cached_fack_count = 0;
1043 }
1044
1028 for (i=0; i<num_sacks; i++, sp++) { 1045 for (i=0; i<num_sacks; i++, sp++) {
1029 struct sk_buff *skb; 1046 struct sk_buff *skb;
1030 __u32 start_seq = ntohl(sp->start_seq); 1047 __u32 start_seq = ntohl(sp->start_seq);
1031 __u32 end_seq = ntohl(sp->end_seq); 1048 __u32 end_seq = ntohl(sp->end_seq);
1032 int fack_count; 1049 int fack_count;
1033 1050
1034 /* Use SACK fastpath hint if valid */ 1051 skb = cached_skb;
1035 if (tp->fastpath_skb_hint) { 1052 fack_count = cached_fack_count;
1036 skb = tp->fastpath_skb_hint;
1037 fack_count = tp->fastpath_cnt_hint;
1038 } else {
1039 skb = sk->sk_write_queue.next;
1040 fack_count = 0;
1041 }
1042 1053
1043 /* Event "B" in the comment above. */ 1054 /* Event "B" in the comment above. */
1044 if (after(end_seq, tp->high_seq)) 1055 if (after(end_seq, tp->high_seq))
@@ -1048,8 +1059,12 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1048 int in_sack, pcount; 1059 int in_sack, pcount;
1049 u8 sacked; 1060 u8 sacked;
1050 1061
1051 tp->fastpath_skb_hint = skb; 1062 cached_skb = skb;
1052 tp->fastpath_cnt_hint = fack_count; 1063 cached_fack_count = fack_count;
1064 if (i == first_sack_index) {
1065 tp->fastpath_skb_hint = skb;
1066 tp->fastpath_cnt_hint = fack_count;
1067 }
1053 1068
1054 /* The retransmission queue is always in order, so 1069 /* The retransmission queue is always in order, so
1055 * we can short-circuit the walk early. 1070 * we can short-circuit the walk early.
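
The tcp_sacktag_write_queue() rework does two things. First, the D-SACK detection and the too-old-ACK bailout are hoisted out of the per-block loop; they only depend on the first SACK block, so they now run once before any cache state is touched. Second, the fastpath hint is made stable across the in-place sort of the SACK blocks: first_sack_index remembers where the originally first block ends up, the walk resumes from a cached skb/fack_count pair, and fastpath_skb_hint/fastpath_cnt_hint are only refreshed while that original first block is being processed. A compact, self-contained sketch of the index tracking during the sort (illustrative only, plain u32 keys instead of sequence-number wrap-around comparisons):

#include <linux/types.h>

/* Bubble-sort the blocks by start value and report where the block that was
 * originally at index 0 ends up.  Tracking only the upward move is enough,
 * as in the hunk: nothing larger than the tracked block ever sits below it,
 * so it can never be swapped back down. */
static int sort_and_track_first(u32 start[], int n)
{
        int i, j, first = 0;

        for (i = n - 1; i > 0; i--) {
                for (j = 0; j < i; j++) {
                        if (start[j] > start[j + 1]) {
                                u32 tmp = start[j];

                                start[j] = start[j + 1];
                                start[j + 1] = tmp;
                                if (j == first)         /* tracked block moved up */
                                        first = j + 1;
                        }
                }
        }
        return first;
}
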
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 12de90a5047c..f51d6404c61c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -191,7 +191,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
191 tmp = ip_route_connect(&rt, nexthop, inet->saddr, 191 tmp = ip_route_connect(&rt, nexthop, inet->saddr,
192 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, 192 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
193 IPPROTO_TCP, 193 IPPROTO_TCP,
194 inet->sport, usin->sin_port, sk); 194 inet->sport, usin->sin_port, sk, 1);
195 if (tmp < 0) 195 if (tmp < 0)
196 return tmp; 196 return tmp;
197 197
@@ -502,11 +502,11 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
502 struct tcphdr *th = skb->h.th; 502 struct tcphdr *th = skb->h.th;
503 503
504 if (skb->ip_summed == CHECKSUM_PARTIAL) { 504 if (skb->ip_summed == CHECKSUM_PARTIAL) {
505 th->check = ~tcp_v4_check(th, len, 505 th->check = ~tcp_v4_check(len, inet->saddr,
506 inet->saddr, inet->daddr, 0); 506 inet->daddr, 0);
507 skb->csum_offset = offsetof(struct tcphdr, check); 507 skb->csum_offset = offsetof(struct tcphdr, check);
508 } else { 508 } else {
509 th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr, 509 th->check = tcp_v4_check(len, inet->saddr, inet->daddr,
510 csum_partial((char *)th, 510 csum_partial((char *)th,
511 th->doff << 2, 511 th->doff << 2,
512 skb->csum)); 512 skb->csum));
@@ -525,7 +525,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
525 th = skb->h.th; 525 th = skb->h.th;
526 526
527 th->check = 0; 527 th->check = 0;
528 th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0); 528 th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
529 skb->csum_offset = offsetof(struct tcphdr, check); 529 skb->csum_offset = offsetof(struct tcphdr, check);
530 skb->ip_summed = CHECKSUM_PARTIAL; 530 skb->ip_summed = CHECKSUM_PARTIAL;
531 return 0; 531 return 0;
@@ -747,7 +747,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
747 if (skb) { 747 if (skb) {
748 struct tcphdr *th = skb->h.th; 748 struct tcphdr *th = skb->h.th;
749 749
750 th->check = tcp_v4_check(th, skb->len, 750 th->check = tcp_v4_check(skb->len,
751 ireq->loc_addr, 751 ireq->loc_addr,
752 ireq->rmt_addr, 752 ireq->rmt_addr,
753 csum_partial((char *)th, skb->len, 753 csum_partial((char *)th, skb->len,
@@ -1514,7 +1514,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1514static __sum16 tcp_v4_checksum_init(struct sk_buff *skb) 1514static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1515{ 1515{
1516 if (skb->ip_summed == CHECKSUM_COMPLETE) { 1516 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1517 if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr, 1517 if (!tcp_v4_check(skb->len, skb->nh.iph->saddr,
1518 skb->nh.iph->daddr, skb->csum)) { 1518 skb->nh.iph->daddr, skb->csum)) {
1519 skb->ip_summed = CHECKSUM_UNNECESSARY; 1519 skb->ip_summed = CHECKSUM_UNNECESSARY;
1520 return 0; 1520 return 0;
@@ -2051,7 +2051,7 @@ static void *established_get_first(struct seq_file *seq)
2051 } 2051 }
2052 st->state = TCP_SEQ_STATE_TIME_WAIT; 2052 st->state = TCP_SEQ_STATE_TIME_WAIT;
2053 inet_twsk_for_each(tw, node, 2053 inet_twsk_for_each(tw, node,
2054 &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) { 2054 &tcp_hashinfo.ehash[st->bucket].twchain) {
2055 if (tw->tw_family != st->family) { 2055 if (tw->tw_family != st->family) {
2056 continue; 2056 continue;
2057 } 2057 }
@@ -2107,7 +2107,7 @@ get_tw:
2107 } 2107 }
2108 2108
2109 st->state = TCP_SEQ_STATE_TIME_WAIT; 2109 st->state = TCP_SEQ_STATE_TIME_WAIT;
2110 tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain); 2110 tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
2111 goto get_tw; 2111 goto get_tw;
2112found: 2112found:
2113 cur = sk; 2113 cur = sk;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 975f4472af29..58b7111523f4 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -965,7 +965,8 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *sk
965 u32 in_flight, cwnd; 965 u32 in_flight, cwnd;
966 966
967 /* Don't be strict about the congestion window for the final FIN. */ 967 /* Don't be strict about the congestion window for the final FIN. */
968 if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) 968 if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
969 tcp_skb_pcount(skb) == 1)
969 return 1; 970 return 1;
970 971
971 in_flight = tcp_packets_in_flight(tp); 972 in_flight = tcp_packets_in_flight(tp);
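
tcp_cwnd_test() used to let any skb carrying a FIN bypass the congestion-window check; with TSO such an skb can stand for many segments, so the exemption is narrowed to a FIN that is a single segment. A hedged restatement of the new rule as a predicate (the surrounding skb handling is elided):

/* Only a lone FIN segment may ignore the congestion window; a multi-segment
 * (TSO) skb that merely ends with FIN must not. */
static inline int fin_may_bypass_cwnd(const struct sk_buff *skb)
{
        return (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
               tcp_skb_pcount(skb) == 1;
}
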
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index cfff930f2baf..8b54c68a0d12 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -629,7 +629,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
629 { .sport = inet->sport, 629 { .sport = inet->sport,
630 .dport = dport } } }; 630 .dport = dport } } };
631 security_sk_classify_flow(sk, &fl); 631 security_sk_classify_flow(sk, &fl);
632 err = ip_route_output_flow(&rt, &fl, sk, !(msg->msg_flags&MSG_DONTWAIT)); 632 err = ip_route_output_flow(&rt, &fl, sk, 1);
633 if (err) 633 if (err)
634 goto out; 634 goto out;
635 635
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index e23c21d31a53..e54c5494c88f 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -23,6 +23,12 @@ static inline void ipip_ecn_decapsulate(struct sk_buff *skb)
23 IP_ECN_set_ce(inner_iph); 23 IP_ECN_set_ce(inner_iph);
24} 24}
25 25
26static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
27{
28 if (INET_ECN_is_ce(iph->tos))
29 IP6_ECN_set_ce(skb->nh.ipv6h);
30}
31
26/* Add encapsulation header. 32/* Add encapsulation header.
27 * 33 *
28 * The top IP header will be constructed per RFC 2401. The following fields 34 * The top IP header will be constructed per RFC 2401. The following fields
@@ -36,6 +42,7 @@ static inline void ipip_ecn_decapsulate(struct sk_buff *skb)
36static int xfrm4_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) 42static int xfrm4_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
37{ 43{
38 struct dst_entry *dst = skb->dst; 44 struct dst_entry *dst = skb->dst;
45 struct xfrm_dst *xdst = (struct xfrm_dst*)dst;
39 struct iphdr *iph, *top_iph; 46 struct iphdr *iph, *top_iph;
40 int flags; 47 int flags;
41 48
@@ -48,15 +55,27 @@ static int xfrm4_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
48 top_iph->ihl = 5; 55 top_iph->ihl = 5;
49 top_iph->version = 4; 56 top_iph->version = 4;
50 57
58 flags = x->props.flags;
59
51 /* DS disclosed */ 60 /* DS disclosed */
52 top_iph->tos = INET_ECN_encapsulate(iph->tos, iph->tos); 61 if (xdst->route->ops->family == AF_INET) {
62 top_iph->protocol = IPPROTO_IPIP;
63 top_iph->tos = INET_ECN_encapsulate(iph->tos, iph->tos);
64 top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
65 0 : (iph->frag_off & htons(IP_DF));
66 }
67#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
68 else {
69 struct ipv6hdr *ipv6h = (struct ipv6hdr*)iph;
70 top_iph->protocol = IPPROTO_IPV6;
71 top_iph->tos = INET_ECN_encapsulate(iph->tos, ipv6_get_dsfield(ipv6h));
72 top_iph->frag_off = 0;
73 }
74#endif
53 75
54 flags = x->props.flags;
55 if (flags & XFRM_STATE_NOECN) 76 if (flags & XFRM_STATE_NOECN)
56 IP_ECN_clear(top_iph); 77 IP_ECN_clear(top_iph);
57 78
58 top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
59 0 : (iph->frag_off & htons(IP_DF));
60 if (!top_iph->frag_off) 79 if (!top_iph->frag_off)
61 __ip_select_ident(top_iph, dst->child, 0); 80 __ip_select_ident(top_iph, dst->child, 0);
62 81
@@ -64,7 +83,6 @@ static int xfrm4_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
64 83
65 top_iph->saddr = x->props.saddr.a4; 84 top_iph->saddr = x->props.saddr.a4;
66 top_iph->daddr = x->id.daddr.a4; 85 top_iph->daddr = x->id.daddr.a4;
67 top_iph->protocol = IPPROTO_IPIP;
68 86
69 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); 87 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
70 return 0; 88 return 0;
@@ -75,8 +93,16 @@ static int xfrm4_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
75 struct iphdr *iph = skb->nh.iph; 93 struct iphdr *iph = skb->nh.iph;
76 int err = -EINVAL; 94 int err = -EINVAL;
77 95
78 if (iph->protocol != IPPROTO_IPIP) 96 switch(iph->protocol){
79 goto out; 97 case IPPROTO_IPIP:
98#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
99 case IPPROTO_IPV6:
100 break;
101#endif
102 default:
103 goto out;
104 }
105
80 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 106 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
81 goto out; 107 goto out;
82 108
@@ -84,10 +110,19 @@ static int xfrm4_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
84 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 110 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
85 goto out; 111 goto out;
86 112
87 if (x->props.flags & XFRM_STATE_DECAP_DSCP) 113 if (iph->protocol == IPPROTO_IPIP) {
88 ipv4_copy_dscp(iph, skb->h.ipiph); 114 if (x->props.flags & XFRM_STATE_DECAP_DSCP)
89 if (!(x->props.flags & XFRM_STATE_NOECN)) 115 ipv4_copy_dscp(iph, skb->h.ipiph);
90 ipip_ecn_decapsulate(skb); 116 if (!(x->props.flags & XFRM_STATE_NOECN))
117 ipip_ecn_decapsulate(skb);
118 }
119#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
120 else {
121 if (!(x->props.flags & XFRM_STATE_NOECN))
122 ipip6_ecn_decapsulate(iph, skb);
123 skb->protocol = htons(ETH_P_IPV6);
124 }
125#endif
91 skb->mac.raw = memmove(skb->data - skb->mac_len, 126 skb->mac.raw = memmove(skb->data - skb->mac_len,
92 skb->mac.raw, skb->mac_len); 127 skb->mac.raw, skb->mac_len);
93 skb->nh.raw = skb->data; 128 skb->nh.raw = skb->data;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index fb9f69c616f5..699f27ce62ad 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -72,13 +72,11 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
72 struct dst_entry *dst, *dst_prev; 72 struct dst_entry *dst, *dst_prev;
73 struct rtable *rt0 = (struct rtable*)(*dst_p); 73 struct rtable *rt0 = (struct rtable*)(*dst_p);
74 struct rtable *rt = rt0; 74 struct rtable *rt = rt0;
75 __be32 remote = fl->fl4_dst;
76 __be32 local = fl->fl4_src;
77 struct flowi fl_tunnel = { 75 struct flowi fl_tunnel = {
78 .nl_u = { 76 .nl_u = {
79 .ip4_u = { 77 .ip4_u = {
80 .saddr = local, 78 .saddr = fl->fl4_src,
81 .daddr = remote, 79 .daddr = fl->fl4_dst,
82 .tos = fl->fl4_tos 80 .tos = fl->fl4_tos
83 } 81 }
84 } 82 }
@@ -94,7 +92,6 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
94 for (i = 0; i < nx; i++) { 92 for (i = 0; i < nx; i++) {
95 struct dst_entry *dst1 = dst_alloc(&xfrm4_dst_ops); 93 struct dst_entry *dst1 = dst_alloc(&xfrm4_dst_ops);
96 struct xfrm_dst *xdst; 94 struct xfrm_dst *xdst;
97 int tunnel = 0;
98 95
99 if (unlikely(dst1 == NULL)) { 96 if (unlikely(dst1 == NULL)) {
100 err = -ENOBUFS; 97 err = -ENOBUFS;
@@ -116,19 +113,28 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
116 113
117 dst1->next = dst_prev; 114 dst1->next = dst_prev;
118 dst_prev = dst1; 115 dst_prev = dst1;
119 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { 116
120 remote = xfrm[i]->id.daddr.a4;
121 local = xfrm[i]->props.saddr.a4;
122 tunnel = 1;
123 }
124 header_len += xfrm[i]->props.header_len; 117 header_len += xfrm[i]->props.header_len;
125 trailer_len += xfrm[i]->props.trailer_len; 118 trailer_len += xfrm[i]->props.trailer_len;
126 119
127 if (tunnel) { 120 if (xfrm[i]->props.mode == XFRM_MODE_TUNNEL) {
128 fl_tunnel.fl4_src = local; 121 unsigned short encap_family = xfrm[i]->props.family;
129 fl_tunnel.fl4_dst = remote; 122 switch(encap_family) {
123 case AF_INET:
124 fl_tunnel.fl4_dst = xfrm[i]->id.daddr.a4;
125 fl_tunnel.fl4_src = xfrm[i]->props.saddr.a4;
126 break;
127#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
128 case AF_INET6:
129 ipv6_addr_copy(&fl_tunnel.fl6_dst, (struct in6_addr*)&xfrm[i]->id.daddr.a6);
130 ipv6_addr_copy(&fl_tunnel.fl6_src, (struct in6_addr*)&xfrm[i]->props.saddr.a6);
131 break;
132#endif
133 default:
134 BUG_ON(1);
135 }
130 err = xfrm_dst_lookup((struct xfrm_dst **)&rt, 136 err = xfrm_dst_lookup((struct xfrm_dst **)&rt,
131 &fl_tunnel, AF_INET); 137 &fl_tunnel, encap_family);
132 if (err) 138 if (err)
133 goto error; 139 goto error;
134 } else 140 } else
@@ -145,6 +151,7 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
145 i = 0; 151 i = 0;
146 for (; dst_prev != &rt->u.dst; dst_prev = dst_prev->child) { 152 for (; dst_prev != &rt->u.dst; dst_prev = dst_prev->child) {
147 struct xfrm_dst *x = (struct xfrm_dst*)dst_prev; 153 struct xfrm_dst *x = (struct xfrm_dst*)dst_prev;
154 struct xfrm_state_afinfo *afinfo;
148 x->u.rt.fl = *fl; 155 x->u.rt.fl = *fl;
149 156
150 dst_prev->xfrm = xfrm[i++]; 157 dst_prev->xfrm = xfrm[i++];
@@ -162,8 +169,18 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
162 /* Copy neighbout for reachability confirmation */ 169 /* Copy neighbout for reachability confirmation */
163 dst_prev->neighbour = neigh_clone(rt->u.dst.neighbour); 170 dst_prev->neighbour = neigh_clone(rt->u.dst.neighbour);
164 dst_prev->input = rt->u.dst.input; 171 dst_prev->input = rt->u.dst.input;
165 dst_prev->output = xfrm4_output; 172 /* XXX: When IPv6 module can be unloaded, we should manage reference
166 if (rt->peer) 173 * to xfrm6_output in afinfo->output. Miyazawa
174 * */
175 afinfo = xfrm_state_get_afinfo(dst_prev->xfrm->props.family);
176 if (!afinfo) {
177 dst = *dst_p;
178 err = -EAFNOSUPPORT;
179 goto error;
180 }
181 dst_prev->output = afinfo->output;
182 xfrm_state_put_afinfo(afinfo);
183 if (dst_prev->xfrm->props.family == AF_INET && rt->peer)
167 atomic_inc(&rt->peer->refcnt); 184 atomic_inc(&rt->peer->refcnt);
168 x->u.rt.peer = rt->peer; 185 x->u.rt.peer = rt->peer;
169 /* Sheit... I remember I did this right. Apparently, 186 /* Sheit... I remember I did this right. Apparently,
@@ -274,7 +291,7 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
274 291
275 if (likely(xdst->u.rt.idev)) 292 if (likely(xdst->u.rt.idev))
276 in_dev_put(xdst->u.rt.idev); 293 in_dev_put(xdst->u.rt.idev);
277 if (likely(xdst->u.rt.peer)) 294 if (dst->xfrm->props.family == AF_INET && likely(xdst->u.rt.peer))
278 inet_putpeer(xdst->u.rt.peer); 295 inet_putpeer(xdst->u.rt.peer);
279 xfrm_dst_destroy(xdst); 296 xfrm_dst_destroy(xdst);
280} 297}
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 3cc3df0c6ece..93e2c061cdda 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -51,6 +51,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
51 .family = AF_INET, 51 .family = AF_INET,
52 .init_flags = xfrm4_init_flags, 52 .init_flags = xfrm4_init_flags,
53 .init_tempsel = __xfrm4_init_tempsel, 53 .init_tempsel = __xfrm4_init_tempsel,
54 .output = xfrm4_output,
54}; 55};
55 56
56void __init xfrm4_state_init(void) 57void __init xfrm4_state_init(void)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e3854696988d..fe5e1d833871 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3117,7 +3117,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
3117 3117
3118 nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags); 3118 nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags);
3119 if (nlh == NULL) 3119 if (nlh == NULL)
3120 return -ENOBUFS; 3120 return -EMSGSIZE;
3121 3121
3122 put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope), 3122 put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
3123 ifa->idev->dev->ifindex); 3123 ifa->idev->dev->ifindex);
@@ -3137,8 +3137,10 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
3137 } 3137 }
3138 3138
3139 if (nla_put(skb, IFA_ADDRESS, 16, &ifa->addr) < 0 || 3139 if (nla_put(skb, IFA_ADDRESS, 16, &ifa->addr) < 0 ||
3140 put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0) 3140 put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0) {
3141 return nlmsg_cancel(skb, nlh); 3141 nlmsg_cancel(skb, nlh);
3142 return -EMSGSIZE;
3143 }
3142 3144
3143 return nlmsg_end(skb, nlh); 3145 return nlmsg_end(skb, nlh);
3144} 3146}
@@ -3155,13 +3157,15 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
3155 3157
3156 nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags); 3158 nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags);
3157 if (nlh == NULL) 3159 if (nlh == NULL)
3158 return -ENOBUFS; 3160 return -EMSGSIZE;
3159 3161
3160 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex); 3162 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
3161 if (nla_put(skb, IFA_MULTICAST, 16, &ifmca->mca_addr) < 0 || 3163 if (nla_put(skb, IFA_MULTICAST, 16, &ifmca->mca_addr) < 0 ||
3162 put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp, 3164 put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
3163 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) 3165 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
3164 return nlmsg_cancel(skb, nlh); 3166 nlmsg_cancel(skb, nlh);
3167 return -EMSGSIZE;
3168 }
3165 3169
3166 return nlmsg_end(skb, nlh); 3170 return nlmsg_end(skb, nlh);
3167} 3171}
@@ -3178,13 +3182,15 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
3178 3182
3179 nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags); 3183 nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags);
3180 if (nlh == NULL) 3184 if (nlh == NULL)
3181 return -ENOBUFS; 3185 return -EMSGSIZE;
3182 3186
3183 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex); 3187 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
3184 if (nla_put(skb, IFA_ANYCAST, 16, &ifaca->aca_addr) < 0 || 3188 if (nla_put(skb, IFA_ANYCAST, 16, &ifaca->aca_addr) < 0 ||
3185 put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp, 3189 put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
3186 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) 3190 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
3187 return nlmsg_cancel(skb, nlh); 3191 nlmsg_cancel(skb, nlh);
3192 return -EMSGSIZE;
3193 }
3188 3194
3189 return nlmsg_end(skb, nlh); 3195 return nlmsg_end(skb, nlh);
3190} 3196}
@@ -3334,9 +3340,12 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh,
3334 3340
3335 err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).pid, 3341 err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).pid,
3336 nlh->nlmsg_seq, RTM_NEWADDR, 0); 3342 nlh->nlmsg_seq, RTM_NEWADDR, 0);
3337 /* failure implies BUG in inet6_ifaddr_msgsize() */ 3343 if (err < 0) {
3338 BUG_ON(err < 0); 3344 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
3339 3345 WARN_ON(err == -EMSGSIZE);
3346 kfree_skb(skb);
3347 goto errout_ifa;
3348 }
3340 err = rtnl_unicast(skb, NETLINK_CB(in_skb).pid); 3349 err = rtnl_unicast(skb, NETLINK_CB(in_skb).pid);
3341errout_ifa: 3350errout_ifa:
3342 in6_ifa_put(ifa); 3351 in6_ifa_put(ifa);
@@ -3354,9 +3363,12 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
3354 goto errout; 3363 goto errout;
3355 3364
3356 err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0); 3365 err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0);
3357 /* failure implies BUG in inet6_ifaddr_msgsize() */ 3366 if (err < 0) {
3358 BUG_ON(err < 0); 3367 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
3359 3368 WARN_ON(err == -EMSGSIZE);
3369 kfree_skb(skb);
3370 goto errout;
3371 }
3360 err = rtnl_notify(skb, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC); 3372 err = rtnl_notify(skb, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
3361errout: 3373errout:
3362 if (err < 0) 3374 if (err < 0)
@@ -3426,7 +3438,7 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
3426 3438
3427 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags); 3439 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
3428 if (nlh == NULL) 3440 if (nlh == NULL)
3429 return -ENOBUFS; 3441 return -EMSGSIZE;
3430 3442
3431 hdr = nlmsg_data(nlh); 3443 hdr = nlmsg_data(nlh);
3432 hdr->ifi_family = AF_INET6; 3444 hdr->ifi_family = AF_INET6;
@@ -3469,7 +3481,8 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
3469 return nlmsg_end(skb, nlh); 3481 return nlmsg_end(skb, nlh);
3470 3482
3471nla_put_failure: 3483nla_put_failure:
3472 return nlmsg_cancel(skb, nlh); 3484 nlmsg_cancel(skb, nlh);
3485 return -EMSGSIZE;
3473} 3486}
3474 3487
3475static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 3488static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
@@ -3507,9 +3520,12 @@ void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
3507 goto errout; 3520 goto errout;
3508 3521
3509 err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0); 3522 err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
3510 /* failure implies BUG in inet6_if_nlmsg_size() */ 3523 if (err < 0) {
3511 BUG_ON(err < 0); 3524 /* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */
3512 3525 WARN_ON(err == -EMSGSIZE);
3526 kfree_skb(skb);
3527 goto errout;
3528 }
3513 err = rtnl_notify(skb, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC); 3529 err = rtnl_notify(skb, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
3514errout: 3530errout:
3515 if (err < 0) 3531 if (err < 0)
@@ -3533,7 +3549,7 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
3533 3549
3534 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*pmsg), flags); 3550 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*pmsg), flags);
3535 if (nlh == NULL) 3551 if (nlh == NULL)
3536 return -ENOBUFS; 3552 return -EMSGSIZE;
3537 3553
3538 pmsg = nlmsg_data(nlh); 3554 pmsg = nlmsg_data(nlh);
3539 pmsg->prefix_family = AF_INET6; 3555 pmsg->prefix_family = AF_INET6;
@@ -3558,7 +3574,8 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
3558 return nlmsg_end(skb, nlh); 3574 return nlmsg_end(skb, nlh);
3559 3575
3560nla_put_failure: 3576nla_put_failure:
3561 return nlmsg_cancel(skb, nlh); 3577 nlmsg_cancel(skb, nlh);
3578 return -EMSGSIZE;
3562} 3579}
3563 3580
3564static void inet6_prefix_notify(int event, struct inet6_dev *idev, 3581static void inet6_prefix_notify(int event, struct inet6_dev *idev,
@@ -3572,9 +3589,12 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
3572 goto errout; 3589 goto errout;
3573 3590
3574 err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0); 3591 err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
3575 /* failure implies BUG in inet6_prefix_nlmsg_size() */ 3592 if (err < 0) {
3576 BUG_ON(err < 0); 3593 /* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */
3577 3594 WARN_ON(err == -EMSGSIZE);
3595 kfree_skb(skb);
3596 goto errout;
3597 }
3578 err = rtnl_notify(skb, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC); 3598 err = rtnl_notify(skb, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
3579errout: 3599errout:
3580 if (err < 0) 3600 if (err < 0)
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 5c94fea90e97..ecde30140f4a 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -178,7 +178,7 @@ ipv4_connected:
178 if (final_p) 178 if (final_p)
179 ipv6_addr_copy(&fl.fl6_dst, final_p); 179 ipv6_addr_copy(&fl.fl6_dst, final_p);
180 180
181 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) 181 if ((err = xfrm_lookup(&dst, &fl, sk, 1)) < 0)
182 goto out; 182 goto out;
183 183
184 /* source address lookup done in ip6_dst_lookup */ 184 /* source address lookup done in ip6_dst_lookup */
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index b7e5bae0e347..e61116949bee 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -79,7 +79,7 @@ struct sock *__inet6_lookup_established(struct inet_hashinfo *hashinfo,
79 goto hit; /* You sunk my battleship! */ 79 goto hit; /* You sunk my battleship! */
80 } 80 }
81 /* Must check for a TIME_WAIT'er before going to listener hash. */ 81 /* Must check for a TIME_WAIT'er before going to listener hash. */
82 sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) { 82 sk_for_each(sk, node, &head->twchain) {
83 const struct inet_timewait_sock *tw = inet_twsk(sk); 83 const struct inet_timewait_sock *tw = inet_twsk(sk);
84 84
85 if(*((__portpair *)&(tw->tw_dport)) == ports && 85 if(*((__portpair *)&(tw->tw_dport)) == ports &&
@@ -183,7 +183,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
183 write_lock(&head->lock); 183 write_lock(&head->lock);
184 184
185 /* Check TIME-WAIT sockets first. */ 185 /* Check TIME-WAIT sockets first. */
186 sk_for_each(sk2, node, &(head + hinfo->ehash_size)->chain) { 186 sk_for_each(sk2, node, &head->twchain) {
187 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk2); 187 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk2);
188 188
189 tw = inet_twsk(sk2); 189 tw = inet_twsk(sk2);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 8d918348f5bb..2b9e3bb7da65 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -999,7 +999,8 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
999 break; 999 break;
1000 dev = t->dev; 1000 dev = t->dev;
1001 } 1001 }
1002 err = unregister_netdevice(dev); 1002 err = 0;
1003 unregister_netdevice(dev);
1003 break; 1004 break;
1004 default: 1005 default:
1005 err = -EINVAL; 1006 err = -EINVAL;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 882cde4b4047..e3ec21695832 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1582,6 +1582,8 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1582 skb = add_grhead(skb, pmc, type, &pgr); 1582 skb = add_grhead(skb, pmc, type, &pgr);
1583 first = 0; 1583 first = 0;
1584 } 1584 }
1585 if (!skb)
1586 return NULL;
1585 psrc = (struct in6_addr *)skb_put(skb, sizeof(*psrc)); 1587 psrc = (struct in6_addr *)skb_put(skb, sizeof(*psrc));
1586 *psrc = psf->sf_addr; 1588 *psrc = psf->sf_addr;
1587 scount++; stotal++; 1589 scount++; stotal++;
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index be7dd7db65d7..681bb077eacc 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -89,7 +89,6 @@ static int mip6_mh_len(int type)
89int mip6_mh_filter(struct sock *sk, struct sk_buff *skb) 89int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
90{ 90{
91 struct ip6_mh *mh; 91 struct ip6_mh *mh;
92 int mhlen;
93 92
94 if (!pskb_may_pull(skb, (skb->h.raw - skb->data) + 8) || 93 if (!pskb_may_pull(skb, (skb->h.raw - skb->data) + 8) ||
95 !pskb_may_pull(skb, (skb->h.raw - skb->data) + ((skb->h.raw[1] + 1) << 3))) 94 !pskb_may_pull(skb, (skb->h.raw - skb->data) + ((skb->h.raw[1] + 1) << 3)))
@@ -103,31 +102,6 @@ int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
103 mip6_param_prob(skb, 0, (&mh->ip6mh_hdrlen) - skb->nh.raw); 102 mip6_param_prob(skb, 0, (&mh->ip6mh_hdrlen) - skb->nh.raw);
104 return -1; 103 return -1;
105 } 104 }
106 mhlen = (mh->ip6mh_hdrlen + 1) << 3;
107
108 if (skb->ip_summed == CHECKSUM_COMPLETE) {
109 skb->ip_summed = CHECKSUM_UNNECESSARY;
110 if (csum_ipv6_magic(&skb->nh.ipv6h->saddr,
111 &skb->nh.ipv6h->daddr,
112 mhlen, IPPROTO_MH,
113 skb->csum)) {
114 LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH hw checksum failed\n");
115 skb->ip_summed = CHECKSUM_NONE;
116 }
117 }
118 if (skb->ip_summed == CHECKSUM_NONE) {
119 if (csum_ipv6_magic(&skb->nh.ipv6h->saddr,
120 &skb->nh.ipv6h->daddr,
121 mhlen, IPPROTO_MH,
122 skb_checksum(skb, 0, mhlen, 0))) {
123 LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH checksum failed "
124 "[" NIP6_FMT " > " NIP6_FMT "]\n",
125 NIP6(skb->nh.ipv6h->saddr),
126 NIP6(skb->nh.ipv6h->daddr));
127 return -1;
128 }
129 skb->ip_summed = CHECKSUM_UNNECESSARY;
130 }
131 105
132 if (mh->ip6mh_proto != IPPROTO_NONE) { 106 if (mh->ip6mh_proto != IPPROTO_NONE) {
133 LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n", 107 LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n",
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index adcd6131df2a..cd549aea84f0 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -114,6 +114,14 @@ config IP6_NF_MATCH_AH
114 114
115 To compile it as a module, choose M here. If unsure, say N. 115 To compile it as a module, choose M here. If unsure, say N.
116 116
117config IP6_NF_MATCH_MH
118 tristate "MH match support"
119 depends on IP6_NF_IPTABLES
120 help
121 This module allows one to match MH packets.
122
123 To compile it as a module, choose M here. If unsure, say N.
124
117config IP6_NF_MATCH_EUI64 125config IP6_NF_MATCH_EUI64
118 tristate "EUI64 address check" 126 tristate "EUI64 address check"
119 depends on IP6_NF_IPTABLES 127 depends on IP6_NF_IPTABLES
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index ac1dfebde175..4513eab77397 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
19obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o 19obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
20obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o 20obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o
21obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o 21obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
22obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o
22 23
23# objects for l3 independent conntrack 24# objects for l3 independent conntrack
24nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o nf_conntrack_reasm.o 25nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o nf_conntrack_reasm.o
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 99502c5da4c4..7083e1cfb2f5 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -530,7 +530,7 @@ check_match(struct ip6t_entry_match *m,
530 unsigned int hookmask, 530 unsigned int hookmask,
531 unsigned int *i) 531 unsigned int *i)
532{ 532{
533 struct ip6t_match *match; 533 struct xt_match *match;
534 int ret; 534 int ret;
535 535
536 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name, 536 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
@@ -564,14 +564,14 @@ err:
564 return ret; 564 return ret;
565} 565}
566 566
567static struct ip6t_target ip6t_standard_target; 567static struct xt_target ip6t_standard_target;
568 568
569static inline int 569static inline int
570check_entry(struct ip6t_entry *e, const char *name, unsigned int size, 570check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
571 unsigned int *i) 571 unsigned int *i)
572{ 572{
573 struct ip6t_entry_target *t; 573 struct ip6t_entry_target *t;
574 struct ip6t_target *target; 574 struct xt_target *target;
575 int ret; 575 int ret;
576 unsigned int j; 576 unsigned int j;
577 577
@@ -1348,13 +1348,13 @@ icmp6_checkentry(const char *tablename,
1348} 1348}
1349 1349
1350/* The built-in targets: standard (NULL) and error. */ 1350/* The built-in targets: standard (NULL) and error. */
1351static struct ip6t_target ip6t_standard_target = { 1351static struct xt_target ip6t_standard_target = {
1352 .name = IP6T_STANDARD_TARGET, 1352 .name = IP6T_STANDARD_TARGET,
1353 .targetsize = sizeof(int), 1353 .targetsize = sizeof(int),
1354 .family = AF_INET6, 1354 .family = AF_INET6,
1355}; 1355};
1356 1356
1357static struct ip6t_target ip6t_error_target = { 1357static struct xt_target ip6t_error_target = {
1358 .name = IP6T_ERROR_TARGET, 1358 .name = IP6T_ERROR_TARGET,
1359 .target = ip6t_error, 1359 .target = ip6t_error,
1360 .targetsize = IP6T_FUNCTION_MAXNAMELEN, 1360 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
@@ -1371,7 +1371,7 @@ static struct nf_sockopt_ops ip6t_sockopts = {
1371 .get = do_ip6t_get_ctl, 1371 .get = do_ip6t_get_ctl,
1372}; 1372};
1373 1373
1374static struct ip6t_match icmp6_matchstruct = { 1374static struct xt_match icmp6_matchstruct = {
1375 .name = "icmp6", 1375 .name = "icmp6",
1376 .match = &icmp6_match, 1376 .match = &icmp6_match,
1377 .matchsize = sizeof(struct ip6t_icmp), 1377 .matchsize = sizeof(struct ip6t_icmp),
diff --git a/net/ipv6/netfilter/ip6t_HL.c b/net/ipv6/netfilter/ip6t_HL.c
index 435750f664dd..04e500172fb4 100644
--- a/net/ipv6/netfilter/ip6t_HL.c
+++ b/net/ipv6/netfilter/ip6t_HL.c
@@ -9,12 +9,13 @@
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/skbuff.h> 10#include <linux/skbuff.h>
11#include <linux/ip.h> 11#include <linux/ip.h>
12#include <linux/ipv6.h>
12 13
13#include <linux/netfilter_ipv6/ip6_tables.h> 14#include <linux/netfilter/x_tables.h>
14#include <linux/netfilter_ipv6/ip6t_HL.h> 15#include <linux/netfilter_ipv6/ip6t_HL.h>
15 16
16MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>"); 17MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>");
17MODULE_DESCRIPTION("IP tables Hop Limit modification module"); 18MODULE_DESCRIPTION("IP6 tables Hop Limit modification module");
18MODULE_LICENSE("GPL"); 19MODULE_LICENSE("GPL");
19 20
20static unsigned int ip6t_hl_target(struct sk_buff **pskb, 21static unsigned int ip6t_hl_target(struct sk_buff **pskb,
@@ -52,10 +53,9 @@ static unsigned int ip6t_hl_target(struct sk_buff **pskb,
52 break; 53 break;
53 } 54 }
54 55
55 if (new_hl != ip6h->hop_limit) 56 ip6h->hop_limit = new_hl;
56 ip6h->hop_limit = new_hl;
57 57
58 return IP6T_CONTINUE; 58 return XT_CONTINUE;
59} 59}
60 60
61static int ip6t_hl_checkentry(const char *tablename, 61static int ip6t_hl_checkentry(const char *tablename,
@@ -79,8 +79,9 @@ static int ip6t_hl_checkentry(const char *tablename,
79 return 1; 79 return 1;
80} 80}
81 81
82static struct ip6t_target ip6t_HL = { 82static struct xt_target ip6t_HL = {
83 .name = "HL", 83 .name = "HL",
84 .family = AF_INET6,
84 .target = ip6t_hl_target, 85 .target = ip6t_hl_target,
85 .targetsize = sizeof(struct ip6t_HL_info), 86 .targetsize = sizeof(struct ip6t_HL_info),
86 .table = "mangle", 87 .table = "mangle",
@@ -90,12 +91,12 @@ static struct ip6t_target ip6t_HL = {
90 91
91static int __init ip6t_hl_init(void) 92static int __init ip6t_hl_init(void)
92{ 93{
93 return ip6t_register_target(&ip6t_HL); 94 return xt_register_target(&ip6t_HL);
94} 95}
95 96
96static void __exit ip6t_hl_fini(void) 97static void __exit ip6t_hl_fini(void)
97{ 98{
98 ip6t_unregister_target(&ip6t_HL); 99 xt_unregister_target(&ip6t_HL);
99} 100}
100 101
101module_init(ip6t_hl_init); 102module_init(ip6t_hl_init);
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index 33b1faa90d74..5587a77b884c 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -21,6 +21,7 @@
21#include <net/tcp.h> 21#include <net/tcp.h>
22#include <net/ipv6.h> 22#include <net/ipv6.h>
23#include <linux/netfilter.h> 23#include <linux/netfilter.h>
24#include <linux/netfilter/x_tables.h>
24#include <linux/netfilter_ipv6/ip6_tables.h> 25#include <linux/netfilter_ipv6/ip6_tables.h>
25 26
26MODULE_AUTHOR("Jan Rekorajski <baggins@pld.org.pl>"); 27MODULE_AUTHOR("Jan Rekorajski <baggins@pld.org.pl>");
@@ -442,7 +443,7 @@ ip6t_log_target(struct sk_buff **pskb,
442 443
443 ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li, 444 ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
444 loginfo->prefix); 445 loginfo->prefix);
445 return IP6T_CONTINUE; 446 return XT_CONTINUE;
446} 447}
447 448
448 449
@@ -466,8 +467,9 @@ static int ip6t_log_checkentry(const char *tablename,
466 return 1; 467 return 1;
467} 468}
468 469
469static struct ip6t_target ip6t_log_reg = { 470static struct xt_target ip6t_log_reg = {
470 .name = "LOG", 471 .name = "LOG",
472 .family = AF_INET6,
471 .target = ip6t_log_target, 473 .target = ip6t_log_target,
472 .targetsize = sizeof(struct ip6t_log_info), 474 .targetsize = sizeof(struct ip6t_log_info),
473 .checkentry = ip6t_log_checkentry, 475 .checkentry = ip6t_log_checkentry,
@@ -482,8 +484,11 @@ static struct nf_logger ip6t_logger = {
482 484
483static int __init ip6t_log_init(void) 485static int __init ip6t_log_init(void)
484{ 486{
485 if (ip6t_register_target(&ip6t_log_reg)) 487 int ret;
486 return -EINVAL; 488
489 ret = xt_register_target(&ip6t_log_reg);
490 if (ret < 0)
491 return ret;
487 if (nf_log_register(PF_INET6, &ip6t_logger) < 0) { 492 if (nf_log_register(PF_INET6, &ip6t_logger) < 0) {
488 printk(KERN_WARNING "ip6t_LOG: not logging via system console " 493 printk(KERN_WARNING "ip6t_LOG: not logging via system console "
489 "since somebody else already registered for PF_INET6\n"); 494 "since somebody else already registered for PF_INET6\n");
@@ -497,7 +502,7 @@ static int __init ip6t_log_init(void)
497static void __exit ip6t_log_fini(void) 502static void __exit ip6t_log_fini(void)
498{ 503{
499 nf_log_unregister_logger(&ip6t_logger); 504 nf_log_unregister_logger(&ip6t_logger);
500 ip6t_unregister_target(&ip6t_log_reg); 505 xt_unregister_target(&ip6t_log_reg);
501} 506}
502 507
503module_init(ip6t_log_init); 508module_init(ip6t_log_init);
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 311eae82feb3..278349c18793 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -26,6 +26,7 @@
26#include <net/ip6_fib.h> 26#include <net/ip6_fib.h>
27#include <net/ip6_route.h> 27#include <net/ip6_route.h>
28#include <net/flow.h> 28#include <net/flow.h>
29#include <linux/netfilter/x_tables.h>
29#include <linux/netfilter_ipv6/ip6_tables.h> 30#include <linux/netfilter_ipv6/ip6_tables.h>
30#include <linux/netfilter_ipv6/ip6t_REJECT.h> 31#include <linux/netfilter_ipv6/ip6t_REJECT.h>
31 32
@@ -234,7 +235,7 @@ static int check(const char *tablename,
234 } else if (rejinfo->with == IP6T_TCP_RESET) { 235 } else if (rejinfo->with == IP6T_TCP_RESET) {
235 /* Must specify that it's a TCP packet */ 236 /* Must specify that it's a TCP packet */
236 if (e->ipv6.proto != IPPROTO_TCP 237 if (e->ipv6.proto != IPPROTO_TCP
237 || (e->ipv6.invflags & IP6T_INV_PROTO)) { 238 || (e->ipv6.invflags & XT_INV_PROTO)) {
238 DEBUGP("ip6t_REJECT: TCP_RESET illegal for non-tcp\n"); 239 DEBUGP("ip6t_REJECT: TCP_RESET illegal for non-tcp\n");
239 return 0; 240 return 0;
240 } 241 }
@@ -242,8 +243,9 @@ static int check(const char *tablename,
242 return 1; 243 return 1;
243} 244}
244 245
245static struct ip6t_target ip6t_reject_reg = { 246static struct xt_target ip6t_reject_reg = {
246 .name = "REJECT", 247 .name = "REJECT",
248 .family = AF_INET6,
247 .target = reject6_target, 249 .target = reject6_target,
248 .targetsize = sizeof(struct ip6t_reject_info), 250 .targetsize = sizeof(struct ip6t_reject_info),
249 .table = "filter", 251 .table = "filter",
@@ -255,12 +257,12 @@ static struct ip6t_target ip6t_reject_reg = {
255 257
256static int __init ip6t_reject_init(void) 258static int __init ip6t_reject_init(void)
257{ 259{
258 return ip6t_register_target(&ip6t_reject_reg); 260 return xt_register_target(&ip6t_reject_reg);
259} 261}
260 262
261static void __exit ip6t_reject_fini(void) 263static void __exit ip6t_reject_fini(void)
262{ 264{
263 ip6t_unregister_target(&ip6t_reject_reg); 265 xt_unregister_target(&ip6t_reject_reg);
264} 266}
265 267
266module_init(ip6t_reject_init); 268module_init(ip6t_reject_init);
diff --git a/net/ipv6/netfilter/ip6t_ah.c b/net/ipv6/netfilter/ip6t_ah.c
index 46486645eb75..456c76adcbf6 100644
--- a/net/ipv6/netfilter/ip6t_ah.c
+++ b/net/ipv6/netfilter/ip6t_ah.c
@@ -15,6 +15,7 @@
15#include <net/checksum.h> 15#include <net/checksum.h>
16#include <net/ipv6.h> 16#include <net/ipv6.h>
17 17
18#include <linux/netfilter/x_tables.h>
18#include <linux/netfilter_ipv6/ip6_tables.h> 19#include <linux/netfilter_ipv6/ip6_tables.h>
19#include <linux/netfilter_ipv6/ip6t_ah.h> 20#include <linux/netfilter_ipv6/ip6t_ah.h>
20 21
@@ -118,8 +119,9 @@ checkentry(const char *tablename,
118 return 1; 119 return 1;
119} 120}
120 121
121static struct ip6t_match ah_match = { 122static struct xt_match ah_match = {
122 .name = "ah", 123 .name = "ah",
124 .family = AF_INET6,
123 .match = match, 125 .match = match,
124 .matchsize = sizeof(struct ip6t_ah), 126 .matchsize = sizeof(struct ip6t_ah),
125 .checkentry = checkentry, 127 .checkentry = checkentry,
@@ -128,12 +130,12 @@ static struct ip6t_match ah_match = {
128 130
129static int __init ip6t_ah_init(void) 131static int __init ip6t_ah_init(void)
130{ 132{
131 return ip6t_register_match(&ah_match); 133 return xt_register_match(&ah_match);
132} 134}
133 135
134static void __exit ip6t_ah_fini(void) 136static void __exit ip6t_ah_fini(void)
135{ 137{
136 ip6t_unregister_match(&ah_match); 138 xt_unregister_match(&ah_match);
137} 139}
138 140
139module_init(ip6t_ah_init); 141module_init(ip6t_ah_init);
diff --git a/net/ipv6/netfilter/ip6t_eui64.c b/net/ipv6/netfilter/ip6t_eui64.c
index 4f6b84c8f4ab..967bed71d4a8 100644
--- a/net/ipv6/netfilter/ip6t_eui64.c
+++ b/net/ipv6/netfilter/ip6t_eui64.c
@@ -12,6 +12,7 @@
12#include <linux/ipv6.h> 12#include <linux/ipv6.h>
13#include <linux/if_ether.h> 13#include <linux/if_ether.h>
14 14
15#include <linux/netfilter/x_tables.h>
15#include <linux/netfilter_ipv6/ip6_tables.h> 16#include <linux/netfilter_ipv6/ip6_tables.h>
16 17
17MODULE_DESCRIPTION("IPv6 EUI64 address checking match"); 18MODULE_DESCRIPTION("IPv6 EUI64 address checking match");
@@ -61,8 +62,9 @@ match(const struct sk_buff *skb,
61 return 0; 62 return 0;
62} 63}
63 64
64static struct ip6t_match eui64_match = { 65static struct xt_match eui64_match = {
65 .name = "eui64", 66 .name = "eui64",
67 .family = AF_INET6,
66 .match = match, 68 .match = match,
67 .matchsize = sizeof(int), 69 .matchsize = sizeof(int),
68 .hooks = (1 << NF_IP6_PRE_ROUTING) | (1 << NF_IP6_LOCAL_IN) | 70 .hooks = (1 << NF_IP6_PRE_ROUTING) | (1 << NF_IP6_LOCAL_IN) |
@@ -72,12 +74,12 @@ static struct ip6t_match eui64_match = {
72 74
73static int __init ip6t_eui64_init(void) 75static int __init ip6t_eui64_init(void)
74{ 76{
75 return ip6t_register_match(&eui64_match); 77 return xt_register_match(&eui64_match);
76} 78}
77 79
78static void __exit ip6t_eui64_fini(void) 80static void __exit ip6t_eui64_fini(void)
79{ 81{
80 ip6t_unregister_match(&eui64_match); 82 xt_unregister_match(&eui64_match);
81} 83}
82 84
83module_init(ip6t_eui64_init); 85module_init(ip6t_eui64_init);
diff --git a/net/ipv6/netfilter/ip6t_frag.c b/net/ipv6/netfilter/ip6t_frag.c
index cd22eaaccdca..5a5da71321b6 100644
--- a/net/ipv6/netfilter/ip6t_frag.c
+++ b/net/ipv6/netfilter/ip6t_frag.c
@@ -14,6 +14,7 @@
14#include <net/checksum.h> 14#include <net/checksum.h>
15#include <net/ipv6.h> 15#include <net/ipv6.h>
16 16
17#include <linux/netfilter/x_tables.h>
17#include <linux/netfilter_ipv6/ip6_tables.h> 18#include <linux/netfilter_ipv6/ip6_tables.h>
18#include <linux/netfilter_ipv6/ip6t_frag.h> 19#include <linux/netfilter_ipv6/ip6t_frag.h>
19 20
@@ -135,8 +136,9 @@ checkentry(const char *tablename,
135 return 1; 136 return 1;
136} 137}
137 138
138static struct ip6t_match frag_match = { 139static struct xt_match frag_match = {
139 .name = "frag", 140 .name = "frag",
141 .family = AF_INET6,
140 .match = match, 142 .match = match,
141 .matchsize = sizeof(struct ip6t_frag), 143 .matchsize = sizeof(struct ip6t_frag),
142 .checkentry = checkentry, 144 .checkentry = checkentry,
@@ -145,12 +147,12 @@ static struct ip6t_match frag_match = {
145 147
146static int __init ip6t_frag_init(void) 148static int __init ip6t_frag_init(void)
147{ 149{
148 return ip6t_register_match(&frag_match); 150 return xt_register_match(&frag_match);
149} 151}
150 152
151static void __exit ip6t_frag_fini(void) 153static void __exit ip6t_frag_fini(void)
152{ 154{
153 ip6t_unregister_match(&frag_match); 155 xt_unregister_match(&frag_match);
154} 156}
155 157
156module_init(ip6t_frag_init); 158module_init(ip6t_frag_init);
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c
index 3f25babe0440..d2373c7cd354 100644
--- a/net/ipv6/netfilter/ip6t_hbh.c
+++ b/net/ipv6/netfilter/ip6t_hbh.c
@@ -16,6 +16,7 @@
16 16
17#include <asm/byteorder.h> 17#include <asm/byteorder.h>
18 18
19#include <linux/netfilter/x_tables.h>
19#include <linux/netfilter_ipv6/ip6_tables.h> 20#include <linux/netfilter_ipv6/ip6_tables.h>
20#include <linux/netfilter_ipv6/ip6t_opts.h> 21#include <linux/netfilter_ipv6/ip6t_opts.h>
21 22
diff --git a/net/ipv6/netfilter/ip6t_hl.c b/net/ipv6/netfilter/ip6t_hl.c
index 44a729e17c48..601cc1211c62 100644
--- a/net/ipv6/netfilter/ip6t_hl.c
+++ b/net/ipv6/netfilter/ip6t_hl.c
@@ -8,11 +8,12 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <linux/ipv6.h>
11#include <linux/module.h> 12#include <linux/module.h>
12#include <linux/skbuff.h> 13#include <linux/skbuff.h>
13 14
14#include <linux/netfilter_ipv6/ip6t_hl.h> 15#include <linux/netfilter_ipv6/ip6t_hl.h>
15#include <linux/netfilter_ipv6/ip6_tables.h> 16#include <linux/netfilter/x_tables.h>
16 17
17MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>"); 18MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>");
18MODULE_DESCRIPTION("IP tables Hop Limit matching module"); 19MODULE_DESCRIPTION("IP tables Hop Limit matching module");
@@ -48,8 +49,9 @@ static int match(const struct sk_buff *skb,
48 return 0; 49 return 0;
49} 50}
50 51
51static struct ip6t_match hl_match = { 52static struct xt_match hl_match = {
52 .name = "hl", 53 .name = "hl",
54 .family = AF_INET6,
53 .match = match, 55 .match = match,
54 .matchsize = sizeof(struct ip6t_hl_info), 56 .matchsize = sizeof(struct ip6t_hl_info),
55 .me = THIS_MODULE, 57 .me = THIS_MODULE,
@@ -57,13 +59,12 @@ static struct ip6t_match hl_match = {
57 59
58static int __init ip6t_hl_init(void) 60static int __init ip6t_hl_init(void)
59{ 61{
60 return ip6t_register_match(&hl_match); 62 return xt_register_match(&hl_match);
61} 63}
62 64
63static void __exit ip6t_hl_fini(void) 65static void __exit ip6t_hl_fini(void)
64{ 66{
65 ip6t_unregister_match(&hl_match); 67 xt_unregister_match(&hl_match);
66
67} 68}
68 69
69module_init(ip6t_hl_init); 70module_init(ip6t_hl_init);
diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c
index 3093c398002f..26ac084adefc 100644
--- a/net/ipv6/netfilter/ip6t_ipv6header.c
+++ b/net/ipv6/netfilter/ip6t_ipv6header.c
@@ -18,6 +18,7 @@
18#include <net/checksum.h> 18#include <net/checksum.h>
19#include <net/ipv6.h> 19#include <net/ipv6.h>
20 20
21#include <linux/netfilter/x_tables.h>
21#include <linux/netfilter_ipv6/ip6_tables.h> 22#include <linux/netfilter_ipv6/ip6_tables.h>
22#include <linux/netfilter_ipv6/ip6t_ipv6header.h> 23#include <linux/netfilter_ipv6/ip6t_ipv6header.h>
23 24
@@ -140,8 +141,9 @@ ipv6header_checkentry(const char *tablename,
140 return 1; 141 return 1;
141} 142}
142 143
143static struct ip6t_match ip6t_ipv6header_match = { 144static struct xt_match ip6t_ipv6header_match = {
144 .name = "ipv6header", 145 .name = "ipv6header",
146 .family = AF_INET6,
145 .match = &ipv6header_match, 147 .match = &ipv6header_match,
146 .matchsize = sizeof(struct ip6t_ipv6header_info), 148 .matchsize = sizeof(struct ip6t_ipv6header_info),
147 .checkentry = &ipv6header_checkentry, 149 .checkentry = &ipv6header_checkentry,
@@ -151,12 +153,12 @@ static struct ip6t_match ip6t_ipv6header_match = {
151 153
152static int __init ipv6header_init(void) 154static int __init ipv6header_init(void)
153{ 155{
154 return ip6t_register_match(&ip6t_ipv6header_match); 156 return xt_register_match(&ip6t_ipv6header_match);
155} 157}
156 158
157static void __exit ipv6header_exit(void) 159static void __exit ipv6header_exit(void)
158{ 160{
159 ip6t_unregister_match(&ip6t_ipv6header_match); 161 xt_unregister_match(&ip6t_ipv6header_match);
160} 162}
161 163
162module_init(ipv6header_init); 164module_init(ipv6header_init);
diff --git a/net/ipv6/netfilter/ip6t_mh.c b/net/ipv6/netfilter/ip6t_mh.c
new file mode 100644
index 000000000000..2c7efc6a506d
--- /dev/null
+++ b/net/ipv6/netfilter/ip6t_mh.c
@@ -0,0 +1,108 @@
1/*
2 * Copyright (C)2006 USAGI/WIDE Project
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Author:
9 * Masahide NAKAMURA @USAGI <masahide.nakamura.cz@hitachi.com>
10 *
11 * Based on net/netfilter/xt_tcpudp.c
12 *
13 */
14#include <linux/types.h>
15#include <linux/module.h>
16#include <net/ip.h>
17#include <linux/ipv6.h>
18#include <net/ipv6.h>
19#include <net/mip6.h>
20
21#include <linux/netfilter/x_tables.h>
22#include <linux/netfilter_ipv6/ip6t_mh.h>
23
24MODULE_DESCRIPTION("ip6t_tables match for MH");
25MODULE_LICENSE("GPL");
26
27#ifdef DEBUG_IP_FIREWALL_USER
28#define duprintf(format, args...) printk(format , ## args)
29#else
30#define duprintf(format, args...)
31#endif
32
33/* Returns 1 if the type is matched by the range, 0 otherwise */
34static inline int
35type_match(u_int8_t min, u_int8_t max, u_int8_t type, int invert)
36{
37 int ret;
38
39 ret = (type >= min && type <= max) ^ invert;
40 return ret;
41}
42
43static int
44match(const struct sk_buff *skb,
45 const struct net_device *in,
46 const struct net_device *out,
47 const struct xt_match *match,
48 const void *matchinfo,
49 int offset,
50 unsigned int protoff,
51 int *hotdrop)
52{
53 struct ip6_mh _mh, *mh;
54 const struct ip6t_mh *mhinfo = matchinfo;
55
56 /* Must not be a fragment. */
57 if (offset)
58 return 0;
59
60 mh = skb_header_pointer(skb, protoff, sizeof(_mh), &_mh);
61 if (mh == NULL) {
62 /* We've been asked to examine this packet, and we
63 can't. Hence, no choice but to drop. */
64 duprintf("Dropping evil MH tinygram.\n");
65 *hotdrop = 1;
66 return 0;
67 }
68
69 return type_match(mhinfo->types[0], mhinfo->types[1], mh->ip6mh_type,
70 !!(mhinfo->invflags & IP6T_MH_INV_TYPE));
71}
72
73/* Called when user tries to insert an entry of this type. */
74static int
75mh_checkentry(const char *tablename,
76 const void *entry,
77 const struct xt_match *match,
78 void *matchinfo,
79 unsigned int hook_mask)
80{
81 const struct ip6t_mh *mhinfo = matchinfo;
82
83 /* Must specify no unknown invflags */
84 return !(mhinfo->invflags & ~IP6T_MH_INV_MASK);
85}
86
87static struct xt_match mh_match = {
88 .name = "mh",
89 .family = AF_INET6,
90 .checkentry = mh_checkentry,
91 .match = match,
92 .matchsize = sizeof(struct ip6t_mh),
93 .proto = IPPROTO_MH,
94 .me = THIS_MODULE,
95};
96
97static int __init ip6t_mh_init(void)
98{
99 return xt_register_match(&mh_match);
100}
101
102static void __exit ip6t_mh_fini(void)
103{
104 xt_unregister_match(&mh_match);
105}
106
107module_init(ip6t_mh_init);
108module_exit(ip6t_mh_fini);
diff --git a/net/ipv6/netfilter/ip6t_owner.c b/net/ipv6/netfilter/ip6t_owner.c
index 4eb9bbc4ebc3..43738bba00b5 100644
--- a/net/ipv6/netfilter/ip6t_owner.c
+++ b/net/ipv6/netfilter/ip6t_owner.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/netfilter_ipv6/ip6t_owner.h> 17#include <linux/netfilter_ipv6/ip6t_owner.h>
18#include <linux/netfilter_ipv6/ip6_tables.h> 18#include <linux/netfilter_ipv6/ip6_tables.h>
19#include <linux/netfilter/x_tables.h>
19 20
20MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>"); 21MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
21MODULE_DESCRIPTION("IP6 tables owner matching module"); 22MODULE_DESCRIPTION("IP6 tables owner matching module");
@@ -69,8 +70,9 @@ checkentry(const char *tablename,
69 return 1; 70 return 1;
70} 71}
71 72
72static struct ip6t_match owner_match = { 73static struct xt_match owner_match = {
73 .name = "owner", 74 .name = "owner",
75 .family = AF_INET6,
74 .match = match, 76 .match = match,
75 .matchsize = sizeof(struct ip6t_owner_info), 77 .matchsize = sizeof(struct ip6t_owner_info),
76 .hooks = (1 << NF_IP6_LOCAL_OUT) | (1 << NF_IP6_POST_ROUTING), 78 .hooks = (1 << NF_IP6_LOCAL_OUT) | (1 << NF_IP6_POST_ROUTING),
@@ -80,12 +82,12 @@ static struct ip6t_match owner_match = {
80 82
81static int __init ip6t_owner_init(void) 83static int __init ip6t_owner_init(void)
82{ 84{
83 return ip6t_register_match(&owner_match); 85 return xt_register_match(&owner_match);
84} 86}
85 87
86static void __exit ip6t_owner_fini(void) 88static void __exit ip6t_owner_fini(void)
87{ 89{
88 ip6t_unregister_match(&owner_match); 90 xt_unregister_match(&owner_match);
89} 91}
90 92
91module_init(ip6t_owner_init); 93module_init(ip6t_owner_init);
diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c
index 54d7d14134fd..81ab00d8c182 100644
--- a/net/ipv6/netfilter/ip6t_rt.c
+++ b/net/ipv6/netfilter/ip6t_rt.c
@@ -16,6 +16,7 @@
16 16
17#include <asm/byteorder.h> 17#include <asm/byteorder.h>
18 18
19#include <linux/netfilter/x_tables.h>
19#include <linux/netfilter_ipv6/ip6_tables.h> 20#include <linux/netfilter_ipv6/ip6_tables.h>
20#include <linux/netfilter_ipv6/ip6t_rt.h> 21#include <linux/netfilter_ipv6/ip6t_rt.h>
21 22
@@ -221,8 +222,9 @@ checkentry(const char *tablename,
221 return 1; 222 return 1;
222} 223}
223 224
224static struct ip6t_match rt_match = { 225static struct xt_match rt_match = {
225 .name = "rt", 226 .name = "rt",
227 .family = AF_INET6,
226 .match = match, 228 .match = match,
227 .matchsize = sizeof(struct ip6t_rt), 229 .matchsize = sizeof(struct ip6t_rt),
228 .checkentry = checkentry, 230 .checkentry = checkentry,
@@ -231,12 +233,12 @@ static struct ip6t_match rt_match = {
231 233
232static int __init ip6t_rt_init(void) 234static int __init ip6t_rt_init(void)
233{ 235{
234 return ip6t_register_match(&rt_match); 236 return xt_register_match(&rt_match);
235} 237}
236 238
237static void __exit ip6t_rt_fini(void) 239static void __exit ip6t_rt_fini(void)
238{ 240{
239 ip6t_unregister_match(&rt_match); 241 xt_unregister_match(&rt_match);
240} 242}
241 243
242module_init(ip6t_rt_init); 244module_init(ip6t_rt_init);
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index 2fc07c74decf..112a21d0c6da 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -19,25 +19,6 @@ MODULE_DESCRIPTION("ip6tables filter table");
19 19
20#define FILTER_VALID_HOOKS ((1 << NF_IP6_LOCAL_IN) | (1 << NF_IP6_FORWARD) | (1 << NF_IP6_LOCAL_OUT)) 20#define FILTER_VALID_HOOKS ((1 << NF_IP6_LOCAL_IN) | (1 << NF_IP6_FORWARD) | (1 << NF_IP6_LOCAL_OUT))
21 21
22/* Standard entry. */
23struct ip6t_standard
24{
25 struct ip6t_entry entry;
26 struct ip6t_standard_target target;
27};
28
29struct ip6t_error_target
30{
31 struct ip6t_entry_target target;
32 char errorname[IP6T_FUNCTION_MAXNAMELEN];
33};
34
35struct ip6t_error
36{
37 struct ip6t_entry entry;
38 struct ip6t_error_target target;
39};
40
41static struct 22static struct
42{ 23{
43 struct ip6t_replace repl; 24 struct ip6t_replace repl;
@@ -92,7 +73,7 @@ static struct
92 } 73 }
93}; 74};
94 75
95static struct ip6t_table packet_filter = { 76static struct xt_table packet_filter = {
96 .name = "filter", 77 .name = "filter",
97 .valid_hooks = FILTER_VALID_HOOKS, 78 .valid_hooks = FILTER_VALID_HOOKS,
98 .lock = RW_LOCK_UNLOCKED, 79 .lock = RW_LOCK_UNLOCKED,
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 6250e86a6ddc..5f5aa0e51478 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -29,25 +29,6 @@ MODULE_DESCRIPTION("ip6tables mangle table");
29#define DEBUGP(x, args...) 29#define DEBUGP(x, args...)
30#endif 30#endif
31 31
32/* Standard entry. */
33struct ip6t_standard
34{
35 struct ip6t_entry entry;
36 struct ip6t_standard_target target;
37};
38
39struct ip6t_error_target
40{
41 struct ip6t_entry_target target;
42 char errorname[IP6T_FUNCTION_MAXNAMELEN];
43};
44
45struct ip6t_error
46{
47 struct ip6t_entry entry;
48 struct ip6t_error_target target;
49};
50
51static struct 32static struct
52{ 33{
53 struct ip6t_replace repl; 34 struct ip6t_replace repl;
@@ -122,7 +103,7 @@ static struct
122 } 103 }
123}; 104};
124 105
125static struct ip6t_table packet_mangler = { 106static struct xt_table packet_mangler = {
126 .name = "mangle", 107 .name = "mangle",
127 .valid_hooks = MANGLE_VALID_HOOKS, 108 .valid_hooks = MANGLE_VALID_HOOKS,
128 .lock = RW_LOCK_UNLOCKED, 109 .lock = RW_LOCK_UNLOCKED,
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index b4154da575c0..277bf34638b4 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -14,25 +14,6 @@
14#define DEBUGP(x, args...) 14#define DEBUGP(x, args...)
15#endif 15#endif
16 16
17/* Standard entry. */
18struct ip6t_standard
19{
20 struct ip6t_entry entry;
21 struct ip6t_standard_target target;
22};
23
24struct ip6t_error_target
25{
26 struct ip6t_entry_target target;
27 char errorname[IP6T_FUNCTION_MAXNAMELEN];
28};
29
30struct ip6t_error
31{
32 struct ip6t_entry entry;
33 struct ip6t_error_target target;
34};
35
36static struct 17static struct
37{ 18{
38 struct ip6t_replace repl; 19 struct ip6t_replace repl;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 4ae1b19ada5d..c2d8059e754e 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -815,7 +815,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
815 if (final_p) 815 if (final_p)
816 ipv6_addr_copy(&fl.fl6_dst, final_p); 816 ipv6_addr_copy(&fl.fl6_dst, final_p);
817 817
818 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) 818 if ((err = xfrm_lookup(&dst, &fl, sk, 1)) < 0)
819 goto out; 819 goto out;
820 820
821 if (hlimit < 0) { 821 if (hlimit < 0) {
@@ -1094,10 +1094,19 @@ static void rawv6_close(struct sock *sk, long timeout)
1094 1094
1095static int rawv6_init_sk(struct sock *sk) 1095static int rawv6_init_sk(struct sock *sk)
1096{ 1096{
1097 if (inet_sk(sk)->num == IPPROTO_ICMPV6) { 1097 struct raw6_sock *rp = raw6_sk(sk);
1098 struct raw6_sock *rp = raw6_sk(sk); 1098
1099 switch (inet_sk(sk)->num) {
1100 case IPPROTO_ICMPV6:
1099 rp->checksum = 1; 1101 rp->checksum = 1;
1100 rp->offset = 2; 1102 rp->offset = 2;
1103 break;
1104 case IPPROTO_MH:
1105 rp->checksum = 1;
1106 rp->offset = 4;
1107 break;
1108 default:
1109 break;
1101 } 1110 }
1102 return(0); 1111 return(0);
1103} 1112}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 5f0043c30b70..19c906f6efa1 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -311,12 +311,21 @@ static inline void rt6_probe(struct rt6_info *rt)
311static int inline rt6_check_dev(struct rt6_info *rt, int oif) 311static int inline rt6_check_dev(struct rt6_info *rt, int oif)
312{ 312{
313 struct net_device *dev = rt->rt6i_dev; 313 struct net_device *dev = rt->rt6i_dev;
314 if (!oif || dev->ifindex == oif) 314 int ret = 0;
315
316 if (!oif)
315 return 2; 317 return 2;
316 if ((dev->flags & IFF_LOOPBACK) && 318 if (dev->flags & IFF_LOOPBACK) {
317 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif) 319 if (!WARN_ON(rt->rt6i_idev == NULL) &&
318 return 1; 320 rt->rt6i_idev->dev->ifindex == oif)
319 return 0; 321 ret = 1;
322 else
323 return 0;
324 }
325 if (dev->ifindex == oif)
326 return 2;
327
328 return ret;
320} 329}
321 330
322static int inline rt6_check_neigh(struct rt6_info *rt) 331static int inline rt6_check_neigh(struct rt6_info *rt)
@@ -2040,7 +2049,7 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
2040 2049
2041 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags); 2050 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);
2042 if (nlh == NULL) 2051 if (nlh == NULL)
2043 return -ENOBUFS; 2052 return -EMSGSIZE;
2044 2053
2045 rtm = nlmsg_data(nlh); 2054 rtm = nlmsg_data(nlh);
2046 rtm->rtm_family = AF_INET6; 2055 rtm->rtm_family = AF_INET6;
@@ -2111,7 +2120,8 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
2111 return nlmsg_end(skb, nlh); 2120 return nlmsg_end(skb, nlh);
2112 2121
2113nla_put_failure: 2122nla_put_failure:
2114 return nlmsg_cancel(skb, nlh); 2123 nlmsg_cancel(skb, nlh);
2124 return -EMSGSIZE;
2115} 2125}
2116 2126
2117int rt6_dump_route(struct rt6_info *rt, void *p_arg) 2127int rt6_dump_route(struct rt6_info *rt, void *p_arg)
@@ -2222,9 +2232,12 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2222 goto errout; 2232 goto errout;
2223 2233
2224 err = rt6_fill_node(skb, rt, NULL, NULL, 0, event, pid, seq, 0, 0); 2234 err = rt6_fill_node(skb, rt, NULL, NULL, 0, event, pid, seq, 0, 0);
2225 /* failure implies BUG in rt6_nlmsg_size() */ 2235 if (err < 0) {
2226 BUG_ON(err < 0); 2236 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2227 2237 WARN_ON(err == -EMSGSIZE);
2238 kfree_skb(skb);
2239 goto errout;
2240 }
2228 err = rtnl_notify(skb, pid, RTNLGRP_IPV6_ROUTE, nlh, gfp_any()); 2241 err = rtnl_notify(skb, pid, RTNLGRP_IPV6_ROUTE, nlh, gfp_any());
2229errout: 2242errout:
2230 if (err < 0) 2243 if (err < 0)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 77b7b0911438..47cfeadac6dd 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -686,7 +686,8 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
686 goto done; 686 goto done;
687 dev = t->dev; 687 dev = t->dev;
688 } 688 }
689 err = unregister_netdevice(dev); 689 unregister_netdevice(dev);
690 err = 0;
690 break; 691 break;
691 692
692 default: 693 default:
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index c25e930c2c69..dcb7b00a737d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -265,7 +265,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
265 if (final_p) 265 if (final_p)
266 ipv6_addr_copy(&fl.fl6_dst, final_p); 266 ipv6_addr_copy(&fl.fl6_dst, final_p);
267 267
268 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) 268 if ((err = xfrm_lookup(&dst, &fl, sk, 1)) < 0)
269 goto failure; 269 goto failure;
270 270
271 if (saddr == NULL) { 271 if (saddr == NULL) {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index f52a5c3cc0a3..15e5195549cb 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -736,7 +736,7 @@ do_udp_sendmsg:
736 if (final_p) 736 if (final_p)
737 ipv6_addr_copy(&fl.fl6_dst, final_p); 737 ipv6_addr_copy(&fl.fl6_dst, final_p);
738 738
739 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) 739 if ((err = xfrm_lookup(&dst, &fl, sk, 1)) < 0)
740 goto out; 740 goto out;
741 741
742 if (hlimit < 0) { 742 if (hlimit < 0) {
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 5e7d8a7d6414..0bc866c0d83c 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -25,6 +25,12 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
25 IP6_ECN_set_ce(inner_iph); 25 IP6_ECN_set_ce(inner_iph);
26} 26}
27 27
28static inline void ip6ip_ecn_decapsulate(struct sk_buff *skb)
29{
30 if (INET_ECN_is_ce(ipv6_get_dsfield(skb->nh.ipv6h)))
31 IP_ECN_set_ce(skb->h.ipiph);
32}
33
28/* Add encapsulation header. 34/* Add encapsulation header.
29 * 35 *
30 * The top IP header will be constructed per RFC 2401. The following fields 36 * The top IP header will be constructed per RFC 2401. The following fields
@@ -40,6 +46,7 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
40static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) 46static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
41{ 47{
42 struct dst_entry *dst = skb->dst; 48 struct dst_entry *dst = skb->dst;
49 struct xfrm_dst *xdst = (struct xfrm_dst*)dst;
43 struct ipv6hdr *iph, *top_iph; 50 struct ipv6hdr *iph, *top_iph;
44 int dsfield; 51 int dsfield;
45 52
@@ -52,16 +59,24 @@ static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
52 skb->h.ipv6h = top_iph + 1; 59 skb->h.ipv6h = top_iph + 1;
53 60
54 top_iph->version = 6; 61 top_iph->version = 6;
55 top_iph->priority = iph->priority; 62 if (xdst->route->ops->family == AF_INET6) {
56 top_iph->flow_lbl[0] = iph->flow_lbl[0]; 63 top_iph->priority = iph->priority;
57 top_iph->flow_lbl[1] = iph->flow_lbl[1]; 64 top_iph->flow_lbl[0] = iph->flow_lbl[0];
58 top_iph->flow_lbl[2] = iph->flow_lbl[2]; 65 top_iph->flow_lbl[1] = iph->flow_lbl[1];
66 top_iph->flow_lbl[2] = iph->flow_lbl[2];
67 top_iph->nexthdr = IPPROTO_IPV6;
68 } else {
69 top_iph->priority = 0;
70 top_iph->flow_lbl[0] = 0;
71 top_iph->flow_lbl[1] = 0;
72 top_iph->flow_lbl[2] = 0;
73 top_iph->nexthdr = IPPROTO_IPIP;
74 }
59 dsfield = ipv6_get_dsfield(top_iph); 75 dsfield = ipv6_get_dsfield(top_iph);
60 dsfield = INET_ECN_encapsulate(dsfield, dsfield); 76 dsfield = INET_ECN_encapsulate(dsfield, dsfield);
61 if (x->props.flags & XFRM_STATE_NOECN) 77 if (x->props.flags & XFRM_STATE_NOECN)
62 dsfield &= ~INET_ECN_MASK; 78 dsfield &= ~INET_ECN_MASK;
63 ipv6_change_dsfield(top_iph, 0, dsfield); 79 ipv6_change_dsfield(top_iph, 0, dsfield);
64 top_iph->nexthdr = IPPROTO_IPV6;
65 top_iph->hop_limit = dst_metric(dst->child, RTAX_HOPLIMIT); 80 top_iph->hop_limit = dst_metric(dst->child, RTAX_HOPLIMIT);
66 ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr); 81 ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr);
67 ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr); 82 ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr);
@@ -72,7 +87,8 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
72{ 87{
73 int err = -EINVAL; 88 int err = -EINVAL;
74 89
75 if (skb->nh.raw[IP6CB(skb)->nhoff] != IPPROTO_IPV6) 90 if (skb->nh.raw[IP6CB(skb)->nhoff] != IPPROTO_IPV6
91 && skb->nh.raw[IP6CB(skb)->nhoff] != IPPROTO_IPIP)
76 goto out; 92 goto out;
77 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 93 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
78 goto out; 94 goto out;
@@ -81,10 +97,16 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
81 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 97 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
82 goto out; 98 goto out;
83 99
84 if (x->props.flags & XFRM_STATE_DECAP_DSCP) 100 if (skb->nh.raw[IP6CB(skb)->nhoff] == IPPROTO_IPV6) {
85 ipv6_copy_dscp(skb->nh.ipv6h, skb->h.ipv6h); 101 if (x->props.flags & XFRM_STATE_DECAP_DSCP)
86 if (!(x->props.flags & XFRM_STATE_NOECN)) 102 ipv6_copy_dscp(skb->nh.ipv6h, skb->h.ipv6h);
87 ipip6_ecn_decapsulate(skb); 103 if (!(x->props.flags & XFRM_STATE_NOECN))
104 ipip6_ecn_decapsulate(skb);
105 } else {
106 if (!(x->props.flags & XFRM_STATE_NOECN))
107 ip6ip_ecn_decapsulate(skb);
108 skb->protocol = htons(ETH_P_IP);
109 }
88 skb->mac.raw = memmove(skb->data - skb->mac_len, 110 skb->mac.raw = memmove(skb->data - skb->mac_len,
89 skb->mac.raw, skb->mac_len); 111 skb->mac.raw, skb->mac_len);
90 skb->nh.raw = skb->data; 112 skb->nh.raw = skb->data;
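
The new ip6ip_ecn_decapsulate() helper extends the existing IPv6-in-IPv6 rule to IPv4 packets carried in the IPv6 tunnel: when the outer header is marked congestion-experienced, the mark is copied onto the inner header. A minimal sketch of that ECN rule on plain TOS/traffic-class bytes, outside the kernel's skb helpers (names are illustrative, not from the patch):

#include <stdint.h>

#define ECN_MASK 0x03	/* low two bits of the TOS / traffic-class byte */
#define ECN_CE   0x03	/* congestion experienced */

/* Propagate a CE mark from the outer header to the inner one on
 * decapsulation, as both ipip6_ecn_decapsulate() and the new
 * ip6ip_ecn_decapsulate() do. */
static void ecn_decapsulate(uint8_t outer_tclass, uint8_t *inner_tos)
{
	if ((outer_tclass & ECN_MASK) == ECN_CE)
		*inner_tos |= ECN_CE;
}
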
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 8dffd4daae9c..59480e92177d 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -131,13 +131,11 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
131 struct dst_entry *dst, *dst_prev; 131 struct dst_entry *dst, *dst_prev;
132 struct rt6_info *rt0 = (struct rt6_info*)(*dst_p); 132 struct rt6_info *rt0 = (struct rt6_info*)(*dst_p);
133 struct rt6_info *rt = rt0; 133 struct rt6_info *rt = rt0;
134 struct in6_addr *remote = &fl->fl6_dst;
135 struct in6_addr *local = &fl->fl6_src;
136 struct flowi fl_tunnel = { 134 struct flowi fl_tunnel = {
137 .nl_u = { 135 .nl_u = {
138 .ip6_u = { 136 .ip6_u = {
139 .saddr = *local, 137 .saddr = fl->fl6_src,
140 .daddr = *remote 138 .daddr = fl->fl6_dst,
141 } 139 }
142 } 140 }
143 }; 141 };
@@ -153,7 +151,6 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
153 for (i = 0; i < nx; i++) { 151 for (i = 0; i < nx; i++) {
154 struct dst_entry *dst1 = dst_alloc(&xfrm6_dst_ops); 152 struct dst_entry *dst1 = dst_alloc(&xfrm6_dst_ops);
155 struct xfrm_dst *xdst; 153 struct xfrm_dst *xdst;
156 int tunnel = 0;
157 154
158 if (unlikely(dst1 == NULL)) { 155 if (unlikely(dst1 == NULL)) {
159 err = -ENOBUFS; 156 err = -ENOBUFS;
@@ -177,19 +174,27 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
177 174
178 dst1->next = dst_prev; 175 dst1->next = dst_prev;
179 dst_prev = dst1; 176 dst_prev = dst1;
180 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { 177
181 remote = __xfrm6_bundle_addr_remote(xfrm[i], remote);
182 local = __xfrm6_bundle_addr_local(xfrm[i], local);
183 tunnel = 1;
184 }
185 __xfrm6_bundle_len_inc(&header_len, &nfheader_len, xfrm[i]); 178 __xfrm6_bundle_len_inc(&header_len, &nfheader_len, xfrm[i]);
186 trailer_len += xfrm[i]->props.trailer_len; 179 trailer_len += xfrm[i]->props.trailer_len;
187 180
188 if (tunnel) { 181 if (xfrm[i]->props.mode == XFRM_MODE_TUNNEL) {
189 ipv6_addr_copy(&fl_tunnel.fl6_dst, remote); 182 unsigned short encap_family = xfrm[i]->props.family;
190 ipv6_addr_copy(&fl_tunnel.fl6_src, local); 183 switch(encap_family) {
184 case AF_INET:
185 fl_tunnel.fl4_dst = xfrm[i]->id.daddr.a4;
186 fl_tunnel.fl4_src = xfrm[i]->props.saddr.a4;
187 break;
188 case AF_INET6:
189 ipv6_addr_copy(&fl_tunnel.fl6_dst, (struct in6_addr*)&xfrm[i]->id.daddr.a6);
190 ipv6_addr_copy(&fl_tunnel.fl6_src, (struct in6_addr*)&xfrm[i]->props.saddr.a6);
191 break;
192 default:
193 BUG_ON(1);
194 }
195
191 err = xfrm_dst_lookup((struct xfrm_dst **) &rt, 196 err = xfrm_dst_lookup((struct xfrm_dst **) &rt,
192 &fl_tunnel, AF_INET6); 197 &fl_tunnel, encap_family);
193 if (err) 198 if (err)
194 goto error; 199 goto error;
195 } else 200 } else
@@ -208,6 +213,7 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
208 i = 0; 213 i = 0;
209 for (; dst_prev != &rt->u.dst; dst_prev = dst_prev->child) { 214 for (; dst_prev != &rt->u.dst; dst_prev = dst_prev->child) {
210 struct xfrm_dst *x = (struct xfrm_dst*)dst_prev; 215 struct xfrm_dst *x = (struct xfrm_dst*)dst_prev;
216 struct xfrm_state_afinfo *afinfo;
211 217
212 dst_prev->xfrm = xfrm[i++]; 218 dst_prev->xfrm = xfrm[i++];
213 dst_prev->dev = rt->u.dst.dev; 219 dst_prev->dev = rt->u.dst.dev;
@@ -224,7 +230,17 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
224 /* Copy neighbour for reachability confirmation */ 230 /* Copy neighbour for reachability confirmation */
225 dst_prev->neighbour = neigh_clone(rt->u.dst.neighbour); 231 dst_prev->neighbour = neigh_clone(rt->u.dst.neighbour);
226 dst_prev->input = rt->u.dst.input; 232 dst_prev->input = rt->u.dst.input;
227 dst_prev->output = xfrm6_output; 233 /* XXX: When IPv4 is implemented as module and can be unloaded,
234 * we should manage reference to xfrm4_output in afinfo->output.
235 * Miyazawa
236 */
237 afinfo = xfrm_state_get_afinfo(dst_prev->xfrm->props.family);
238 if (!afinfo) {
239 dst = *dst_p;
240 goto error;
241 };
242 dst_prev->output = afinfo->output;
243 xfrm_state_put_afinfo(afinfo);
228 /* Sheit... I remember I did this right. Apparently, 244 /* Sheit... I remember I did this right. Apparently,
229 * it was magically lost, so this code needs audit */ 245 * it was magically lost, so this code needs audit */
230 x->u.rt6.rt6i_flags = rt0->rt6i_flags&(RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL); 246 x->u.rt6.rt6i_flags = rt0->rt6i_flags&(RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL);
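
In __xfrm6_bundle_create() the tunnel route lookup now uses each state's encapsulation family and its own endpoint addresses instead of assuming IPv6 end to end. A simplified sketch of that per-family dispatch, with a stand-in address union in place of xfrm_address_t (illustrative only):

#include <netinet/in.h>
#include <sys/socket.h>

/* Stand-in for xfrm_address_t: either an IPv4 or an IPv6 address. */
union tunnel_addr {
	struct in_addr  a4;
	struct in6_addr a6;
};

struct tunnel_flow {
	int family;			/* AF_INET or AF_INET6 */
	union tunnel_addr saddr;
	union tunnel_addr daddr;
};

/* Fill the flow used for the tunnel route lookup from the state's
 * endpoints, honouring its encapsulation family as the patch does. */
static void fill_tunnel_flow(struct tunnel_flow *fl, int encap_family,
			     const union tunnel_addr *src,
			     const union tunnel_addr *dst)
{
	fl->family = encap_family;
	switch (encap_family) {
	case AF_INET:
		fl->saddr.a4 = src->a4;
		fl->daddr.a4 = dst->a4;
		break;
	case AF_INET6:
		fl->saddr.a6 = src->a6;
		fl->daddr.a6 = dst->a6;
		break;
	}
}
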
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index 9ddaa9d41539..60ad5f074e0a 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -171,6 +171,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
171 .init_tempsel = __xfrm6_init_tempsel, 171 .init_tempsel = __xfrm6_init_tempsel,
172 .tmpl_sort = __xfrm6_tmpl_sort, 172 .tmpl_sort = __xfrm6_tmpl_sort,
173 .state_sort = __xfrm6_state_sort, 173 .state_sort = __xfrm6_state_sort,
174 .output = xfrm6_output,
174}; 175};
175 176
176void __init xfrm6_state_init(void) 177void __init xfrm6_state_init(void)
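
Registering xfrm6_output in xfrm_state_afinfo lets the bundle code above pick the transmit routine through the per-family ops table instead of hard-coding it. A minimal sketch of that kind of afinfo-style dispatch, with stand-in types rather than the xfrm API:

#include <stddef.h>
#include <stdio.h>
#include <sys/socket.h>		/* AF_INET, AF_INET6 */

struct packet;				/* opaque in this sketch */

struct family_afinfo {
	int family;
	int (*output)(struct packet *pkt);	/* per-family transmit hook */
};

static int ipv4_output(struct packet *pkt) { (void)pkt; puts("v4 output"); return 0; }
static int ipv6_output(struct packet *pkt) { (void)pkt; puts("v6 output"); return 0; }

static struct family_afinfo afinfo_table[] = {
	{ AF_INET,  ipv4_output },
	{ AF_INET6, ipv6_output },
};

/* Look up the entry for a family and call its output hook. */
static int dispatch_output(int family, struct packet *pkt)
{
	size_t i;

	for (i = 0; i < sizeof(afinfo_table) / sizeof(afinfo_table[0]); i++)
		if (afinfo_table[i].family == family)
			return afinfo_table[i].output(pkt);
	return -1;	/* unknown family */
}
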
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 76c661566dfd..89f283c51dff 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -2035,19 +2035,27 @@ static void __exit ipx_proto_finito(void)
2035 2035
2036 ipxitf_cleanup(); 2036 ipxitf_cleanup();
2037 2037
2038 unregister_snap_client(pSNAP_datalink); 2038 if (pSNAP_datalink) {
2039 pSNAP_datalink = NULL; 2039 unregister_snap_client(pSNAP_datalink);
2040 pSNAP_datalink = NULL;
2041 }
2040 2042
2041 unregister_8022_client(p8022_datalink); 2043 if (p8022_datalink) {
2042 p8022_datalink = NULL; 2044 unregister_8022_client(p8022_datalink);
2045 p8022_datalink = NULL;
2046 }
2043 2047
2044 dev_remove_pack(&ipx_8023_packet_type); 2048 dev_remove_pack(&ipx_8023_packet_type);
2045 destroy_8023_client(p8023_datalink); 2049 if (p8023_datalink) {
2046 p8023_datalink = NULL; 2050 destroy_8023_client(p8023_datalink);
2051 p8023_datalink = NULL;
2052 }
2047 2053
2048 dev_remove_pack(&ipx_dix_packet_type); 2054 dev_remove_pack(&ipx_dix_packet_type);
2049 destroy_EII_client(pEII_datalink); 2055 if (pEII_datalink) {
2050 pEII_datalink = NULL; 2056 destroy_EII_client(pEII_datalink);
2057 pEII_datalink = NULL;
2058 }
2051 2059
2052 proto_unregister(&ipx_proto); 2060 proto_unregister(&ipx_proto);
2053 sock_unregister(ipx_family_ops.family); 2061 sock_unregister(ipx_family_ops.family);
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index b1ee99a59c0c..2a571b43ebec 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -91,6 +91,12 @@ struct ias_object *irias_new_object( char *name, int id)
91 91
92 obj->magic = IAS_OBJECT_MAGIC; 92 obj->magic = IAS_OBJECT_MAGIC;
93 obj->name = strndup(name, IAS_MAX_CLASSNAME); 93 obj->name = strndup(name, IAS_MAX_CLASSNAME);
94 if (!obj->name) {
95 IRDA_WARNING("%s(), Unable to allocate name!\n",
96 __FUNCTION__);
97 kfree(obj);
98 return NULL;
99 }
94 obj->id = id; 100 obj->id = id;
95 101
 96 /* Locking notes : the attrib spinlock has lower precedence 102
@@ -101,6 +107,7 @@ struct ias_object *irias_new_object( char *name, int id)
101 if (obj->attribs == NULL) { 107 if (obj->attribs == NULL) {
102 IRDA_WARNING("%s(), Unable to allocate attribs!\n", 108 IRDA_WARNING("%s(), Unable to allocate attribs!\n",
103 __FUNCTION__); 109 __FUNCTION__);
110 kfree(obj->name);
104 kfree(obj); 111 kfree(obj);
105 return NULL; 112 return NULL;
106 } 113 }
@@ -357,6 +364,15 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value,
357 364
358 /* Insert value */ 365 /* Insert value */
359 attrib->value = irias_new_integer_value(value); 366 attrib->value = irias_new_integer_value(value);
367 if (!attrib->name || !attrib->value) {
368 IRDA_WARNING("%s: Unable to allocate attribute!\n",
369 __FUNCTION__);
370 if (attrib->value)
371 irias_delete_value(attrib->value);
372 kfree(attrib->name);
373 kfree(attrib);
374 return;
375 }
360 376
361 irias_add_attrib(obj, attrib, owner); 377 irias_add_attrib(obj, attrib, owner);
362} 378}
@@ -391,6 +407,15 @@ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets,
391 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); 407 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
392 408
393 attrib->value = irias_new_octseq_value( octets, len); 409 attrib->value = irias_new_octseq_value( octets, len);
410 if (!attrib->name || !attrib->value) {
411 IRDA_WARNING("%s: Unable to allocate attribute!\n",
412 __FUNCTION__);
413 if (attrib->value)
414 irias_delete_value(attrib->value);
415 kfree(attrib->name);
416 kfree(attrib);
417 return;
418 }
394 419
395 irias_add_attrib(obj, attrib, owner); 420 irias_add_attrib(obj, attrib, owner);
396} 421}
@@ -424,6 +449,15 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value,
424 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); 449 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
425 450
426 attrib->value = irias_new_string_value(value); 451 attrib->value = irias_new_string_value(value);
452 if (!attrib->name || !attrib->value) {
453 IRDA_WARNING("%s: Unable to allocate attribute!\n",
454 __FUNCTION__);
455 if (attrib->value)
456 irias_delete_value(attrib->value);
457 kfree(attrib->name);
458 kfree(attrib);
459 return;
460 }
427 461
428 irias_add_attrib(obj, attrib, owner); 462 irias_add_attrib(obj, attrib, owner);
429} 463}
@@ -473,6 +507,12 @@ struct ias_value *irias_new_string_value(char *string)
473 value->type = IAS_STRING; 507 value->type = IAS_STRING;
474 value->charset = CS_ASCII; 508 value->charset = CS_ASCII;
475 value->t.string = strndup(string, IAS_MAX_STRING); 509 value->t.string = strndup(string, IAS_MAX_STRING);
510 if (!value->t.string) {
511 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
512 kfree(value);
513 return NULL;
514 }
515
476 value->len = strlen(value->t.string); 516 value->len = strlen(value->t.string);
477 517
478 return value; 518 return value;
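
Each irias_add_*_attrib() fix above follows the same shape: allocate the attribute name and value, check them together, and free whatever did succeed before bailing out. A generic sketch of that pattern in plain C, with stand-in types rather than the IrDA structures:

#include <stdlib.h>
#include <string.h>

struct attrib {
	char *name;
	char *value;
};

/* Build an attribute from copies of name and value; on partial
 * failure, release what was allocated and return NULL. */
static struct attrib *attrib_new(const char *name, const char *value)
{
	struct attrib *a = calloc(1, sizeof(*a));

	if (!a)
		return NULL;
	a->name = strdup(name);
	a->value = strdup(value);
	if (!a->name || !a->value) {
		free(a->value);		/* free(NULL) is a no-op */
		free(a->name);
		free(a);
		return NULL;
	}
	return a;
}
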
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 2bb04ac09329..310776dd6109 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -144,12 +144,18 @@ static int __init irlan_init(void)
144 /* Register with IrLMP as a client */ 144 /* Register with IrLMP as a client */
145 ckey = irlmp_register_client(hints, &irlan_client_discovery_indication, 145 ckey = irlmp_register_client(hints, &irlan_client_discovery_indication,
146 NULL, NULL); 146 NULL, NULL);
147 147 if (!ckey)
148 goto err_ckey;
149
148 /* Register with IrLMP as a service */ 150 /* Register with IrLMP as a service */
149 skey = irlmp_register_service(hints); 151 skey = irlmp_register_service(hints);
152 if (!skey)
153 goto err_skey;
150 154
151 /* Start the master IrLAN instance (the only one for now) */ 155 /* Start the master IrLAN instance (the only one for now) */
152 new = irlan_open(DEV_ADDR_ANY, DEV_ADDR_ANY); 156 new = irlan_open(DEV_ADDR_ANY, DEV_ADDR_ANY);
157 if (!new)
158 goto err_open;
153 159
154 /* The master will only open its (listen) control TSAP */ 160 /* The master will only open its (listen) control TSAP */
155 irlan_provider_open_ctrl_tsap(new); 161 irlan_provider_open_ctrl_tsap(new);
@@ -158,6 +164,17 @@ static int __init irlan_init(void)
158 irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS); 164 irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS);
159 165
160 return 0; 166 return 0;
167
168err_open:
169 irlmp_unregister_service(skey);
170err_skey:
171 irlmp_unregister_client(ckey);
172err_ckey:
173#ifdef CONFIG_PROC_FS
174 remove_proc_entry("irlan", proc_irda);
175#endif /* CONFIG_PROC_FS */
176
177 return -ENOMEM;
161} 178}
162 179
163static void __exit irlan_cleanup(void) 180static void __exit irlan_cleanup(void)
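
The irlan_init() change adds the usual init-time unwind: each registration gets an error label, and a later failure undoes earlier successes in reverse order. A self-contained sketch of that goto ladder, with hypothetical register/unregister helpers standing in for the IrLMP calls:

/* Hypothetical setup/teardown helpers; each register_* returns 0 on success. */
static int register_client(void)     { return 0; }
static int register_service(void)    { return 0; }
static int open_instance(void)       { return 0; }
static void unregister_service(void) { }
static void unregister_client(void)  { }

static int example_init(void)
{
	int err;

	err = register_client();
	if (err)
		goto out;
	err = register_service();
	if (err)
		goto err_client;
	err = open_instance();
	if (err)
		goto err_service;
	return 0;

err_service:
	unregister_service();
err_client:
	unregister_client();
out:
	return err;
}
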
diff --git a/net/iucv/Kconfig b/net/iucv/Kconfig
new file mode 100644
index 000000000000..f8fcc3d10327
--- /dev/null
+++ b/net/iucv/Kconfig
@@ -0,0 +1,15 @@
1config IUCV
2 tristate "IUCV support (VM only)"
3 depends on S390
4 help
5 Select this option if you want to use inter-user communication under
 6	  VM or VIF. If you run on z/VM, say "Y" to enable a fast
7 communication link between VM guests.
8
9config AFIUCV
10 tristate "AF_IUCV support (VM only)"
11 depends on IUCV
12 help
13	  Select this option if you want to use AF_IUCV sockets for inter-user
14	  communication under VM or VIF. If you run on z/VM, say "Y" to enable a fast
15 communication link between VM guests.
diff --git a/net/iucv/Makefile b/net/iucv/Makefile
new file mode 100644
index 000000000000..7bfdc8532675
--- /dev/null
+++ b/net/iucv/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for IUCV
3#
4
5obj-$(CONFIG_IUCV) += iucv.o
6obj-$(CONFIG_AFIUCV) += af_iucv.o
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
new file mode 100644
index 000000000000..acc94214bde6
--- /dev/null
+++ b/net/iucv/af_iucv.c
@@ -0,0 +1,1077 @@
1/*
2 * linux/net/iucv/af_iucv.c
3 *
4 * IUCV protocol stack for Linux on zSeries
5 *
6 * Copyright 2006 IBM Corporation
7 *
8 * Author(s): Jennifer Hunt <jenhunt@us.ibm.com>
9 */
10
11#include <linux/module.h>
12#include <linux/types.h>
13#include <linux/list.h>
14#include <linux/errno.h>
15#include <linux/kernel.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/skbuff.h>
19#include <linux/init.h>
20#include <linux/poll.h>
21#include <net/sock.h>
22#include <asm/ebcdic.h>
23#include <asm/cpcmd.h>
24#include <linux/kmod.h>
25
26#include <net/iucv/iucv.h>
27#include <net/iucv/af_iucv.h>
28
29#define CONFIG_IUCV_SOCK_DEBUG 1
30
31#define IPRMDATA 0x80
32#define VERSION "1.0"
33
34static char iucv_userid[80];
35
36static struct proto_ops iucv_sock_ops;
37
38static struct proto iucv_proto = {
39 .name = "AF_IUCV",
40 .owner = THIS_MODULE,
41 .obj_size = sizeof(struct iucv_sock),
42};
43
44/* Call Back functions */
45static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
46static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
47static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
48static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
49static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
50
51static struct iucv_sock_list iucv_sk_list = {
52 .lock = RW_LOCK_UNLOCKED,
53 .autobind_name = ATOMIC_INIT(0)
54};
55
56static struct iucv_handler af_iucv_handler = {
57 .path_pending = iucv_callback_connreq,
58 .path_complete = iucv_callback_connack,
59 .path_severed = iucv_callback_connrej,
60 .message_pending = iucv_callback_rx,
61 .message_complete = iucv_callback_txdone
62};
63
64static inline void high_nmcpy(unsigned char *dst, char *src)
65{
66 memcpy(dst, src, 8);
67}
68
69static inline void low_nmcpy(unsigned char *dst, char *src)
70{
71 memcpy(&dst[8], src, 8);
72}
73
74/* Timers */
75static void iucv_sock_timeout(unsigned long arg)
76{
77 struct sock *sk = (struct sock *)arg;
78
79 bh_lock_sock(sk);
80 sk->sk_err = ETIMEDOUT;
81 sk->sk_state_change(sk);
82 bh_unlock_sock(sk);
83
84 iucv_sock_kill(sk);
85 sock_put(sk);
86}
87
88static void iucv_sock_clear_timer(struct sock *sk)
89{
90 sk_stop_timer(sk, &sk->sk_timer);
91}
92
93static void iucv_sock_init_timer(struct sock *sk)
94{
95 init_timer(&sk->sk_timer);
96 sk->sk_timer.function = iucv_sock_timeout;
97 sk->sk_timer.data = (unsigned long)sk;
98}
99
100static struct sock *__iucv_get_sock_by_name(char *nm)
101{
102 struct sock *sk;
103 struct hlist_node *node;
104
105 sk_for_each(sk, node, &iucv_sk_list.head)
106 if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
107 return sk;
108
109 return NULL;
110}
111
112static void iucv_sock_destruct(struct sock *sk)
113{
114 skb_queue_purge(&sk->sk_receive_queue);
115 skb_queue_purge(&sk->sk_write_queue);
116}
117
118/* Cleanup Listen */
119static void iucv_sock_cleanup_listen(struct sock *parent)
120{
121 struct sock *sk;
122
123 /* Close non-accepted connections */
124 while ((sk = iucv_accept_dequeue(parent, NULL))) {
125 iucv_sock_close(sk);
126 iucv_sock_kill(sk);
127 }
128
129 parent->sk_state = IUCV_CLOSED;
130 sock_set_flag(parent, SOCK_ZAPPED);
131}
132
133/* Kill socket */
134static void iucv_sock_kill(struct sock *sk)
135{
136 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
137 return;
138
139 iucv_sock_unlink(&iucv_sk_list, sk);
140 sock_set_flag(sk, SOCK_DEAD);
141 sock_put(sk);
142}
143
144/* Close an IUCV socket */
145static void iucv_sock_close(struct sock *sk)
146{
147 unsigned char user_data[16];
148 struct iucv_sock *iucv = iucv_sk(sk);
149 int err;
150
151 iucv_sock_clear_timer(sk);
152 lock_sock(sk);
153
154 switch(sk->sk_state) {
155 case IUCV_LISTEN:
156 iucv_sock_cleanup_listen(sk);
157 break;
158
159 case IUCV_CONNECTED:
160 case IUCV_DISCONN:
161 err = 0;
162 if (iucv->path) {
163 low_nmcpy(user_data, iucv->src_name);
164 high_nmcpy(user_data, iucv->dst_name);
165 ASCEBC(user_data, sizeof(user_data));
166 err = iucv_path_sever(iucv->path, user_data);
167 iucv_path_free(iucv->path);
168 iucv->path = NULL;
169 }
170
171 sk->sk_state = IUCV_CLOSED;
172 sk->sk_state_change(sk);
173 sk->sk_err = ECONNRESET;
174 sk->sk_state_change(sk);
175
176 skb_queue_purge(&iucv->send_skb_q);
177
178 sock_set_flag(sk, SOCK_ZAPPED);
179 break;
180
181 default:
182 sock_set_flag(sk, SOCK_ZAPPED);
183 break;
184 };
185
186 release_sock(sk);
187 iucv_sock_kill(sk);
188}
189
190static void iucv_sock_init(struct sock *sk, struct sock *parent)
191{
192 if (parent)
193 sk->sk_type = parent->sk_type;
194}
195
196static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
197{
198 struct sock *sk;
199
200 sk = sk_alloc(PF_IUCV, prio, &iucv_proto, 1);
201 if (!sk)
202 return NULL;
203
204 sock_init_data(sock, sk);
205 INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
206 skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
207 iucv_sk(sk)->send_tag = 0;
208
209 sk->sk_destruct = iucv_sock_destruct;
210 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
211 sk->sk_allocation = GFP_DMA;
212
213 sock_reset_flag(sk, SOCK_ZAPPED);
214
215 sk->sk_protocol = proto;
216 sk->sk_state = IUCV_OPEN;
217
218 iucv_sock_init_timer(sk);
219
220 iucv_sock_link(&iucv_sk_list, sk);
221 return sk;
222}
223
224/* Create an IUCV socket */
225static int iucv_sock_create(struct socket *sock, int protocol)
226{
227 struct sock *sk;
228
229 if (sock->type != SOCK_STREAM)
230 return -ESOCKTNOSUPPORT;
231
232 sock->state = SS_UNCONNECTED;
233 sock->ops = &iucv_sock_ops;
234
235 sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
236 if (!sk)
237 return -ENOMEM;
238
239 iucv_sock_init(sk, NULL);
240
241 return 0;
242}
243
244void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
245{
246 write_lock_bh(&l->lock);
247 sk_add_node(sk, &l->head);
248 write_unlock_bh(&l->lock);
249}
250
251void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
252{
253 write_lock_bh(&l->lock);
254 sk_del_node_init(sk);
255 write_unlock_bh(&l->lock);
256}
257
258void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
259{
260 sock_hold(sk);
261 list_add_tail(&iucv_sk(sk)->accept_q, &iucv_sk(parent)->accept_q);
262 iucv_sk(sk)->parent = parent;
263 parent->sk_ack_backlog++;
264}
265
266void iucv_accept_unlink(struct sock *sk)
267{
268 list_del_init(&iucv_sk(sk)->accept_q);
269 iucv_sk(sk)->parent->sk_ack_backlog--;
270 iucv_sk(sk)->parent = NULL;
271 sock_put(sk);
272}
273
274struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
275{
276 struct iucv_sock *isk, *n;
277 struct sock *sk;
278
279 list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q){
280 sk = (struct sock *) isk;
281 lock_sock(sk);
282
283 if (sk->sk_state == IUCV_CLOSED) {
284 release_sock(sk);
285 iucv_accept_unlink(sk);
286 continue;
287 }
288
289 if (sk->sk_state == IUCV_CONNECTED ||
290 sk->sk_state == IUCV_SEVERED ||
291 !newsock) {
292 iucv_accept_unlink(sk);
293 if (newsock)
294 sock_graft(sk, newsock);
295
296 if (sk->sk_state == IUCV_SEVERED)
297 sk->sk_state = IUCV_DISCONN;
298
299 release_sock(sk);
300 return sk;
301 }
302
303 release_sock(sk);
304 }
305 return NULL;
306}
307
308int iucv_sock_wait_state(struct sock *sk, int state, int state2,
309 unsigned long timeo)
310{
311 DECLARE_WAITQUEUE(wait, current);
312 int err = 0;
313
314 add_wait_queue(sk->sk_sleep, &wait);
315 while (sk->sk_state != state && sk->sk_state != state2) {
316 set_current_state(TASK_INTERRUPTIBLE);
317
318 if (!timeo) {
319 err = -EAGAIN;
320 break;
321 }
322
323 if (signal_pending(current)) {
324 err = sock_intr_errno(timeo);
325 break;
326 }
327
328 release_sock(sk);
329 timeo = schedule_timeout(timeo);
330 lock_sock(sk);
331
332 err = sock_error(sk);
333 if (err)
334 break;
335 }
336 set_current_state(TASK_RUNNING);
337 remove_wait_queue(sk->sk_sleep, &wait);
338 return err;
339}
340
341/* Bind an unbound socket */
342static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
343 int addr_len)
344{
345 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
346 struct sock *sk = sock->sk;
347 struct iucv_sock *iucv;
348 int err;
349
350 /* Verify the input sockaddr */
351 if (!addr || addr->sa_family != AF_IUCV)
352 return -EINVAL;
353
354 lock_sock(sk);
355 if (sk->sk_state != IUCV_OPEN) {
356 err = -EBADFD;
357 goto done;
358 }
359
360 write_lock_bh(&iucv_sk_list.lock);
361
362 iucv = iucv_sk(sk);
363 if (__iucv_get_sock_by_name(sa->siucv_name)) {
364 err = -EADDRINUSE;
365 goto done_unlock;
366 }
367 if (iucv->path) {
368 err = 0;
369 goto done_unlock;
370 }
371
372 /* Bind the socket */
373 memcpy(iucv->src_name, sa->siucv_name, 8);
374
375 /* Copy the user id */
376 memcpy(iucv->src_user_id, iucv_userid, 8);
377 sk->sk_state = IUCV_BOUND;
378 err = 0;
379
380done_unlock:
381 /* Release the socket list lock */
382 write_unlock_bh(&iucv_sk_list.lock);
383done:
384 release_sock(sk);
385 return err;
386}
387
388/* Automatically bind an unbound socket */
389static int iucv_sock_autobind(struct sock *sk)
390{
391 struct iucv_sock *iucv = iucv_sk(sk);
392 char query_buffer[80];
393 char name[12];
394 int err = 0;
395
396 /* Set the userid and name */
397 cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
398 if (unlikely(err))
399 return -EPROTO;
400
401 memcpy(iucv->src_user_id, query_buffer, 8);
402
403 write_lock_bh(&iucv_sk_list.lock);
404
405 sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
406 while (__iucv_get_sock_by_name(name)) {
407 sprintf(name, "%08x",
408 atomic_inc_return(&iucv_sk_list.autobind_name));
409 }
410
411 write_unlock_bh(&iucv_sk_list.lock);
412
413 memcpy(&iucv->src_name, name, 8);
414
415 return err;
416}
417
418/* Connect an unconnected socket */
419static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
420 int alen, int flags)
421{
422 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
423 struct sock *sk = sock->sk;
424 struct iucv_sock *iucv;
425 unsigned char user_data[16];
426 int err;
427
428 if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
429 return -EINVAL;
430
431 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
432 return -EBADFD;
433
434 if (sk->sk_type != SOCK_STREAM)
435 return -EINVAL;
436
437 iucv = iucv_sk(sk);
438
439 if (sk->sk_state == IUCV_OPEN) {
440 err = iucv_sock_autobind(sk);
441 if (unlikely(err))
442 return err;
443 }
444
445 lock_sock(sk);
446
447 /* Set the destination information */
448 memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
449 memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);
450
451 high_nmcpy(user_data, sa->siucv_name);
452 low_nmcpy(user_data, iucv_sk(sk)->src_name);
453 ASCEBC(user_data, sizeof(user_data));
454
455 iucv = iucv_sk(sk);
456 /* Create path. */
457 iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
458 IPRMDATA, GFP_KERNEL);
459 err = iucv_path_connect(iucv->path, &af_iucv_handler,
460 sa->siucv_user_id, NULL, user_data, sk);
461 if (err) {
462 iucv_path_free(iucv->path);
463 iucv->path = NULL;
464 err = -ECONNREFUSED;
465 goto done;
466 }
467
468 if (sk->sk_state != IUCV_CONNECTED) {
469 err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
470 sock_sndtimeo(sk, flags & O_NONBLOCK));
471 }
472
473 if (sk->sk_state == IUCV_DISCONN) {
474 release_sock(sk);
475 return -ECONNREFUSED;
476 }
477done:
478 release_sock(sk);
479 return err;
480}
481
482/* Move a socket into listening state. */
483static int iucv_sock_listen(struct socket *sock, int backlog)
484{
485 struct sock *sk = sock->sk;
486 int err;
487
488 lock_sock(sk);
489
490 err = -EINVAL;
491 if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
492 goto done;
493
494 sk->sk_max_ack_backlog = backlog;
495 sk->sk_ack_backlog = 0;
496 sk->sk_state = IUCV_LISTEN;
497 err = 0;
498
499done:
500 release_sock(sk);
501 return err;
502}
503
504/* Accept a pending connection */
505static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
506 int flags)
507{
508 DECLARE_WAITQUEUE(wait, current);
509 struct sock *sk = sock->sk, *nsk;
510 long timeo;
511 int err = 0;
512
513 lock_sock(sk);
514
515 if (sk->sk_state != IUCV_LISTEN) {
516 err = -EBADFD;
517 goto done;
518 }
519
520 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
521
522 /* Wait for an incoming connection */
523 add_wait_queue_exclusive(sk->sk_sleep, &wait);
524 while (!(nsk = iucv_accept_dequeue(sk, newsock))){
525 set_current_state(TASK_INTERRUPTIBLE);
526 if (!timeo) {
527 err = -EAGAIN;
528 break;
529 }
530
531 release_sock(sk);
532 timeo = schedule_timeout(timeo);
533 lock_sock(sk);
534
535 if (sk->sk_state != IUCV_LISTEN) {
536 err = -EBADFD;
537 break;
538 }
539
540 if (signal_pending(current)) {
541 err = sock_intr_errno(timeo);
542 break;
543 }
544 }
545
546 set_current_state(TASK_RUNNING);
547 remove_wait_queue(sk->sk_sleep, &wait);
548
549 if (err)
550 goto done;
551
552 newsock->state = SS_CONNECTED;
553
554done:
555 release_sock(sk);
556 return err;
557}
558
559static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
560 int *len, int peer)
561{
562 struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
563 struct sock *sk = sock->sk;
564
565 addr->sa_family = AF_IUCV;
566 *len = sizeof(struct sockaddr_iucv);
567
568 if (peer) {
569 memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
570 memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
571 } else {
572 memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
573 memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
574 }
575 memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
576 memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
577 memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
578
579 return 0;
580}
581
582static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
583 struct msghdr *msg, size_t len)
584{
585 struct sock *sk = sock->sk;
586 struct iucv_sock *iucv = iucv_sk(sk);
587 struct sk_buff *skb;
588 struct iucv_message txmsg;
589 int err;
590
591 err = sock_error(sk);
592 if (err)
593 return err;
594
595 if (msg->msg_flags & MSG_OOB)
596 return -EOPNOTSUPP;
597
598 lock_sock(sk);
599
600 if (sk->sk_shutdown & SEND_SHUTDOWN) {
601 err = -EPIPE;
602 goto out;
603 }
604
605 if (sk->sk_state == IUCV_CONNECTED){
606 if(!(skb = sock_alloc_send_skb(sk, len,
607 msg->msg_flags & MSG_DONTWAIT,
608 &err)))
609 return err;
610
611 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)){
612 err = -EFAULT;
613 goto fail;
614 }
615
616 txmsg.class = 0;
617 txmsg.tag = iucv->send_tag++;
618 memcpy(skb->cb, &txmsg.tag, 4);
619 skb_queue_tail(&iucv->send_skb_q, skb);
620 err = iucv_message_send(iucv->path, &txmsg, 0, 0,
621 (void *) skb->data, skb->len);
622 if (err) {
623 if (err == 3)
624 printk(KERN_ERR "AF_IUCV msg limit exceeded\n");
625 skb_unlink(skb, &iucv->send_skb_q);
626 err = -EPIPE;
627 goto fail;
628 }
629
630 } else {
631 err = -ENOTCONN;
632 goto out;
633 }
634
635 release_sock(sk);
636 return len;
637
638fail:
639 kfree_skb(skb);
640out:
641 release_sock(sk);
642 return err;
643}
644
645static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
646 struct msghdr *msg, size_t len, int flags)
647{
648 int noblock = flags & MSG_DONTWAIT;
649 struct sock *sk = sock->sk;
650 int target, copied = 0;
651 struct sk_buff *skb;
652 int err = 0;
653
654 if (flags & (MSG_OOB))
655 return -EOPNOTSUPP;
656
657 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
658
659 skb = skb_recv_datagram(sk, flags, noblock, &err);
660 if (!skb) {
661 if (sk->sk_shutdown & RCV_SHUTDOWN)
662 return 0;
663 return err;
664 }
665
666 copied = min_t(unsigned int, skb->len, len);
667
668 if (memcpy_toiovec(msg->msg_iov, skb->data, copied)) {
669 skb_queue_head(&sk->sk_receive_queue, skb);
670 if (copied == 0)
671 return -EFAULT;
672 }
673
674 len -= copied;
675
676 /* Mark read part of skb as used */
677 if (!(flags & MSG_PEEK)) {
678 skb_pull(skb, copied);
679
680 if (skb->len) {
681 skb_queue_head(&sk->sk_receive_queue, skb);
682 goto done;
683 }
684
685 kfree_skb(skb);
686 } else
687 skb_queue_head(&sk->sk_receive_queue, skb);
688
689done:
690 return err ? : copied;
691}
692
693static inline unsigned int iucv_accept_poll(struct sock *parent)
694{
695 struct iucv_sock *isk, *n;
696 struct sock *sk;
697
698 list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q){
699 sk = (struct sock *) isk;
700
701 if (sk->sk_state == IUCV_CONNECTED)
702 return POLLIN | POLLRDNORM;
703 }
704
705 return 0;
706}
707
708unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
709 poll_table *wait)
710{
711 struct sock *sk = sock->sk;
712 unsigned int mask = 0;
713
714 poll_wait(file, sk->sk_sleep, wait);
715
716 if (sk->sk_state == IUCV_LISTEN)
717 return iucv_accept_poll(sk);
718
719 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
720 mask |= POLLERR;
721
722 if (sk->sk_shutdown & RCV_SHUTDOWN)
723 mask |= POLLRDHUP;
724
725 if (sk->sk_shutdown == SHUTDOWN_MASK)
726 mask |= POLLHUP;
727
728 if (!skb_queue_empty(&sk->sk_receive_queue) ||
729 (sk->sk_shutdown & RCV_SHUTDOWN))
730 mask |= POLLIN | POLLRDNORM;
731
732 if (sk->sk_state == IUCV_CLOSED)
733 mask |= POLLHUP;
734
735 if (sock_writeable(sk))
736 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
737 else
738 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
739
740 return mask;
741}
742
743static int iucv_sock_shutdown(struct socket *sock, int how)
744{
745 struct sock *sk = sock->sk;
746 struct iucv_sock *iucv = iucv_sk(sk);
747 struct iucv_message txmsg;
748 int err = 0;
749 u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
750
751 how++;
752
753 if ((how & ~SHUTDOWN_MASK) || !how)
754 return -EINVAL;
755
756 lock_sock(sk);
757 switch(sk->sk_state) {
758 case IUCV_CLOSED:
759 err = -ENOTCONN;
760 goto fail;
761
762 default:
763 sk->sk_shutdown |= how;
764 break;
765 }
766
767 if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
768 txmsg.class = 0;
769 txmsg.tag = 0;
770 err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
771 (void *) prmmsg, 8);
772 if (err) {
773 switch(err) {
774 case 1:
775 err = -ENOTCONN;
776 break;
777 case 2:
778 err = -ECONNRESET;
779 break;
780 default:
781 err = -ENOTCONN;
782 break;
783 }
784 }
785 }
786
787 if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
788 err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
789 if (err)
790 err = -ENOTCONN;
791
792 skb_queue_purge(&sk->sk_receive_queue);
793 }
794
795 /* Wake up anyone sleeping in poll */
796 sk->sk_state_change(sk);
797
798fail:
799 release_sock(sk);
800 return err;
801}
802
803static int iucv_sock_release(struct socket *sock)
804{
805 struct sock *sk = sock->sk;
806 int err = 0;
807
808 if (!sk)
809 return 0;
810
811 iucv_sock_close(sk);
812
813 /* Unregister with IUCV base support */
814 if (iucv_sk(sk)->path) {
815 iucv_path_sever(iucv_sk(sk)->path, NULL);
816 iucv_path_free(iucv_sk(sk)->path);
817 iucv_sk(sk)->path = NULL;
818 }
819
820 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime){
821 lock_sock(sk);
822 err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0,
823 sk->sk_lingertime);
824 release_sock(sk);
825 }
826
827 sock_orphan(sk);
828 iucv_sock_kill(sk);
829 return err;
830}
831
832/* Callback wrappers - called from iucv base support */
833static int iucv_callback_connreq(struct iucv_path *path,
834 u8 ipvmid[8], u8 ipuser[16])
835{
836 unsigned char user_data[16];
837 unsigned char nuser_data[16];
838 unsigned char src_name[8];
839 struct hlist_node *node;
840 struct sock *sk, *nsk;
841 struct iucv_sock *iucv, *niucv;
842 int err;
843
844 memcpy(src_name, ipuser, 8);
845 EBCASC(src_name, 8);
846 /* Find out if this path belongs to af_iucv. */
847 read_lock(&iucv_sk_list.lock);
848 iucv = NULL;
849 sk_for_each(sk, node, &iucv_sk_list.head)
850 if (sk->sk_state == IUCV_LISTEN &&
851 !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
852 /*
853 * Found a listening socket with
854 * src_name == ipuser[0-7].
855 */
856 iucv = iucv_sk(sk);
857 break;
858 }
859 read_unlock(&iucv_sk_list.lock);
860 if (!iucv)
861 /* No socket found, not one of our paths. */
862 return -EINVAL;
863
864 bh_lock_sock(sk);
865
866 /* Check if parent socket is listening */
867 low_nmcpy(user_data, iucv->src_name);
868 high_nmcpy(user_data, iucv->dst_name);
869 ASCEBC(user_data, sizeof(user_data));
870 if (sk->sk_state != IUCV_LISTEN) {
871 err = iucv_path_sever(path, user_data);
872 goto fail;
873 }
874
875 /* Check for backlog size */
876 if (sk_acceptq_is_full(sk)) {
877 err = iucv_path_sever(path, user_data);
878 goto fail;
879 }
880
881 /* Create the new socket */
882 nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
883 if (!nsk){
884 err = iucv_path_sever(path, user_data);
885 goto fail;
886 }
887
888 niucv = iucv_sk(nsk);
889 iucv_sock_init(nsk, sk);
890
891 /* Set the new iucv_sock */
892 memcpy(niucv->dst_name, ipuser + 8, 8);
893 EBCASC(niucv->dst_name, 8);
894 memcpy(niucv->dst_user_id, ipvmid, 8);
895 memcpy(niucv->src_name, iucv->src_name, 8);
896 memcpy(niucv->src_user_id, iucv->src_user_id, 8);
897 niucv->path = path;
898
899 /* Call iucv_accept */
900 high_nmcpy(nuser_data, ipuser + 8);
901 memcpy(nuser_data + 8, niucv->src_name, 8);
902 ASCEBC(nuser_data + 8, 8);
903
904 path->msglim = IUCV_QUEUELEN_DEFAULT;
905 err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
906 if (err){
907 err = iucv_path_sever(path, user_data);
908 goto fail;
909 }
910
911 iucv_accept_enqueue(sk, nsk);
912
913 /* Wake up accept */
914 nsk->sk_state = IUCV_CONNECTED;
915 sk->sk_data_ready(sk, 1);
916 err = 0;
917fail:
918 bh_unlock_sock(sk);
919 return 0;
920}
921
922static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
923{
924 struct sock *sk = path->private;
925
926 sk->sk_state = IUCV_CONNECTED;
927 sk->sk_state_change(sk);
928}
929
930static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
931{
932 struct sock *sk = path->private;
933 struct sk_buff *skb;
934 int rc;
935
936 if (sk->sk_shutdown & RCV_SHUTDOWN)
937 return;
938
939 skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
940 if (!skb) {
941 iucv_message_reject(path, msg);
942 return;
943 }
944
945 if (msg->flags & IPRMDATA) {
946 skb->data = NULL;
947 skb->len = 0;
948 } else {
949 rc = iucv_message_receive(path, msg, 0, skb->data,
950 msg->length, NULL);
951 if (rc) {
952 kfree_skb(skb);
953 return;
954 }
955
956 skb->h.raw = skb->data;
957 skb->nh.raw = skb->data;
958 skb->len = msg->length;
959 }
960
961 if (sock_queue_rcv_skb(sk, skb))
962 kfree_skb(skb);
963}
964
965static void iucv_callback_txdone(struct iucv_path *path,
966 struct iucv_message *msg)
967{
968 struct sock *sk = path->private;
969 struct sk_buff *this;
970 struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
971 struct sk_buff *list_skb = list->next;
972 unsigned long flags;
973
974 spin_lock_irqsave(&list->lock, flags);
975
976 do {
977 this = list_skb;
978 list_skb = list_skb->next;
979 } while (memcmp(&msg->tag, this->cb, 4));
980
981 spin_unlock_irqrestore(&list->lock, flags);
982
983 skb_unlink(this, &iucv_sk(sk)->send_skb_q);
984 kfree_skb(this);
985}
986
987static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
988{
989 struct sock *sk = path->private;
990
991 if (!list_empty(&iucv_sk(sk)->accept_q))
992 sk->sk_state = IUCV_SEVERED;
993 else
994 sk->sk_state = IUCV_DISCONN;
995
996 sk->sk_state_change(sk);
997}
998
999static struct proto_ops iucv_sock_ops = {
1000 .family = PF_IUCV,
1001 .owner = THIS_MODULE,
1002 .release = iucv_sock_release,
1003 .bind = iucv_sock_bind,
1004 .connect = iucv_sock_connect,
1005 .listen = iucv_sock_listen,
1006 .accept = iucv_sock_accept,
1007 .getname = iucv_sock_getname,
1008 .sendmsg = iucv_sock_sendmsg,
1009 .recvmsg = iucv_sock_recvmsg,
1010 .poll = iucv_sock_poll,
1011 .ioctl = sock_no_ioctl,
1012 .mmap = sock_no_mmap,
1013 .socketpair = sock_no_socketpair,
1014 .shutdown = iucv_sock_shutdown,
1015 .setsockopt = sock_no_setsockopt,
1016 .getsockopt = sock_no_getsockopt
1017};
1018
1019static struct net_proto_family iucv_sock_family_ops = {
1020 .family = AF_IUCV,
1021 .owner = THIS_MODULE,
1022 .create = iucv_sock_create,
1023};
1024
1025static int afiucv_init(void)
1026{
1027 int err;
1028
1029 if (!MACHINE_IS_VM) {
1030 printk(KERN_ERR "AF_IUCV connection needs VM as base\n");
1031 err = -EPROTONOSUPPORT;
1032 goto out;
1033 }
1034 cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
1035 if (unlikely(err)) {
1036 printk(KERN_ERR "AF_IUCV needs the VM userid\n");
1037 err = -EPROTONOSUPPORT;
1038 goto out;
1039 }
1040
1041 err = iucv_register(&af_iucv_handler, 0);
1042 if (err)
1043 goto out;
1044 err = proto_register(&iucv_proto, 0);
1045 if (err)
1046 goto out_iucv;
1047 err = sock_register(&iucv_sock_family_ops);
1048 if (err)
1049 goto out_proto;
1050 printk(KERN_INFO "AF_IUCV lowlevel driver initialized\n");
1051 return 0;
1052
1053out_proto:
1054 proto_unregister(&iucv_proto);
1055out_iucv:
1056 iucv_unregister(&af_iucv_handler, 0);
1057out:
1058 return err;
1059}
1060
1061static void __exit afiucv_exit(void)
1062{
1063 sock_unregister(PF_IUCV);
1064 proto_unregister(&iucv_proto);
1065 iucv_unregister(&af_iucv_handler, 0);
1066
1067 printk(KERN_INFO "AF_IUCV lowlevel driver unloaded\n");
1068}
1069
1070module_init(afiucv_init);
1071module_exit(afiucv_exit);
1072
1073MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
1074MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
1075MODULE_VERSION(VERSION);
1076MODULE_LICENSE("GPL");
1077MODULE_ALIAS_NETPROTO(PF_IUCV);
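
From user space the new family behaves like any other SOCK_STREAM family. A hedged sketch of a client connect follows; struct sockaddr_iucv is re-declared locally for illustration, and both the AF_IUCV value and the field layout are assumptions inferred from the bind/connect/getname code above rather than taken verbatim from the new af_iucv.h header:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_IUCV
#define AF_IUCV 32			/* assumed value for this sketch */
#endif

/* Local mirror of struct sockaddr_iucv, for illustration only. */
struct sockaddr_iucv_example {
	sa_family_t	siucv_family;
	unsigned short	siucv_port;		/* reserved */
	unsigned int	siucv_addr;		/* reserved */
	char		siucv_nodeid[8];	/* reserved */
	char		siucv_user_id[8];	/* z/VM guest user id, blank padded */
	char		siucv_name[8];		/* application name, blank padded */
};

int main(void)
{
	struct sockaddr_iucv_example sa;
	int fd = socket(AF_IUCV, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&sa, 0, sizeof(sa));
	sa.siucv_family = AF_IUCV;
	memcpy(sa.siucv_user_id, "PEERGST ", 8);	/* hypothetical peer guest */
	memcpy(sa.siucv_name,    "EXAMPLE ", 8);	/* hypothetical service name */

	if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		perror("connect");

	close(fd);
	return 0;
}
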
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
new file mode 100644
index 000000000000..1b10d576f222
--- /dev/null
+++ b/net/iucv/iucv.c
@@ -0,0 +1,1619 @@
1/*
2 * IUCV base infrastructure.
3 *
4 * Copyright 2001, 2006 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s):
6 * Original source:
7 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
8 * Xenia Tkatschow (xenia@us.ibm.com)
9 * 2Gb awareness and general cleanup:
10 * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
11 * Rewritten for af_iucv:
12 * Martin Schwidefsky <schwidefsky@de.ibm.com>
13 *
14 * Documentation used:
15 * The original source
16 * CP Programming Service, IBM document # SC24-5760
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2, or (at your option)
21 * any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 */
32
33#include <linux/module.h>
34#include <linux/moduleparam.h>
35
36#include <linux/spinlock.h>
37#include <linux/kernel.h>
38#include <linux/slab.h>
39#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/list.h>
42#include <linux/errno.h>
43#include <linux/err.h>
44#include <linux/device.h>
45#include <linux/cpu.h>
46#include <net/iucv/iucv.h>
47#include <asm/atomic.h>
48#include <asm/ebcdic.h>
49#include <asm/io.h>
50#include <asm/s390_ext.h>
51#include <asm/s390_rdev.h>
52#include <asm/smp.h>
53
54/*
55 * FLAGS:
56 * All flags are defined in the field IPFLAGS1 of each function
57 * and can be found in CP Programming Services.
58 * IPSRCCLS - Indicates you have specified a source class.
59 * IPTRGCLS - Indicates you have specified a target class.
60 * IPFGPID - Indicates you have specified a pathid.
61 * IPFGMID - Indicates you have specified a message ID.
62 * IPNORPY - Indicates a one-way message. No reply expected.
63 * IPALL - Indicates that all paths are affected.
64 */
65#define IUCV_IPSRCCLS 0x01
66#define IUCV_IPTRGCLS 0x01
67#define IUCV_IPFGPID 0x02
68#define IUCV_IPFGMID 0x04
69#define IUCV_IPNORPY 0x10
70#define IUCV_IPALL 0x80
71
72static int iucv_bus_match (struct device *dev, struct device_driver *drv)
73{
74 return 0;
75}
76
77struct bus_type iucv_bus = {
78 .name = "iucv",
79 .match = iucv_bus_match,
80};
81
82struct device *iucv_root;
83static int iucv_available;
84
85/* General IUCV interrupt structure */
86struct iucv_irq_data {
87 u16 ippathid;
88 u8 ipflags1;
89 u8 iptype;
90 u32 res2[8];
91};
92
93struct iucv_work {
94 struct list_head list;
95 struct iucv_irq_data data;
96};
97
98static LIST_HEAD(iucv_work_queue);
99static DEFINE_SPINLOCK(iucv_work_lock);
100
101static struct iucv_irq_data *iucv_irq_data;
102static cpumask_t iucv_buffer_cpumask = CPU_MASK_NONE;
103static cpumask_t iucv_irq_cpumask = CPU_MASK_NONE;
104
105static void iucv_tasklet_handler(unsigned long);
106static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_handler,0);
107
108enum iucv_command_codes {
109 IUCV_QUERY = 0,
110 IUCV_RETRIEVE_BUFFER = 2,
111 IUCV_SEND = 4,
112 IUCV_RECEIVE = 5,
113 IUCV_REPLY = 6,
114 IUCV_REJECT = 8,
115 IUCV_PURGE = 9,
116 IUCV_ACCEPT = 10,
117 IUCV_CONNECT = 11,
118 IUCV_DECLARE_BUFFER = 12,
119 IUCV_QUIESCE = 13,
120 IUCV_RESUME = 14,
121 IUCV_SEVER = 15,
122 IUCV_SETMASK = 16,
123};
124
125/*
126 * Error messages that are used with the iucv_sever function. They get
127 * converted to EBCDIC.
128 */
129static char iucv_error_no_listener[16] = "NO LISTENER";
130static char iucv_error_no_memory[16] = "NO MEMORY";
131static char iucv_error_pathid[16] = "INVALID PATHID";
132
133/*
134 * iucv_handler_list: List of registered handlers.
135 */
136static LIST_HEAD(iucv_handler_list);
137
138/*
139 * iucv_path_table: an array of iucv_path structures.
140 */
141static struct iucv_path **iucv_path_table;
142static unsigned long iucv_max_pathid;
143
144/*
 145 * iucv_table_lock: spinlock protecting iucv_handler_list and iucv_path_table
146 */
147static DEFINE_SPINLOCK(iucv_table_lock);
148
149/*
150 * iucv_tasklet_cpu: contains the number of the cpu executing the tasklet.
151 * Needed for iucv_path_sever called from tasklet.
152 */
153static int iucv_tasklet_cpu = -1;
154
155/*
156 * Mutex and wait queue for iucv_register/iucv_unregister.
157 */
158static DEFINE_MUTEX(iucv_register_mutex);
159
160/*
161 * Counter for number of non-smp capable handlers.
162 */
163static int iucv_nonsmp_handler;
164
165/*
166 * IUCV control data structure. Used by iucv_path_accept, iucv_path_connect,
167 * iucv_path_quiesce and iucv_path_sever.
168 */
169struct iucv_cmd_control {
170 u16 ippathid;
171 u8 ipflags1;
172 u8 iprcode;
173 u16 ipmsglim;
174 u16 res1;
175 u8 ipvmid[8];
176 u8 ipuser[16];
177 u8 iptarget[8];
178} __attribute__ ((packed,aligned(8)));
179
180/*
181 * Data in parameter list iucv structure. Used by iucv_message_send,
182 * iucv_message_send2way and iucv_message_reply.
183 */
184struct iucv_cmd_dpl {
185 u16 ippathid;
186 u8 ipflags1;
187 u8 iprcode;
188 u32 ipmsgid;
189 u32 iptrgcls;
190 u8 iprmmsg[8];
191 u32 ipsrccls;
192 u32 ipmsgtag;
193 u32 ipbfadr2;
194 u32 ipbfln2f;
195 u32 res;
196} __attribute__ ((packed,aligned(8)));
197
198/*
199 * Data in buffer iucv structure. Used by iucv_message_receive,
200 * iucv_message_reject, iucv_message_send, iucv_message_send2way
201 * and iucv_declare_cpu.
202 */
203struct iucv_cmd_db {
204 u16 ippathid;
205 u8 ipflags1;
206 u8 iprcode;
207 u32 ipmsgid;
208 u32 iptrgcls;
209 u32 ipbfadr1;
210 u32 ipbfln1f;
211 u32 ipsrccls;
212 u32 ipmsgtag;
213 u32 ipbfadr2;
214 u32 ipbfln2f;
215 u32 res;
216} __attribute__ ((packed,aligned(8)));
217
218/*
219 * Purge message iucv structure. Used by iucv_message_purge.
220 */
221struct iucv_cmd_purge {
222 u16 ippathid;
223 u8 ipflags1;
224 u8 iprcode;
225 u32 ipmsgid;
226 u8 ipaudit[3];
227 u8 res1[5];
228 u32 res2;
229 u32 ipsrccls;
230 u32 ipmsgtag;
231 u32 res3[3];
232} __attribute__ ((packed,aligned(8)));
233
234/*
235 * Set mask iucv structure. Used by iucv_enable_cpu.
236 */
237struct iucv_cmd_set_mask {
238 u8 ipmask;
239 u8 res1[2];
240 u8 iprcode;
241 u32 res2[9];
242} __attribute__ ((packed,aligned(8)));
243
244union iucv_param {
245 struct iucv_cmd_control ctrl;
246 struct iucv_cmd_dpl dpl;
247 struct iucv_cmd_db db;
248 struct iucv_cmd_purge purge;
249 struct iucv_cmd_set_mask set_mask;
250};
251
252/*
253 * Anchor for per-cpu IUCV command parameter block.
254 */
255static union iucv_param *iucv_param;
256
257/**
258 * iucv_call_b2f0
 259 * @command: identifier of IUCV call to CP.
 260 * @parm: pointer to a union iucv_param block
261 *
262 * Calls CP to execute IUCV commands.
263 *
264 * Returns the result of the CP IUCV call.
265 */
266static inline int iucv_call_b2f0(int command, union iucv_param *parm)
267{
268 register unsigned long reg0 asm ("0");
269 register unsigned long reg1 asm ("1");
270 int ccode;
271
272 reg0 = command;
273 reg1 = virt_to_phys(parm);
274 asm volatile(
275 " .long 0xb2f01000\n"
276 " ipm %0\n"
277 " srl %0,28\n"
278 : "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1)
279 : "m" (*parm) : "cc");
280 return (ccode == 1) ? parm->ctrl.iprcode : ccode;
281}
282
283/**
284 * iucv_query_maxconn
285 *
286 * Determines the maximum number of connections that may be established.
287 *
 288 * Returns 0 if successful; -ENOMEM if the parameter block could not be
 289 * allocated, or -EPERM if IUCV is not available.
290 */
291static int iucv_query_maxconn(void)
292{
293 register unsigned long reg0 asm ("0");
294 register unsigned long reg1 asm ("1");
295 void *param;
296 int ccode;
297
298 param = kzalloc(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA);
299 if (!param)
300 return -ENOMEM;
301 reg0 = IUCV_QUERY;
302 reg1 = (unsigned long) param;
303 asm volatile (
304 " .long 0xb2f01000\n"
305 " ipm %0\n"
306 " srl %0,28\n"
307 : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
308 if (ccode == 0)
309 iucv_max_pathid = reg0;
310 kfree(param);
311 return ccode ? -EPERM : 0;
312}
313
314/**
315 * iucv_allow_cpu
316 * @data: unused
317 *
318 * Allow iucv interrupts on this cpu.
319 */
320static void iucv_allow_cpu(void *data)
321{
322 int cpu = smp_processor_id();
323 union iucv_param *parm;
324
325 /*
326 * Enable all iucv interrupts.
327 * ipmask contains bits for the different interrupts
328 * 0x80 - Flag to allow nonpriority message pending interrupts
329 * 0x40 - Flag to allow priority message pending interrupts
330 * 0x20 - Flag to allow nonpriority message completion interrupts
331 * 0x10 - Flag to allow priority message completion interrupts
332 * 0x08 - Flag to allow IUCV control interrupts
333 */
334 parm = percpu_ptr(iucv_param, smp_processor_id());
335 memset(parm, 0, sizeof(union iucv_param));
336 parm->set_mask.ipmask = 0xf8;
337 iucv_call_b2f0(IUCV_SETMASK, parm);
338
339 /* Set indication that iucv interrupts are allowed for this cpu. */
340 cpu_set(cpu, iucv_irq_cpumask);
341}
342
343/**
344 * iucv_block_cpu
345 * @data: unused
346 *
347 * Block iucv interrupts on this cpu.
348 */
349static void iucv_block_cpu(void *data)
350{
351 int cpu = smp_processor_id();
352 union iucv_param *parm;
353
354 /* Disable all iucv interrupts. */
355 parm = percpu_ptr(iucv_param, smp_processor_id());
356 memset(parm, 0, sizeof(union iucv_param));
357 iucv_call_b2f0(IUCV_SETMASK, parm);
358
359 /* Clear indication that iucv interrupts are allowed for this cpu. */
360 cpu_clear(cpu, iucv_irq_cpumask);
361}
362
363/**
364 * iucv_declare_cpu
365 * @data: unused
366 *
 367 * Declare an interrupt buffer on this cpu.
368 */
369static void iucv_declare_cpu(void *data)
370{
371 int cpu = smp_processor_id();
372 union iucv_param *parm;
373 int rc;
374
375 if (cpu_isset(cpu, iucv_buffer_cpumask))
376 return;
377
378 /* Declare interrupt buffer. */
379 parm = percpu_ptr(iucv_param, cpu);
380 memset(parm, 0, sizeof(union iucv_param));
381 parm->db.ipbfadr1 = virt_to_phys(percpu_ptr(iucv_irq_data, cpu));
382 rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
383 if (rc) {
384 char *err = "Unknown";
385 switch(rc) {
386 case 0x03:
387 err = "Directory error";
388 break;
389 case 0x0a:
390 err = "Invalid length";
391 break;
392 case 0x13:
393 err = "Buffer already exists";
394 break;
395 case 0x3e:
396 err = "Buffer overlap";
397 break;
398 case 0x5c:
399 err = "Paging or storage error";
400 break;
401 }
402 printk(KERN_WARNING "iucv_register: iucv_declare_buffer "
403 "on cpu %i returned error 0x%02x (%s)\n", cpu, rc, err);
404 return;
405 }
406
407 /* Set indication that an iucv buffer exists for this cpu. */
408 cpu_set(cpu, iucv_buffer_cpumask);
409
410 if (iucv_nonsmp_handler == 0 || cpus_empty(iucv_irq_cpumask))
411 /* Enable iucv interrupts on this cpu. */
412 iucv_allow_cpu(NULL);
413 else
414 /* Disable iucv interrupts on this cpu. */
415 iucv_block_cpu(NULL);
416}
417
418/**
419 * iucv_retrieve_cpu
420 * @data: unused
421 *
422 * Retrieve interrupt buffer on this cpu.
423 */
424static void iucv_retrieve_cpu(void *data)
425{
426 int cpu = smp_processor_id();
427 union iucv_param *parm;
428
429 if (!cpu_isset(cpu, iucv_buffer_cpumask))
430 return;
431
432 /* Block iucv interrupts. */
433 iucv_block_cpu(NULL);
434
435 /* Retrieve interrupt buffer. */
436 parm = percpu_ptr(iucv_param, cpu);
437 iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);
438
439 /* Clear indication that an iucv buffer exists for this cpu. */
440 cpu_clear(cpu, iucv_buffer_cpumask);
441}
442
443/**
 444 * iucv_setmask_mp
445 *
446 * Allow iucv interrupts on all cpus.
447 */
448static void iucv_setmask_mp(void)
449{
450 int cpu;
451
452 for_each_online_cpu(cpu)
453 /* Enable all cpus with a declared buffer. */
454 if (cpu_isset(cpu, iucv_buffer_cpumask) &&
455 !cpu_isset(cpu, iucv_irq_cpumask))
456 smp_call_function_on(iucv_allow_cpu, NULL, 0, 1, cpu);
457}
458
459/**
460 * iucv_setmask_up
461 *
 462 * Allow iucv interrupts on a single cpu.
463 */
464static void iucv_setmask_up(void)
465{
466 cpumask_t cpumask;
467 int cpu;
468
 469 /* Disable all cpus but the first in iucv_irq_cpumask. */
470 cpumask = iucv_irq_cpumask;
471 cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
472 for_each_cpu_mask(cpu, cpumask)
473 smp_call_function_on(iucv_block_cpu, NULL, 0, 1, cpu);
474}
475
476/**
477 * iucv_enable
478 *
479 * This function makes iucv ready for use. It allocates the pathid
480 * table, declares an iucv interrupt buffer and enables the iucv
481 * interrupts. Called when the first user has registered an iucv
482 * handler.
483 */
484static int iucv_enable(void)
485{
486 size_t alloc_size;
487 int cpu, rc;
488
489 rc = -ENOMEM;
490 alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
491 iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
492 if (!iucv_path_table)
493 goto out;
494 /* Declare per cpu buffers. */
495 rc = -EIO;
496 for_each_online_cpu(cpu)
497 smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
498 if (cpus_empty(iucv_buffer_cpumask))
499 /* No cpu could declare an iucv buffer. */
500 goto out_path;
501 return 0;
502
503out_path:
504 kfree(iucv_path_table);
505out:
506 return rc;
507}
508
509/**
510 * iucv_disable
511 *
512 * This function shuts down iucv. It disables iucv interrupts, retrieves
513 * the iucv interrupt buffer and frees the pathid table. Called after the
 514 * last user has unregistered its iucv handler.
515 */
516static void iucv_disable(void)
517{
518 on_each_cpu(iucv_retrieve_cpu, NULL, 0, 1);
519 kfree(iucv_path_table);
520}
521
522#ifdef CONFIG_HOTPLUG_CPU
523static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
524 unsigned long action, void *hcpu)
525{
526 cpumask_t cpumask;
527 long cpu = (long) hcpu;
528
529 switch (action) {
530 case CPU_UP_PREPARE:
531 if (!percpu_populate(iucv_irq_data,
532 sizeof(struct iucv_irq_data),
533 GFP_KERNEL|GFP_DMA, cpu))
534 return NOTIFY_BAD;
535 if (!percpu_populate(iucv_param, sizeof(union iucv_param),
536 GFP_KERNEL|GFP_DMA, cpu)) {
537 percpu_depopulate(iucv_irq_data, cpu);
538 return NOTIFY_BAD;
539 }
540 break;
541 case CPU_UP_CANCELED:
542 case CPU_DEAD:
543 percpu_depopulate(iucv_param, cpu);
544 percpu_depopulate(iucv_irq_data, cpu);
545 break;
546 case CPU_ONLINE:
547 case CPU_DOWN_FAILED:
548 smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
549 break;
550 case CPU_DOWN_PREPARE:
551 cpumask = iucv_buffer_cpumask;
552 cpu_clear(cpu, cpumask);
553 if (cpus_empty(cpumask))
554 /* Can't offline last IUCV enabled cpu. */
555 return NOTIFY_BAD;
556 smp_call_function_on(iucv_retrieve_cpu, NULL, 0, 1, cpu);
557 if (cpus_empty(iucv_irq_cpumask))
558 smp_call_function_on(iucv_allow_cpu, NULL, 0, 1,
559 first_cpu(iucv_buffer_cpumask));
560 break;
561 }
562 return NOTIFY_OK;
563}
564
565static struct notifier_block iucv_cpu_notifier = {
566 .notifier_call = iucv_cpu_notify,
567};
568#endif
569
570/**
571 * iucv_sever_pathid
572 * @pathid: path identification number.
573 * @userdata: 16 bytes of user data.
574 *
575 * Sever an iucv path to free up the pathid. Used internally.
576 */
577static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
578{
579 union iucv_param *parm;
580
581 parm = percpu_ptr(iucv_param, smp_processor_id());
582 memset(parm, 0, sizeof(union iucv_param));
583 if (userdata)
584 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
585 parm->ctrl.ippathid = pathid;
586 return iucv_call_b2f0(IUCV_SEVER, parm);
587}
588
589/**
590 * __iucv_cleanup_pathid
591 * @dummy: unused dummy argument
592 *
593 * Nop function called via smp_call_function to force work items from
594 * pending external iucv interrupts to the work queue.
595 */
596static void __iucv_cleanup_pathid(void *dummy)
597{
598}
599
600/**
601 * iucv_cleanup_pathid
602 * @pathid: 16 bit pathid
603 *
604 * Function called after a path has been severed to find all remaining
605 * work items for the now stale pathid. The caller needs to hold the
606 * iucv_table_lock.
607 */
608static void iucv_cleanup_pathid(u16 pathid)
609{
610 struct iucv_work *p, *n;
611
612 /*
613 * Path is severed, the pathid can be reused immediately on
614 * an iucv connect or a connection pending interrupt.
615 * iucv_path_connect and connection pending interrupt will
616 * wait until the iucv_table_lock is released before the
617 * recycled pathid enters the system.
618 * Force remaining interrupts to the work queue, then
619 * scan the work queue for items of this path.
620 */
621 smp_call_function(__iucv_cleanup_pathid, NULL, 0, 1);
622 spin_lock_irq(&iucv_work_lock);
623 list_for_each_entry_safe(p, n, &iucv_work_queue, list) {
624 /* Remove work items for pathid except connection pending */
625 if (p->data.ippathid == pathid && p->data.iptype != 0x01) {
626 list_del(&p->list);
627 kfree(p);
628 }
629 }
630 spin_unlock_irq(&iucv_work_lock);
631}
632
633/**
634 * iucv_register:
635 * @handler: address of iucv handler structure
636 * @smp: != 0 indicates that the handler can deal with out of order messages
637 *
638 * Registers a driver with IUCV.
639 *
640 * Returns 0 on success, -ENOMEM if the memory allocation for the pathid
641 * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus.
642 */
643int iucv_register(struct iucv_handler *handler, int smp)
644{
645 int rc;
646
647 if (!iucv_available)
648 return -ENOSYS;
649 mutex_lock(&iucv_register_mutex);
650 if (!smp)
651 iucv_nonsmp_handler++;
652 if (list_empty(&iucv_handler_list)) {
653 rc = iucv_enable();
654 if (rc)
655 goto out_mutex;
656 } else if (!smp && iucv_nonsmp_handler == 1)
657 iucv_setmask_up();
658 INIT_LIST_HEAD(&handler->paths);
659
660 spin_lock_irq(&iucv_table_lock);
661 list_add_tail(&handler->list, &iucv_handler_list);
662 spin_unlock_irq(&iucv_table_lock);
663 rc = 0;
664out_mutex:
665 mutex_unlock(&iucv_register_mutex);
666 return rc;
667}
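
A minimal usage sketch, not part of the patch above: how a driver might register with this API. The handler, callback, and init function names are hypothetical; the callback prototype follows the call sites in this file and the declarations assumed from iucv.h.

/* Illustrative only -- not part of iucv.c. */
static void example_path_complete(struct iucv_path *path, u8 ipuser[16])
{
	/* The path is now usable; a real driver would start I/O here. */
}

static struct iucv_handler example_handler = {
	/* Callbacks left unset are simply never invoked for this handler. */
	.path_complete	= example_path_complete,
};

static int __init example_init(void)
{
	/* smp == 0: deliver all iucv interrupts on a single cpu. */
	return iucv_register(&example_handler, 0);
}
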
668
669/**
670 * iucv_unregister
671 * @handler: address of iucv handler structure
672 * @smp: != 0 indicates that the handler can deal with out of order messages
673 *
674 * Unregister driver from IUCV.
675 */
676void iucv_unregister(struct iucv_handler *handler, int smp)
677{
678 struct iucv_path *p, *n;
679
680 mutex_lock(&iucv_register_mutex);
681 spin_lock_bh(&iucv_table_lock);
682 /* Remove handler from the iucv_handler_list. */
683 list_del_init(&handler->list);
684 /* Sever all pathids still referring to the handler. */
685 list_for_each_entry_safe(p, n, &handler->paths, list) {
686 iucv_sever_pathid(p->pathid, NULL);
687 iucv_path_table[p->pathid] = NULL;
688 list_del(&p->list);
689 iucv_cleanup_pathid(p->pathid);
690 iucv_path_free(p);
691 }
692 spin_unlock_bh(&iucv_table_lock);
693 if (!smp)
694 iucv_nonsmp_handler--;
695 if (list_empty(&iucv_handler_list))
696 iucv_disable();
697 else if (!smp && iucv_nonsmp_handler == 0)
698 iucv_setmask_mp();
699 mutex_unlock(&iucv_register_mutex);
700}
701
702/**
703 * iucv_path_accept
704 * @path: address of iucv path structure
705 * @handler: address of iucv handler structure
706 * @userdata: 16 bytes of data reflected to the communication partner
707 * @private: private data passed to interrupt handlers for this path
708 *
709 * This function is issued after the user received a connection pending
710 * external interrupt and now wishes to complete the IUCV communication path.
711 *
712 * Returns the result of the CP IUCV call.
713 */
714int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
715 u8 userdata[16], void *private)
716{
717 union iucv_param *parm;
718 int rc;
719
720 local_bh_disable();
721 /* Prepare parameter block. */
722 parm = percpu_ptr(iucv_param, smp_processor_id());
723 memset(parm, 0, sizeof(union iucv_param));
724 parm->ctrl.ippathid = path->pathid;
725 parm->ctrl.ipmsglim = path->msglim;
726 if (userdata)
727 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
728 parm->ctrl.ipflags1 = path->flags;
729
730 rc = iucv_call_b2f0(IUCV_ACCEPT, parm);
731 if (!rc) {
732 path->private = private;
733 path->msglim = parm->ctrl.ipmsglim;
734 path->flags = parm->ctrl.ipflags1;
735 }
736 local_bh_enable();
737 return rc;
738}
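
A server-side sketch, again hypothetical: a path_pending callback that accepts the incoming connection, reusing the example_handler from the sketch after iucv_register.

static int example_path_pending(struct iucv_path *path, u8 ipvmid[8],
				u8 ipuser[16])
{
	/* Returning non-zero here declines the path, and the base code
	 * offers it to the next registered handler. */
	return iucv_path_accept(path, &example_handler, NULL, NULL);
}
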
739
740/**
741 * iucv_path_connect
742 * @path: address of iucv path structure
743 * @handler: address of iucv handler structure
744 * @userid: 8-byte user identification
745 * @system: 8-byte target system identification
746 * @userdata: 16 bytes of data reflected to the communication partner
747 * @private: private data passed to interrupt handlers for this path
748 *
749 * This function establishes an IUCV path. Although the connect may complete
750 * successfully, you are not able to use the path until you receive an IUCV
751 * Connection Complete external interrupt.
752 *
753 * Returns the result of the CP IUCV call.
754 */
755int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
756 u8 userid[8], u8 system[8], u8 userdata[16],
757 void *private)
758{
759 union iucv_param *parm;
760 int rc;
761
762 preempt_disable();
763 if (iucv_tasklet_cpu != smp_processor_id())
764 spin_lock_bh(&iucv_table_lock);
765 parm = percpu_ptr(iucv_param, smp_processor_id());
766 memset(parm, 0, sizeof(union iucv_param));
767 parm->ctrl.ipmsglim = path->msglim;
768 parm->ctrl.ipflags1 = path->flags;
769 if (userid) {
770 memcpy(parm->ctrl.ipvmid, userid, sizeof(parm->ctrl.ipvmid));
771 ASCEBC(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
772 EBC_TOUPPER(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
773 }
774 if (system) {
775 memcpy(parm->ctrl.iptarget, system,
776 sizeof(parm->ctrl.iptarget));
777 ASCEBC(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
778 EBC_TOUPPER(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
779 }
780 if (userdata)
781 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
782
783 rc = iucv_call_b2f0(IUCV_CONNECT, parm);
784 if (!rc) {
785 if (parm->ctrl.ippathid < iucv_max_pathid) {
786 path->pathid = parm->ctrl.ippathid;
787 path->msglim = parm->ctrl.ipmsglim;
788 path->flags = parm->ctrl.ipflags1;
789 path->handler = handler;
790 path->private = private;
791 list_add_tail(&path->list, &handler->paths);
792 iucv_path_table[path->pathid] = path;
793 } else {
794 iucv_sever_pathid(parm->ctrl.ippathid,
795 iucv_error_pathid);
796 rc = -EIO;
797 }
798 }
799 if (iucv_tasklet_cpu != smp_processor_id())
800 spin_unlock_bh(&iucv_table_lock);
801 preempt_enable();
802 return rc;
803}
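
A client-side sketch, not part of the patch: allocating a path and connecting it to a partner. iucv_path_alloc/iucv_path_free are the helpers used elsewhere in this file; the peer id "LNXPEER " and the message limit of 16 are made-up values.

static int example_connect(struct iucv_path **connected)
{
	static u8 peer_id[8] = "LNXPEER ";	/* hypothetical partner */
	struct iucv_path *path;
	int rc;

	path = iucv_path_alloc(16, 0, GFP_KERNEL);	/* msglim 16, no flags */
	if (!path)
		return -ENOMEM;
	rc = iucv_path_connect(path, &example_handler, peer_id,
			       NULL, NULL, NULL);
	if (rc) {
		iucv_path_free(path);
		return rc;
	}
	/* Wait for the path_complete callback before sending messages. */
	*connected = path;
	return 0;
}
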
804
805/**
806 * iucv_path_quiesce:
807 * @path: address of iucv path structure
808 * @userdata: 16 bytes of data reflected to the communication partner
809 *
810 * This function temporarily suspends incoming messages on an IUCV path.
811 * You can later reactivate the path by invoking the iucv_resume function.
812 *
813 * Returns the result from the CP IUCV call.
814 */
815int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
816{
817 union iucv_param *parm;
818 int rc;
819
820 local_bh_disable();
821 parm = percpu_ptr(iucv_param, smp_processor_id());
822 memset(parm, 0, sizeof(union iucv_param));
823 if (userdata)
824 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
825 parm->ctrl.ippathid = path->pathid;
826 rc = iucv_call_b2f0(IUCV_QUIESCE, parm);
827 local_bh_enable();
828 return rc;
829}
830
831/**
832 * iucv_path_resume:
833 * @path: address of iucv path structure
834 * @userdata: 16 bytes of data reflected to the communication partner
835 *
836 * This function resumes incoming messages on an IUCV path that has
837 * been stopped with iucv_path_quiesce.
838 *
839 * Returns the result from the CP IUCV call.
840 */
841int iucv_path_resume(struct iucv_path *path, u8 userdata[16])
842{
843 union iucv_param *parm;
844 int rc;
845
846 local_bh_disable();
847 parm = percpu_ptr(iucv_param, smp_processor_id());
848 memset(parm, 0, sizeof(union iucv_param));
849 if (userdata)
850 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
851 parm->ctrl.ippathid = path->pathid;
852 rc = iucv_call_b2f0(IUCV_RESUME, parm);
853 local_bh_enable();
854 return rc;
855}
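
A small flow-control sketch under the same assumptions: pausing and resuming delivery with the two calls above; the helper name is hypothetical and NULL means no user data is reflected to the partner.

static void example_flow_control(struct iucv_path *path, int stop)
{
	if (stop)
		iucv_path_quiesce(path, NULL);	/* suspend incoming messages */
	else
		iucv_path_resume(path, NULL);	/* resume delivery */
}
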
856
857/**
858 * iucv_path_sever
859 * @path: address of iucv path structure
860 * @userdata: 16 bytes of data reflected to the communication partner
861 *
862 * This function terminates an IUCV path.
863 *
864 * Returns the result from the CP IUCV call.
865 */
866int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
867{
868 int rc;
869
870
871 preempt_disable();
872 if (iucv_tasklet_cpu != smp_processor_id())
873 spin_lock_bh(&iucv_table_lock);
874 rc = iucv_sever_pathid(path->pathid, userdata);
875 if (!rc) {
876 iucv_path_table[path->pathid] = NULL;
877 list_del_init(&path->list);
878 iucv_cleanup_pathid(path->pathid);
879 }
880 if (iucv_tasklet_cpu != smp_processor_id())
881 spin_unlock_bh(&iucv_table_lock);
882 preempt_enable();
883 return rc;
884}
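
A teardown sketch, hypothetical as well: severing and releasing a path set up as in the earlier sketches. Freeing after a successful sever mirrors what iucv_unregister does for paths still attached to a handler.

static void example_disconnect(struct iucv_path *path)
{
	/* On success the base code has removed the path from the path
	 * table and the handler's list; the owner frees the structure. */
	if (!iucv_path_sever(path, NULL))
		iucv_path_free(path);
}
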
885
886/**
887 * iucv_message_purge
888 * @path: address of iucv path structure
889 * @msg: address of iucv msg structure
890 * @srccls: source class of message
891 *
892 * Cancels a message you have sent.
893 *
894 * Returns the result from the CP IUCV call.
895 */
896int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
897 u32 srccls)
898{
899 union iucv_param *parm;
900 int rc;
901
902 local_bh_disable();
903 parm = percpu_ptr(iucv_param, smp_processor_id());
904 memset(parm, 0, sizeof(union iucv_param));
905 parm->purge.ippathid = path->pathid;
906 parm->purge.ipmsgid = msg->id;
907 parm->purge.ipsrccls = srccls;
908 parm->purge.ipflags1 = IUCV_IPSRCCLS | IUCV_IPFGMID | IUCV_IPFGPID;
909 rc = iucv_call_b2f0(IUCV_PURGE, parm);
910 if (!rc) {
911 msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8;
912 msg->tag = parm->purge.ipmsgtag;
913 }
914 local_bh_enable();
915 return rc;
916}
917
918/**
919 * iucv_message_receive
920 * @path: address of iucv path structure
921 * @msg: address of iucv msg structure
922 * @flags: how the message is received (IUCV_IPBUFLST)
923 * @buffer: address of data buffer or address of struct iucv_array
924 * @size: length of data buffer
925 * @residual: address to store the residual count, or NULL
926 *
927 * This function receives messages that are being sent to you over
928 * established paths. This function will deal with RMDATA messages
929 * embedded in struct iucv_message as well.
930 *
931 * Returns the result from the CP IUCV call.
932 */
933int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
934 u8 flags, void *buffer, size_t size, size_t *residual)
935{
936 union iucv_param *parm;
937 struct iucv_array *array;
938 u8 *rmmsg;
939 size_t copy;
940 int rc;
941
942 if (msg->flags & IUCV_IPRMDATA) {
943 /*
944 * Message is 8 bytes long and has been stored to the
945 * message descriptor itself.
946 */
947 rc = (size < 8) ? 5 : 0;
948 if (residual)
949 *residual = abs(size - 8);
950 rmmsg = msg->rmmsg;
951 if (flags & IUCV_IPBUFLST) {
952 /* Copy to struct iucv_array. */
953 size = (size < 8) ? size : 8;
954 for (array = buffer; size > 0; array++) {
955 copy = min_t(size_t, size, array->length);
956 memcpy((u8 *)(addr_t) array->address,
957 rmmsg, copy);
958 rmmsg += copy;
959 size -= copy;
960 }
961 } else {
962 /* Copy to direct buffer. */
963 memcpy(buffer, rmmsg, min_t(size_t, size, 8));
964 }
965 return 0;
966 }
967
968 local_bh_disable();
969 parm = percpu_ptr(iucv_param, smp_processor_id());
970 memset(parm, 0, sizeof(union iucv_param));
971 parm->db.ipbfadr1 = (u32)(addr_t) buffer;
972 parm->db.ipbfln1f = (u32) size;
973 parm->db.ipmsgid = msg->id;
974 parm->db.ippathid = path->pathid;
975 parm->db.iptrgcls = msg->class;
976 parm->db.ipflags1 = (flags | IUCV_IPFGPID |
977 IUCV_IPFGMID | IUCV_IPTRGCLS);
978 rc = iucv_call_b2f0(IUCV_RECEIVE, parm);
979 if (!rc || rc == 5) {
980 msg->flags = parm->db.ipflags1;
981 if (residual)
982 *residual = parm->db.ipbfln1f;
983 }
984 local_bh_enable();
985 return rc;
986}
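
A receive sketch, not part of the patch: a hypothetical message_pending callback pulling data into a local buffer. The 256-byte buffer is arbitrary; return code 5 is the partial-data case the function above already treats as success with a residual.

static void example_message_pending(struct iucv_path *path,
				    struct iucv_message *msg)
{
	u8 buffer[256];
	size_t len = min_t(size_t, msg->length, sizeof(buffer));
	int rc;

	/* flags == 0: plain buffer receive; RMDATA messages are copied
	 * straight out of the message descriptor. */
	rc = iucv_message_receive(path, msg, 0, buffer, len, NULL);
	if (rc && rc != 5)
		return;
	/* ... process up to len bytes from buffer ... */
}
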
987
988/**
989 * iucv_message_reject
990 * @path: address of iucv path structure
991 * @msg: address of iucv msg structure
992 *
993 * The reject function refuses a specified message. Between the time you
994 * are notified of a message and the time that you complete the message,
995 * the message may be rejected.
996 *
997 * Returns the result from the CP IUCV call.
998 */
999int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
1000{
1001 union iucv_param *parm;
1002 int rc;
1003
1004 local_bh_disable();
1005 parm = percpu_ptr(iucv_param, smp_processor_id());
1006 memset(parm, 0, sizeof(union iucv_param));
1007 parm->db.ippathid = path->pathid;
1008 parm->db.ipmsgid = msg->id;
1009 parm->db.iptrgcls = msg->class;
1010 parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID);
1011 rc = iucv_call_b2f0(IUCV_REJECT, parm);
1012 local_bh_enable();
1013 return rc;
1014}
1015
1016/**
1017 * iucv_message_reply
1018 * @path: address of iucv path structure
1019 * @msg: address of iucv msg structure
1020 * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
1021 * @reply: address of reply data buffer or address of struct iucv_array
1022 * @size: length of reply data buffer
1023 *
1024 * This function responds to the two-way messages that you receive. You
1025 * must completely identify the message to which you wish to reply, i.e.
1026 * pathid, msgid, and trgcls. Prmmsg signifies the data is moved into
1027 * the parameter list.
1028 *
1029 * Returns the result from the CP IUCV call.
1030 */
1031int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
1032 u8 flags, void *reply, size_t size)
1033{
1034 union iucv_param *parm;
1035 int rc;
1036
1037 local_bh_disable();
1038 parm = percpu_ptr(iucv_param, smp_processor_id());
1039 memset(parm, 0, sizeof(union iucv_param));
1040 if (flags & IUCV_IPRMDATA) {
1041 parm->dpl.ippathid = path->pathid;
1042 parm->dpl.ipflags1 = flags;
1043 parm->dpl.ipmsgid = msg->id;
1044 parm->dpl.iptrgcls = msg->class;
1045 memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8));
1046 } else {
1047 parm->db.ipbfadr1 = (u32)(addr_t) reply;
1048 parm->db.ipbfln1f = (u32) size;
1049 parm->db.ippathid = path->pathid;
1050 parm->db.ipflags1 = flags;
1051 parm->db.ipmsgid = msg->id;
1052 parm->db.iptrgcls = msg->class;
1053 }
1054 rc = iucv_call_b2f0(IUCV_REPLY, parm);
1055 local_bh_enable();
1056 return rc;
1057}
1058
1059/**
1060 * iucv_message_send
1061 * @path: address of iucv path structure
1062 * @msg: address of iucv msg structure
1063 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
1064 * @srccls: source class of message
1065 * @buffer: address of send buffer or address of struct iucv_array
1066 * @size: length of send buffer
1067 *
1068 * This function transmits data to another application. Data to be
1069 * transmitted is in a buffer. This is a one-way message: the
1070 * receiver will not reply to the message.
1071 *
1072 * Returns the result from the CP IUCV call.
1073 */
1074int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
1075 u8 flags, u32 srccls, void *buffer, size_t size)
1076{
1077 union iucv_param *parm;
1078 int rc;
1079
1080 local_bh_disable();
1081 parm = percpu_ptr(iucv_param, smp_processor_id());
1082 memset(parm, 0, sizeof(union iucv_param));
1083 if (flags & IUCV_IPRMDATA) {
1084 /* Message of 8 bytes can be placed into the parameter list. */
1085 parm->dpl.ippathid = path->pathid;
1086 parm->dpl.ipflags1 = flags | IUCV_IPNORPY;
1087 parm->dpl.iptrgcls = msg->class;
1088 parm->dpl.ipsrccls = srccls;
1089 parm->dpl.ipmsgtag = msg->tag;
1090 memcpy(parm->dpl.iprmmsg, buffer, 8);
1091 } else {
1092 parm->db.ipbfadr1 = (u32)(addr_t) buffer;
1093 parm->db.ipbfln1f = (u32) size;
1094 parm->db.ippathid = path->pathid;
1095 parm->db.ipflags1 = flags | IUCV_IPNORPY;
1096 parm->db.iptrgcls = msg->class;
1097 parm->db.ipsrccls = srccls;
1098 parm->db.ipmsgtag = msg->tag;
1099 }
1100 rc = iucv_call_b2f0(IUCV_SEND, parm);
1101 if (!rc)
1102 msg->id = parm->db.ipmsgid;
1103 local_bh_enable();
1104 return rc;
1105}
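
A send sketch with made-up class and tag values: posting a one-way message on an established path. The base code fills in msg.id; a real driver would keep it to match the later message_complete notification.

static int example_send(struct iucv_path *path, void *data, size_t len)
{
	struct iucv_message msg;

	msg.class = 0;	/* hypothetical target class */
	msg.tag = 0;	/* reported back in message_complete */
	return iucv_message_send(path, &msg, 0, 0, data, len);
}
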
1106
1107/**
1108 * iucv_message_send2way
1109 * @path: address of iucv path structure
1110 * @msg: address of iucv msg structure
1111 * @flags: how the message is sent and the reply is received
1112 * (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_ANSLST)
1113 * @srccls: source class of message
1114 * @buffer: address of send buffer or address of struct iucv_array
1115 * @size: length of send buffer
1116 * @answer: address of answer buffer or address of struct iucv_array
1117 * @asize: size of reply buffer
1118 *
1119 * This function transmits data to another application. Data to be
1120 * transmitted is in a buffer. The receiver of the send is expected to
1121 * reply to the message and a buffer is provided into which IUCV moves
1122 * the reply to this message.
1123 *
1124 * Returns the result from the CP IUCV call.
1125 */
1126int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
1127 u8 flags, u32 srccls, void *buffer, size_t size,
1128 void *answer, size_t asize, size_t *residual)
1129{
1130 union iucv_param *parm;
1131 int rc;
1132
1133 local_bh_disable();
1134 parm = percpu_ptr(iucv_param, smp_processor_id());
1135 memset(parm, 0, sizeof(union iucv_param));
1136 if (flags & IUCV_IPRMDATA) {
1137 parm->dpl.ippathid = path->pathid;
1138 parm->dpl.ipflags1 = path->flags; /* priority message */
1139 parm->dpl.iptrgcls = msg->class;
1140 parm->dpl.ipsrccls = srccls;
1141 parm->dpl.ipmsgtag = msg->tag;
1142 parm->dpl.ipbfadr2 = (u32)(addr_t) answer;
1143 parm->dpl.ipbfln2f = (u32) asize;
1144 memcpy(parm->dpl.iprmmsg, buffer, 8);
1145 } else {
1146 parm->db.ippathid = path->pathid;
1147 parm->db.ipflags1 = path->flags; /* priority message */
1148 parm->db.iptrgcls = msg->class;
1149 parm->db.ipsrccls = srccls;
1150 parm->db.ipmsgtag = msg->tag;
1151 parm->db.ipbfadr1 = (u32)(addr_t) buffer;
1152 parm->db.ipbfln1f = (u32) size;
1153 parm->db.ipbfadr2 = (u32)(addr_t) answer;
1154 parm->db.ipbfln2f = (u32) asize;
1155 }
1156 rc = iucv_call_b2f0(IUCV_SEND, parm);
1157 if (!rc)
1158 msg->id = parm->db.ipmsgid;
1159 local_bh_enable();
1160 return rc;
1161}
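
A two-way sketch under the same assumptions: sending a request and letting IUCV place the partner's reply into a caller-supplied answer buffer; completion is again signalled through message_complete.

static int example_request(struct iucv_path *path, struct iucv_message *msg,
			   void *req, size_t req_len,
			   void *ans, size_t ans_len)
{
	msg->class = 0;
	msg->tag = 0;
	/* flags == 0 selects the buffer variant; IUCV_IPRMDATA would put
	 * an 8 byte request into the parameter list instead. */
	return iucv_message_send2way(path, msg, 0, 0, req, req_len,
				     ans, ans_len, NULL);
}
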
1162
1163/**
1164 * iucv_path_pending
1165 * @data: Pointer to external interrupt buffer
1166 *
1167 * Process connection pending work item. Called from tasklet while holding
1168 * iucv_table_lock.
1169 */
1170struct iucv_path_pending {
1171 u16 ippathid;
1172 u8 ipflags1;
1173 u8 iptype;
1174 u16 ipmsglim;
1175 u16 res1;
1176 u8 ipvmid[8];
1177 u8 ipuser[16];
1178 u32 res3;
1179 u8 ippollfg;
1180 u8 res4[3];
1181} __attribute__ ((packed));
1182
1183static void iucv_path_pending(struct iucv_irq_data *data)
1184{
1185 struct iucv_path_pending *ipp = (void *) data;
1186 struct iucv_handler *handler;
1187 struct iucv_path *path;
1188 char *error;
1189
1190 BUG_ON(iucv_path_table[ipp->ippathid]);
1191 /* New pathid, handler found. Create a new path struct. */
1192 error = iucv_error_no_memory;
1193 path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC);
1194 if (!path)
1195 goto out_sever;
1196 path->pathid = ipp->ippathid;
1197 iucv_path_table[path->pathid] = path;
1198 EBCASC(ipp->ipvmid, 8);
1199
1200 /* Call registered handlers until one is found that wants the path. */
1201 list_for_each_entry(handler, &iucv_handler_list, list) {
1202 if (!handler->path_pending)
1203 continue;
1204 /*
1205 * Add path to handler to allow a call to iucv_path_sever
1206 * inside the path_pending function. If the handler returns
1207 * an error remove the path from the handler again.
1208 */
1209 list_add(&path->list, &handler->paths);
1210 path->handler = handler;
1211 if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser))
1212 return;
1213 list_del(&path->list);
1214 path->handler = NULL;
1215 }
1216 /* No handler wanted the path. */
1217 iucv_path_table[path->pathid] = NULL;
1218 iucv_path_free(path);
1219 error = iucv_error_no_listener;
1220out_sever:
1221 iucv_sever_pathid(ipp->ippathid, error);
1222}
1223
1224/**
1225 * iucv_path_complete
1226 * @data: Pointer to external interrupt buffer
1227 *
1228 * Process connection complete work item. Called from tasklet while holding
1229 * iucv_table_lock.
1230 */
1231struct iucv_path_complete {
1232 u16 ippathid;
1233 u8 ipflags1;
1234 u8 iptype;
1235 u16 ipmsglim;
1236 u16 res1;
1237 u8 res2[8];
1238 u8 ipuser[16];
1239 u32 res3;
1240 u8 ippollfg;
1241 u8 res4[3];
1242} __attribute__ ((packed));
1243
1244static void iucv_path_complete(struct iucv_irq_data *data)
1245{
1246 struct iucv_path_complete *ipc = (void *) data;
1247 struct iucv_path *path = iucv_path_table[ipc->ippathid];
1248
1249 BUG_ON(!path || !path->handler);
1250 if (path->handler->path_complete)
1251 path->handler->path_complete(path, ipc->ipuser);
1252}
1253
1254/**
1255 * iucv_path_severed
1256 * @data: Pointer to external interrupt buffer
1257 *
1258 * Process connection severed work item. Called from tasklet while holding
1259 * iucv_table_lock.
1260 */
1261struct iucv_path_severed {
1262 u16 ippathid;
1263 u8 res1;
1264 u8 iptype;
1265 u32 res2;
1266 u8 res3[8];
1267 u8 ipuser[16];
1268 u32 res4;
1269 u8 ippollfg;
1270 u8 res5[3];
1271} __attribute__ ((packed));
1272
1273static void iucv_path_severed(struct iucv_irq_data *data)
1274{
1275 struct iucv_path_severed *ips = (void *) data;
1276 struct iucv_path *path = iucv_path_table[ips->ippathid];
1277
1278 BUG_ON(!path || !path->handler);
1279 if (path->handler->path_severed)
1280 path->handler->path_severed(path, ips->ipuser);
1281 else {
1282 iucv_sever_pathid(path->pathid, NULL);
1283 iucv_path_table[path->pathid] = NULL;
1284 list_del_init(&path->list);
1285 iucv_cleanup_pathid(path->pathid);
1286 iucv_path_free(path);
1287 }
1288}
1289
1290/**
1291 * iucv_path_quiesced
1292 * @data: Pointer to external interrupt buffer
1293 *
1294 * Process connection quiesced work item. Called from tasklet while holding
1295 * iucv_table_lock.
1296 */
1297struct iucv_path_quiesced {
1298 u16 ippathid;
1299 u8 res1;
1300 u8 iptype;
1301 u32 res2;
1302 u8 res3[8];
1303 u8 ipuser[16];
1304 u32 res4;
1305 u8 ippollfg;
1306 u8 res5[3];
1307} __attribute__ ((packed));
1308
1309static void iucv_path_quiesced(struct iucv_irq_data *data)
1310{
1311 struct iucv_path_quiesced *ipq = (void *) data;
1312 struct iucv_path *path = iucv_path_table[ipq->ippathid];
1313
1314 BUG_ON(!path || !path->handler);
1315 if (path->handler->path_quiesced)
1316 path->handler->path_quiesced(path, ipq->ipuser);
1317}
1318
1319/**
1320 * iucv_path_resumed
1321 * @data: Pointer to external interrupt buffer
1322 *
1323 * Process connection resumed work item. Called from tasklet while holding
1324 * iucv_table_lock.
1325 */
1326struct iucv_path_resumed {
1327 u16 ippathid;
1328 u8 res1;
1329 u8 iptype;
1330 u32 res2;
1331 u8 res3[8];
1332 u8 ipuser[16];
1333 u32 res4;
1334 u8 ippollfg;
1335 u8 res5[3];
1336} __attribute__ ((packed));
1337
1338static void iucv_path_resumed(struct iucv_irq_data *data)
1339{
1340 struct iucv_path_resumed *ipr = (void *) data;
1341 struct iucv_path *path = iucv_path_table[ipr->ippathid];
1342
1343 BUG_ON(!path || !path->handler);
1344 if (path->handler->path_resumed)
1345 path->handler->path_resumed(path, ipr->ipuser);
1346}
1347
1348/**
1349 * iucv_message_complete
1350 * @data: Pointer to external interrupt buffer
1351 *
1352 * Process message complete work item. Called from tasklet while holding
1353 * iucv_table_lock.
1354 */
1355struct iucv_message_complete {
1356 u16 ippathid;
1357 u8 ipflags1;
1358 u8 iptype;
1359 u32 ipmsgid;
1360 u32 ipaudit;
1361 u8 iprmmsg[8];
1362 u32 ipsrccls;
1363 u32 ipmsgtag;
1364 u32 res;
1365 u32 ipbfln2f;
1366 u8 ippollfg;
1367 u8 res2[3];
1368} __attribute__ ((packed));
1369
1370static void iucv_message_complete(struct iucv_irq_data *data)
1371{
1372 struct iucv_message_complete *imc = (void *) data;
1373 struct iucv_path *path = iucv_path_table[imc->ippathid];
1374 struct iucv_message msg;
1375
1376 BUG_ON(!path || !path->handler);
1377 if (path->handler->message_complete) {
1378 msg.flags = imc->ipflags1;
1379 msg.id = imc->ipmsgid;
1380 msg.audit = imc->ipaudit;
1381 memcpy(msg.rmmsg, imc->iprmmsg, 8);
1382 msg.class = imc->ipsrccls;
1383 msg.tag = imc->ipmsgtag;
1384 msg.length = imc->ipbfln2f;
1385 path->handler->message_complete(path, &msg);
1386 }
1387}
1388
1389/**
1390 * iucv_message_pending
1391 * @data: Pointer to external interrupt buffer
1392 *
1393 * Process message pending work item. Called from tasklet while holding
1394 * iucv_table_lock.
1395 */
1396struct iucv_message_pending {
1397 u16 ippathid;
1398 u8 ipflags1;
1399 u8 iptype;
1400 u32 ipmsgid;
1401 u32 iptrgcls;
1402 union {
1403 u32 iprmmsg1_u32;
1404 u8 iprmmsg1[4];
1405 } ln1msg1;
1406 union {
1407 u32 ipbfln1f;
1408 u8 iprmmsg2[4];
1409 } ln1msg2;
1410 u32 res1[3];
1411 u32 ipbfln2f;
1412 u8 ippollfg;
1413 u8 res2[3];
1414} __attribute__ ((packed));
1415
1416static void iucv_message_pending(struct iucv_irq_data *data)
1417{
1418 struct iucv_message_pending *imp = (void *) data;
1419 struct iucv_path *path = iucv_path_table[imp->ippathid];
1420 struct iucv_message msg;
1421
1422 BUG_ON(!path || !path->handler);
1423 if (path->handler->message_pending) {
1424 msg.flags = imp->ipflags1;
1425 msg.id = imp->ipmsgid;
1426 msg.class = imp->iptrgcls;
1427 if (imp->ipflags1 & IUCV_IPRMDATA) {
1428 memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8);
1429 msg.length = 8;
1430 } else
1431 msg.length = imp->ln1msg2.ipbfln1f;
1432 msg.reply_size = imp->ipbfln2f;
1433 path->handler->message_pending(path, &msg);
1434 }
1435}
1436
1437/**
1438 * iucv_tasklet_handler:
1439 *
1440 * This tasklet loops over the queue of irq buffers created by
1441 * iucv_external_interrupt, calls the appropriate action handler
1442 * and then frees the buffer.
1443 */
1444static void iucv_tasklet_handler(unsigned long ignored)
1445{
1446 typedef void iucv_irq_fn(struct iucv_irq_data *);
1447 static iucv_irq_fn *irq_fn[] = {
1448 [0x01] = iucv_path_pending,
1449 [0x02] = iucv_path_complete,
1450 [0x03] = iucv_path_severed,
1451 [0x04] = iucv_path_quiesced,
1452 [0x05] = iucv_path_resumed,
1453 [0x06] = iucv_message_complete,
1454 [0x07] = iucv_message_complete,
1455 [0x08] = iucv_message_pending,
1456 [0x09] = iucv_message_pending,
1457 };
1458 struct iucv_work *p;
1459
1460 /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
1461 spin_lock(&iucv_table_lock);
1462 iucv_tasklet_cpu = smp_processor_id();
1463
1464 spin_lock_irq(&iucv_work_lock);
1465 while (!list_empty(&iucv_work_queue)) {
1466 p = list_entry(iucv_work_queue.next, struct iucv_work, list);
1467 list_del_init(&p->list);
1468 spin_unlock_irq(&iucv_work_lock);
1469 irq_fn[p->data.iptype](&p->data);
1470 kfree(p);
1471 spin_lock_irq(&iucv_work_lock);
1472 }
1473 spin_unlock_irq(&iucv_work_lock);
1474
1475 iucv_tasklet_cpu = -1;
1476 spin_unlock(&iucv_table_lock);
1477}
1478
1479/**
1480 * iucv_external_interrupt
1481 * @code: irq code
1482 *
1483 * Handles external interrupts coming in from CP.
1484 * Places the interrupt buffer on a queue and schedules iucv_tasklet_handler().
1485 */
1486static void iucv_external_interrupt(u16 code)
1487{
1488 struct iucv_irq_data *p;
1489 struct iucv_work *work;
1490
1491 p = percpu_ptr(iucv_irq_data, smp_processor_id());
1492 if (p->ippathid >= iucv_max_pathid) {
1493 printk(KERN_WARNING "iucv_do_int: Got interrupt with "
1494 "pathid %d > max_connections (%ld)\n",
1495 p->ippathid, iucv_max_pathid - 1);
1496 iucv_sever_pathid(p->ippathid, iucv_error_no_listener);
1497 return;
1498 }
1499 if (p->iptype < 0x01 || p->iptype > 0x09) {
1500 printk(KERN_ERR "iucv_do_int: unknown iucv interrupt\n");
1501 return;
1502 }
1503 work = kmalloc(sizeof(struct iucv_work), GFP_ATOMIC);
1504 if (!work) {
1505 printk(KERN_WARNING "iucv_external_interrupt: out of memory\n");
1506 return;
1507 }
1508 memcpy(&work->data, p, sizeof(work->data));
1509 spin_lock(&iucv_work_lock);
1510 list_add_tail(&work->list, &iucv_work_queue);
1511 spin_unlock(&iucv_work_lock);
1512 tasklet_schedule(&iucv_tasklet);
1513}
1514
1515/**
1516 * iucv_init
1517 *
1518 * Allocates and initializes various data structures.
1519 */
1520static int iucv_init(void)
1521{
1522 int rc;
1523
1524 if (!MACHINE_IS_VM) {
1525 rc = -EPROTONOSUPPORT;
1526 goto out;
1527 }
1528 rc = iucv_query_maxconn();
1529 if (rc)
1530 goto out;
1531 rc = register_external_interrupt (0x4000, iucv_external_interrupt);
1532 if (rc)
1533 goto out;
1534 rc = bus_register(&iucv_bus);
1535 if (rc)
1536 goto out_int;
1537 iucv_root = s390_root_dev_register("iucv");
1538 if (IS_ERR(iucv_root)) {
1539 rc = PTR_ERR(iucv_root);
1540 goto out_bus;
1541 }
1542 /* Note: GFP_DMA is used to get memory below 2G */
1543 iucv_irq_data = percpu_alloc(sizeof(struct iucv_irq_data),
1544 GFP_KERNEL|GFP_DMA);
1545 if (!iucv_irq_data) {
1546 rc = -ENOMEM;
1547 goto out_root;
1548 }
1549 /* Allocate parameter blocks. */
1550 iucv_param = percpu_alloc(sizeof(union iucv_param),
1551 GFP_KERNEL|GFP_DMA);
1552 if (!iucv_param) {
1553 rc = -ENOMEM;
1554 goto out_extint;
1555 }
1556 register_hotcpu_notifier(&iucv_cpu_notifier);
1557 ASCEBC(iucv_error_no_listener, 16);
1558 ASCEBC(iucv_error_no_memory, 16);
1559 ASCEBC(iucv_error_pathid, 16);
1560 iucv_available = 1;
1561 return 0;
1562
1563out_extint:
1564 percpu_free(iucv_irq_data);
1565out_root:
1566 s390_root_dev_unregister(iucv_root);
1567out_bus:
1568 bus_unregister(&iucv_bus);
1569out_int:
1570 unregister_external_interrupt(0x4000, iucv_external_interrupt);
1571out:
1572 return rc;
1573}
1574
1575/**
1576 * iucv_exit
1577 *
1578 * Frees everything allocated from iucv_init.
1579 */
1580static void iucv_exit(void)
1581{
1582 struct iucv_work *p, *n;
1583
1584 spin_lock_irq(&iucv_work_lock);
1585 list_for_each_entry_safe(p, n, &iucv_work_queue, list)
1586 kfree(p);
1587 spin_unlock_irq(&iucv_work_lock);
1588 unregister_hotcpu_notifier(&iucv_cpu_notifier);
1589 percpu_free(iucv_param);
1590 percpu_free(iucv_irq_data);
1591 s390_root_dev_unregister(iucv_root);
1592 bus_unregister(&iucv_bus);
1593 unregister_external_interrupt(0x4000, iucv_external_interrupt);
1594}
1595
1596subsys_initcall(iucv_init);
1597module_exit(iucv_exit);
1598
1599/**
1600 * Export all public stuff
1601 */
1602EXPORT_SYMBOL (iucv_bus);
1603EXPORT_SYMBOL (iucv_root);
1604EXPORT_SYMBOL (iucv_register);
1605EXPORT_SYMBOL (iucv_unregister);
1606EXPORT_SYMBOL (iucv_path_accept);
1607EXPORT_SYMBOL (iucv_path_connect);
1608EXPORT_SYMBOL (iucv_path_quiesce);
1609EXPORT_SYMBOL (iucv_path_sever);
1610EXPORT_SYMBOL (iucv_message_purge);
1611EXPORT_SYMBOL (iucv_message_receive);
1612EXPORT_SYMBOL (iucv_message_reject);
1613EXPORT_SYMBOL (iucv_message_reply);
1614EXPORT_SYMBOL (iucv_message_send);
1615EXPORT_SYMBOL (iucv_message_send2way);
1616
1617MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
1618MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
1619MODULE_LICENSE("GPL");
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 5dd5094659a1..b4e444063d1f 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2345,6 +2345,196 @@ out:
2345 return err;
2346}
2347
2348#ifdef CONFIG_NET_KEY_MIGRATE
2349static int pfkey_sockaddr_pair_size(sa_family_t family)
2350{
2351 switch (family) {
2352 case AF_INET:
2353 return PFKEY_ALIGN8(sizeof(struct sockaddr_in) * 2);
2354#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2355 case AF_INET6:
2356 return PFKEY_ALIGN8(sizeof(struct sockaddr_in6) * 2);
2357#endif
2358 default:
2359 return 0;
2360 }
2361 /* NOTREACHED */
2362}
2363
2364static int parse_sockaddr_pair(struct sadb_x_ipsecrequest *rq,
2365 xfrm_address_t *saddr, xfrm_address_t *daddr,
2366 u16 *family)
2367{
2368 struct sockaddr *sa = (struct sockaddr *)(rq + 1);
2369 if (rq->sadb_x_ipsecrequest_len <
2370 pfkey_sockaddr_pair_size(sa->sa_family))
2371 return -EINVAL;
2372
2373 switch (sa->sa_family) {
2374 case AF_INET:
2375 {
2376 struct sockaddr_in *sin;
2377 sin = (struct sockaddr_in *)sa;
2378 if ((sin+1)->sin_family != AF_INET)
2379 return -EINVAL;
2380 memcpy(&saddr->a4, &sin->sin_addr, sizeof(saddr->a4));
2381 sin++;
2382 memcpy(&daddr->a4, &sin->sin_addr, sizeof(daddr->a4));
2383 *family = AF_INET;
2384 break;
2385 }
2386#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2387 case AF_INET6:
2388 {
2389 struct sockaddr_in6 *sin6;
2390 sin6 = (struct sockaddr_in6 *)sa;
2391 if ((sin6+1)->sin6_family != AF_INET6)
2392 return -EINVAL;
2393 memcpy(&saddr->a6, &sin6->sin6_addr,
2394 sizeof(saddr->a6));
2395 sin6++;
2396 memcpy(&daddr->a6, &sin6->sin6_addr,
2397 sizeof(daddr->a6));
2398 *family = AF_INET6;
2399 break;
2400 }
2401#endif
2402 default:
2403 return -EINVAL;
2404 }
2405
2406 return 0;
2407}
2408
2409static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
2410 struct xfrm_migrate *m)
2411{
2412 int err;
2413 struct sadb_x_ipsecrequest *rq2;
2414
2415 if (len <= sizeof(struct sadb_x_ipsecrequest) ||
2416 len < rq1->sadb_x_ipsecrequest_len)
2417 return -EINVAL;
2418
2419 /* old endpoints */
2420 err = parse_sockaddr_pair(rq1, &m->old_saddr, &m->old_daddr,
2421 &m->old_family);
2422 if (err)
2423 return err;
2424
2425 rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len);
2426 len -= rq1->sadb_x_ipsecrequest_len;
2427
2428 if (len <= sizeof(struct sadb_x_ipsecrequest) ||
2429 len < rq2->sadb_x_ipsecrequest_len)
2430 return -EINVAL;
2431
2432 /* new endpoints */
2433 err = parse_sockaddr_pair(rq2, &m->new_saddr, &m->new_daddr,
2434 &m->new_family);
2435 if (err)
2436 return err;
2437
2438 if (rq1->sadb_x_ipsecrequest_proto != rq2->sadb_x_ipsecrequest_proto ||
2439 rq1->sadb_x_ipsecrequest_mode != rq2->sadb_x_ipsecrequest_mode ||
2440 rq1->sadb_x_ipsecrequest_reqid != rq2->sadb_x_ipsecrequest_reqid)
2441 return -EINVAL;
2442
2443 m->proto = rq1->sadb_x_ipsecrequest_proto;
2444 m->mode = rq1->sadb_x_ipsecrequest_mode - 1;
2445 m->reqid = rq1->sadb_x_ipsecrequest_reqid;
2446
2447 return ((int)(rq1->sadb_x_ipsecrequest_len +
2448 rq2->sadb_x_ipsecrequest_len));
2449}
2450
2451static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
2452 struct sadb_msg *hdr, void **ext_hdrs)
2453{
2454 int i, len, ret, err = -EINVAL;
2455 u8 dir;
2456 struct sadb_address *sa;
2457 struct sadb_x_policy *pol;
2458 struct sadb_x_ipsecrequest *rq;
2459 struct xfrm_selector sel;
2460 struct xfrm_migrate m[XFRM_MAX_DEPTH];
2461
2462 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
2463 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
2464 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
2465 err = -EINVAL;
2466 goto out;
2467 }
2468
2469 pol = ext_hdrs[SADB_X_EXT_POLICY - 1];
2470 if (!pol) {
2471 err = -EINVAL;
2472 goto out;
2473 }
2474
2475 if (pol->sadb_x_policy_dir >= IPSEC_DIR_MAX) {
2476 err = -EINVAL;
2477 goto out;
2478 }
2479
2480 dir = pol->sadb_x_policy_dir - 1;
2481 memset(&sel, 0, sizeof(sel));
2482
2483 /* set source address info of selector */
2484 sa = ext_hdrs[SADB_EXT_ADDRESS_SRC - 1];
2485 sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr);
2486 sel.prefixlen_s = sa->sadb_address_prefixlen;
2487 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
2488 sel.sport = ((struct sockaddr_in *)(sa + 1))->sin_port;
2489 if (sel.sport)
2490 sel.sport_mask = ~0;
2491
2492 /* set destination address info of selector */
2493 sa = ext_hdrs[SADB_EXT_ADDRESS_DST - 1],
2494 pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr);
2495 sel.prefixlen_d = sa->sadb_address_prefixlen;
2496 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
2497 sel.dport = ((struct sockaddr_in *)(sa + 1))->sin_port;
2498 if (sel.dport)
2499 sel.dport_mask = ~0;
2500
2501 rq = (struct sadb_x_ipsecrequest *)(pol + 1);
2502
2503 /* extract ipsecrequests */
2504 i = 0;
2505 len = pol->sadb_x_policy_len * 8 - sizeof(struct sadb_x_policy);
2506
2507 while (len > 0 && i < XFRM_MAX_DEPTH) {
2508 ret = ipsecrequests_to_migrate(rq, len, &m[i]);
2509 if (ret < 0) {
2510 err = ret;
2511 goto out;
2512 } else {
2513 rq = (struct sadb_x_ipsecrequest *)((u8 *)rq + ret);
2514 len -= ret;
2515 i++;
2516 }
2517 }
2518
2519 if (!i || len > 0) {
2520 err = -EINVAL;
2521 goto out;
2522 }
2523
2524 return xfrm_migrate(&sel, dir, XFRM_POLICY_TYPE_MAIN, m, i);
2525
2526 out:
2527 return err;
2528}
2529#else
2530static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
2531 struct sadb_msg *hdr, void **ext_hdrs)
2532{
2533 return -ENOPROTOOPT;
2534}
2535#endif
2536
2537
2538static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
2539{
2540 unsigned int dir;
@@ -2473,6 +2663,7 @@ static pfkey_handler pfkey_funcs[SADB_MAX + 1] = {
2663 [SADB_X_SPDFLUSH] = pfkey_spdflush,
2664 [SADB_X_SPDSETIDX] = pfkey_spdadd,
2665 [SADB_X_SPDDELETE2] = pfkey_spdget,
2666 [SADB_X_MIGRATE] = pfkey_migrate,
2667};
2668
2669static int pfkey_process(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr)
@@ -3118,6 +3309,236 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
3309 return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL);
3310}
3311
3312#ifdef CONFIG_NET_KEY_MIGRATE
3313static int set_sadb_address(struct sk_buff *skb, int sasize, int type,
3314 struct xfrm_selector *sel)
3315{
3316 struct sadb_address *addr;
3317 struct sockaddr_in *sin;
3318#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3319 struct sockaddr_in6 *sin6;
3320#endif
3321 addr = (struct sadb_address *)skb_put(skb, sizeof(struct sadb_address) + sasize);
3322 addr->sadb_address_len = (sizeof(struct sadb_address) + sasize)/8;
3323 addr->sadb_address_exttype = type;
3324 addr->sadb_address_proto = sel->proto;
3325 addr->sadb_address_reserved = 0;
3326
3327 switch (type) {
3328 case SADB_EXT_ADDRESS_SRC:
3329 if (sel->family == AF_INET) {
3330 addr->sadb_address_prefixlen = sel->prefixlen_s;
3331 sin = (struct sockaddr_in *)(addr + 1);
3332 sin->sin_family = AF_INET;
3333 memcpy(&sin->sin_addr.s_addr, &sel->saddr,
3334 sizeof(sin->sin_addr.s_addr));
3335 sin->sin_port = 0;
3336 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3337 }
3338#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3339 else if (sel->family == AF_INET6) {
3340 addr->sadb_address_prefixlen = sel->prefixlen_s;
3341 sin6 = (struct sockaddr_in6 *)(addr + 1);
3342 sin6->sin6_family = AF_INET6;
3343 sin6->sin6_port = 0;
3344 sin6->sin6_flowinfo = 0;
3345 sin6->sin6_scope_id = 0;
3346 memcpy(&sin6->sin6_addr.s6_addr, &sel->saddr,
3347 sizeof(sin6->sin6_addr.s6_addr));
3348 }
3349#endif
3350 break;
3351 case SADB_EXT_ADDRESS_DST:
3352 if (sel->family == AF_INET) {
3353 addr->sadb_address_prefixlen = sel->prefixlen_d;
3354 sin = (struct sockaddr_in *)(addr + 1);
3355 sin->sin_family = AF_INET;
3356 memcpy(&sin->sin_addr.s_addr, &sel->daddr,
3357 sizeof(sin->sin_addr.s_addr));
3358 sin->sin_port = 0;
3359 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3360 }
3361#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3362 else if (sel->family == AF_INET6) {
3363 addr->sadb_address_prefixlen = sel->prefixlen_d;
3364 sin6 = (struct sockaddr_in6 *)(addr + 1);
3365 sin6->sin6_family = AF_INET6;
3366 sin6->sin6_port = 0;
3367 sin6->sin6_flowinfo = 0;
3368 sin6->sin6_scope_id = 0;
3369 memcpy(&sin6->sin6_addr.s6_addr, &sel->daddr,
3370 sizeof(sin6->sin6_addr.s6_addr));
3371 }
3372#endif
3373 break;
3374 default:
3375 return -EINVAL;
3376 }
3377
3378 return 0;
3379}
3380
3381static int set_ipsecrequest(struct sk_buff *skb,
3382 uint8_t proto, uint8_t mode, int level,
3383 uint32_t reqid, uint8_t family,
3384 xfrm_address_t *src, xfrm_address_t *dst)
3385{
3386 struct sadb_x_ipsecrequest *rq;
3387 struct sockaddr_in *sin;
3388#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3389 struct sockaddr_in6 *sin6;
3390#endif
3391 int size_req;
3392
3393 size_req = sizeof(struct sadb_x_ipsecrequest) +
3394 pfkey_sockaddr_pair_size(family);
3395
3396 rq = (struct sadb_x_ipsecrequest *)skb_put(skb, size_req);
3397 memset(rq, 0, size_req);
3398 rq->sadb_x_ipsecrequest_len = size_req;
3399 rq->sadb_x_ipsecrequest_proto = proto;
3400 rq->sadb_x_ipsecrequest_mode = mode;
3401 rq->sadb_x_ipsecrequest_level = level;
3402 rq->sadb_x_ipsecrequest_reqid = reqid;
3403
3404 switch (family) {
3405 case AF_INET:
3406 sin = (struct sockaddr_in *)(rq + 1);
3407 sin->sin_family = AF_INET;
3408 memcpy(&sin->sin_addr.s_addr, src,
3409 sizeof(sin->sin_addr.s_addr));
3410 sin++;
3411 sin->sin_family = AF_INET;
3412 memcpy(&sin->sin_addr.s_addr, dst,
3413 sizeof(sin->sin_addr.s_addr));
3414 break;
3415#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3416 case AF_INET6:
3417 sin6 = (struct sockaddr_in6 *)(rq + 1);
3418 sin6->sin6_family = AF_INET6;
3419 sin6->sin6_port = 0;
3420 sin6->sin6_flowinfo = 0;
3421 sin6->sin6_scope_id = 0;
3422 memcpy(&sin6->sin6_addr.s6_addr, src,
3423 sizeof(sin6->sin6_addr.s6_addr));
3424 sin6++;
3425 sin6->sin6_family = AF_INET6;
3426 sin6->sin6_port = 0;
3427 sin6->sin6_flowinfo = 0;
3428 sin6->sin6_scope_id = 0;
3429 memcpy(&sin6->sin6_addr.s6_addr, dst,
3430 sizeof(sin6->sin6_addr.s6_addr));
3431 break;
3432#endif
3433 default:
3434 return -EINVAL;
3435 }
3436
3437 return 0;
3438}
3439#endif
3440
3441#ifdef CONFIG_NET_KEY_MIGRATE
3442static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
3443 struct xfrm_migrate *m, int num_bundles)
3444{
3445 int i;
3446 int sasize_sel;
3447 int size = 0;
3448 int size_pol = 0;
3449 struct sk_buff *skb;
3450 struct sadb_msg *hdr;
3451 struct sadb_x_policy *pol;
3452 struct xfrm_migrate *mp;
3453
3454 if (type != XFRM_POLICY_TYPE_MAIN)
3455 return 0;
3456
3457 if (num_bundles <= 0 || num_bundles > XFRM_MAX_DEPTH)
3458 return -EINVAL;
3459
3460 /* selector */
3461 sasize_sel = pfkey_sockaddr_size(sel->family);
3462 if (!sasize_sel)
3463 return -EINVAL;
3464 size += (sizeof(struct sadb_address) + sasize_sel) * 2;
3465
3466 /* policy info */
3467 size_pol += sizeof(struct sadb_x_policy);
3468
3469 /* ipsecrequests */
3470 for (i = 0, mp = m; i < num_bundles; i++, mp++) {
3471 /* old locator pair */
3472 size_pol += sizeof(struct sadb_x_ipsecrequest) +
3473 pfkey_sockaddr_pair_size(mp->old_family);
3474 /* new locator pair */
3475 size_pol += sizeof(struct sadb_x_ipsecrequest) +
3476 pfkey_sockaddr_pair_size(mp->new_family);
3477 }
3478
3479 size += sizeof(struct sadb_msg) + size_pol;
3480
3481 /* alloc buffer */
3482 skb = alloc_skb(size, GFP_ATOMIC);
3483 if (skb == NULL)
3484 return -ENOMEM;
3485
3486 hdr = (struct sadb_msg *)skb_put(skb, sizeof(struct sadb_msg));
3487 hdr->sadb_msg_version = PF_KEY_V2;
3488 hdr->sadb_msg_type = SADB_X_MIGRATE;
3489 hdr->sadb_msg_satype = pfkey_proto2satype(m->proto);
3490 hdr->sadb_msg_len = size / 8;
3491 hdr->sadb_msg_errno = 0;
3492 hdr->sadb_msg_reserved = 0;
3493 hdr->sadb_msg_seq = 0;
3494 hdr->sadb_msg_pid = 0;
3495
3496 /* selector src */
3497 set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_SRC, sel);
3498
3499 /* selector dst */
3500 set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_DST, sel);
3501
3502 /* policy information */
3503 pol = (struct sadb_x_policy *)skb_put(skb, sizeof(struct sadb_x_policy));
3504 pol->sadb_x_policy_len = size_pol / 8;
3505 pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
3506 pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
3507 pol->sadb_x_policy_dir = dir + 1;
3508 pol->sadb_x_policy_id = 0;
3509 pol->sadb_x_policy_priority = 0;
3510
3511 for (i = 0, mp = m; i < num_bundles; i++, mp++) {
3512 /* old ipsecrequest */
3513 if (set_ipsecrequest(skb, mp->proto, mp->mode + 1,
3514 (mp->reqid ? IPSEC_LEVEL_UNIQUE : IPSEC_LEVEL_REQUIRE),
3515 mp->reqid, mp->old_family,
3516 &mp->old_saddr, &mp->old_daddr) < 0) {
3517 return -EINVAL;
3518 }
3519
3520 /* new ipsecrequest */
3521 if (set_ipsecrequest(skb, mp->proto, mp->mode + 1,
3522 (mp->reqid ? IPSEC_LEVEL_UNIQUE : IPSEC_LEVEL_REQUIRE),
3523 mp->reqid, mp->new_family,
3524 &mp->new_saddr, &mp->new_daddr) < 0) {
3525 return -EINVAL;
3526 }
3527 }
3528
3529 /* broadcast migrate message to sockets */
3530 pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL);
3531
3532 return 0;
3533}
3534#else
3535static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
3536 struct xfrm_migrate *m, int num_bundles)
3537{
3538 return -ENOPROTOOPT;
3539}
3540#endif
3541
3542static int pfkey_sendmsg(struct kiocb *kiocb,
3543 struct socket *sock, struct msghdr *msg, size_t len)
3544{
@@ -3287,6 +3708,7 @@ static struct xfrm_mgr pfkeyv2_mgr =
3708 .compile_policy = pfkey_compile_policy,
3709 .new_mapping = pfkey_send_new_mapping,
3710 .notify_policy = pfkey_send_policy_notify,
3711 .migrate = pfkey_send_migrate,
3712};
3713
3714static void __exit ipsec_pfkey_exit(void)
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 80107d4909c5..748f7f00909a 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -235,6 +235,19 @@ config NF_CONNTRACK_PPTP
235
236 To compile it as a module, choose M here. If unsure, say N.
237
238config NF_CONNTRACK_SANE
239 tristate "SANE protocol support (EXPERIMENTAL)"
240 depends on EXPERIMENTAL && NF_CONNTRACK
241 help
242 SANE is a protocol for remote access to scanners as implemented
243 by the 'saned' daemon. Like FTP, it uses separate control and
244 data connections.
245
246 With this module you can support SANE on a connection tracking
247 firewall.
248
249 To compile it as a module, choose M here. If unsure, say N.
250
251config NF_CONNTRACK_SIP
252 tristate "SIP protocol support (EXPERIMENTAL)"
253 depends on EXPERIMENTAL && NF_CONNTRACK
@@ -382,6 +395,32 @@ config NETFILTER_XT_TARGET_CONNSECMARK
395
396 To compile it as a module, choose M here. If unsure, say N.
397
398config NETFILTER_XT_TARGET_TCPMSS
399 tristate '"TCPMSS" target support'
400 depends on NETFILTER_XTABLES && (IPV6 || IPV6=n)
401 ---help---
402 This option adds a `TCPMSS' target, which allows you to alter the
403 MSS value of TCP SYN packets, to control the maximum size for that
404 connection (usually limiting it to your outgoing interface's MTU
405 minus 40).
406
407 This is used to overcome criminally braindead ISPs or servers which
408 block ICMP Fragmentation Needed packets. The symptoms of this
409 problem are that everything works fine from your Linux
410 firewall/router, but machines behind it can never exchange large
411 packets:
412 1) Web browsers connect, then hang with no data received.
413 2) Small mail works fine, but large emails hang.
414 3) ssh works fine, but scp hangs after initial handshaking.
415
416 Workaround: activate this option and add a rule to your firewall
417 configuration like:
418
419 iptables -A FORWARD -p tcp --tcp-flags SYN,RST SYN \
420 -j TCPMSS --clamp-mss-to-pmtu
421
422 To compile it as a module, choose M here. If unsure, say N.
423
424config NETFILTER_XT_MATCH_COMMENT
425 tristate '"comment" match support'
426 depends on NETFILTER_XTABLES
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 5dc5574f7e99..b2b5c7566b26 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_NF_CONNTRACK_H323) += nf_conntrack_h323.o
29obj-$(CONFIG_NF_CONNTRACK_IRC) += nf_conntrack_irc.o
30obj-$(CONFIG_NF_CONNTRACK_NETBIOS_NS) += nf_conntrack_netbios_ns.o
31obj-$(CONFIG_NF_CONNTRACK_PPTP) += nf_conntrack_pptp.o
32obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o
33obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o
34obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o
35
@@ -44,6 +45,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
45obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
46obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o
47obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o
48obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o
49obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
50
51# matches
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 626b0011dd89..6fccdcf43e08 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -60,12 +60,9 @@ static DEFINE_RWLOCK(tcp_lock);
   If it's non-zero, we mark only out of window RST segments as INVALID. */
 int nf_ct_tcp_be_liberal __read_mostly = 0;
 
-/* When connection is picked up from the middle, how many packets are required
-   to pass in each direction when we assume we are in sync - if any side uses
-   window scaling, we lost the game.
-   If it is set to zero, we disable picking up already established
+/* If it is set to zero, we disable picking up already established
    connections. */
-int nf_ct_tcp_loose __read_mostly = 3;
+int nf_ct_tcp_loose __read_mostly = 1;
 
 /* Max number of the retransmitted packets without receiving an (acceptable)
    ACK from the destination. If this number is reached, a shorter timer
@@ -650,11 +647,10 @@ static int tcp_in_window(struct ip_ct_tcp *state,
 		 before(sack, receiver->td_end + 1),
 		 after(ack, receiver->td_end - MAXACKWINDOW(sender)));
 
-	if (sender->loose || receiver->loose ||
-	    (before(seq, sender->td_maxend + 1) &&
-	    after(end, sender->td_end - receiver->td_maxwin - 1) &&
-	    before(sack, receiver->td_end + 1) &&
-	    after(ack, receiver->td_end - MAXACKWINDOW(sender)))) {
+	if (before(seq, sender->td_maxend + 1) &&
+	    after(end, sender->td_end - receiver->td_maxwin - 1) &&
+	    before(sack, receiver->td_end + 1) &&
+	    after(ack, receiver->td_end - MAXACKWINDOW(sender))) {
 		/*
 		 * Take into account window scaling (RFC 1323).
 		 */
@@ -699,15 +695,13 @@ static int tcp_in_window(struct ip_ct_tcp *state,
 				state->retrans = 0;
 			}
 		}
-		/*
-		 * Close the window of disabled window tracking :-)
-		 */
-		if (sender->loose)
-			sender->loose--;
-
 		res = 1;
 	} else {
-		if (LOG_INVALID(IPPROTO_TCP))
+		res = 0;
+		if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
+		    nf_ct_tcp_be_liberal)
+			res = 1;
+		if (!res && LOG_INVALID(IPPROTO_TCP))
 			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
 			"nf_ct_tcp: %s ",
 			before(seq, sender->td_maxend + 1) ?
@@ -718,8 +712,6 @@ static int tcp_in_window(struct ip_ct_tcp *state,
718 : "ACK is over the upper bound (ACKed data not seen yet)" 712 : "ACK is over the upper bound (ACKed data not seen yet)"
719 : "SEQ is under the lower bound (already ACKed data retransmitted)" 713 : "SEQ is under the lower bound (already ACKed data retransmitted)"
720 : "SEQ is over the upper bound (over the window of the receiver)"); 714 : "SEQ is over the upper bound (over the window of the receiver)");
721
722 res = nf_ct_tcp_be_liberal;
723 } 715 }
724 716
725 DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u " 717 DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u "
@@ -1063,8 +1055,6 @@ static int tcp_new(struct nf_conn *conntrack,
 
 		tcp_options(skb, dataoff, th, &conntrack->proto.tcp.seen[0]);
 		conntrack->proto.tcp.seen[1].flags = 0;
-		conntrack->proto.tcp.seen[0].loose =
-			conntrack->proto.tcp.seen[1].loose = 0;
 	} else if (nf_ct_tcp_loose == 0) {
 		/* Don't try to pick up connections. */
 		return 0;
@@ -1085,11 +1075,11 @@ static int tcp_new(struct nf_conn *conntrack,
 			conntrack->proto.tcp.seen[0].td_maxwin;
 		conntrack->proto.tcp.seen[0].td_scale = 0;
 
-		/* We assume SACK. Should we assume window scaling too? */
+		/* We assume SACK and liberal window checking to handle
+		 * window scaling */
 		conntrack->proto.tcp.seen[0].flags =
-			conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM;
-		conntrack->proto.tcp.seen[0].loose =
-			conntrack->proto.tcp.seen[1].loose = nf_ct_tcp_loose;
+			conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
+							     IP_CT_TCP_FLAG_BE_LIBERAL;
 	}
 
 	conntrack->proto.tcp.seen[1].td_end = 0;
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
new file mode 100644
index 000000000000..eb2d1dc46d45
--- /dev/null
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -0,0 +1,242 @@
1/* SANE connection tracking helper
2 * (SANE = Scanner Access Now Easy)
3 * For documentation about the SANE network protocol see
4 * http://www.sane-project.org/html/doc015.html
5 */
6
7/* Copyright (C) 2007 Red Hat, Inc.
8 * Author: Michal Schmidt <mschmidt@redhat.com>
9 * Based on the FTP conntrack helper (net/netfilter/nf_conntrack_ftp.c):
10 * (C) 1999-2001 Paul `Rusty' Russell
11 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
12 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
13 * (C) 2003 Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License version 2 as
17 * published by the Free Software Foundation.
18 */
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/netfilter.h>
23#include <linux/in.h>
24#include <linux/tcp.h>
25#include <net/netfilter/nf_conntrack.h>
26#include <net/netfilter/nf_conntrack_helper.h>
27#include <net/netfilter/nf_conntrack_expect.h>
28#include <linux/netfilter/nf_conntrack_sane.h>
29
30MODULE_LICENSE("GPL");
31MODULE_AUTHOR("Michal Schmidt <mschmidt@redhat.com>");
32MODULE_DESCRIPTION("SANE connection tracking helper");
33
34static char *sane_buffer;
35
36static DEFINE_SPINLOCK(nf_sane_lock);
37
38#define MAX_PORTS 8
39static u_int16_t ports[MAX_PORTS];
40static unsigned int ports_c;
41module_param_array(ports, ushort, &ports_c, 0400);
42
43#if 0
44#define DEBUGP printk
45#else
46#define DEBUGP(format, args...)
47#endif
48
49struct sane_request {
50 __be32 RPC_code;
51#define SANE_NET_START 7 /* RPC code */
52
53 __be32 handle;
54};
55
56struct sane_reply_net_start {
57 __be32 status;
58#define SANE_STATUS_SUCCESS 0
59
60 __be16 zero;
61 __be16 port;
62 /* other fields aren't interesting for conntrack */
63};
64
65static int help(struct sk_buff **pskb,
66 unsigned int protoff,
67 struct nf_conn *ct,
68 enum ip_conntrack_info ctinfo)
69{
70 unsigned int dataoff, datalen;
71 struct tcphdr _tcph, *th;
72 char *sb_ptr;
73 int ret = NF_ACCEPT;
74 int dir = CTINFO2DIR(ctinfo);
75 struct nf_ct_sane_master *ct_sane_info;
76 struct nf_conntrack_expect *exp;
77 struct nf_conntrack_tuple *tuple;
78 struct sane_request *req;
79 struct sane_reply_net_start *reply;
80 int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
81
82 ct_sane_info = &nfct_help(ct)->help.ct_sane_info;
83 /* Until there's been traffic both ways, don't look in packets. */
84 if (ctinfo != IP_CT_ESTABLISHED &&
85 ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY)
86 return NF_ACCEPT;
87
88 /* Not a full tcp header? */
89 th = skb_header_pointer(*pskb, protoff, sizeof(_tcph), &_tcph);
90 if (th == NULL)
91 return NF_ACCEPT;
92
93 /* No data? */
94 dataoff = protoff + th->doff * 4;
95 if (dataoff >= (*pskb)->len)
96 return NF_ACCEPT;
97
98 datalen = (*pskb)->len - dataoff;
99
100 spin_lock_bh(&nf_sane_lock);
101 sb_ptr = skb_header_pointer(*pskb, dataoff, datalen, sane_buffer);
102 BUG_ON(sb_ptr == NULL);
103
104 if (dir == IP_CT_DIR_ORIGINAL) {
105 if (datalen != sizeof(struct sane_request))
106 goto out;
107
108 req = (struct sane_request *)sb_ptr;
109 if (req->RPC_code != htonl(SANE_NET_START)) {
110 /* Not an interesting command */
111 ct_sane_info->state = SANE_STATE_NORMAL;
112 goto out;
113 }
114
115 /* We're interested in the next reply */
116 ct_sane_info->state = SANE_STATE_START_REQUESTED;
117 goto out;
118 }
119
120 /* Is it a reply to an uninteresting command? */
121 if (ct_sane_info->state != SANE_STATE_START_REQUESTED)
122 goto out;
123
124 /* It's a reply to SANE_NET_START. */
125 ct_sane_info->state = SANE_STATE_NORMAL;
126
127 if (datalen < sizeof(struct sane_reply_net_start)) {
128 DEBUGP("nf_ct_sane: NET_START reply too short\n");
129 goto out;
130 }
131
132 reply = (struct sane_reply_net_start *)sb_ptr;
133 if (reply->status != htonl(SANE_STATUS_SUCCESS)) {
134 /* saned refused the command */
135 DEBUGP("nf_ct_sane: unsuccessful SANE_STATUS = %u\n",
136 ntohl(reply->status));
137 goto out;
138 }
139
140 /* Invalid saned reply? Ignore it. */
141 if (reply->zero != 0)
142 goto out;
143
144 exp = nf_conntrack_expect_alloc(ct);
145 if (exp == NULL) {
146 ret = NF_DROP;
147 goto out;
148 }
149
150 tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
151 nf_conntrack_expect_init(exp, family,
152 &tuple->src.u3, &tuple->dst.u3,
153 IPPROTO_TCP,
154 NULL, &reply->port);
155
156 DEBUGP("nf_ct_sane: expect: ");
157 NF_CT_DUMP_TUPLE(&exp->tuple);
158 NF_CT_DUMP_TUPLE(&exp->mask);
159
160 /* Can't expect this? Best to drop packet now. */
161 if (nf_conntrack_expect_related(exp) != 0)
162 ret = NF_DROP;
163
164 nf_conntrack_expect_put(exp);
165
166out:
167 spin_unlock_bh(&nf_sane_lock);
168 return ret;
169}
170
171static struct nf_conntrack_helper sane[MAX_PORTS][2];
172static char sane_names[MAX_PORTS][2][sizeof("sane-65535")];
173
174/* don't make this __exit, since it's called from __init ! */
175static void nf_conntrack_sane_fini(void)
176{
177 int i, j;
178
179 for (i = 0; i < ports_c; i++) {
180 for (j = 0; j < 2; j++) {
181 DEBUGP("nf_ct_sane: unregistering helper for pf: %d "
182 "port: %d\n",
183 sane[i][j].tuple.src.l3num, ports[i]);
184 nf_conntrack_helper_unregister(&sane[i][j]);
185 }
186 }
187
188 kfree(sane_buffer);
189}
190
191static int __init nf_conntrack_sane_init(void)
192{
193 int i, j = -1, ret = 0;
194 char *tmpname;
195
196 sane_buffer = kmalloc(65536, GFP_KERNEL);
197 if (!sane_buffer)
198 return -ENOMEM;
199
200 if (ports_c == 0)
201 ports[ports_c++] = SANE_PORT;
202
203 /* FIXME should be configurable whether IPv4 and IPv6 connections
204 are tracked or not - YK */
205 for (i = 0; i < ports_c; i++) {
206 sane[i][0].tuple.src.l3num = PF_INET;
207 sane[i][1].tuple.src.l3num = PF_INET6;
208 for (j = 0; j < 2; j++) {
209 sane[i][j].tuple.src.u.tcp.port = htons(ports[i]);
210 sane[i][j].tuple.dst.protonum = IPPROTO_TCP;
211 sane[i][j].mask.src.u.tcp.port = 0xFFFF;
212 sane[i][j].mask.dst.protonum = 0xFF;
213 sane[i][j].max_expected = 1;
214 sane[i][j].timeout = 5 * 60; /* 5 Minutes */
215 sane[i][j].me = THIS_MODULE;
216 sane[i][j].help = help;
217 tmpname = &sane_names[i][j][0];
218 if (ports[i] == SANE_PORT)
219 sprintf(tmpname, "sane");
220 else
221 sprintf(tmpname, "sane-%d", ports[i]);
222 sane[i][j].name = tmpname;
223
224 DEBUGP("nf_ct_sane: registering helper for pf: %d "
225 "port: %d\n",
226 sane[i][j].tuple.src.l3num, ports[i]);
227 ret = nf_conntrack_helper_register(&sane[i][j]);
228 if (ret) {
229 printk(KERN_ERR "nf_ct_sane: failed to "
230 "register helper for pf: %d port: %d\n",
231 sane[i][j].tuple.src.l3num, ports[i]);
232 nf_conntrack_sane_fini();
233 return ret;
234 }
235 }
236 }
237
238 return 0;
239}
240
241module_init(nf_conntrack_sane_init);
242module_exit(nf_conntrack_sane_fini);
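The helper above only inspects two fixed-size control messages: the client's SANE_NET_START request and the server's reply carrying the data port. As a hedged illustration (not part of the patch; the stand-alone struct, buffer contents and helper name below are assumptions for demonstration only), this user-space sketch decodes a reply the same way help() does before it registers the expectation:

/* Illustrative sketch: decode a SANE_NET_START reply the way the conntrack
 * helper does, and report the TCP port it would expect. The wire layout
 * mirrors struct sane_reply_net_start from nf_conntrack_sane.c. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct sane_reply_net_start {
	uint32_t status;	/* 0 == SANE_STATUS_SUCCESS */
	uint16_t zero;		/* must be 0 in a valid reply */
	uint16_t port;		/* data connection port, network order */
};

/* Returns the data port in host order, or -1 if the reply would be ignored. */
static int sane_expected_port(const unsigned char *data, size_t len)
{
	struct sane_reply_net_start reply;

	if (len < sizeof(reply))
		return -1;		/* NET_START reply too short */
	memcpy(&reply, data, sizeof(reply));
	if (reply.status != htonl(0))
		return -1;		/* saned refused the command */
	if (reply.zero != 0)
		return -1;		/* invalid saned reply */
	return ntohs(reply.port);
}

int main(void)
{
	/* Hypothetical reply: status 0, zero field 0, data port 10000. */
	unsigned char buf[8] = { 0, 0, 0, 0, 0, 0, 0x27, 0x10 };

	printf("expect data connection to port %d\n",
	       sane_expected_port(buf, sizeof(buf)));
	return 0;
}

In the kernel helper that port, together with the addresses of the control connection, becomes the expectation passed to nf_conntrack_expect_related().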
diff --git a/net/netfilter/xt_CLASSIFY.c b/net/netfilter/xt_CLASSIFY.c
index 50de965bb104..195e92990da7 100644
--- a/net/netfilter/xt_CLASSIFY.c
+++ b/net/netfilter/xt_CLASSIFY.c
@@ -33,9 +33,7 @@ target(struct sk_buff **pskb,
 {
 	const struct xt_classify_target_info *clinfo = targinfo;
 
-	if ((*pskb)->priority != clinfo->priority)
-		(*pskb)->priority = clinfo->priority;
-
+	(*pskb)->priority = clinfo->priority;
 	return XT_CONTINUE;
 }
 
diff --git a/net/netfilter/xt_CONNMARK.c b/net/netfilter/xt_CONNMARK.c
index 0534bfa65cce..795c058b16a5 100644
--- a/net/netfilter/xt_CONNMARK.c
+++ b/net/netfilter/xt_CONNMARK.c
@@ -61,7 +61,7 @@ target(struct sk_buff **pskb,
61#else 61#else
62 nf_conntrack_event_cache(IPCT_MARK, *pskb); 62 nf_conntrack_event_cache(IPCT_MARK, *pskb);
63#endif 63#endif
64 } 64 }
65 break; 65 break;
66 case XT_CONNMARK_SAVE: 66 case XT_CONNMARK_SAVE:
67 newmark = (*ctmark & ~markinfo->mask) | 67 newmark = (*ctmark & ~markinfo->mask) |
@@ -78,8 +78,7 @@ target(struct sk_buff **pskb,
 	case XT_CONNMARK_RESTORE:
 		mark = (*pskb)->mark;
 		diff = (*ctmark ^ mark) & markinfo->mask;
-		if (diff != 0)
-			(*pskb)->mark = mark ^ diff;
+		(*pskb)->mark = mark ^ diff;
 		break;
 	    }
 	}
diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c
index a3fe3c334b09..1ab0db641f96 100644
--- a/net/netfilter/xt_CONNSECMARK.c
+++ b/net/netfilter/xt_CONNSECMARK.c
@@ -41,8 +41,7 @@ static void secmark_save(struct sk_buff *skb)
 
 		connsecmark = nf_ct_get_secmark(skb, &ctinfo);
 		if (connsecmark && !*connsecmark)
-			if (*connsecmark != skb->secmark)
-				*connsecmark = skb->secmark;
+			*connsecmark = skb->secmark;
 	}
 }
 
@@ -58,8 +57,7 @@ static void secmark_restore(struct sk_buff *skb)
 
 		connsecmark = nf_ct_get_secmark(skb, &ctinfo);
 		if (connsecmark && *connsecmark)
-			if (skb->secmark != *connsecmark)
-				skb->secmark = *connsecmark;
+			skb->secmark = *connsecmark;
 	}
 }
 
diff --git a/net/netfilter/xt_MARK.c b/net/netfilter/xt_MARK.c
index 0b48547e8d64..cfc45af357d5 100644
--- a/net/netfilter/xt_MARK.c
+++ b/net/netfilter/xt_MARK.c
@@ -31,9 +31,7 @@ target_v0(struct sk_buff **pskb,
 {
 	const struct xt_mark_target_info *markinfo = targinfo;
 
-	if((*pskb)->mark != markinfo->mark)
-		(*pskb)->mark = markinfo->mark;
-
+	(*pskb)->mark = markinfo->mark;
 	return XT_CONTINUE;
 }
 
@@ -62,9 +60,7 @@ target_v1(struct sk_buff **pskb,
 		break;
 	}
 
-	if((*pskb)->mark != mark)
-		(*pskb)->mark = mark;
-
+	(*pskb)->mark = mark;
 	return XT_CONTINUE;
 }
 
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
index add752196290..f1131c3a9db5 100644
--- a/net/netfilter/xt_SECMARK.c
+++ b/net/netfilter/xt_SECMARK.c
@@ -47,9 +47,7 @@ static unsigned int target(struct sk_buff **pskb, const struct net_device *in,
 		BUG();
 	}
 
-	if ((*pskb)->secmark != secmark)
-		(*pskb)->secmark = secmark;
-
+	(*pskb)->secmark = secmark;
 	return XT_CONTINUE;
 }
 
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
new file mode 100644
index 000000000000..db7e38c08de2
--- /dev/null
+++ b/net/netfilter/xt_TCPMSS.c
@@ -0,0 +1,296 @@
1/*
2 * This is a module which is used for setting the MSS option in TCP packets.
3 *
4 * Copyright (C) 2000 Marc Boucher <marc@mbsi.ca>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/skbuff.h>
13#include <linux/ip.h>
14#include <linux/ipv6.h>
15#include <linux/tcp.h>
16#include <net/ipv6.h>
17#include <net/tcp.h>
18
19#include <linux/netfilter_ipv4/ip_tables.h>
20#include <linux/netfilter_ipv6/ip6_tables.h>
21#include <linux/netfilter/x_tables.h>
22#include <linux/netfilter/xt_tcpudp.h>
23#include <linux/netfilter/xt_TCPMSS.h>
24
25MODULE_LICENSE("GPL");
26MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
27MODULE_DESCRIPTION("x_tables TCP MSS modification module");
28MODULE_ALIAS("ipt_TCPMSS");
29MODULE_ALIAS("ip6t_TCPMSS");
30
31static inline unsigned int
32optlen(const u_int8_t *opt, unsigned int offset)
33{
34 /* Beware zero-length options: make finite progress */
35 if (opt[offset] <= TCPOPT_NOP || opt[offset+1] == 0)
36 return 1;
37 else
38 return opt[offset+1];
39}
40
41static int
42tcpmss_mangle_packet(struct sk_buff **pskb,
43 const struct xt_tcpmss_info *info,
44 unsigned int tcphoff,
45 unsigned int minlen)
46{
47 struct tcphdr *tcph;
48 unsigned int tcplen, i;
49 __be16 oldval;
50 u16 newmss;
51 u8 *opt;
52
53 if (!skb_make_writable(pskb, (*pskb)->len))
54 return -1;
55
56 tcplen = (*pskb)->len - tcphoff;
57 tcph = (struct tcphdr *)((*pskb)->nh.raw + tcphoff);
58
59 /* Since it passed flags test in tcp match, we know it is is
60 not a fragment, and has data >= tcp header length. SYN
61 packets should not contain data: if they did, then we risk
62 running over MTU, sending Frag Needed and breaking things
63 badly. --RR */
64 if (tcplen != tcph->doff*4) {
65 if (net_ratelimit())
66 printk(KERN_ERR "xt_TCPMSS: bad length (%u bytes)\n",
67 (*pskb)->len);
68 return -1;
69 }
70
71 if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
72 if (dst_mtu((*pskb)->dst) <= minlen) {
73 if (net_ratelimit())
74 printk(KERN_ERR "xt_TCPMSS: "
75 "unknown or invalid path-MTU (%u)\n",
76 dst_mtu((*pskb)->dst));
77 return -1;
78 }
79 newmss = dst_mtu((*pskb)->dst) - minlen;
80 } else
81 newmss = info->mss;
82
83 opt = (u_int8_t *)tcph;
84 for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
85 if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
86 opt[i+1] == TCPOLEN_MSS) {
87 u_int16_t oldmss;
88
89 oldmss = (opt[i+2] << 8) | opt[i+3];
90
91 if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
92 oldmss <= newmss)
93 return 0;
94
95 opt[i+2] = (newmss & 0xff00) >> 8;
96 opt[i+3] = (newmss & 0x00ff);
97
98 nf_proto_csum_replace2(&tcph->check, *pskb,
99 htons(oldmss), htons(newmss), 0);
100 return 0;
101 }
102 }
103
104 /*
105 * MSS Option not found ?! add it..
106 */
107 if (skb_tailroom((*pskb)) < TCPOLEN_MSS) {
108 struct sk_buff *newskb;
109
110 newskb = skb_copy_expand(*pskb, skb_headroom(*pskb),
111 TCPOLEN_MSS, GFP_ATOMIC);
112 if (!newskb)
113 return -1;
114 kfree_skb(*pskb);
115 *pskb = newskb;
116 tcph = (struct tcphdr *)((*pskb)->nh.raw + tcphoff);
117 }
118
119 skb_put((*pskb), TCPOLEN_MSS);
120
121 opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
122 memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
123
124 nf_proto_csum_replace2(&tcph->check, *pskb,
125 htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
126 opt[0] = TCPOPT_MSS;
127 opt[1] = TCPOLEN_MSS;
128 opt[2] = (newmss & 0xff00) >> 8;
129 opt[3] = (newmss & 0x00ff);
130
131 nf_proto_csum_replace4(&tcph->check, *pskb, 0, *((__be32 *)opt), 0);
132
133 oldval = ((__be16 *)tcph)[6];
134 tcph->doff += TCPOLEN_MSS/4;
135 nf_proto_csum_replace2(&tcph->check, *pskb,
136 oldval, ((__be16 *)tcph)[6], 0);
137 return TCPOLEN_MSS;
138}
139
140static unsigned int
141xt_tcpmss_target4(struct sk_buff **pskb,
142 const struct net_device *in,
143 const struct net_device *out,
144 unsigned int hooknum,
145 const struct xt_target *target,
146 const void *targinfo)
147{
148 struct iphdr *iph = (*pskb)->nh.iph;
149 __be16 newlen;
150 int ret;
151
152 ret = tcpmss_mangle_packet(pskb, targinfo, iph->ihl * 4,
153 sizeof(*iph) + sizeof(struct tcphdr));
154 if (ret < 0)
155 return NF_DROP;
156 if (ret > 0) {
157 iph = (*pskb)->nh.iph;
158 newlen = htons(ntohs(iph->tot_len) + ret);
159 nf_csum_replace2(&iph->check, iph->tot_len, newlen);
160 iph->tot_len = newlen;
161 }
162 return XT_CONTINUE;
163}
164
165#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
166static unsigned int
167xt_tcpmss_target6(struct sk_buff **pskb,
168 const struct net_device *in,
169 const struct net_device *out,
170 unsigned int hooknum,
171 const struct xt_target *target,
172 const void *targinfo)
173{
174 struct ipv6hdr *ipv6h = (*pskb)->nh.ipv6h;
175 u8 nexthdr;
176 int tcphoff;
177 int ret;
178
179 nexthdr = ipv6h->nexthdr;
180 tcphoff = ipv6_skip_exthdr(*pskb, sizeof(*ipv6h), &nexthdr);
181 if (tcphoff < 0) {
182 WARN_ON(1);
183 return NF_DROP;
184 }
185 ret = tcpmss_mangle_packet(pskb, targinfo, tcphoff,
186 sizeof(*ipv6h) + sizeof(struct tcphdr));
187 if (ret < 0)
188 return NF_DROP;
189 if (ret > 0) {
190 ipv6h = (*pskb)->nh.ipv6h;
191 ipv6h->payload_len = htons(ntohs(ipv6h->payload_len) + ret);
192 }
193 return XT_CONTINUE;
194}
195#endif
196
197#define TH_SYN 0x02
198
199/* Must specify -p tcp --syn */
200static inline int find_syn_match(const struct xt_entry_match *m)
201{
202 const struct xt_tcp *tcpinfo = (const struct xt_tcp *)m->data;
203
204 if (strcmp(m->u.kernel.match->name, "tcp") == 0 &&
205 tcpinfo->flg_cmp & TH_SYN &&
206 !(tcpinfo->invflags & XT_TCP_INV_FLAGS))
207 return 1;
208
209 return 0;
210}
211
212static int
213xt_tcpmss_checkentry4(const char *tablename,
214 const void *entry,
215 const struct xt_target *target,
216 void *targinfo,
217 unsigned int hook_mask)
218{
219 const struct xt_tcpmss_info *info = targinfo;
220 const struct ipt_entry *e = entry;
221
222 if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
223 (hook_mask & ~((1 << NF_IP_FORWARD) |
224 (1 << NF_IP_LOCAL_OUT) |
225 (1 << NF_IP_POST_ROUTING))) != 0) {
226 printk("xt_TCPMSS: path-MTU clamping only supported in "
227 "FORWARD, OUTPUT and POSTROUTING hooks\n");
228 return 0;
229 }
230 if (IPT_MATCH_ITERATE(e, find_syn_match))
231 return 1;
232 printk("xt_TCPMSS: Only works on TCP SYN packets\n");
233 return 0;
234}
235
236#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
237static int
238xt_tcpmss_checkentry6(const char *tablename,
239 const void *entry,
240 const struct xt_target *target,
241 void *targinfo,
242 unsigned int hook_mask)
243{
244 const struct xt_tcpmss_info *info = targinfo;
245 const struct ip6t_entry *e = entry;
246
247 if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
248 (hook_mask & ~((1 << NF_IP6_FORWARD) |
249 (1 << NF_IP6_LOCAL_OUT) |
250 (1 << NF_IP6_POST_ROUTING))) != 0) {
251 printk("xt_TCPMSS: path-MTU clamping only supported in "
252 "FORWARD, OUTPUT and POSTROUTING hooks\n");
253 return 0;
254 }
255 if (IP6T_MATCH_ITERATE(e, find_syn_match))
256 return 1;
257 printk("xt_TCPMSS: Only works on TCP SYN packets\n");
258 return 0;
259}
260#endif
261
262static struct xt_target xt_tcpmss_reg[] = {
263 {
264 .family = AF_INET,
265 .name = "TCPMSS",
266 .checkentry = xt_tcpmss_checkentry4,
267 .target = xt_tcpmss_target4,
268 .targetsize = sizeof(struct xt_tcpmss_info),
269 .proto = IPPROTO_TCP,
270 .me = THIS_MODULE,
271 },
272#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
273 {
274 .family = AF_INET6,
275 .name = "TCPMSS",
276 .checkentry = xt_tcpmss_checkentry6,
277 .target = xt_tcpmss_target6,
278 .targetsize = sizeof(struct xt_tcpmss_info),
279 .proto = IPPROTO_TCP,
280 .me = THIS_MODULE,
281 },
282#endif
283};
284
285static int __init xt_tcpmss_init(void)
286{
287 return xt_register_targets(xt_tcpmss_reg, ARRAY_SIZE(xt_tcpmss_reg));
288}
289
290static void __exit xt_tcpmss_fini(void)
291{
292 xt_unregister_targets(xt_tcpmss_reg, ARRAY_SIZE(xt_tcpmss_reg));
293}
294
295module_init(xt_tcpmss_init);
296module_exit(xt_tcpmss_fini);
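When the target is configured with XT_TCPMSS_CLAMP_PMTU, tcpmss_mangle_packet() derives the new MSS from the route MTU minus the minimal header overhead handed in by the per-family wrapper (IPv4: 20-byte IP header plus 20-byte TCP header; IPv6: 40 plus 20). A minimal stand-alone sketch of that arithmetic, with hard-coded values standing in for a real dst entry (illustrative only, not part of the patch):

/* Illustrative sketch of the clamp-to-PMTU arithmetic used above. */
#include <stdio.h>

#define IPV4_MIN_OVERHEAD	(20 + 20)	/* sizeof(iphdr) + sizeof(tcphdr) */
#define IPV6_MIN_OVERHEAD	(40 + 20)	/* sizeof(ipv6hdr) + sizeof(tcphdr) */

/* Mirrors: newmss = dst_mtu((*pskb)->dst) - minlen; the MTU must exceed minlen. */
static int clamped_mss(unsigned int path_mtu, unsigned int minlen)
{
	if (path_mtu <= minlen)
		return -1;	/* unknown or invalid path-MTU */
	return path_mtu - minlen;
}

int main(void)
{
	/* A 1500-byte Ethernet path gives MSS 1460 for IPv4 and 1440 for IPv6. */
	printf("IPv4: %d\n", clamped_mss(1500, IPV4_MIN_OVERHEAD));
	printf("IPv6: %d\n", clamped_mss(1500, IPV6_MIN_OVERHEAD));
	return 0;
}

Note that an existing MSS option is only rewritten when the SYN already advertises a value larger than the clamped one; otherwise the packet passes through unchanged.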
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index f28bf69d3d42..bd1f7a2048d6 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -414,6 +414,7 @@ hashlimit_init_dst(struct xt_hashlimit_htable *hinfo, struct dsthash_dst *dst,
 	switch (nexthdr) {
 	case IPPROTO_TCP:
 	case IPPROTO_UDP:
+	case IPPROTO_UDPLITE:
 	case IPPROTO_SCTP:
 	case IPPROTO_DCCP:
 		ports = skb_header_pointer(skb, protoff, sizeof(_ports),
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 6dc01bdeb76b..a6fa48788e8f 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -60,6 +60,7 @@
 #include <linux/netdevice.h>
 #include <linux/if_packet.h>
 #include <linux/wireless.h>
+#include <linux/kernel.h>
 #include <linux/kmod.h>
 #include <net/ip.h>
 #include <net/protocol.h>
@@ -200,7 +201,8 @@ struct packet_sock {
 #endif
 	struct packet_type	prot_hook;
 	spinlock_t		bind_lock;
-	char			running;	/* prot_hook is attached*/
+	unsigned int		running:1,	/* prot_hook is attached*/
+				auxdata:1;
 	int			ifindex;	/* bound device		*/
 	__be16			num;
 #ifdef CONFIG_PACKET_MULTICAST
@@ -214,6 +216,16 @@ struct packet_sock {
 #endif
 };
 
+struct packet_skb_cb {
+	unsigned int origlen;
+	union {
+		struct sockaddr_pkt pkt;
+		struct sockaddr_ll ll;
+	} sa;
+};
+
+#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
+
 #ifdef CONFIG_PACKET_MMAP
 
 static inline char *packet_lookup_frame(struct packet_sock *po, unsigned int position)
@@ -293,7 +305,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct
 	/* drop conntrack reference */
 	nf_reset(skb);
 
-	spkt = (struct sockaddr_pkt*)skb->cb;
+	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
 
 	skb_push(skb, skb->data-skb->mac.raw);
 
@@ -512,7 +524,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
 		skb = nskb;
 	}
 
-	sll = (struct sockaddr_ll*)skb->cb;
+	BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
+		     sizeof(skb->cb));
+
+	sll = &PACKET_SKB_CB(skb)->sa.ll;
 	sll->sll_family = AF_PACKET;
 	sll->sll_hatype = dev->type;
 	sll->sll_protocol = skb->protocol;
@@ -523,6 +538,8 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
 	if (dev->hard_header_parse)
 		sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);
 
+	PACKET_SKB_CB(skb)->origlen = skb->len;
+
 	if (pskb_trim(skb, snaplen))
 		goto drop_n_acct;
 
@@ -582,11 +599,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
 		else if (skb->pkt_type == PACKET_OUTGOING) {
 			/* Special case: outgoing packets have ll header at head */
 			skb_pull(skb, skb->nh.raw - skb->data);
-			if (skb->ip_summed == CHECKSUM_PARTIAL)
-				status |= TP_STATUS_CSUMNOTREADY;
 		}
 	}
 
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		status |= TP_STATUS_CSUMNOTREADY;
+
 	snaplen = skb->len;
 
 	res = run_filter(skb, sk, snaplen);
@@ -1092,7 +1110,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 	 * it in now.
 	 */
 
-	sll = (struct sockaddr_ll*)skb->cb;
+	sll = &PACKET_SKB_CB(skb)->sa.ll;
 	if (sock->type == SOCK_PACKET)
 		msg->msg_namelen = sizeof(struct sockaddr_pkt);
 	else
@@ -1117,7 +1135,22 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 	sock_recv_timestamp(msg, sk, skb);
 
 	if (msg->msg_name)
-		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
+		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
+		       msg->msg_namelen);
+
+	if (pkt_sk(sk)->auxdata) {
+		struct tpacket_auxdata aux;
+
+		aux.tp_status = TP_STATUS_USER;
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
+		aux.tp_len = PACKET_SKB_CB(skb)->origlen;
+		aux.tp_snaplen = skb->len;
+		aux.tp_mac = 0;
+		aux.tp_net = skb->nh.raw - skb->data;
+
+		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
+	}
 
 	/*
 	 *	Free or return the buffer as appropriate. Again this
@@ -1317,6 +1350,7 @@ static int
 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
 {
 	struct sock *sk = sock->sk;
+	struct packet_sock *po = pkt_sk(sk);
 	int ret;
 
 	if (level != SOL_PACKET)
@@ -1369,6 +1403,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 		return 0;
 	}
 #endif
+	case PACKET_AUXDATA:
+	{
+		int val;
+
+		if (optlen < sizeof(val))
+			return -EINVAL;
+		if (copy_from_user(&val, optval, sizeof(val)))
+			return -EFAULT;
+
+		po->auxdata = !!val;
+		return 0;
+	}
 	default:
 		return -ENOPROTOOPT;
 	}
@@ -1378,8 +1424,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 			     char __user *optval, int __user *optlen)
 {
 	int len;
+	int val;
 	struct sock *sk = sock->sk;
 	struct packet_sock *po = pkt_sk(sk);
+	void *data;
+	struct tpacket_stats st;
 
 	if (level != SOL_PACKET)
 		return -ENOPROTOOPT;
@@ -1392,9 +1441,6 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 
 	switch(optname) {
 	case PACKET_STATISTICS:
-	{
-		struct tpacket_stats st;
-
 		if (len > sizeof(struct tpacket_stats))
 			len = sizeof(struct tpacket_stats);
 		spin_lock_bh(&sk->sk_receive_queue.lock);
@@ -1403,16 +1449,23 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 		spin_unlock_bh(&sk->sk_receive_queue.lock);
 		st.tp_packets += st.tp_drops;
 
-		if (copy_to_user(optval, &st, len))
-			return -EFAULT;
+		data = &st;
+		break;
+	case PACKET_AUXDATA:
+		if (len > sizeof(int))
+			len = sizeof(int);
+		val = po->auxdata;
+
+		data = &val;
 		break;
-	}
 	default:
 		return -ENOPROTOOPT;
 	}
 
 	if (put_user(len, optlen))
 		return -EFAULT;
+	if (copy_to_user(optval, data, len))
+		return -EFAULT;
 	return 0;
 }
 
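For reference, the new PACKET_AUXDATA option is consumed from user space as an SOL_PACKET control message on recvmsg(). The sketch below is illustrative only (error handling trimmed, requires CAP_NET_RAW); it enables the option and reads the tpacket_auxdata that packet_recvmsg() now attaches to each packet:

/* Illustrative user-space sketch for the PACKET_AUXDATA option added above. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

int main(void)
{
	int one = 1;
	char pkt[2048], cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
	struct iovec iov = { .iov_base = pkt, .iov_len = sizeof(pkt) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	int fd = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0)
		return 1;
	/* Ask the kernel to attach a tpacket_auxdata cmsg to every packet. */
	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));

	if (recvmsg(fd, &msg, 0) < 0)
		return 1;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		struct tpacket_auxdata aux;

		if (cmsg->cmsg_level != SOL_PACKET ||
		    cmsg->cmsg_type != PACKET_AUXDATA)
			continue;
		memcpy(&aux, CMSG_DATA(cmsg), sizeof(aux));
		/* tp_len is the original length, tp_snaplen what was kept. */
		printf("len=%u snaplen=%u csum_not_ready=%d\n",
		       aux.tp_len, aux.tp_snaplen,
		       !!(aux.tp_status & TP_STATUS_CSUMNOTREADY));
	}
	close(fd);
	return 0;
}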
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 01e69138578d..4c68c718f5ec 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -52,7 +52,7 @@ static struct tcf_hashinfo ipt_hash_info = {
 
 static int ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int hook)
 {
-	struct ipt_target *target;
+	struct xt_target *target;
 	int ret = 0;
 
 	target = xt_request_find_target(AF_INET, t->u.user.name,
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index bc116bd6937c..3b6e6a780927 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -209,7 +209,7 @@ static void dev_watchdog(unsigned long arg)
 					       dev->name);
 				dev->tx_timeout(dev);
 			}
-			if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
+			if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo)))
 				dev_hold(dev);
 		}
 	}
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 2567b4c96c1e..000e043ebd62 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -372,6 +372,20 @@ static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *
 	return 0;
 }
 
+static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+				 struct gnet_dump *d)
+{
+	struct prio_sched_data *q = qdisc_priv(sch);
+	struct Qdisc *cl_q;
+
+	cl_q = q->queues[cl - 1];
+	if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
+	    gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
+		return -1;
+
+	return 0;
+}
+
 static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
@@ -414,6 +428,7 @@ static struct Qdisc_class_ops prio_class_ops = {
 	.bind_tcf	=	prio_bind,
 	.unbind_tcf	=	prio_put,
 	.dump		=	prio_dump_class,
+	.dump_stats	=	prio_dump_class_stats,
 };
 
 static struct Qdisc_ops prio_qdisc_ops = {
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 459cda258a5c..82844801e421 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -143,6 +143,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
 		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
 		    (iph->protocol == IPPROTO_TCP ||
 		     iph->protocol == IPPROTO_UDP ||
+		     iph->protocol == IPPROTO_UDPLITE ||
 		     iph->protocol == IPPROTO_SCTP ||
 		     iph->protocol == IPPROTO_DCCP ||
 		     iph->protocol == IPPROTO_ESP))
@@ -156,6 +157,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
 		h2 = iph->saddr.s6_addr32[3]^iph->nexthdr;
 		if (iph->nexthdr == IPPROTO_TCP ||
 		    iph->nexthdr == IPPROTO_UDP ||
+		    iph->nexthdr == IPPROTO_UDPLITE ||
 		    iph->nexthdr == IPPROTO_SCTP ||
 		    iph->nexthdr == IPPROTO_DCCP ||
 		    iph->nexthdr == IPPROTO_ESP)
diff --git a/net/socket.c b/net/socket.c
index 4e396312f8d5..5f374e1ff526 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -407,24 +407,11 @@ int sock_map_fd(struct socket *sock)
 
 static struct socket *sock_from_file(struct file *file, int *err)
 {
-	struct inode *inode;
-	struct socket *sock;
-
 	if (file->f_op == &socket_file_ops)
 		return file->private_data;	/* set in sock_map_fd */
 
-	inode = file->f_path.dentry->d_inode;
-	if (!S_ISSOCK(inode->i_mode)) {
-		*err = -ENOTSOCK;
-		return NULL;
-	}
-
-	sock = SOCKET_I(inode);
-	if (sock->file != file) {
-		printk(KERN_ERR "socki_lookup: socket file changed!\n");
-		sock->file = file;
-	}
-	return sock;
+	*err = -ENOTSOCK;
+	return NULL;
 }
 
 /**
@@ -1527,8 +1514,9 @@ asmlinkage long sys_sendto(int fd, void __user *buff, size_t len,
 	struct file *sock_file;
 
 	sock_file = fget_light(fd, &fput_needed);
+	err = -EBADF;
 	if (!sock_file)
-		return -EBADF;
+		goto out;
 
 	sock = sock_from_file(sock_file, &err);
 	if (!sock)
@@ -1555,6 +1543,7 @@ asmlinkage long sys_sendto(int fd, void __user *buff, size_t len,
 
 out_put:
 	fput_light(sock_file, fput_needed);
+out:
 	return err;
 }
 
@@ -1586,12 +1575,13 @@ asmlinkage long sys_recvfrom(int fd, void __user *ubuf, size_t size,
 	int fput_needed;
 
 	sock_file = fget_light(fd, &fput_needed);
+	err = -EBADF;
 	if (!sock_file)
-		return -EBADF;
+		goto out;
 
 	sock = sock_from_file(sock_file, &err);
 	if (!sock)
-		goto out;
+		goto out_put;
 
 	msg.msg_control = NULL;
 	msg.msg_controllen = 0;
@@ -1610,8 +1600,9 @@ asmlinkage long sys_recvfrom(int fd, void __user *ubuf, size_t size,
 		if (err2 < 0)
 			err = err2;
 	}
-out:
+out_put:
 	fput_light(sock_file, fput_needed);
+out:
 	return err;
 }
 
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 769cdd62c1bb..4d90a179aeda 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -86,8 +86,8 @@ static int wanrouter_device_del_if(struct wan_device *wandev,
 
 static struct wan_device *wanrouter_find_device(char *name);
 static int wanrouter_delete_interface(struct wan_device *wandev, char *name);
-void lock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags);
-void unlock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags);
+static void lock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags);
+static void unlock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags);
 
 
 
@@ -104,8 +104,8 @@ struct wan_device* wanrouter_router_devlist; /* list of registered devices */
  *	Organize Unique Identifiers for encapsulation/decapsulation
  */
 
-static unsigned char wanrouter_oui_ether[] = { 0x00, 0x00, 0x00 };
 #if 0
+static unsigned char wanrouter_oui_ether[] = { 0x00, 0x00, 0x00 };
 static unsigned char wanrouter_oui_802_2[] = { 0x00, 0x80, 0xC2 };
 #endif
 
@@ -246,6 +246,8 @@ int unregister_wan_device(char *name)
 	return 0;
 }
 
+#if 0
+
 /*
  *	Encapsulate packet.
  *
@@ -341,6 +343,7 @@ __be16 wanrouter_type_trans(struct sk_buff *skb, struct net_device *dev)
 	return ethertype;
 }
 
+#endif  /* 0 */
 
 /*
  *	WAN device IOCTL.
@@ -799,23 +802,19 @@ static int wanrouter_delete_interface(struct wan_device *wandev, char *name)
 	return 0;
 }
 
-void lock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags)
+static void lock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags)
 {
 	spin_lock_irqsave(lock, *smp_flags);
 }
 
 
-void unlock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags)
+static void unlock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags)
 {
 	spin_unlock_irqrestore(lock, *smp_flags);
 }
 
 EXPORT_SYMBOL(register_wan_device);
 EXPORT_SYMBOL(unregister_wan_device);
-EXPORT_SYMBOL(wanrouter_encapsulate);
-EXPORT_SYMBOL(wanrouter_type_trans);
-EXPORT_SYMBOL(lock_adapter_irq);
-EXPORT_SYMBOL(unlock_adapter_irq);
 
 MODULE_LICENSE("GPL");
 
diff --git a/net/x25/Makefile b/net/x25/Makefile
index 587a71aa411d..a2c34ab6f194 100644
--- a/net/x25/Makefile
+++ b/net/x25/Makefile
@@ -6,5 +6,5 @@ obj-$(CONFIG_X25) += x25.o
 
 x25-y			:= af_x25.o x25_dev.o x25_facilities.o x25_in.o \
 			   x25_link.o x25_out.o x25_route.o x25_subr.o \
-			   x25_timer.o x25_proc.o
+			   x25_timer.o x25_proc.o x25_forward.o
 x25-$(CONFIG_SYSCTL)	+= sysctl_net_x25.o
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index b5c80b189902..b37d894358ec 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -63,6 +63,7 @@ int sysctl_x25_call_request_timeout = X25_DEFAULT_T21;
 int sysctl_x25_reset_request_timeout = X25_DEFAULT_T22;
 int sysctl_x25_clear_request_timeout = X25_DEFAULT_T23;
 int sysctl_x25_ack_holdback_timeout  = X25_DEFAULT_T2;
+int sysctl_x25_forward = 0;
 
 HLIST_HEAD(x25_list);
 DEFINE_RWLOCK(x25_list_lock);
@@ -846,7 +847,7 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
 	struct x25_address source_addr, dest_addr;
 	struct x25_facilities facilities;
 	struct x25_dte_facilities dte_facilities;
-	int len, rc;
+	int len, addr_len, rc;
 
 	/*
 	 *	Remove the LCI and frame type.
@@ -857,7 +858,8 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
 	 *	Extract the X.25 addresses and convert them to ASCII strings,
 	 *	and remove them.
 	 */
-	skb_pull(skb, x25_addr_ntoa(skb->data, &source_addr, &dest_addr));
+	addr_len = x25_addr_ntoa(skb->data, &source_addr, &dest_addr);
+	skb_pull(skb, addr_len);
 
 	/*
 	 *	Get the length of the facilities, skip past them for the moment
@@ -873,11 +875,28 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
 	sk = x25_find_listener(&source_addr,skb);
 	skb_push(skb,len);
 
+	if (sk != NULL && sk_acceptq_is_full(sk)) {
+		goto out_sock_put;
+	}
+
 	/*
-	 *	We can't accept the Call Request.
+	 *	We dont have any listeners for this incoming call.
+	 *	Try forwarding it.
 	 */
-	if (sk == NULL || sk_acceptq_is_full(sk))
-		goto out_clear_request;
+	if (sk == NULL) {
+		skb_push(skb, addr_len + X25_STD_MIN_LEN);
+		if (sysctl_x25_forward &&
+				x25_forward_call(&dest_addr, nb, skb, lci) > 0)
+		{
+			/* Call was forwarded, dont process it any more */
+			kfree_skb(skb);
+			rc = 1;
+			goto out;
+		} else {
+			/* No listeners, can't forward, clear the call */
+			goto out_clear_request;
+		}
+	}
 
 	/*
 	 *	Try to reach a compromise on the requested facilities.
@@ -1598,6 +1617,9 @@ void x25_kill_by_neigh(struct x25_neigh *nb)
 			x25_disconnect(s, ENETUNREACH, 0, 0);
 
 	write_unlock_bh(&x25_list_lock);
+
+	/* Remove any related forwards */
+	x25_clear_forward_by_dev(nb->dev);
 }
 
 static int __init x25_init(void)
diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
index aabda59c824e..2b2e7fd689f3 100644
--- a/net/x25/sysctl_net_x25.c
+++ b/net/x25/sysctl_net_x25.c
@@ -73,6 +73,14 @@ static struct ctl_table x25_table[] = {
 		.extra1 =	&min_timer,
 		.extra2 =	&max_timer,
 	},
+	{
+		.ctl_name =	NET_X25_FORWARD,
+		.procname =	"x25_forward",
+		.data =		&sysctl_x25_forward,
+		.maxlen =	sizeof(int),
+		.mode =		0644,
+		.proc_handler =	&proc_dointvec,
+	},
 	{ 0, },
 };
 
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index 328d80f000ad..f099fd6a7c0e 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -67,9 +67,18 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
 		return x25_rx_call_request(skb, nb, lci);
 
 	/*
 	 *	Its not a Call Request, nor is it a control frame.
-	 *	Let caller throw it away.
+	 *	Can we forward it?
 	 */
+
+	if (x25_forward_data(lci, nb, skb)) {
+		if (frametype == X25_CLEAR_CONFIRMATION) {
+			x25_clear_forward_by_lci(lci);
+		}
+		kfree_skb(skb);
+		return 1;
+	}
+
 /*
 	x25_transmit_clear_request(nb, lci, 0x0D);
 */
diff --git a/net/x25/x25_forward.c b/net/x25/x25_forward.c
new file mode 100644
index 000000000000..d339e0c810a8
--- /dev/null
+++ b/net/x25/x25_forward.c
@@ -0,0 +1,163 @@
1/*
2 * This module:
3 * This module is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU General Public License
5 * as published by the Free Software Foundation; either version
6 * 2 of the License, or (at your option) any later version.
7 *
8 * History
9 * 03-01-2007 Added forwarding for x.25 Andrew Hendry
10 */
11#include <linux/if_arp.h>
12#include <linux/init.h>
13#include <net/x25.h>
14
15struct list_head x25_forward_list = LIST_HEAD_INIT(x25_forward_list);
16DEFINE_RWLOCK(x25_forward_list_lock);
17
18int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
19 struct sk_buff *skb, int lci)
20{
21 struct x25_route *rt;
22 struct x25_neigh *neigh_new = NULL;
23 struct list_head *entry;
24 struct x25_forward *x25_frwd, *new_frwd;
25 struct sk_buff *skbn;
26 short same_lci = 0;
27 int rc = 0;
28
29 if ((rt = x25_get_route(dest_addr)) != NULL) {
30
31 if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
32 /* This shouldnt happen, if it occurs somehow
33 * do something sensible
34 */
35 goto out_put_route;
36 }
37
38 /* Avoid a loop. This is the normal exit path for a
39 * system with only one x.25 iface and default route
40 */
41 if (rt->dev == from->dev) {
42 goto out_put_nb;
43 }
44
45 /* Remote end sending a call request on an already
46 * established LCI? It shouldnt happen, just in case..
47 */
48 read_lock_bh(&x25_forward_list_lock);
49 list_for_each(entry, &x25_forward_list) {
50 x25_frwd = list_entry(entry, struct x25_forward, node);
51 if (x25_frwd->lci == lci) {
52 printk(KERN_WARNING "X.25: call request for lci which is already registered!, transmitting but not registering new pair\n");
53 same_lci = 1;
54 }
55 }
56 read_unlock_bh(&x25_forward_list_lock);
57
58 /* Save the forwarding details for future traffic */
59 if (!same_lci){
60 if ((new_frwd = kmalloc(sizeof(struct x25_forward),
61 GFP_ATOMIC)) == NULL){
62 rc = -ENOMEM;
63 goto out_put_nb;
64 }
65 new_frwd->lci = lci;
66 new_frwd->dev1 = rt->dev;
67 new_frwd->dev2 = from->dev;
68 write_lock_bh(&x25_forward_list_lock);
69 list_add(&new_frwd->node, &x25_forward_list);
70 write_unlock_bh(&x25_forward_list_lock);
71 }
72
73 /* Forward the call request */
74 if ( (skbn = skb_clone(skb, GFP_ATOMIC)) == NULL){
75 goto out_put_nb;
76 }
77 x25_transmit_link(skbn, neigh_new);
78 rc = 1;
79 }
80
81
82out_put_nb:
83 x25_neigh_put(neigh_new);
84
85out_put_route:
86 x25_route_put(rt);
87 return rc;
88}
89
90
91int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb) {
92
93 struct x25_forward *frwd;
94 struct list_head *entry;
95 struct net_device *peer = NULL;
96 struct x25_neigh *nb;
97 struct sk_buff *skbn;
98 int rc = 0;
99
100 read_lock_bh(&x25_forward_list_lock);
101 list_for_each(entry, &x25_forward_list) {
102 frwd = list_entry(entry, struct x25_forward, node);
103 if (frwd->lci == lci) {
104 /* The call is established, either side can send */
105 if (from->dev == frwd->dev1) {
106 peer = frwd->dev2;
107 } else {
108 peer = frwd->dev1;
109 }
110 break;
111 }
112 }
113 read_unlock_bh(&x25_forward_list_lock);
114
115 if ( (nb = x25_get_neigh(peer)) == NULL)
116 goto out;
117
118 if ( (skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL){
119 goto out;
120
121 }
122 x25_transmit_link(skbn, nb);
123
124 x25_neigh_put(nb);
125 rc = 1;
126out:
127 return rc;
128}
129
130void x25_clear_forward_by_lci(unsigned int lci)
131{
132 struct x25_forward *fwd;
133 struct list_head *entry, *tmp;
134
135 write_lock_bh(&x25_forward_list_lock);
136
137 list_for_each_safe(entry, tmp, &x25_forward_list) {
138 fwd = list_entry(entry, struct x25_forward, node);
139 if (fwd->lci == lci) {
140 list_del(&fwd->node);
141 kfree(fwd);
142 }
143 }
144 write_unlock_bh(&x25_forward_list_lock);
145}
146
147
148void x25_clear_forward_by_dev(struct net_device *dev)
149{
150 struct x25_forward *fwd;
151 struct list_head *entry, *tmp;
152
153 write_lock_bh(&x25_forward_list_lock);
154
155 list_for_each_safe(entry, tmp, &x25_forward_list) {
156 fwd = list_entry(entry, struct x25_forward, node);
157 if ((fwd->dev1 == dev) || (fwd->dev2 == dev)){
158 list_del(&fwd->node);
159 kfree(fwd);
160 }
161 }
162 write_unlock_bh(&x25_forward_list_lock);
163}
diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c
index a11837d361d2..e0470bd8c2f9 100644
--- a/net/x25/x25_proc.c
+++ b/net/x25/x25_proc.c
@@ -165,6 +165,75 @@ out:
165 return 0; 165 return 0;
166} 166}
167 167
168static __inline__ struct x25_forward *x25_get_forward_idx(loff_t pos)
169{
170 struct x25_forward *f;
171 struct list_head *entry;
172
173 list_for_each(entry, &x25_forward_list) {
174 f = list_entry(entry, struct x25_forward, node);
175 if (!pos--)
176 goto found;
177 }
178
179 f = NULL;
180found:
181 return f;
182}
183
184static void *x25_seq_forward_start(struct seq_file *seq, loff_t *pos)
185{
186 loff_t l = *pos;
187
188 read_lock_bh(&x25_forward_list_lock);
189 return l ? x25_get_forward_idx(--l) : SEQ_START_TOKEN;
190}
191
192static void *x25_seq_forward_next(struct seq_file *seq, void *v, loff_t *pos)
193{
194 struct x25_forward *f;
195
196 ++*pos;
197 if (v == SEQ_START_TOKEN) {
198 f = NULL;
199 if (!list_empty(&x25_forward_list))
200 f = list_entry(x25_forward_list.next,
201 struct x25_forward, node);
202 goto out;
203 }
204 f = v;
205 if (f->node.next != &x25_forward_list)
206 f = list_entry(f->node.next, struct x25_forward, node);
207 else
208 f = NULL;
209out:
210 return f;
211
212}
213
214static void x25_seq_forward_stop(struct seq_file *seq, void *v)
215{
216 read_unlock_bh(&x25_forward_list_lock);
217}
218
219static int x25_seq_forward_show(struct seq_file *seq, void *v)
220{
221 struct x25_forward *f;
222
223 if (v == SEQ_START_TOKEN) {
224 seq_printf(seq, "lci dev1 dev2\n");
225 goto out;
226 }
227
228 f = v;
229
230 seq_printf(seq, "%d %-10s %-10s\n",
231 f->lci, f->dev1->name, f->dev2->name);
232
233out:
234 return 0;
235}
236
168static struct seq_operations x25_seq_route_ops = { 237static struct seq_operations x25_seq_route_ops = {
169 .start = x25_seq_route_start, 238 .start = x25_seq_route_start,
170 .next = x25_seq_route_next, 239 .next = x25_seq_route_next,
@@ -179,6 +248,13 @@ static struct seq_operations x25_seq_socket_ops = {
179 .show = x25_seq_socket_show, 248 .show = x25_seq_socket_show,
180}; 249};
181 250
251static struct seq_operations x25_seq_forward_ops = {
252 .start = x25_seq_forward_start,
253 .next = x25_seq_forward_next,
254 .stop = x25_seq_forward_stop,
255 .show = x25_seq_forward_show,
256};
257
182static int x25_seq_socket_open(struct inode *inode, struct file *file) 258static int x25_seq_socket_open(struct inode *inode, struct file *file)
183{ 259{
184 return seq_open(file, &x25_seq_socket_ops); 260 return seq_open(file, &x25_seq_socket_ops);
@@ -189,6 +265,11 @@ static int x25_seq_route_open(struct inode *inode, struct file *file)
189 return seq_open(file, &x25_seq_route_ops); 265 return seq_open(file, &x25_seq_route_ops);
190} 266}
191 267
268static int x25_seq_forward_open(struct inode *inode, struct file *file)
269{
270 return seq_open(file, &x25_seq_forward_ops);
271}
272
192static struct file_operations x25_seq_socket_fops = { 273static struct file_operations x25_seq_socket_fops = {
193 .owner = THIS_MODULE, 274 .owner = THIS_MODULE,
194 .open = x25_seq_socket_open, 275 .open = x25_seq_socket_open,
@@ -205,6 +286,14 @@ static struct file_operations x25_seq_route_fops = {
205 .release = seq_release, 286 .release = seq_release,
206}; 287};
207 288
289static struct file_operations x25_seq_forward_fops = {
290 .owner = THIS_MODULE,
291 .open = x25_seq_forward_open,
292 .read = seq_read,
293 .llseek = seq_lseek,
294 .release = seq_release,
295};
296
208static struct proc_dir_entry *x25_proc_dir; 297static struct proc_dir_entry *x25_proc_dir;
209 298
210int __init x25_proc_init(void) 299int __init x25_proc_init(void)
@@ -225,9 +314,17 @@ int __init x25_proc_init(void)
225 if (!p) 314 if (!p)
226 goto out_socket; 315 goto out_socket;
227 p->proc_fops = &x25_seq_socket_fops; 316 p->proc_fops = &x25_seq_socket_fops;
317
318 p = create_proc_entry("forward", S_IRUGO, x25_proc_dir);
319 if (!p)
320 goto out_forward;
321 p->proc_fops = &x25_seq_forward_fops;
228 rc = 0; 322 rc = 0;
323
229out: 324out:
230 return rc; 325 return rc;
326out_forward:
327 remove_proc_entry("socket", x25_proc_dir);
231out_socket: 328out_socket:
232 remove_proc_entry("route", x25_proc_dir); 329 remove_proc_entry("route", x25_proc_dir);
233out_route: 330out_route:
@@ -237,6 +334,7 @@ out_route:
237 334
238void __exit x25_proc_exit(void) 335void __exit x25_proc_exit(void)
239{ 336{
337 remove_proc_entry("forward", x25_proc_dir);
240 remove_proc_entry("route", x25_proc_dir); 338 remove_proc_entry("route", x25_proc_dir);
241 remove_proc_entry("socket", x25_proc_dir); 339 remove_proc_entry("socket", x25_proc_dir);
242 remove_proc_entry("x25", proc_net); 340 remove_proc_entry("x25", proc_net);
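Taken together, these additions give an administrator two handles: /proc/sys/net/x25/x25_forward switches forwarding of otherwise unhandled calls on or off, and /proc/net/x25/forward lists the active lci/dev1/dev2 triples kept by x25_forward.c. A small illustrative sketch of using them (paths as created by this patch; only minimal error handling):

/* Illustrative sketch: enable X.25 call forwarding and dump the
 * active forwardings table exposed by this patch. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/x25/x25_forward", "w");
	char line[128];

	if (f) {
		fputs("1\n", f);	/* sets sysctl_x25_forward to 1 */
		fclose(f);
	}

	f = fopen("/proc/net/x25/forward", "r");
	if (!f)
		return 1;
	/* First line is the header "lci dev1 dev2", then one line per entry. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}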
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c
index 2a3fe986b245..883a848bca5b 100644
--- a/net/x25/x25_route.c
+++ b/net/x25/x25_route.c
@@ -119,6 +119,9 @@ void x25_route_device_down(struct net_device *dev)
 			__x25_remove_route(rt);
 	}
 	write_unlock_bh(&x25_route_list_lock);
+
+	/* Remove any related forwarding */
+	x25_clear_forward_by_dev(dev);
 }
 
 /*
diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
index 0faab6332586..577a4f821b98 100644
--- a/net/xfrm/Kconfig
+++ b/net/xfrm/Kconfig
@@ -24,6 +24,17 @@ config XFRM_SUB_POLICY
 
 	  If unsure, say N.
 
+config XFRM_MIGRATE
+	bool "Transformation migrate database (EXPERIMENTAL)"
+	depends on XFRM && EXPERIMENTAL
+	---help---
+	  A feature to update locator(s) of a given IPsec security
+	  association dynamically.  This feature is required, for
+	  instance, in a Mobile IPv6 environment with IPsec configuration
+	  where mobile nodes change their attachment point to the Internet.
+
+	  If unsure, say N.
+
 config NET_KEY
 	tristate "PF_KEY sockets"
 	select XFRM
@@ -34,4 +45,19 @@ config NET_KEY
 
 	  Say Y unless you know what you are doing.
 
+config NET_KEY_MIGRATE
+	bool "PF_KEY MIGRATE (EXPERIMENTAL)"
+	depends on NET_KEY && EXPERIMENTAL
+	select XFRM_MIGRATE
+	---help---
+	  Add a PF_KEY MIGRATE message to PF_KEYv2 socket family.
+	  The PF_KEY MIGRATE message is used to dynamically update
+	  locator(s) of a given IPsec security association.
+	  This feature is required, for instance, in a Mobile IPv6
+	  environment with IPsec configuration where mobile nodes
+	  change their attachment point to the Internet.  Detail
+	  information can be found in the internet-draft
+	  <draft-sugimoto-mip6-pfkey-migrate>.
+
+	  If unsure, say N.
 
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index f1cf3402e75c..248f94814dfb 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -266,6 +266,23 @@ static struct xfrm_algo_desc ealg_list[] = {
 	}
 },
 {
+	.name = "cbc(camellia)",
+
+	.uinfo = {
+		.encr = {
+			.blockbits = 128,
+			.defkeybits = 128,
+		}
+	},
+
+	.desc = {
+		.sadb_alg_id = SADB_X_EALG_CAMELLIACBC,
+		.sadb_alg_ivlen = 8,
+		.sadb_alg_minbits = 128,
+		.sadb_alg_maxbits = 256
+	}
+},
+{
 	.name = "cbc(twofish)",
 	.compat = "twofish",
 
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b7e537fe2d75..fa7ce060b454 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2236,3 +2236,234 @@ void __init xfrm_init(void)
2236 xfrm_input_init(); 2236 xfrm_input_init();
2237} 2237}
2238 2238
2239#ifdef CONFIG_XFRM_MIGRATE
2240static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
2241 struct xfrm_selector *sel_tgt)
2242{
2243 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
2244 if (sel_tgt->family == sel_cmp->family &&
2245 xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
2246 sel_cmp->family) == 0 &&
2247 xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
2248 sel_cmp->family) == 0 &&
2249 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
2250 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
2251 return 1;
2252 }
2253 } else {
2254 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
2255 return 1;
2256 }
2257 }
2258 return 0;
2259}
2260
2261static struct xfrm_policy * xfrm_migrate_policy_find(struct xfrm_selector *sel,
2262 u8 dir, u8 type)
2263{
2264 struct xfrm_policy *pol, *ret = NULL;
2265 struct hlist_node *entry;
2266 struct hlist_head *chain;
2267 u32 priority = ~0U;
2268
2269 read_lock_bh(&xfrm_policy_lock);
2270 chain = policy_hash_direct(&sel->daddr, &sel->saddr, sel->family, dir);
2271 hlist_for_each_entry(pol, entry, chain, bydst) {
2272 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
2273 pol->type == type) {
2274 ret = pol;
2275 priority = ret->priority;
2276 break;
2277 }
2278 }
2279 chain = &xfrm_policy_inexact[dir];
2280 hlist_for_each_entry(pol, entry, chain, bydst) {
2281 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
2282 pol->type == type &&
2283 pol->priority < priority) {
2284 ret = pol;
2285 break;
2286 }
2287 }
2288
2289 if (ret)
2290 xfrm_pol_hold(ret);
2291
2292 read_unlock_bh(&xfrm_policy_lock);
2293
2294 return ret;
2295}
2296
2297static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
2298{
2299 int match = 0;
2300
2301 if (t->mode == m->mode && t->id.proto == m->proto &&
2302 (m->reqid == 0 || t->reqid == m->reqid)) {
2303 switch (t->mode) {
2304 case XFRM_MODE_TUNNEL:
2305 case XFRM_MODE_BEET:
2306 if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
2307 m->old_family) == 0 &&
2308 xfrm_addr_cmp(&t->saddr, &m->old_saddr,
2309 m->old_family) == 0) {
2310 match = 1;
2311 }
2312 break;
2313 case XFRM_MODE_TRANSPORT:
2314 /* in case of transport mode, template does not store
2315 any IP addresses, hence we just compare mode and
2316 protocol */
2317 match = 1;
2318 break;
2319 default:
2320 break;
2321 }
2322 }
2323 return match;
2324}
2325
2326/* update endpoint address(es) of template(s) */
2327static int xfrm_policy_migrate(struct xfrm_policy *pol,
2328 struct xfrm_migrate *m, int num_migrate)
2329{
2330 struct xfrm_migrate *mp;
2331 struct dst_entry *dst;
2332 int i, j, n = 0;
2333
2334 write_lock_bh(&pol->lock);
2335 if (unlikely(pol->dead)) {
2336 /* target policy has been deleted */
2337 write_unlock_bh(&pol->lock);
2338 return -ENOENT;
2339 }
2340
2341 for (i = 0; i < pol->xfrm_nr; i++) {
2342 for (j = 0, mp = m; j < num_migrate; j++, mp++) {
2343 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
2344 continue;
2345 n++;
2346 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL)
2347 continue;
2348 /* update endpoints */
2349 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
2350 sizeof(pol->xfrm_vec[i].id.daddr));
2351 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
2352 sizeof(pol->xfrm_vec[i].saddr));
2353 pol->xfrm_vec[i].encap_family = mp->new_family;
2354 /* flush bundles */
2355 while ((dst = pol->bundles) != NULL) {
2356 pol->bundles = dst->next;
2357 dst_free(dst);
2358 }
2359 }
2360 }
2361
2362 write_unlock_bh(&pol->lock);
2363
2364 if (!n)
2365 return -ENODATA;
2366
2367 return 0;
2368}
2369
2370static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
2371{
2372 int i, j;
2373
2374 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
2375 return -EINVAL;
2376
2377 for (i = 0; i < num_migrate; i++) {
2378 if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
2379 m[i].old_family) == 0) &&
2380 (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
2381 m[i].old_family) == 0))
2382 return -EINVAL;
2383 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
2384 xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
2385 return -EINVAL;
2386
2387 /* check if there is any duplicated entry */
2388 for (j = i + 1; j < num_migrate; j++) {
2389 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
2390 sizeof(m[i].old_daddr)) &&
2391 !memcmp(&m[i].old_saddr, &m[j].old_saddr,
2392 sizeof(m[i].old_saddr)) &&
2393 m[i].proto == m[j].proto &&
2394 m[i].mode == m[j].mode &&
2395 m[i].reqid == m[j].reqid &&
2396 m[i].old_family == m[j].old_family)
2397 return -EINVAL;
2398 }
2399 }
2400
2401 return 0;
2402}
2403
2404int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
2405 struct xfrm_migrate *m, int num_migrate)
2406{
2407 int i, err, nx_cur = 0, nx_new = 0;
2408 struct xfrm_policy *pol = NULL;
2409 struct xfrm_state *x, *xc;
2410 struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
2411 struct xfrm_state *x_new[XFRM_MAX_DEPTH];
2412 struct xfrm_migrate *mp;
2413
2414 if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
2415 goto out;
2416
2417 /* Stage 1 - find policy */
2418 if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
2419 err = -ENOENT;
2420 goto out;
2421 }
2422
2423 /* Stage 2 - find and update state(s) */
2424 for (i = 0, mp = m; i < num_migrate; i++, mp++) {
2425 if ((x = xfrm_migrate_state_find(mp))) {
2426 x_cur[nx_cur] = x;
2427 nx_cur++;
2428 if ((xc = xfrm_state_migrate(x, mp))) {
2429 x_new[nx_new] = xc;
2430 nx_new++;
2431 } else {
2432 err = -ENODATA;
2433 goto restore_state;
2434 }
2435 }
2436 }
2437
2438 /* Stage 3 - update policy */
2439 if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
2440 goto restore_state;
2441
2442 /* Stage 4 - delete old state(s) */
2443 if (nx_cur) {
2444 xfrm_states_put(x_cur, nx_cur);
2445 xfrm_states_delete(x_cur, nx_cur);
2446 }
2447
2448 /* Stage 5 - announce */
2449 km_migrate(sel, dir, type, m, num_migrate);
2450
2451 xfrm_pol_put(pol);
2452
2453 return 0;
2454out:
2455 return err;
2456
2457restore_state:
2458 if (pol)
2459 xfrm_pol_put(pol);
2460 if (nx_cur)
2461 xfrm_states_put(x_cur, nx_cur);
2462 if (nx_new)
2463 xfrm_states_delete(x_new, nx_new);
2464
2465 return err;
2466}
2467EXPORT_SYMBOL(xfrm_migrate);
2468#endif
2469
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index fdb08d9f34aa..91b02687db52 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -183,9 +183,6 @@ static DEFINE_SPINLOCK(xfrm_state_gc_lock);
183 183
184 184 int __xfrm_state_delete(struct xfrm_state *x);
185 185
186static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
187static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
188
189 186 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
190 187 void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
191 188
@@ -831,6 +828,160 @@ out:
831 828 }
832 829 EXPORT_SYMBOL(xfrm_state_add);
833 830
831#ifdef CONFIG_XFRM_MIGRATE
832struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
833{
834 int err = -ENOMEM;
835 struct xfrm_state *x = xfrm_state_alloc();
836 if (!x)
837 goto error;
838
839 memcpy(&x->id, &orig->id, sizeof(x->id));
840 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
841 memcpy(&x->lft, &orig->lft, sizeof(x->lft));
842 x->props.mode = orig->props.mode;
843 x->props.replay_window = orig->props.replay_window;
844 x->props.reqid = orig->props.reqid;
845 x->props.family = orig->props.family;
846 x->props.saddr = orig->props.saddr;
847
848 if (orig->aalg) {
849 x->aalg = xfrm_algo_clone(orig->aalg);
850 if (!x->aalg)
851 goto error;
852 }
853 x->props.aalgo = orig->props.aalgo;
854
855 if (orig->ealg) {
856 x->ealg = xfrm_algo_clone(orig->ealg);
857 if (!x->ealg)
858 goto error;
859 }
860 x->props.ealgo = orig->props.ealgo;
861
862 if (orig->calg) {
863 x->calg = xfrm_algo_clone(orig->calg);
864 if (!x->calg)
865 goto error;
866 }
867 x->props.calgo = orig->props.calgo;
868
869 if (orig->encap) {
870 x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
871 if (!x->encap)
872 goto error;
873 }
874
875 if (orig->coaddr) {
876 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
877 GFP_KERNEL);
878 if (!x->coaddr)
879 goto error;
880 }
881
882 err = xfrm_init_state(x);
883 if (err)
884 goto error;
885
886 x->props.flags = orig->props.flags;
887
888 x->curlft.add_time = orig->curlft.add_time;
889 x->km.state = orig->km.state;
890 x->km.seq = orig->km.seq;
891
892 return x;
893
894 error:
895 if (errp)
896 *errp = err;
897 if (x) {
898 kfree(x->aalg);
899 kfree(x->ealg);
900 kfree(x->calg);
901 kfree(x->encap);
902 kfree(x->coaddr);
903 }
904 kfree(x);
905 return NULL;
906}
907EXPORT_SYMBOL(xfrm_state_clone);
908
909/* xfrm_state_lock is held */
910struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
911{
912 unsigned int h;
913 struct xfrm_state *x;
914 struct hlist_node *entry;
915
916 if (m->reqid) {
917 h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
918 m->reqid, m->old_family);
919 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
920 if (x->props.mode != m->mode ||
921 x->id.proto != m->proto)
922 continue;
923 if (m->reqid && x->props.reqid != m->reqid)
924 continue;
925 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
926 m->old_family) ||
927 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
928 m->old_family))
929 continue;
930 xfrm_state_hold(x);
931 return x;
932 }
933 } else {
934 h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
935 m->old_family);
936 hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
937 if (x->props.mode != m->mode ||
938 x->id.proto != m->proto)
939 continue;
940 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
941 m->old_family) ||
942 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
943 m->old_family))
944 continue;
945 xfrm_state_hold(x);
946 return x;
947 }
948 }
949
950 return NULL;
951}
952EXPORT_SYMBOL(xfrm_migrate_state_find);
953
954struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
955 struct xfrm_migrate *m)
956{
957 struct xfrm_state *xc;
958 int err;
959
960 xc = xfrm_state_clone(x, &err);
961 if (!xc)
962 return NULL;
963
964 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
965 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
966
967 /* add state */
968 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
969	/* care is needed when the destination address of the
970	   state is to be updated, as it is part of the triplet */
971 xfrm_state_insert(xc);
972 } else {
973 if ((err = xfrm_state_add(xc)) < 0)
974 goto error;
975 }
976
977 return xc;
978error:
979 kfree(xc);
980 return NULL;
981}
982EXPORT_SYMBOL(xfrm_state_migrate);
983#endif
984
834 985 int xfrm_state_update(struct xfrm_state *x)
835 986 {
836 987	struct xfrm_state *x1;
@@ -1345,6 +1496,26 @@ void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1345 1496 }
1346 1497 EXPORT_SYMBOL(km_policy_expired);
1347 1498
1499int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1500 struct xfrm_migrate *m, int num_migrate)
1501{
1502 int err = -EINVAL;
1503 int ret;
1504 struct xfrm_mgr *km;
1505
1506 read_lock(&xfrm_km_lock);
1507 list_for_each_entry(km, &xfrm_km_list, list) {
1508 if (km->migrate) {
1509 ret = km->migrate(sel, dir, type, m, num_migrate);
1510 if (!ret)
1511 err = ret;
1512 }
1513 }
1514 read_unlock(&xfrm_km_lock);
1515 return err;
1516}
1517EXPORT_SYMBOL(km_migrate);
1518
1348 1519 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1349 1520 {
1350 1521	int err = -EINVAL;
@@ -1458,7 +1629,7 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1458 1629 }
1459 1630 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1460 1631
1461 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
1632 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
1462 1633 {
1463 1634	struct xfrm_state_afinfo *afinfo;
1464 1635	if (unlikely(family >= NPROTO))
@@ -1470,11 +1641,14 @@ static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
1470 1641	return afinfo;
1471 1642 }
1472 1643
1473 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1644 void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1474 1645 {
1475 1646	read_unlock(&xfrm_state_afinfo_lock);
1476 1647 }
1477 1648
1649EXPORT_SYMBOL(xfrm_state_get_afinfo);
1650EXPORT_SYMBOL(xfrm_state_put_afinfo);
1651
1478 1652 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
1479 1653 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1480 1654 {
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 82f36d396fca..079a5d315759 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1632,6 +1632,176 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
1632 1632	return 0;
1633 1633 }
1634 1634
1635#ifdef CONFIG_XFRM_MIGRATE
1636static int verify_user_migrate(struct rtattr **xfrma)
1637{
1638 struct rtattr *rt = xfrma[XFRMA_MIGRATE-1];
1639 struct xfrm_user_migrate *um;
1640
1641 if (!rt)
1642 return -EINVAL;
1643
1644 if ((rt->rta_len - sizeof(*rt)) < sizeof(*um))
1645 return -EINVAL;
1646
1647 return 0;
1648}
1649
1650static int copy_from_user_migrate(struct xfrm_migrate *ma,
1651 struct rtattr **xfrma, int *num)
1652{
1653 struct rtattr *rt = xfrma[XFRMA_MIGRATE-1];
1654 struct xfrm_user_migrate *um;
1655 int i, num_migrate;
1656
1657 um = RTA_DATA(rt);
1658 num_migrate = (rt->rta_len - sizeof(*rt)) / sizeof(*um);
1659
1660 if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
1661 return -EINVAL;
1662
1663 for (i = 0; i < num_migrate; i++, um++, ma++) {
1664 memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
1665 memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
1666 memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
1667 memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
1668
1669 ma->proto = um->proto;
1670 ma->mode = um->mode;
1671 ma->reqid = um->reqid;
1672
1673 ma->old_family = um->old_family;
1674 ma->new_family = um->new_family;
1675 }
1676
1677 *num = i;
1678 return 0;
1679}
1680
1681static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
1682 struct rtattr **xfrma)
1683{
1684 struct xfrm_userpolicy_id *pi = NLMSG_DATA(nlh);
1685 struct xfrm_migrate m[XFRM_MAX_DEPTH];
1686 u8 type;
1687 int err;
1688 int n = 0;
1689
1690 err = verify_user_migrate((struct rtattr **)xfrma);
1691 if (err)
1692 return err;
1693
1694 err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma);
1695 if (err)
1696 return err;
1697
1698 err = copy_from_user_migrate((struct xfrm_migrate *)m,
1699 (struct rtattr **)xfrma, &n);
1700 if (err)
1701 return err;
1702
1703 if (!n)
1704 return 0;
1705
1706 xfrm_migrate(&pi->sel, pi->dir, type, m, n);
1707
1708 return 0;
1709}
1710#else
1711static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
1712 struct rtattr **xfrma)
1713{
1714 return -ENOPROTOOPT;
1715}
1716#endif
1717
1718#ifdef CONFIG_XFRM_MIGRATE
1719static int copy_to_user_migrate(struct xfrm_migrate *m, struct sk_buff *skb)
1720{
1721 struct xfrm_user_migrate um;
1722
1723 memset(&um, 0, sizeof(um));
1724 um.proto = m->proto;
1725 um.mode = m->mode;
1726 um.reqid = m->reqid;
1727 um.old_family = m->old_family;
1728 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
1729 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
1730 um.new_family = m->new_family;
1731 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
1732 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
1733
1734 RTA_PUT(skb, XFRMA_MIGRATE, sizeof(um), &um);
1735 return 0;
1736
1737rtattr_failure:
1738 return -1;
1739}
1740
1741static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m,
1742 int num_migrate, struct xfrm_selector *sel,
1743 u8 dir, u8 type)
1744{
1745 struct xfrm_migrate *mp;
1746 struct xfrm_userpolicy_id *pol_id;
1747 struct nlmsghdr *nlh;
1748 unsigned char *b = skb->tail;
1749 int i;
1750
1751 nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id));
1752 pol_id = NLMSG_DATA(nlh);
1753 nlh->nlmsg_flags = 0;
1754
1755 /* copy data from selector, dir, and type to the pol_id */
1756 memset(pol_id, 0, sizeof(*pol_id));
1757 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
1758 pol_id->dir = dir;
1759
1760 if (copy_to_user_policy_type(type, skb) < 0)
1761 goto nlmsg_failure;
1762
1763 for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
1764 if (copy_to_user_migrate(mp, skb) < 0)
1765 goto nlmsg_failure;
1766 }
1767
1768 nlh->nlmsg_len = skb->tail - b;
1769 return skb->len;
1770nlmsg_failure:
1771 skb_trim(skb, b - skb->data);
1772 return -1;
1773}
1774
1775static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1776 struct xfrm_migrate *m, int num_migrate)
1777{
1778 struct sk_buff *skb;
1779 size_t len;
1780
1781 len = RTA_SPACE(sizeof(struct xfrm_user_migrate) * num_migrate);
1782 len += NLMSG_SPACE(sizeof(struct xfrm_userpolicy_id));
1783#ifdef CONFIG_XFRM_SUB_POLICY
1784 len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type));
1785#endif
1786 skb = alloc_skb(len, GFP_ATOMIC);
1787 if (skb == NULL)
1788 return -ENOMEM;
1789
1790 /* build migrate */
1791 if (build_migrate(skb, m, num_migrate, sel, dir, type) < 0)
1792 BUG();
1793
1794 NETLINK_CB(skb).dst_group = XFRMNLGRP_MIGRATE;
1795 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_MIGRATE,
1796 GFP_ATOMIC);
1797}
1798#else
1799static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1800 struct xfrm_migrate *m, int num_migrate)
1801{
1802 return -ENOPROTOOPT;
1803}
1804#endif
1635 1805
1636 1806 #define XMSGSIZE(type) NLMSG_LENGTH(sizeof(struct type))
1637 1807
@@ -1653,6 +1823,7 @@ static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
1653 1823	[XFRM_MSG_NEWAE       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
1654 1824	[XFRM_MSG_GETAE       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
1655 1825	[XFRM_MSG_REPORT      - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
1826 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1656 1827 };
1657 1828
1658 1829 #undef XMSGSIZE
@@ -1679,6 +1850,7 @@ static struct xfrm_link {
1679 1850	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
1680 1851	[XFRM_MSG_NEWAE       - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
1681 1852	[XFRM_MSG_GETAE       - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
1853 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
1682 1854 };
1683 1855
1684 1856 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
@@ -2285,6 +2457,7 @@ static struct xfrm_mgr netlink_mgr = {
2285 2457	.compile_policy = xfrm_compile_policy,
2286 2458	.notify_policy = xfrm_send_policy_notify,
2287 2459	.report = xfrm_send_report,
2460 .migrate = xfrm_send_migrate,
2288 2461 };
2289 2462
2290 2463 static int __init xfrm_user_init(void)