aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/device-mapper.h
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-12-21 20:08:06 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2012-12-21 20:08:06 -0500
commitb49249d10324d0fd6fb29725c2807dfd80d0edbc (patch)
tree9a8fa724e6c9f9283530979c6e32a311c74999d5 /include/linux/device-mapper.h
parent10532b560bacf23766f9c7dc09778b31b198ff45 (diff)
parent45e621d45e24ffc4cb2b2935e8438987b860063a (diff)
Merge tag 'dm-3.8-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm
Pull dm update from Alasdair G Kergon: "Miscellaneous device-mapper fixes, cleanups and performance improvements. Of particular note: - Disable broken WRITE SAME support in all targets except linear and striped. Use it when kcopyd is zeroing blocks. - Remove several mempools from targets by moving the data into the bio's new front_pad area(which dm calls 'per_bio_data'). - Fix a race in thin provisioning if discards are misused. - Prevent userspace from interfering with the ioctl parameters and use kmalloc for the data buffer if it's small instead of vmalloc. - Throttle some annoying error messages when I/O fails." * tag 'dm-3.8-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm: (36 commits) dm stripe: add WRITE SAME support dm: remove map_info dm snapshot: do not use map_context dm thin: dont use map_context dm raid1: dont use map_context dm flakey: dont use map_context dm raid1: rename read_record to bio_record dm: move target request nr to dm_target_io dm snapshot: use per_bio_data dm verity: use per_bio_data dm raid1: use per_bio_data dm: introduce per_bio_data dm kcopyd: add WRITE SAME support to dm_kcopyd_zero dm linear: add WRITE SAME support dm: add WRITE SAME support dm: prepare to support WRITE SAME dm ioctl: use kmalloc if possible dm ioctl: remove PF_MEMALLOC dm persistent data: improve improve space map block alloc failure message dm thin: use DMERR_LIMIT for errors ...
Diffstat (limited to 'include/linux/device-mapper.h')
-rw-r--r--include/linux/device-mapper.h55
1 files changed, 47 insertions, 8 deletions
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 38d27a10aa5d..bf6afa2fc432 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -23,7 +23,6 @@ typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
23union map_info { 23union map_info {
24 void *ptr; 24 void *ptr;
25 unsigned long long ll; 25 unsigned long long ll;
26 unsigned target_request_nr;
27}; 26};
28 27
29/* 28/*
@@ -46,8 +45,7 @@ typedef void (*dm_dtr_fn) (struct dm_target *ti);
46 * = 1: simple remap complete 45 * = 1: simple remap complete
47 * = 2: The target wants to push back the io 46 * = 2: The target wants to push back the io
48 */ 47 */
49typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio, 48typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
50 union map_info *map_context);
51typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone, 49typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
52 union map_info *map_context); 50 union map_info *map_context);
53 51
@@ -60,8 +58,7 @@ typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
60 * 2 : The target wants to push back the io 58 * 2 : The target wants to push back the io
61 */ 59 */
62typedef int (*dm_endio_fn) (struct dm_target *ti, 60typedef int (*dm_endio_fn) (struct dm_target *ti,
63 struct bio *bio, int error, 61 struct bio *bio, int error);
64 union map_info *map_context);
65typedef int (*dm_request_endio_fn) (struct dm_target *ti, 62typedef int (*dm_request_endio_fn) (struct dm_target *ti,
66 struct request *clone, int error, 63 struct request *clone, int error,
67 union map_info *map_context); 64 union map_info *map_context);
@@ -193,18 +190,30 @@ struct dm_target {
193 * A number of zero-length barrier requests that will be submitted 190 * A number of zero-length barrier requests that will be submitted
194 * to the target for the purpose of flushing cache. 191 * to the target for the purpose of flushing cache.
195 * 192 *
196 * The request number will be placed in union map_info->target_request_nr. 193 * The request number can be accessed with dm_bio_get_target_request_nr.
197 * It is a responsibility of the target driver to remap these requests 194 * It is a responsibility of the target driver to remap these requests
198 * to the real underlying devices. 195 * to the real underlying devices.
199 */ 196 */
200 unsigned num_flush_requests; 197 unsigned num_flush_requests;
201 198
202 /* 199 /*
203 * The number of discard requests that will be submitted to the 200 * The number of discard requests that will be submitted to the target.
204 * target. map_info->request_nr is used just like num_flush_requests. 201 * The request number can be accessed with dm_bio_get_target_request_nr.
205 */ 202 */
206 unsigned num_discard_requests; 203 unsigned num_discard_requests;
207 204
205 /*
206 * The number of WRITE SAME requests that will be submitted to the target.
207 * The request number can be accessed with dm_bio_get_target_request_nr.
208 */
209 unsigned num_write_same_requests;
210
211 /*
212 * The minimum number of extra bytes allocated in each bio for the
213 * target to use. dm_per_bio_data returns the data location.
214 */
215 unsigned per_bio_data_size;
216
208 /* target specific data */ 217 /* target specific data */
209 void *private; 218 void *private;
210 219
@@ -241,6 +250,36 @@ struct dm_target_callbacks {
241 int (*congested_fn) (struct dm_target_callbacks *, int); 250 int (*congested_fn) (struct dm_target_callbacks *, int);
242}; 251};
243 252
253/*
254 * For bio-based dm.
255 * One of these is allocated for each bio.
256 * This structure shouldn't be touched directly by target drivers.
257 * It is here so that we can inline dm_per_bio_data and
258 * dm_bio_from_per_bio_data
259 */
260struct dm_target_io {
261 struct dm_io *io;
262 struct dm_target *ti;
263 union map_info info;
264 unsigned target_request_nr;
265 struct bio clone;
266};
267
268static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
269{
270 return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
271}
272
273static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
274{
275 return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
276}
277
278static inline unsigned dm_bio_get_target_request_nr(const struct bio *bio)
279{
280 return container_of(bio, struct dm_target_io, clone)->target_request_nr;
281}
282
244int dm_register_target(struct target_type *t); 283int dm_register_target(struct target_type *t);
245void dm_unregister_target(struct target_type *t); 284void dm_unregister_target(struct target_type *t);
246 285
1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246
/* SCTP kernel reference Implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel reference Implementation
 *
 * Initialization/cleanup for SCTP protocol support.
 *
 * The SCTP reference implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP reference implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *    Jon Grimm <jgrimm@us.ibm.com>
 *    Sridhar Samudrala <sri@us.ibm.com>
 *    Daisy Chang <daisyc@us.ibm.com>
 *    Ardelle Fan <ardelle.fan@intel.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/seq_file.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/sctp/sctp.h>
#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_ecn.h>

/* Global data structures. */
struct sctp_globals sctp_globals;	/* protocol-wide tunables and state */
struct proc_dir_entry	*proc_net_sctp;	/* "net/sctp" procfs directory */
DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics);

/* Map of association ids to associations, guarded by its spinlock. */
struct idr sctp_assocs_id;
DEFINE_SPINLOCK(sctp_assocs_id_lock);

/* This is the global socket data structure used for responding to
 * the Out-of-the-blue (OOTB) packets.  A control sock will be created
 * for this socket at the initialization time.
 */
static struct socket *sctp_ctl_socket;

/* Per-family protocol (pf) and address-family (af) operation tables,
 * filled in at registration time.
 */
static struct sctp_pf *sctp_pf_inet6_specific;
static struct sctp_pf *sctp_pf_inet_specific;
static struct sctp_af *sctp_af_v4_specific;
static struct sctp_af *sctp_af_v6_specific;

/* Slab caches for chunks and port-hash buckets. */
kmem_cache_t *sctp_chunk_cachep;
kmem_cache_t *sctp_bucket_cachep;

/* procfs init/teardown hooks implemented in proc.c. */
extern int sctp_snmp_proc_init(void);
extern int sctp_snmp_proc_exit(void);
extern int sctp_eps_proc_init(void);
extern int sctp_eps_proc_exit(void);
extern int sctp_assocs_proc_init(void);
extern int sctp_assocs_proc_exit(void);

/* Return the address of the control sock.
 *
 * The control socket is the global socket used for responding to OOTB
 * packets (see sctp_ctl_socket above); this accessor exposes its sock.
 */
struct sock *sctp_get_ctl_sock(void)
{
	return sctp_ctl_socket->sk;
}

/* Set up the proc fs entries for the SCTP protocol.
 *
 * Creates the "net/sctp" directory (once) and then the snmp, eps and
 * assocs entries beneath it.  Returns 0 on success, -ENOMEM otherwise.
 */
static __init int sctp_proc_init(void)
{
	struct proc_dir_entry *ent;

	/* Create the top-level directory if it does not exist yet. */
	if (!proc_net_sctp) {
		ent = proc_mkdir("net/sctp", NULL);
		if (!ent)
			return -ENOMEM;
		ent->owner = THIS_MODULE;
		proc_net_sctp = ent;
	}

	/* Populate the per-subsystem entries, in the same order as the
	 * original: snmp, then endpoints, then associations.
	 */
	if (sctp_snmp_proc_init() ||
	    sctp_eps_proc_init() ||
	    sctp_assocs_proc_init())
		return -ENOMEM;

	return 0;
}

/* Clean up the proc fs entry for the SCTP protocol.
 * Note: Do not make this __exit as it is used in the init error
 * path.
 */
static void sctp_proc_exit(void)
{
	/* Tear down the per-subsystem entries first... */
	sctp_snmp_proc_exit();
	sctp_eps_proc_exit();
	sctp_assocs_proc_exit();

	/* ...then remove the now-empty "net/sctp" directory itself. */
	if (proc_net_sctp) {
		proc_net_sctp = NULL;
		remove_proc_entry("net/sctp", NULL);
	}
}

/* Private helper to extract the ipv4 addresses configured on a device
 * and stash them on the given list.
 *
 * Runs under rcu_read_lock() because the in_device is obtained via
 * __in_dev_get().  Allocation is GFP_ATOMIC since we cannot sleep here;
 * on allocation failure the address is silently skipped (best effort).
 */
static void sctp_v4_copy_addrlist(struct list_head *addrlist,
				  struct net_device *dev)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	struct sctp_sockaddr_entry *addr;

	rcu_read_lock();
	if ((in_dev = __in_dev_get(dev)) == NULL) {
		/* Device has no IPv4 configuration: nothing to copy. */
		rcu_read_unlock();
		return;
	}

	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
		/* Add the address to the local list.  */
		addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC);
		if (addr) {
			addr->a.v4.sin_family = AF_INET;
			addr->a.v4.sin_port = 0;
			addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
			list_add_tail(&addr->list, addrlist);
		}
	}

	rcu_read_unlock();
}

/* Extract our IP addresses from the system and stash them in the
 * protocol structure.
 *
 * Iterates every net_device (under dev_base_lock) and lets each
 * registered address family copy its addresses onto
 * sctp_local_addr_list.  Caller must hold sctp_local_addr_lock
 * (see sctp_get_local_addr_list).
 */
static void __sctp_get_local_addr_list(void)
{
	struct net_device *dev;
	struct list_head *pos;
	struct sctp_af *af;

	read_lock(&dev_base_lock);
	for (dev = dev_base; dev; dev = dev->next) {
		__list_for_each(pos, &sctp_address_families) {
			af = list_entry(pos, struct sctp_af, list);
			af->copy_addrlist(&sctp_local_addr_list, dev);
		}
	}
	read_unlock(&dev_base_lock);
}

/* Locked wrapper around __sctp_get_local_addr_list(): rebuilds the
 * local address list while holding sctp_local_addr_lock.
 */
static void sctp_get_local_addr_list(void)
{
	unsigned long flags;

	sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags);
	__sctp_get_local_addr_list();
	sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags);
}

/* Free the existing local addresses.  Uses the _safe iterator because
 * each entry is unlinked and freed while walking.  Caller must hold
 * sctp_local_addr_lock.
 */
static void __sctp_free_local_addr_list(void)
{
	struct sctp_sockaddr_entry *addr;
	struct list_head *pos, *temp;

	list_for_each_safe(pos, temp, &sctp_local_addr_list) {
		addr = list_entry(pos, struct sctp_sockaddr_entry, list);
		list_del(pos);
		kfree(addr);
	}
}

/* Locked wrapper around __sctp_free_local_addr_list(): empties the
 * local address list while holding sctp_local_addr_lock.
 */
static void sctp_free_local_addr_list(void)
{
	unsigned long flags;

	sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags);
	__sctp_free_local_addr_list();
	sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags);
}

/* Copy the local addresses which are valid for 'scope' into 'bp'.
 *
 * copy_flags filters by what the local sock and the remote peer support
 * (SCTP_ADDR4_PEERSUPP / SCTP_ADDR6_ALLOWED / SCTP_ADDR6_PEERSUPP).
 * Returns 0 on success or the first error from sctp_add_bind_addr.
 *
 * NOTE(review): the 'gfp' parameter is never used here — allocation is
 * hard-coded to GFP_ATOMIC because sctp_local_addr_lock is held with
 * interrupts disabled for the whole walk.
 */
int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
			      int gfp, int copy_flags)
{
	struct sctp_sockaddr_entry *addr;
	int error = 0;
	struct list_head *pos;
	unsigned long flags;

	sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags);
	list_for_each(pos, &sctp_local_addr_list) {
		addr = list_entry(pos, struct sctp_sockaddr_entry, list);
		if (sctp_in_scope(&addr->a, scope)) {
			/* Now that the address is in scope, check to see if
			 * the address type is really supported by the local
			 * sock as well as the remote peer.
			 */
			if ((((AF_INET == addr->a.sa.sa_family) &&
			      (copy_flags & SCTP_ADDR4_PEERSUPP))) ||
			    (((AF_INET6 == addr->a.sa.sa_family) &&
			      (copy_flags & SCTP_ADDR6_ALLOWED) &&
			      (copy_flags & SCTP_ADDR6_PEERSUPP)))) {
				error = sctp_add_bind_addr(bp, &addr->a,
							   GFP_ATOMIC);
				if (error)
					goto end_copy;
			}
		}
	}

end_copy:
	sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags);
	return error;
}

/* Initialize a sctp_addr from an incoming skb.
 *
 * is_saddr selects whether the packet's source (saddr/source port) or
 * destination (daddr/dest port) endpoint is extracted.
 *
 * NOTE(review): the port is stored after ntohs(), i.e. in host byte
 * order, while sin_port is conventionally network order — this appears
 * to be the internal convention of this code base; confirm against the
 * callers before changing.
 */
static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb,
			     int is_saddr)
{
	void *from;
	__u16 *port;
	struct sctphdr *sh;

	port = &addr->v4.sin_port;
	addr->v4.sin_family = AF_INET;

	/* The SCTP header carries both port numbers. */
	sh = (struct sctphdr *) skb->h.raw;
	if (is_saddr) {
		*port  = ntohs(sh->source);
		from = &skb->nh.iph->saddr;
	} else {
		*port = ntohs(sh->dest);
		from = &skb->nh.iph->daddr;
	}
	memcpy(&addr->v4.sin_addr.s_addr, from, sizeof(struct in_addr));
}

/* Initialize an sctp_addr from a socket's bound local endpoint.
 *
 * NOTE(review): inet_sk(sk)->num is copied into sin_port without
 * htons(); presumably 'num' is host order here — verify against users.
 */
static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk)
{
	addr->v4.sin_family = AF_INET;
	addr->v4.sin_port = inet_sk(sk)->num;
	addr->v4.sin_addr.s_addr = inet_sk(sk)->rcv_saddr;
}

/* Initialize sk->sk_rcv_saddr (the bound local address) from sctp_addr. */
static void sctp_v4_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
{
	inet_sk(sk)->rcv_saddr = addr->v4.sin_addr.s_addr;
}

/* Initialize sk->sk_daddr (the peer address) from sctp_addr. */
static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
{
	inet_sk(sk)->daddr = addr->v4.sin_addr.s_addr;
}

/* Initialize a sctp_addr from a wire-format address parameter.
 *
 * 'port' is taken as given (no byte swap performed here); 'iif' is
 * unused for IPv4 — it exists to match the v6 variant's signature,
 * where the inbound interface matters for link-local addresses.
 */
static void sctp_v4_from_addr_param(union sctp_addr *addr,
				    union sctp_addr_param *param,
				    __u16 port, int iif)
{
	addr->v4.sin_family = AF_INET;
	addr->v4.sin_port = port;
	addr->v4.sin_addr.s_addr = param->v4.addr.s_addr;
}

/* Initialize an address parameter from a sctp_addr and return the length
 * of the address parameter (in host byte order).
 */
static int sctp_v4_to_addr_param(const union sctp_addr *addr,
				 union sctp_addr_param *param)
{
	int length = sizeof(sctp_ipv4addr_param_t);

	param->v4.param_hdr.type = SCTP_PARAM_IPV4_ADDRESS;
	/* The parameter header goes on the wire big-endian, so this is a
	 * host-to-network conversion: use htons, not ntohs.  (The two are
	 * numerically identical on all supported ABIs, but ntohs stated
	 * the conversion backwards.)
	 */
	param->v4.param_hdr.length = htons(length);
	param->v4.addr.s_addr = addr->v4.sin_addr.s_addr;

	return length;
}

/* Initialize a sctp_addr from a dst_entry.
 *
 * For IPv4 the chosen source address is cached in the routing entry
 * (rt->rt_src), so it can be read straight out of the dst.
 */
static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct dst_entry *dst,
			      unsigned short port)
{
	struct rtable *rt = (struct rtable *)dst;
	saddr->v4.sin_family = AF_INET;
	saddr->v4.sin_port = port;
	saddr->v4.sin_addr.s_addr = rt->rt_src;
}

/* Compare two addresses exactly: family, port and IPv4 address must all
 * match.  Returns 1 when equal, 0 otherwise.
 */
static int sctp_v4_cmp_addr(const union sctp_addr *addr1,
			    const union sctp_addr *addr2)
{
	return addr1->sa.sa_family == addr2->sa.sa_family &&
	       addr1->v4.sin_port == addr2->v4.sin_port &&
	       addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr;
}

/* Initialize addr struct to the IPv4 wildcard (INADDR_ANY) with the
 * given port.
 */
static void sctp_v4_inaddr_any(union sctp_addr *addr, unsigned short port)
{
	addr->v4.sin_family = AF_INET;
	addr->v4.sin_port = port;
	addr->v4.sin_addr.s_addr = INADDR_ANY;
}

/* Is this the IPv4 wildcard (INADDR_ANY) address? */
static int sctp_v4_is_any(const union sctp_addr *addr)
{
	return addr->v4.sin_addr.s_addr == INADDR_ANY;
}

/* This function checks if the address is a valid address to be used for
 * SCTP binding.
 *
 * Output:
 * Return 0 - If the address is a non-unicast or an illegal address.
 * Return 1 - If the address is a unicast.
 */
static int sctp_v4_addr_valid(union sctp_addr *addr, struct sctp_sock *sp)
{
	/* Valid iff this is not a non-unicast/unusable SCTP address. */
	return !IS_IPV4_UNUSABLE_ADDRESS(&addr->v4.sin_addr.s_addr);
}

/* Should this address be available for binding?
 *
 * The wildcard and locally-configured addresses always are; a non-local
 * address is bindable only when the socket (freebind) or the system
 * (ip_nonlocal_bind) permits it.
 */
static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
{
	int rtn_type = inet_addr_type(addr->v4.sin_addr.s_addr);

	if (addr->v4.sin_addr.s_addr == INADDR_ANY)
		return 1;
	if (rtn_type == RTN_LOCAL)
		return 1;
	if (sp->inet.freebind || sysctl_ip_nonlocal_bind)
		return 1;

	return 0;
}

/* Checking the loopback, private and other address scopes as defined in
 * RFC 1918.   The IPv4 scoping is based on the draft for SCTP IPv4
 * scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>.
 *
 * Level 0 - unusable SCTP addresses
 * Level 1 - loopback address
 * Level 2 - link-local addresses
 * Level 3 - private addresses.
 * Level 4 - global addresses
 * For INIT and INIT-ACK address list, let L be the level of
 * of requested destination address, sender and receiver
 * SHOULD include all of its addresses with level greater
 * than or equal to L.
 */
static sctp_scope_t sctp_v4_scope(union sctp_addr *addr)
{
	/* Should IPv4 scoping be a sysctl configurable option
	 * so users can turn it off (default on) for certain
	 * unconventional networking environments?
	 */

	/* Classify from most to least restrictive scope. */
	if (IS_IPV4_UNUSABLE_ADDRESS(&addr->v4.sin_addr.s_addr))
		return SCTP_SCOPE_UNUSABLE;
	if (LOOPBACK(addr->v4.sin_addr.s_addr))
		return SCTP_SCOPE_LOOPBACK;
	if (IS_IPV4_LINK_ADDRESS(&addr->v4.sin_addr.s_addr))
		return SCTP_SCOPE_LINK;
	if (IS_IPV4_PRIVATE_ADDRESS(&addr->v4.sin_addr.s_addr))
		return SCTP_SCOPE_PRIVATE;

	return SCTP_SCOPE_GLOBAL;
}

/* Returns a valid dst cache entry for the given source and destination ip
 * addresses.  If an association is passed, tries to get a dst entry whose
 * source address matches an address in the association's bind address
 * list.  Returns NULL when no acceptable route exists; otherwise the
 * caller owns the returned dst reference.
 */
static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
					 union sctp_addr *daddr,
					 union sctp_addr *saddr)
{
	struct rtable *rt;
	struct flowi fl;
	struct sctp_bind_addr *bp;
	rwlock_t *addr_lock;
	struct sctp_sockaddr_entry *laddr;
	struct list_head *pos;
	struct dst_entry *dst = NULL;
	union sctp_addr dst_saddr;

	/* Build the flow key: destination always; TOS and output
	 * interface from the association's sock when available; source
	 * only when explicitly requested.
	 */
	memset(&fl, 0x0, sizeof(struct flowi));
	fl.fl4_dst  = daddr->v4.sin_addr.s_addr;
	fl.proto = IPPROTO_SCTP;
	if (asoc) {
		fl.fl4_tos = RT_CONN_FLAGS(asoc->base.sk);
		fl.oif = asoc->base.sk->sk_bound_dev_if;
	}
	if (saddr)
		fl.fl4_src = saddr->v4.sin_addr.s_addr;

	SCTP_DEBUG_PRINTK("%s: DST:%u.%u.%u.%u, SRC:%u.%u.%u.%u - ",
			  __FUNCTION__, NIPQUAD(fl.fl4_dst),
			  NIPQUAD(fl.fl4_src));

	/* First attempt: route on the flow key as given. */
	if (!ip_route_output_key(&rt, &fl)) {
		dst = &rt->u.dst;
	}

	/* If there is no association or if a source address is passed, no
	 * more validation is required.
	 */
	if (!asoc || saddr)
		goto out;

	bp = &asoc->base.bind_addr;
	addr_lock = &asoc->base.addr_lock;

	if (dst) {
		/* Walk through the bind address list and look for a bind
		 * address that matches the source address of the returned dst.
		 */
		sctp_read_lock(addr_lock);
		list_for_each(pos, &bp->address_list) {
			laddr = list_entry(pos, struct sctp_sockaddr_entry,
					   list);
			sctp_v4_dst_saddr(&dst_saddr, dst, bp->port);
			if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
				goto out_unlock;
		}
		sctp_read_unlock(addr_lock);

		/* None of the bound addresses match the source address of the
		 * dst. So release it.
		 */
		dst_release(dst);
		dst = NULL;
	}

	/* Walk through the bind address list and try to get a dst that
	 * matches a bind address as the source address.
	 */
	sctp_read_lock(addr_lock);
	list_for_each(pos, &bp->address_list) {
		laddr = list_entry(pos, struct sctp_sockaddr_entry, list);

		if (AF_INET == laddr->a.sa.sa_family) {
			fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
			if (!ip_route_output_key(&rt, &fl)) {
				dst = &rt->u.dst;
				goto out_unlock;
			}
		}
	}

out_unlock:
	sctp_read_unlock(addr_lock);
out:
	/* rt is only read when dst is non-NULL, in which case it points
	 * at the same route object as dst.
	 */
	if (dst)
		SCTP_DEBUG_PRINTK("rt_dst:%u.%u.%u.%u, rt_src:%u.%u.%u.%u\n",
			  	  NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_src));
	else
		SCTP_DEBUG_PRINTK("NO ROUTE\n");

	return dst;
}

/* For v4, the source address is cached in the route entry(dst). So no need
 * to cache it separately and hence this simply reads it back out.
 *
 * Fills saddr with the route's cached source (rt_src) and the
 * association's bound port; does nothing when no route is given.
 * The daddr argument is unused for IPv4.
 */
static void sctp_v4_get_saddr(struct sctp_association *asoc,
			      struct dst_entry *dst,
			      union sctp_addr *daddr,
			      union sctp_addr *saddr)
{
	struct rtable *rt = (struct rtable *)dst;

	if (rt) {
		saddr->v4.sin_family = AF_INET;
		saddr->v4.sin_port = asoc->base.bind_addr.port;
		saddr->v4.sin_addr.s_addr = rt->rt_src;
	}
}

/* What interface did this skb arrive on? */
static int sctp_v4_skb_iif(const struct sk_buff *skb)
{
     	return ((struct rtable *)skb->dst)->rt_iif;
}

/* Was this packet marked by Explicit Congestion Notification (ECN)?
 * Checks the CE codepoint in the IP header's TOS field.
 */
static int sctp_v4_is_ce(const struct sk_buff *skb)
{
	return INET_ECN_is_ce(skb->nh.iph->tos);
}

/* Create and initialize a new sk for the socket returned by accept().
 *
 * Clones the relevant inet/sock state from the listening sock 'sk' and
 * the peer endpoint from 'asoc'.  Returns the new sock, or NULL on
 * allocation or protocol-init failure.
 */
static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
					     struct sctp_association *asoc)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_sock *newinet;
	struct sock *newsk = sk_alloc(PF_INET, GFP_KERNEL, sk->sk_prot, 1);

	if (!newsk)
		goto out;

	sock_init_data(NULL, newsk);

	/* Accepted SCTP socks present themselves as SOCK_STREAM. */
	newsk->sk_type = SOCK_STREAM;

	/* Inherit per-socket options from the listener. */
	newsk->sk_no_check = sk->sk_no_check;
	newsk->sk_reuse = sk->sk_reuse;
	newsk->sk_shutdown = sk->sk_shutdown;

	newsk->sk_destruct = inet_sock_destruct;
	newsk->sk_family = PF_INET;
	newsk->sk_protocol = IPPROTO_SCTP;
	newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
	sock_reset_flag(newsk, SOCK_ZAPPED);

	newinet = inet_sk(newsk);

	/* Initialize sk's sport, dport, rcv_saddr and daddr for
	 * getsockname() and getpeername()
	 */
	newinet->sport = inet->sport;
	newinet->saddr = inet->saddr;
	newinet->rcv_saddr = inet->rcv_saddr;
	newinet->dport = htons(asoc->peer.port);
	newinet->daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;
	newinet->pmtudisc = inet->pmtudisc;
	newinet->id = 0;

	/* Default multicast/TTL settings for a fresh inet sock. */
	newinet->uc_ttl = -1;
	newinet->mc_loop = 1;
	newinet->mc_ttl = 1;
	newinet->mc_index = 0;
	newinet->mc_list = NULL;

#ifdef INET_REFCNT_DEBUG
	atomic_inc(&inet_sock_nr);
#endif

	/* Let the protocol finish initialization; on failure release
	 * the half-built sock and report NULL.
	 */
	if (newsk->sk_prot->init(newsk)) {
		sk_common_release(newsk);
		newsk = NULL;
	}

out:
	return newsk;
}

/* Map address to/from the v4-mapped form.  This is a no-op for the v4
 * family; it exists to satisfy the sctp_pf operation table (the v6
 * variant does real work).
 */
static void sctp_v4_addr_v4map(struct sctp_sock *sp, union sctp_addr *addr)
{
	/* Empty */
}

/* Dump the v4 addr to the seq file in dotted-quad form followed by a
 * space (used by the procfs listings).
 */
static void sctp_v4_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
{
	seq_printf(seq, "%d.%d.%d.%d ", NIPQUAD(addr->v4.sin_addr));
}

/* Event handler for inet address addition/deletion events.
 * Basically, whenever there is an event, we re-build our local address list.
 */