about summary refs log tree commit diff stats
path: root/drivers/infiniband/core
diff options
context:
space:
mode:
authorTetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>2010-04-14 22:29:04 -0400
committerRoland Dreier <rolandd@cisco.com>2010-04-21 19:18:40 -0400
commit5d7220e8dc24feed4bbd66667b7696906a147ac4 (patch)
treee3600986a87cb1f4edf02b8fb65093cdcd1b16a8 /drivers/infiniband/core
parent0eddb519b9127c73d53db4bf3ec1d45b13f844d1 (diff)
RDMA/cma: Randomize local port allocation
Randomize local port allocation in the way sctp_get_port_local() does. Update rover at the end of loop since we're likely to pick a valid port on the first try.

Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Reviewed-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--  drivers/infiniband/core/cma.c | 70
1 file changed, 25 insertions(+), 45 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 6d777069d86d..6ae418e81d82 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -79,7 +79,6 @@ static DEFINE_IDR(sdp_ps);
79static DEFINE_IDR(tcp_ps); 79static DEFINE_IDR(tcp_ps);
80static DEFINE_IDR(udp_ps); 80static DEFINE_IDR(udp_ps);
81static DEFINE_IDR(ipoib_ps); 81static DEFINE_IDR(ipoib_ps);
82static int next_port;
83 82
84struct cma_device { 83struct cma_device {
85 struct list_head list; 84 struct list_head list;
@@ -1970,47 +1969,33 @@ err1:
1970 1969
1971static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv) 1970static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
1972{ 1971{
1973 struct rdma_bind_list *bind_list; 1972 static unsigned int last_used_port;
1974 int port, ret, low, high; 1973 int low, high, remaining;
1975 1974 unsigned int rover;
1976 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
1977 if (!bind_list)
1978 return -ENOMEM;
1979
1980retry:
1981 /* FIXME: add proper port randomization per like inet_csk_get_port */
1982 do {
1983 ret = idr_get_new_above(ps, bind_list, next_port, &port);
1984 } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
1985
1986 if (ret)
1987 goto err1;
1988 1975
1989 inet_get_local_port_range(&low, &high); 1976 inet_get_local_port_range(&low, &high);
1990 if (port > high) { 1977 remaining = (high - low) + 1;
1991 if (next_port != low) { 1978 rover = net_random() % remaining + low;
1992 idr_remove(ps, port); 1979retry:
1993 next_port = low; 1980 if (last_used_port != rover &&
1994 goto retry; 1981 !idr_find(ps, (unsigned short) rover)) {
1995 } 1982 int ret = cma_alloc_port(ps, id_priv, rover);
1996 ret = -EADDRNOTAVAIL; 1983 /*
1997 goto err2; 1984 * Remember previously used port number in order to avoid
1985 * re-using same port immediately after it is closed.
1986 */
1987 if (!ret)
1988 last_used_port = rover;
1989 if (ret != -EADDRNOTAVAIL)
1990 return ret;
1998 } 1991 }
1999 1992 if (--remaining) {
2000 if (port == high) 1993 rover++;
2001 next_port = low; 1994 if ((rover < low) || (rover > high))
2002 else 1995 rover = low;
2003 next_port = port + 1; 1996 goto retry;
2004 1997 }
2005 bind_list->ps = ps; 1998 return -EADDRNOTAVAIL;
2006 bind_list->port = (unsigned short) port;
2007 cma_bind_port(bind_list, id_priv);
2008 return 0;
2009err2:
2010 idr_remove(ps, port);
2011err1:
2012 kfree(bind_list);
2013 return ret;
2014} 1999}
2015 2000
2016static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) 2001static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
@@ -2995,12 +2980,7 @@ static void cma_remove_one(struct ib_device *device)
2995 2980
2996static int __init cma_init(void) 2981static int __init cma_init(void)
2997{ 2982{
2998 int ret, low, high, remaining; 2983 int ret;
2999
3000 get_random_bytes(&next_port, sizeof next_port);
3001 inet_get_local_port_range(&low, &high);
3002 remaining = (high - low) + 1;
3003 next_port = ((unsigned int) next_port % remaining) + low;
3004 2984
3005 cma_wq = create_singlethread_workqueue("rdma_cm"); 2985 cma_wq = create_singlethread_workqueue("rdma_cm");
3006 if (!cma_wq) 2986 if (!cma_wq)