author     Sean Hefty <sean.hefty@intel.com>       2007-01-29 19:41:23 -0500
committer  Roland Dreier <rolandd@cisco.com>       2007-02-10 11:00:50 -0500
commit     aedec08050255db1989a38b59616dd973dfe660b (patch)
tree       69c31d3db3c876005a0e5b05096b9a562d790db6 /drivers
parent     65e5c0262169a92bdec71a8bb9edb32dab2d8d1f (diff)
RDMA/cma: Increment port number after close to avoid re-use
Randomize the starting port number and avoid re-using port values
immediately after they are closed. Instead keep track of the last
port value used and increment it every time a new port number is
assigned, to better replicate other port spaces.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
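The allocation policy is simple to state in isolation: remember the last port handed out and advance it on every allocation, wrapping back to the bottom of the range, so a port that was just released is not immediately reassigned. The following is a minimal user-space sketch of that rotation only; PORT_LOW, PORT_HIGH and pick_next_port() are illustrative stand-ins rather than names from cma.c, and the sketch omits the idr-based collision handling the kernel code also performs.

#include <stdio.h>

/* Illustrative stand-ins for sysctl_local_port_range[0] and [1]. */
#define PORT_LOW  32768
#define PORT_HIGH 61000

static int next_port = PORT_LOW;

/*
 * Hand out the current candidate and advance next_port, wrapping back
 * to the bottom of the range once the top value has been used.  This
 * mirrors the "keep track of the last port value used and increment it"
 * idea from the patch, minus collision handling.
 */
static int pick_next_port(void)
{
	int port = next_port;

	if (port == PORT_HIGH)
		next_port = PORT_LOW;
	else
		next_port = port + 1;

	return port;
}

int main(void)
{
	next_port = PORT_HIGH - 1;		/* start near the top to show the wrap */
	printf("%d\n", pick_next_port());	/* 60999 */
	printf("%d\n", pick_next_port());	/* 61000 */
	printf("%d\n", pick_next_port());	/* wraps back to 32768 */
	return 0;
}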
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/core/cma.c | 66
1 file changed, 56 insertions(+), 10 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 9e0ab048c878..bc31b54e9cac 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -71,6 +71,7 @@ static struct workqueue_struct *cma_wq;
 static DEFINE_IDR(sdp_ps);
 static DEFINE_IDR(tcp_ps);
 static DEFINE_IDR(udp_ps);
+static int next_port;
 
 struct cma_device {
 	struct list_head list;
@@ -1722,33 +1723,74 @@ static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
 			  unsigned short snum)
 {
 	struct rdma_bind_list *bind_list;
-	int port, start, ret;
+	int port, ret;
 
 	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
 	if (!bind_list)
 		return -ENOMEM;
 
-	start = snum ? snum : sysctl_local_port_range[0];
+	do {
+		ret = idr_get_new_above(ps, bind_list, snum, &port);
+	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
+
+	if (ret)
+		goto err1;
+
+	if (port != snum) {
+		ret = -EADDRNOTAVAIL;
+		goto err2;
+	}
+
+	bind_list->ps = ps;
+	bind_list->port = (unsigned short) port;
+	cma_bind_port(bind_list, id_priv);
+	return 0;
+err2:
+	idr_remove(ps, port);
+err1:
+	kfree(bind_list);
+	return ret;
+}
 
+static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
+{
+	struct rdma_bind_list *bind_list;
+	int port, ret;
+
+	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
+	if (!bind_list)
+		return -ENOMEM;
+
+retry:
 	do {
-		ret = idr_get_new_above(ps, bind_list, start, &port);
+		ret = idr_get_new_above(ps, bind_list, next_port, &port);
 	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
 
 	if (ret)
-		goto err;
+		goto err1;
 
-	if ((snum && port != snum) ||
-	    (!snum && port > sysctl_local_port_range[1])) {
-		idr_remove(ps, port);
+	if (port > sysctl_local_port_range[1]) {
+		if (next_port != sysctl_local_port_range[0]) {
+			idr_remove(ps, port);
+			next_port = sysctl_local_port_range[0];
+			goto retry;
+		}
 		ret = -EADDRNOTAVAIL;
-		goto err;
+		goto err2;
 	}
 
+	if (port == sysctl_local_port_range[1])
+		next_port = sysctl_local_port_range[0];
+	else
+		next_port = port + 1;
+
 	bind_list->ps = ps;
 	bind_list->port = (unsigned short) port;
 	cma_bind_port(bind_list, id_priv);
 	return 0;
-err:
+err2:
+	idr_remove(ps, port);
+err1:
 	kfree(bind_list);
 	return ret;
 }
@@ -1811,7 +1853,7 @@ static int cma_get_port(struct rdma_id_private *id_priv)
 
 	mutex_lock(&lock);
 	if (cma_any_port(&id_priv->id.route.addr.src_addr))
-		ret = cma_alloc_port(ps, id_priv, 0);
+		ret = cma_alloc_any_port(ps, id_priv);
 	else
 		ret = cma_use_port(ps, id_priv);
 	mutex_unlock(&lock);
@@ -2448,6 +2490,10 @@ static int cma_init(void)
 {
 	int ret;
 
+	get_random_bytes(&next_port, sizeof next_port);
+	next_port = (next_port % (sysctl_local_port_range[1] -
+		     sysctl_local_port_range[0])) +
+		     sysctl_local_port_range[0];
 	cma_wq = create_singlethread_workqueue("rdma_cm_wq");
 	if (!cma_wq)
 		return -ENOMEM;
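The cma_init() hunk above seeds next_port with random bytes and then folds that value into the configured local port range. A small user-space analogue of the same computation, using rand() in place of get_random_bytes() and illustrative PORT_LOW/PORT_HIGH bounds rather than the kernel's sysctl values, might look like this:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Illustrative stand-ins for sysctl_local_port_range[0] and [1]. */
#define PORT_LOW  32768
#define PORT_HIGH 61000

int main(void)
{
	unsigned int next_port;

	/* User-space stand-in for get_random_bytes(): draw one random value. */
	srand((unsigned int) time(NULL));
	next_port = (unsigned int) rand();

	/* Fold the random value into [PORT_LOW, PORT_HIGH), the same way
	 * cma_init() maps it into the local port range. */
	next_port = (next_port % (PORT_HIGH - PORT_LOW)) + PORT_LOW;

	printf("randomized starting port: %u\n", next_port);
	return 0;
}

Working with an unsigned value here keeps the modulo result non-negative even when the random draw would set the sign bit of a signed int.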