about summary refs log tree commit diff stats
path: root/drivers/infiniband/hw/mlx5/qp.c
diff options
context:
space:
mode:
authorHaggai Eran <haggaie@mellanox.com>2014-12-11 10:04:11 -0500
committerRoland Dreier <roland@purestorage.com>2014-12-15 21:13:35 -0500
commit968e78dd96443e2cc963c493070574778805e76a (patch)
treeac394ba85a24548c24c2c3f1a41093c1d0fac642 /drivers/infiniband/hw/mlx5/qp.c
parent21af2c3ebfd551660ae0016ecc5bc9afcc24f116 (diff)
IB/mlx5: Enhance UMR support to allow partial page table update
The current UMR interface doesn't allow partial updates to a memory region's page tables. This patch changes the interface to allow that. It also changes the way the UMR operation validates the memory region's state. When the IB_SEND_UMR_FAIL_IF_FREE flag is set, the UMR operation will fail if the MKEY is in the free state. When the flag is not set, the operation will instead fail if the MKEY is not in the free state. Signed-off-by: Haggai Eran <haggaie@mellanox.com> Signed-off-by: Shachar Raindel <raindel@mellanox.com> Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers/infiniband/hw/mlx5/qp.c')
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c100
1 file changed, 64 insertions, 36 deletions
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 1cae1c7132b4..36e2cfe1c2fe 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -70,15 +70,6 @@ static const u32 mlx5_ib_opcode[] = {
70 [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR, 70 [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
71}; 71};
72 72
73struct umr_wr {
74 u64 virt_addr;
75 struct ib_pd *pd;
76 unsigned int page_shift;
77 unsigned int npages;
78 u32 length;
79 int access_flags;
80 u32 mkey;
81};
82 73
83static int is_qp0(enum ib_qp_type qp_type) 74static int is_qp0(enum ib_qp_type qp_type)
84{ 75{
@@ -1848,37 +1839,70 @@ static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
1848 umr->mkey_mask = frwr_mkey_mask(); 1839 umr->mkey_mask = frwr_mkey_mask();
1849} 1840}
1850 1841
1842static __be64 get_umr_reg_mr_mask(void)
1843{
1844 u64 result;
1845
1846 result = MLX5_MKEY_MASK_LEN |
1847 MLX5_MKEY_MASK_PAGE_SIZE |
1848 MLX5_MKEY_MASK_START_ADDR |
1849 MLX5_MKEY_MASK_PD |
1850 MLX5_MKEY_MASK_LR |
1851 MLX5_MKEY_MASK_LW |
1852 MLX5_MKEY_MASK_KEY |
1853 MLX5_MKEY_MASK_RR |
1854 MLX5_MKEY_MASK_RW |
1855 MLX5_MKEY_MASK_A |
1856 MLX5_MKEY_MASK_FREE;
1857
1858 return cpu_to_be64(result);
1859}
1860
1861static __be64 get_umr_unreg_mr_mask(void)
1862{
1863 u64 result;
1864
1865 result = MLX5_MKEY_MASK_FREE;
1866
1867 return cpu_to_be64(result);
1868}
1869
1870static __be64 get_umr_update_mtt_mask(void)
1871{
1872 u64 result;
1873
1874 result = MLX5_MKEY_MASK_FREE;
1875
1876 return cpu_to_be64(result);
1877}
1878
1851static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, 1879static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
1852 struct ib_send_wr *wr) 1880 struct ib_send_wr *wr)
1853{ 1881{
1854 struct umr_wr *umrwr = (struct umr_wr *)&wr->wr.fast_reg; 1882 struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
1855 u64 mask;
1856 1883
1857 memset(umr, 0, sizeof(*umr)); 1884 memset(umr, 0, sizeof(*umr));
1858 1885
1886 if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
1887 umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
1888 else
1889 umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
1890
1859 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) { 1891 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
1860 umr->flags = 1 << 5; /* fail if not free */
1861 umr->klm_octowords = get_klm_octo(umrwr->npages); 1892 umr->klm_octowords = get_klm_octo(umrwr->npages);
1862 mask = MLX5_MKEY_MASK_LEN | 1893 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) {
1863 MLX5_MKEY_MASK_PAGE_SIZE | 1894 umr->mkey_mask = get_umr_update_mtt_mask();
1864 MLX5_MKEY_MASK_START_ADDR | 1895 umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
1865 MLX5_MKEY_MASK_PD | 1896 umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
1866 MLX5_MKEY_MASK_LR | 1897 } else {
1867 MLX5_MKEY_MASK_LW | 1898 umr->mkey_mask = get_umr_reg_mr_mask();
1868 MLX5_MKEY_MASK_KEY | 1899 }
1869 MLX5_MKEY_MASK_RR |
1870 MLX5_MKEY_MASK_RW |
1871 MLX5_MKEY_MASK_A |
1872 MLX5_MKEY_MASK_FREE;
1873 umr->mkey_mask = cpu_to_be64(mask);
1874 } else { 1900 } else {
1875 umr->flags = 2 << 5; /* fail if free */ 1901 umr->mkey_mask = get_umr_unreg_mr_mask();
1876 mask = MLX5_MKEY_MASK_FREE;
1877 umr->mkey_mask = cpu_to_be64(mask);
1878 } 1902 }
1879 1903
1880 if (!wr->num_sge) 1904 if (!wr->num_sge)
1881 umr->flags |= (1 << 7); /* inline */ 1905 umr->flags |= MLX5_UMR_INLINE;
1882} 1906}
1883 1907
1884static u8 get_umr_flags(int acc) 1908static u8 get_umr_flags(int acc)
@@ -1895,7 +1919,7 @@ static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
1895{ 1919{
1896 memset(seg, 0, sizeof(*seg)); 1920 memset(seg, 0, sizeof(*seg));
1897 if (li) { 1921 if (li) {
1898 seg->status = 1 << 6; 1922 seg->status = MLX5_MKEY_STATUS_FREE;
1899 return; 1923 return;
1900 } 1924 }
1901 1925
@@ -1912,19 +1936,23 @@ static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
1912 1936
1913static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr) 1937static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
1914{ 1938{
1939 struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
1940
1915 memset(seg, 0, sizeof(*seg)); 1941 memset(seg, 0, sizeof(*seg));
1916 if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) { 1942 if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
1917 seg->status = 1 << 6; 1943 seg->status = MLX5_MKEY_STATUS_FREE;
1918 return; 1944 return;
1919 } 1945 }
1920 1946
1921 seg->flags = convert_access(wr->wr.fast_reg.access_flags); 1947 seg->flags = convert_access(umrwr->access_flags);
1922 seg->flags_pd = cpu_to_be32(to_mpd((struct ib_pd *)wr->wr.fast_reg.page_list)->pdn); 1948 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
1923 seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start); 1949 seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
1924 seg->len = cpu_to_be64(wr->wr.fast_reg.length); 1950 seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
1925 seg->log2_page_size = wr->wr.fast_reg.page_shift; 1951 }
1952 seg->len = cpu_to_be64(umrwr->length);
1953 seg->log2_page_size = umrwr->page_shift;
1926 seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 | 1954 seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
1927 mlx5_mkey_variant(wr->wr.fast_reg.rkey)); 1955 mlx5_mkey_variant(umrwr->mkey));
1928} 1956}
1929 1957
1930static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg, 1958static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,