author    Linus Torvalds <torvalds@linux-foundation.org>  2008-07-24 15:56:07 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-07-24 15:56:07 -0400
commit    5c402355adf8f920531f02099f4ec0d2bccd4c64 (patch)
tree      eba326f8df01efc0d384874839384040401f5b45 /drivers/net/mlx4
parent    ecc8b655b38a880b578146895e0e1e2d477ca2c0 (diff)
parent    2cc177364e4746becdf421f926fb967c047ccc32 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  MAINTAINERS: Remove Glenn Streiff from NetEffect entry
  mlx4_core: Improve error message when not enough UAR pages are available
  IB/mlx4: Add support for memory management extensions and local DMA L_Key
  IB/mthca: Keep free count for MTT buddy allocator
  mlx4_core: Keep free count for MTT buddy allocator
  mlx4_code: Add missing FW status return code
  IB/mlx4: Rename struct mlx4_lso_seg to mlx4_wqe_lso_seg
  mlx4_core: Add module parameter to enable QoS support
  RDMA/iwcm: Remove IB_ACCESS_LOCAL_WRITE from remote QP attributes
  IPoIB: Include err code in trace message for ib_sa_path_rec_get() failures
  IB/sa_query: Check if sm_ah is NULL in ib_sa_remove_one()
  IB/ehca: Release mutex in error path of alloc_small_queue_page()
  IB/ehca: Use default value for Local CA ACK Delay if FW returns 0
  IB/ehca: Filter PATH_MIG events if QP was never armed
  IB/iser: Add support for RDMA_CM_EVENT_ADDR_CHANGE event
  RDMA/cma: Add RDMA_CM_EVENT_TIMEWAIT_EXIT event
  RDMA/cma: Add RDMA_CM_EVENT_ADDR_CHANGE event
Diffstat (limited to 'drivers/net/mlx4')
-rw-r--r--  drivers/net/mlx4/cmd.c  |  3
-rw-r--r--  drivers/net/mlx4/fw.c   | 18
-rw-r--r--  drivers/net/mlx4/fw.h   |  2
-rw-r--r--  drivers/net/mlx4/main.c |  2
-rw-r--r--  drivers/net/mlx4/mlx4.h |  1
-rw-r--r--  drivers/net/mlx4/mr.c   | 49
-rw-r--r--  drivers/net/mlx4/pd.c   |  7
7 files changed, 61 insertions(+), 21 deletions(-)
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c
index 70dff94a8bc6..04d5bc69a6f8 100644
--- a/drivers/net/mlx4/cmd.c
+++ b/drivers/net/mlx4/cmd.c
@@ -67,6 +67,8 @@ enum {
 	CMD_STAT_BAD_INDEX	= 0x0a,
 	/* FW image corrupted: */
 	CMD_STAT_BAD_NVMEM	= 0x0b,
+	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
+	CMD_STAT_ICM_ERROR	= 0x0c,
 	/* Attempt to modify a QP/EE which is not in the presumed state: */
 	CMD_STAT_BAD_QP_STATE	= 0x10,
 	/* Bad segment parameters (Address/Size): */
@@ -119,6 +121,7 @@ static int mlx4_status_to_errno(u8 status)
 		[CMD_STAT_BAD_RES_STATE]  = -EBADF,
 		[CMD_STAT_BAD_INDEX]	  = -EBADF,
 		[CMD_STAT_BAD_NVMEM]	  = -EFAULT,
+		[CMD_STAT_ICM_ERROR]	  = -ENFILE,
 		[CMD_STAT_BAD_QP_STATE]   = -EINVAL,
 		[CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
 		[CMD_STAT_REG_BOUND]	  = -EBUSY,
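
Side note on the cmd.c hunks: the new CMD_STAT_ICM_ERROR entry extends the firmware-status-to-errno lookup table shown above. The following minimal, self-contained sketch shows how such a sparse translation table is typically consulted, with an -EIO fallback for unmapped codes; the function name and the exact fallback policy are illustrative assumptions, not taken verbatim from the driver.

/* Illustrative sketch only -- not the driver's code.  A firmware status
 * byte indexes a sparse errno table; unmapped codes fall back to -EIO. */
#include <linux/kernel.h>	/* ARRAY_SIZE */
#include <linux/errno.h>
#include <linux/types.h>

static int sketch_status_to_errno(u8 status)
{
	static const int trans_table[] = {
		[0x0b] = -EFAULT,	/* CMD_STAT_BAD_NVMEM */
		[0x0c] = -ENFILE,	/* CMD_STAT_ICM_ERROR (new in this patch) */
		[0x10] = -EINVAL,	/* CMD_STAT_BAD_QP_STATE */
	};

	if (status >= ARRAY_SIZE(trans_table) || !trans_table[status])
		return -EIO;		/* unknown status: assumed fallback */

	return trans_table[status];
}
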
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 2b5006b9be67..57278224ba1e 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -46,6 +46,10 @@ enum {
 extern void __buggy_use_of_MLX4_GET(void);
 extern void __buggy_use_of_MLX4_PUT(void);
 
+static int enable_qos;
+module_param(enable_qos, bool, 0444);
+MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
+
 #define MLX4_GET(dest, source, offset)			      \
 	do {						      \
 		void *__p = (char *) (source) + (offset);     \
@@ -198,7 +202,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET	0x8e
 #define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
 #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
-#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x97
+#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
 
@@ -373,12 +377,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		}
 	}
 
-	if (dev_cap->bmme_flags & 1)
-		mlx4_dbg(dev, "Base MM extensions: yes "
-			 "(flags %d, rsvd L_Key %08x)\n",
-			 dev_cap->bmme_flags, dev_cap->reserved_lkey);
-	else
-		mlx4_dbg(dev, "Base MM extensions: no\n");
+	mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
+		 dev_cap->bmme_flags, dev_cap->reserved_lkey);
 
 	/*
 	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
@@ -737,6 +737,10 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
 		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
 
+	/* Enable QoS support if module parameter set */
+	if (enable_qos)
+		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
+
 	/* QPC/EEC/CQC/EQC/RDMARC attributes */
 
 	MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
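
Side note on the fw.c hunks: the new enable_qos parameter is read-only at runtime (mode 0444) and, when set at load time, flips bit 2 in the INIT_HCA flags word. Below is a hedged, standalone sketch of the same module-parameter pattern (a demo module, not mlx4 itself; recent kernels require a bool variable for a bool parameter, so the sketch uses one).

/* Standalone demo of the module_param() pattern used above; the module
 * and function names here are made up for illustration. */
#include <linux/module.h>
#include <linux/init.h>

static bool enable_qos;
module_param(enable_qos, bool, 0444);	/* visible and read-only in sysfs */
MODULE_PARM_DESC(enable_qos, "Enable QoS support (default: off)");

static int __init qos_param_demo_init(void)
{
	pr_info("qos_param_demo: enable_qos=%d\n", enable_qos);
	return 0;
}
module_init(qos_param_demo_init);

static void __exit qos_param_demo_exit(void)
{
}
module_exit(qos_param_demo_exit);

MODULE_LICENSE("GPL");

With the real driver this would presumably be enabled with something like `modprobe mlx4_core enable_qos=1`, and the value read back from /sys/module/mlx4_core/parameters/enable_qos.
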
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index a0e046c149b7..fbf0e22be122 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -98,7 +98,7 @@ struct mlx4_dev_cap {
 	int cmpt_entry_sz;
 	int mtt_entry_sz;
 	int resize_srq;
-	u8  bmme_flags;
+	u32 bmme_flags;
 	u32 reserved_lkey;
 	u64 max_icm_sz;
 	int max_gso_sz;
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index d3736013fe9b..8e1d24cda1b0 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -158,6 +158,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev->caps.max_msg_sz	     = dev_cap->max_msg_sz;
 	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
 	dev->caps.flags		     = dev_cap->flags;
+	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
+	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
 	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
 	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
 
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index a4023c2dd050..78038499cff5 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -118,6 +118,7 @@ struct mlx4_bitmap {
 
 struct mlx4_buddy {
 	unsigned long	      **bits;
+	unsigned int	       *num_free;
 	int			max_order;
 	spinlock_t		lock;
 };
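
Side note on the mlx4.h hunk: the new num_free array keeps a per-order count of free blocks so that mlx4_buddy_alloc() in mr.c (below) can skip whole orders without scanning their bitmaps. A minimal sketch of that idea follows; the names are illustrative, not the driver's.

/* Sketch of the per-order free-count optimization, outside the kernel. */
struct buddy_sketch {
	unsigned long **bits;		/* one bitmap of free blocks per order */
	unsigned int   *num_free;	/* how many blocks are free at each order */
	int		max_order;
};

/* Return the smallest order >= wanted that has a free block, or -1.
 * The counter makes the common "nothing free at this order" case O(1)
 * instead of a full bitmap scan. */
static int first_nonempty_order(const struct buddy_sketch *b, int wanted)
{
	int o;

	for (o = wanted; o <= b->max_order; ++o)
		if (b->num_free[o])
			return o;	/* caller scans bits[o] only for this order */
	return -1;
}
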
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 03a9abcce524..a3c04c5f12c2 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -47,7 +47,7 @@ struct mlx4_mpt_entry {
 	__be32 flags;
 	__be32 qpn;
 	__be32 key;
-	__be32 pd;
+	__be32 pd_flags;
 	__be64 start;
 	__be64 length;
 	__be32 lkey;
@@ -61,11 +61,15 @@ struct mlx4_mpt_entry {
 } __attribute__((packed));
 
 #define MLX4_MPT_FLAG_SW_OWNS	    (0xfUL << 28)
+#define MLX4_MPT_FLAG_FREE	    (0x3UL << 28)
 #define MLX4_MPT_FLAG_MIO	    (1 << 17)
 #define MLX4_MPT_FLAG_BIND_ENABLE   (1 << 15)
 #define MLX4_MPT_FLAG_PHYSICAL	    (1 <<  9)
 #define MLX4_MPT_FLAG_REGION	    (1 <<  8)
 
+#define MLX4_MPT_PD_FLAG_FAST_REG   (1 << 26)
+#define MLX4_MPT_PD_FLAG_EN_INV	    (3 << 24)
+
 #define MLX4_MTT_FLAG_PRESENT		1
 
 #define MLX4_MPT_STATUS_SW		0xF0
@@ -79,23 +83,26 @@ static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
 
 	spin_lock(&buddy->lock);
 
-	for (o = order; o <= buddy->max_order; ++o) {
-		m = 1 << (buddy->max_order - o);
-		seg = find_first_bit(buddy->bits[o], m);
-		if (seg < m)
-			goto found;
-	}
+	for (o = order; o <= buddy->max_order; ++o)
+		if (buddy->num_free[o]) {
+			m = 1 << (buddy->max_order - o);
+			seg = find_first_bit(buddy->bits[o], m);
+			if (seg < m)
+				goto found;
+		}
 
 	spin_unlock(&buddy->lock);
 	return -1;
 
  found:
 	clear_bit(seg, buddy->bits[o]);
+	--buddy->num_free[o];
 
 	while (o > order) {
 		--o;
 		seg <<= 1;
 		set_bit(seg ^ 1, buddy->bits[o]);
+		++buddy->num_free[o];
 	}
 
 	spin_unlock(&buddy->lock);
@@ -113,11 +120,13 @@ static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
 
 	while (test_bit(seg ^ 1, buddy->bits[order])) {
 		clear_bit(seg ^ 1, buddy->bits[order]);
+		--buddy->num_free[order];
 		seg >>= 1;
 		++order;
 	}
 
 	set_bit(seg, buddy->bits[order]);
+	++buddy->num_free[order];
 
 	spin_unlock(&buddy->lock);
 }
@@ -131,7 +140,9 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
 
 	buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
 			      GFP_KERNEL);
-	if (!buddy->bits)
+	buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int *),
+				  GFP_KERNEL);
+	if (!buddy->bits || !buddy->num_free)
 		goto err_out;
 
 	for (i = 0; i <= buddy->max_order; ++i) {
@@ -143,6 +154,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
 	}
 
 	set_bit(0, buddy->bits[buddy->max_order]);
+	buddy->num_free[buddy->max_order] = 1;
 
 	return 0;
 
@@ -150,9 +162,10 @@ err_out_free:
 	for (i = 0; i <= buddy->max_order; ++i)
 		kfree(buddy->bits[i]);
 
+err_out:
 	kfree(buddy->bits);
+	kfree(buddy->num_free);
 
-err_out:
 	return -ENOMEM;
 }
 
@@ -164,6 +177,7 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
 		kfree(buddy->bits[i]);
 
 	kfree(buddy->bits);
+	kfree(buddy->num_free);
 }
 
 static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
@@ -314,21 +328,30 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 
 	memset(mpt_entry, 0, sizeof *mpt_entry);
 
-	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS	 |
-				       MLX4_MPT_FLAG_MIO	 |
+	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO	 |
 				       MLX4_MPT_FLAG_REGION	 |
 				       mr->access);
 
 	mpt_entry->key	       = cpu_to_be32(key_to_hw_index(mr->key));
-	mpt_entry->pd	       = cpu_to_be32(mr->pd);
+	mpt_entry->pd_flags    = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
 	mpt_entry->start       = cpu_to_be64(mr->iova);
 	mpt_entry->length      = cpu_to_be64(mr->size);
 	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+
 	if (mr->mtt.order < 0) {
 		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
 		mpt_entry->mtt_seg = 0;
-	} else
+	} else {
 		mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
+	}
+
+	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
+		/* fast register MR in free state */
+		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
+		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG);
+	} else {
+		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
+	}
 
 	err = mlx4_SW2HW_MPT(dev, mailbox,
 			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
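
Side note on the last mr.c hunk: mlx4_mr_enable() now leaves fast-register MRs in the FREE ownership state and sets the FAST_REG PD flag, while ordinary MRs stay software-owned; EN_INV is set unconditionally in pd_flags. A commented restatement of that branch follows as a reading aid; treat the interpretation of the mtt.order/page_shift test (an MR with MTTs allocated but no page size chosen yet, i.e. one created for fast registration) as my reading rather than something stated in the patch.

/* Reading aid only -- a commented restatement of the new branch above,
 * not a drop-in replacement for the driver code. */
static void sketch_set_mpt_ownership(struct mlx4_mpt_entry *mpt_entry,
				     const struct mlx4_mr *mr)
{
	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* MTTs are allocated but no page size is set yet:
		 * assumed to be an MR created for fast registration.
		 * Hand it to HW in the FREE state so a later
		 * fast-register work request can populate it. */
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG);
	} else {
		/* Ordinary MR: keep it software-owned for SW2HW_MPT. */
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}
}
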
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index 3a93c5f0f7ab..aa616892d09c 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -91,6 +91,13 @@ EXPORT_SYMBOL_GPL(mlx4_uar_free);
 
 int mlx4_init_uar_table(struct mlx4_dev *dev)
 {
+	if (dev->caps.num_uars <= 128) {
+		mlx4_err(dev, "Only %d UAR pages (need more than 128)\n",
+			 dev->caps.num_uars);
+		mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n");
+		return -ENODEV;
+	}
+
 	return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
 				dev->caps.num_uars, dev->caps.num_uars - 1,
 				max(128, dev->caps.reserved_uars));