author	Roland Dreier <roland@topspin.com>	2005-04-16 18:26:13 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:26:13 -0400
commit	86562a139182bb19c984347f9625b61f3e6f7815 (patch)
tree	97984c9e04c31a5d2055e4bcac16bb11c923eca1	/drivers/infiniband/hw/mthca/mthca_mr.c
parent	79b61dceafce696d72661d23a02393566b1899ab (diff)
[PATCH] IB/mthca: map MPT/MTT context in mem-free mode
In mem-free mode, when allocating memory regions, make sure that the
HCA has context memory mapped to cover the virtual space used for the
MPT and MTTs being used.

Signed-off-by: Roland Dreier <roland@topspin.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
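The change applies one pattern throughout: on mem-free (ARBEL_NATIVE) HCAs the
MPT and MTT tables live in ICM that is mapped on demand, so before an MPT index
or MTT segment is handed to the hardware the driver pins the backing table
chunk with mthca_table_get()/mthca_table_get_range() from mthca_memfree.h, and
drops that reference with the matching mthca_table_put()/mthca_table_put_range()
on free and on every error path. The sketch below is a condensed, hypothetical
restatement of that pattern rather than code from the patch: example_mr_setup()
does not exist in the driver and its unwinding is simplified (the real
mthca_mr_alloc_phys() also frees its mailbox and MTT segment on failure); only
the mthca_* calls it uses are the interfaces shown in the hunks that follow.

#include "mthca_dev.h"
#include "mthca_memfree.h"

/*
 * Hypothetical helper illustrating the pin/unpin pattern this patch adds
 * around memory region allocation; not part of the driver.  The MPT index
 * "key" is assumed to have been allocated from dev->mr_table.mpt_alloc by
 * the caller.
 */
static int example_mr_setup(struct mthca_dev *dev, struct mthca_mr *mr,
			    u32 key, int order)
{
	int err = 0;

	if (dev->hca_type == ARBEL_NATIVE) {
		/* Pin the ICM chunk backing this MPT entry. */
		err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
		if (err)
			goto err_out_mpt_free;
	}

	/* The reworked mthca_alloc_mtt() pins the MTT range itself. */
	mr->first_seg = mthca_alloc_mtt(dev, order);
	if (mr->first_seg == -1) {
		err = -ENOMEM;
		goto err_out_table;
	}

	/* ... build the MPT entry and issue SW2HW_MPT here ... */
	return 0;

err_out_table:
	if (dev->hca_type == ARBEL_NATIVE)
		mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
	mthca_free(&dev->mr_table.mpt_alloc, mr->ibmr.lkey);
	return err;
}

mthca_free_mr() then mirrors this teardown, dropping the MPT table reference
with mthca_table_put() after the MTT range has been released, as the last hunk
below shows.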
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_mr.c')
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_mr.c	79
1 files changed, 68 insertions, 11 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 80a0cd97881b..5eb6e07f35bb 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -38,6 +38,7 @@
 
 #include "mthca_dev.h"
 #include "mthca_cmd.h"
+#include "mthca_memfree.h"
 
 /*
  * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
@@ -71,7 +72,7 @@ struct mthca_mpt_entry {
  * through the bitmaps)
  */
 
-static u32 mthca_alloc_mtt(struct mthca_dev *dev, int order)
+static u32 __mthca_alloc_mtt(struct mthca_dev *dev, int order)
 {
 	int o;
 	int m;
@@ -105,7 +106,7 @@ static u32 mthca_alloc_mtt(struct mthca_dev *dev, int order)
 	return seg;
 }
 
-static void mthca_free_mtt(struct mthca_dev *dev, u32 seg, int order)
+static void __mthca_free_mtt(struct mthca_dev *dev, u32 seg, int order)
 {
 	seg >>= order;
 
@@ -122,6 +123,32 @@ static void mthca_free_mtt(struct mthca_dev *dev, u32 seg, int order)
 	spin_unlock(&dev->mr_table.mpt_alloc.lock);
 }
 
+static u32 mthca_alloc_mtt(struct mthca_dev *dev, int order)
+{
+	u32 seg = __mthca_alloc_mtt(dev, order);
+
+	if (seg == -1)
+		return -1;
+
+	if (dev->hca_type == ARBEL_NATIVE)
+		if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg,
+					  seg + (1 << order) - 1)) {
+			__mthca_free_mtt(dev, seg, order);
+			seg = -1;
+		}
+
+	return seg;
+}
+
+static void mthca_free_mtt(struct mthca_dev *dev, u32 seg, int order)
+{
+	__mthca_free_mtt(dev, seg, order);
+
+	if (dev->hca_type == ARBEL_NATIVE)
+		mthca_table_put_range(dev, dev->mr_table.mtt_table, seg,
+				      seg + (1 << order) - 1);
+}
+
 static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind)
 {
 	if (dev->hca_type == ARBEL_NATIVE)
@@ -141,7 +168,7 @@ static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
 int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
 			   u32 access, struct mthca_mr *mr)
 {
-	void *mailbox;
+	void *mailbox = NULL;
 	struct mthca_mpt_entry *mpt_entry;
 	u32 key;
 	int err;
@@ -155,11 +182,17 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
 		return -ENOMEM;
 	mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
 
+	if (dev->hca_type == ARBEL_NATIVE) {
+		err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
+		if (err)
+			goto err_out_mpt_free;
+	}
+
 	mailbox = kmalloc(sizeof *mpt_entry + MTHCA_CMD_MAILBOX_EXTRA,
 			  GFP_KERNEL);
 	if (!mailbox) {
-		mthca_free(&dev->mr_table.mpt_alloc, mr->ibmr.lkey);
-		return -ENOMEM;
+		err = -ENOMEM;
+		goto err_out_table;
 	}
 	mpt_entry = MAILBOX_ALIGN(mailbox);
 
@@ -180,16 +213,27 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
 	err = mthca_SW2HW_MPT(dev, mpt_entry,
 			      key & (dev->limits.num_mpts - 1),
 			      &status);
-	if (err)
+	if (err) {
 		mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
-	else if (status) {
+		goto err_out_table;
+	} else if (status) {
 		mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
 			   status);
 		err = -EINVAL;
+		goto err_out_table;
 	}
 
 	kfree(mailbox);
 	return err;
+
+err_out_table:
+	if (dev->hca_type == ARBEL_NATIVE)
+		mthca_table_put(dev, dev->mr_table.mpt_table, key);
+
+err_out_mpt_free:
+	mthca_free(&dev->mr_table.mpt_alloc, mr->ibmr.lkey);
+	kfree(mailbox);
+	return err;
 }
 
 int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
@@ -213,6 +257,12 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
 		return -ENOMEM;
 	mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
 
+	if (dev->hca_type == ARBEL_NATIVE) {
+		err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
+		if (err)
+			goto err_out_mpt_free;
+	}
+
 	for (i = dev->limits.mtt_seg_size / 8, mr->order = 0;
 	     i < list_len;
 	     i <<= 1, ++mr->order)
@@ -220,7 +270,7 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
 
 	mr->first_seg = mthca_alloc_mtt(dev, mr->order);
 	if (mr->first_seg == -1)
-		goto err_out_mpt_free;
+		goto err_out_table;
 
 	/*
 	 * If list_len is odd, we add one more dummy entry for
@@ -307,13 +357,17 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
 	kfree(mailbox);
 	return err;
 
- err_out_mailbox_free:
+err_out_mailbox_free:
 	kfree(mailbox);
 
- err_out_free_mtt:
+err_out_free_mtt:
 	mthca_free_mtt(dev, mr->first_seg, mr->order);
 
- err_out_mpt_free:
+err_out_table:
+	if (dev->hca_type == ARBEL_NATIVE)
+		mthca_table_put(dev, dev->mr_table.mpt_table, key);
+
+err_out_mpt_free:
 	mthca_free(&dev->mr_table.mpt_alloc, mr->ibmr.lkey);
 	return err;
 }
@@ -338,6 +392,9 @@ void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
 	if (mr->order >= 0)
 		mthca_free_mtt(dev, mr->first_seg, mr->order);
 
+	if (dev->hca_type == ARBEL_NATIVE)
+		mthca_table_put(dev, dev->mr_table.mpt_table,
+				key_to_hw_index(dev, mr->ibmr.lkey));
 	mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, mr->ibmr.lkey));
 }
 