Diffstat (limited to 'drivers/infiniband/hw/cxgb4/device.c')
 -rw-r--r--  drivers/infiniband/hw/cxgb4/device.c | 191
 1 file changed, 111 insertions(+), 80 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 9bbf491d5d9e..54fbc1118abe 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -49,29 +49,33 @@ static DEFINE_MUTEX(dev_mutex);
 
 static struct dentry *c4iw_debugfs_root;
 
-struct debugfs_qp_data {
+struct c4iw_debugfs_data {
 	struct c4iw_dev *devp;
 	char *buf;
 	int bufsize;
 	int pos;
 };
 
-static int count_qps(int id, void *p, void *data)
+static int count_idrs(int id, void *p, void *data)
 {
-	struct c4iw_qp *qp = p;
 	int *countp = data;
 
-	if (id != qp->wq.sq.qid)
-		return 0;
-
 	*countp = *countp + 1;
 	return 0;
 }
 
-static int dump_qps(int id, void *p, void *data)
+static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
+			    loff_t *ppos)
+{
+	struct c4iw_debugfs_data *d = file->private_data;
+
+	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
+}
+
+static int dump_qp(int id, void *p, void *data)
 {
 	struct c4iw_qp *qp = p;
-	struct debugfs_qp_data *qpd = data;
+	struct c4iw_debugfs_data *qpd = data;
 	int space;
 	int cc;
 
@@ -101,7 +105,7 @@ static int dump_qps(int id, void *p, void *data)
 
 static int qp_release(struct inode *inode, struct file *file)
 {
-	struct debugfs_qp_data *qpd = file->private_data;
+	struct c4iw_debugfs_data *qpd = file->private_data;
 	if (!qpd) {
 		printk(KERN_INFO "%s null qpd?\n", __func__);
 		return 0;
@@ -113,7 +117,7 @@ static int qp_release(struct inode *inode, struct file *file)
 
 static int qp_open(struct inode *inode, struct file *file)
 {
-	struct debugfs_qp_data *qpd;
+	struct c4iw_debugfs_data *qpd;
 	int ret = 0;
 	int count = 1;
 
@@ -126,7 +130,7 @@ static int qp_open(struct inode *inode, struct file *file)
 	qpd->pos = 0;
 
 	spin_lock_irq(&qpd->devp->lock);
-	idr_for_each(&qpd->devp->qpidr, count_qps, &count);
+	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
 	spin_unlock_irq(&qpd->devp->lock);
 
 	qpd->bufsize = count * 128;
@@ -137,7 +141,7 @@ static int qp_open(struct inode *inode, struct file *file)
 	}
 
 	spin_lock_irq(&qpd->devp->lock);
-	idr_for_each(&qpd->devp->qpidr, dump_qps, qpd);
+	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
 	spin_unlock_irq(&qpd->devp->lock);
 
 	qpd->buf[qpd->pos++] = 0;
@@ -149,43 +153,86 @@ out:
 	return ret;
 }
 
-static ssize_t qp_read(struct file *file, char __user *buf, size_t count,
-		       loff_t *ppos)
+static const struct file_operations qp_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = qp_open,
+	.release = qp_release,
+	.read = debugfs_read,
+	.llseek = default_llseek,
+};
+
+static int dump_stag(int id, void *p, void *data)
 {
-	struct debugfs_qp_data *qpd = file->private_data;
-	loff_t pos = *ppos;
-	loff_t avail = qpd->pos;
+	struct c4iw_debugfs_data *stagd = data;
+	int space;
+	int cc;
 
-	if (pos < 0)
-		return -EINVAL;
-	if (pos >= avail)
+	space = stagd->bufsize - stagd->pos - 1;
+	if (space == 0)
+		return 1;
+
+	cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8);
+	if (cc < space)
+		stagd->pos += cc;
+	return 0;
+}
+
+static int stag_release(struct inode *inode, struct file *file)
+{
+	struct c4iw_debugfs_data *stagd = file->private_data;
+	if (!stagd) {
+		printk(KERN_INFO "%s null stagd?\n", __func__);
 		return 0;
-	if (count > avail - pos)
-		count = avail - pos;
+	}
+	kfree(stagd->buf);
+	kfree(stagd);
+	return 0;
+}
 
-	while (count) {
-		size_t len = 0;
+static int stag_open(struct inode *inode, struct file *file)
+{
+	struct c4iw_debugfs_data *stagd;
+	int ret = 0;
+	int count = 1;
 
-		len = min((int)count, (int)qpd->pos - (int)pos);
-		if (copy_to_user(buf, qpd->buf + pos, len))
-			return -EFAULT;
-		if (len == 0)
-			return -EINVAL;
+	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
+	if (!stagd) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	stagd->devp = inode->i_private;
+	stagd->pos = 0;
+
+	spin_lock_irq(&stagd->devp->lock);
+	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
+	spin_unlock_irq(&stagd->devp->lock);
 
-		buf += len;
-		pos += len;
-		count -= len;
+	stagd->bufsize = count * sizeof("0x12345678\n");
+	stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
+	if (!stagd->buf) {
+		ret = -ENOMEM;
+		goto err1;
 	}
-	count = pos - *ppos;
-	*ppos = pos;
-	return count;
+
+	spin_lock_irq(&stagd->devp->lock);
+	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
+	spin_unlock_irq(&stagd->devp->lock);
+
+	stagd->buf[stagd->pos++] = 0;
+	file->private_data = stagd;
+	goto out;
+err1:
+	kfree(stagd);
+out:
+	return ret;
 }
 
-static const struct file_operations qp_debugfs_fops = {
+static const struct file_operations stag_debugfs_fops = {
 	.owner = THIS_MODULE,
-	.open = qp_open,
-	.release = qp_release,
-	.read = qp_read,
+	.open = stag_open,
+	.release = stag_release,
+	.read = debugfs_read,
+	.llseek = default_llseek,
 };
 
 static int setup_debugfs(struct c4iw_dev *devp)
@@ -199,6 +246,11 @@ static int setup_debugfs(struct c4iw_dev *devp)
 				 (void *)devp, &qp_debugfs_fops);
 	if (de && de->d_inode)
 		de->d_inode->i_size = 4096;
+
+	de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
+				 (void *)devp, &stag_debugfs_fops);
+	if (de && de->d_inode)
+		de->d_inode->i_size = 4096;
 	return 0;
 }
 
@@ -290,7 +342,14 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 		printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
 		goto err3;
 	}
+	err = c4iw_ocqp_pool_create(rdev);
+	if (err) {
+		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
+		goto err4;
+	}
 	return 0;
+err4:
+	c4iw_rqtpool_destroy(rdev);
 err3:
 	c4iw_pblpool_destroy(rdev);
 err2:
@@ -317,6 +376,7 @@ static void c4iw_remove(struct c4iw_dev *dev)
 	idr_destroy(&dev->cqidr);
 	idr_destroy(&dev->qpidr);
 	idr_destroy(&dev->mmidr);
+	iounmap(dev->rdev.oc_mw_kva);
 	ib_dealloc_device(&dev->ibdev);
 }
 
@@ -332,6 +392,17 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	}
 	devp->rdev.lldi = *infop;
 
+	devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) +
+		(pci_resource_len(devp->rdev.lldi.pdev, 2) -
+		 roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size));
+	devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
+					  devp->rdev.lldi.vr->ocq.size);
+
+	printk(KERN_INFO MOD "ocq memory: "
+	       "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
+	       devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
+	       devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);
+
 	mutex_lock(&dev_mutex);
 
 	ret = c4iw_rdev_open(&devp->rdev);
@@ -383,46 +454,6 @@ out:
 	return dev;
 }
 
-static struct sk_buff *t4_pktgl_to_skb(const struct pkt_gl *gl,
-				       unsigned int skb_len,
-				       unsigned int pull_len)
-{
-	struct sk_buff *skb;
-	struct skb_shared_info *ssi;
-
-	if (gl->tot_len <= 512) {
-		skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
-		if (unlikely(!skb))
-			goto out;
-		__skb_put(skb, gl->tot_len);
-		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
-	} else {
-		skb = alloc_skb(skb_len, GFP_ATOMIC);
-		if (unlikely(!skb))
-			goto out;
-		__skb_put(skb, pull_len);
-		skb_copy_to_linear_data(skb, gl->va, pull_len);
-
-		ssi = skb_shinfo(skb);
-		ssi->frags[0].page = gl->frags[0].page;
-		ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
-		ssi->frags[0].size = gl->frags[0].size - pull_len;
-		if (gl->nfrags > 1)
-			memcpy(&ssi->frags[1], &gl->frags[1],
-			       (gl->nfrags - 1) * sizeof(skb_frag_t));
-		ssi->nr_frags = gl->nfrags;
-
-		skb->len = gl->tot_len;
-		skb->data_len = skb->len - pull_len;
-		skb->truesize += skb->data_len;
-
-		/* Get a reference for the last page, we don't own it */
-		get_page(gl->frags[gl->nfrags - 1].page);
-	}
-out:
-	return skb;
-}
-
 static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
 			       const struct pkt_gl *gl)
 {
@@ -447,7 +478,7 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
 		c4iw_ev_handler(dev, qid);
 		return 0;
 	} else {
-		skb = t4_pktgl_to_skb(gl, 128, 128);
+		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
 		if (unlikely(!skb))
 			goto nomem;
 	}
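
The pattern this patch converges on for both the "qps" and the new "stags" debugfs entries is generic debugfs usage: snapshot the driver state into a kmalloc'd buffer at open(), serve it through the shared debugfs_read() helper via simple_read_from_buffer(), and free the buffer at release(). Below is a minimal, self-contained sketch of that same pattern as a standalone module, not taken from the patch; the demo_* names, the "demo_debugfs" directory, and the "example" file are hypothetical and exist only for illustration.

/* Sketch of the open-snapshot / simple_read_from_buffer / release-free
 * debugfs pattern used by the patch (illustrative, hypothetical names). */
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/string.h>

struct demo_data {		/* mirrors the role of c4iw_debugfs_data */
	char *buf;
	int bufsize;
	int pos;
};

static struct dentry *demo_root;

static int demo_open(struct inode *inode, struct file *file)
{
	struct demo_data *d;

	d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;
	d->bufsize = 128;
	d->buf = kmalloc(d->bufsize, GFP_KERNEL);
	if (!d->buf) {
		kfree(d);
		return -ENOMEM;
	}
	/* Snapshot the state once at open(); reads only copy from this buffer. */
	d->pos = scnprintf(d->buf, d->bufsize, "jiffies now: %lu\n", jiffies);
	file->private_data = d;
	return 0;
}

static ssize_t demo_read(struct file *file, char __user *ubuf, size_t count,
			 loff_t *ppos)
{
	struct demo_data *d = file->private_data;

	/* Same call the patch's debugfs_read() helper relies on. */
	return simple_read_from_buffer(ubuf, count, ppos, d->buf, d->pos);
}

static int demo_release(struct inode *inode, struct file *file)
{
	struct demo_data *d = file->private_data;

	kfree(d->buf);
	kfree(d);
	return 0;
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.open = demo_open,
	.read = demo_read,
	.release = demo_release,
	.llseek = default_llseek,
};

static int __init demo_init(void)
{
	demo_root = debugfs_create_dir("demo_debugfs", NULL);
	debugfs_create_file("example", 0400, demo_root, NULL, &demo_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_root);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because the buffer is built entirely under the open() path (where the patch also takes devp->lock around the idr walks), the read handler itself needs no locking and can be shared by every entry, which is what lets the patch replace the hand-rolled qp_read() with the single debugfs_read() helper.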