diff options
author | kxie@chelsio.com <kxie@chelsio.com> | 2010-08-16 23:55:53 -0400 |
---|---|---|
committer | James Bottomley <James.Bottomley@suse.de> | 2010-09-05 13:29:23 -0400 |
commit | 6f7efaabefebfbc523ea9776e3663a2d81b86399 (patch) | |
tree | b77eb425e2320e0a15d149723b190ac09062b5ee /drivers/scsi/cxgb3i/cxgb3i_ddp.c | |
parent | 7b36b6e03b0d6cee0948593a6a11841a457695b9 (diff) |
[SCSI] cxgb3i: change cxgb3i to use libcxgbi
Signed-off-by: Karen Xie <kxie@chelsio.com>
Reviewed-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/cxgb3i/cxgb3i_ddp.c')
-rw-r--r-- | drivers/scsi/cxgb3i/cxgb3i_ddp.c | 773 |
1 files changed, 0 insertions, 773 deletions
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c deleted file mode 100644 index be0e23042c76..000000000000 --- a/drivers/scsi/cxgb3i/cxgb3i_ddp.c +++ /dev/null | |||
@@ -1,773 +0,0 @@ | |||
1 | /* | ||
2 | * cxgb3i_ddp.c: Chelsio S3xx iSCSI DDP Manager. | ||
3 | * | ||
4 | * Copyright (c) 2008 Chelsio Communications, Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation. | ||
9 | * | ||
10 | * Written by: Karen Xie (kxie@chelsio.com) | ||
11 | */ | ||
12 | |||
13 | #include <linux/slab.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/scatterlist.h> | ||
16 | |||
17 | /* from cxgb3 LLD */ | ||
18 | #include "common.h" | ||
19 | #include "t3_cpl.h" | ||
20 | #include "t3cdev.h" | ||
21 | #include "cxgb3_ctl_defs.h" | ||
22 | #include "cxgb3_offload.h" | ||
23 | #include "firmware_exports.h" | ||
24 | |||
25 | #include "cxgb3i_ddp.h" | ||
26 | |||
/* module-prefixed logging helpers; debug logging compiles to nothing unless
 * __DEBUG_CXGB3I_DDP__ is defined.
 */
#define ddp_log_error(fmt...) printk(KERN_ERR "cxgb3i_ddp: ERR! " fmt)
#define ddp_log_warn(fmt...) printk(KERN_WARNING "cxgb3i_ddp: WARN! " fmt)
#define ddp_log_info(fmt...) printk(KERN_INFO "cxgb3i_ddp: " fmt)

#ifdef __DEBUG_CXGB3I_DDP__
/* prepends the calling function's name to each debug line */
#define ddp_log_debug(fmt, args...) \
	printk(KERN_INFO "cxgb3i_ddp: %s - " fmt, __func__ , ## args)
#else
#define ddp_log_debug(fmt...)
#endif
37 | |||
38 | /* | ||
39 | * iSCSI Direct Data Placement | ||
40 | * | ||
41 | * T3 h/w can directly place the iSCSI Data-In or Data-Out PDU's payload into | ||
42 | * pre-posted final destination host-memory buffers based on the Initiator | ||
43 | * Task Tag (ITT) in Data-In or Target Task Tag (TTT) in Data-Out PDUs. | ||
44 | * | ||
45 | * The host memory address is programmed into h/w in the format of pagepod | ||
46 | * entries. | ||
 * The location of the pagepod entry is encoded into the ddp tag, which is
 * used as, or as the base for, the ITT/TTT.
49 | */ | ||
50 | |||
/* # of ddp page-size indices the h/w supports */
#define DDP_PGIDX_MAX 4
/* minimum transfer length (bytes) considered worth setting up ddp for */
#define DDP_THRESHOLD 2048
/* allocation order and shift per ddp page-size index; defaults are
 * 4KB/8KB/16KB/64KB and may be rewritten by cxgb3i_ddp_adjust_page_table()
 * to start at the host PAGE_SIZE.
 */
static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
/* index matching the host PAGE_SIZE; DDP_PGIDX_MAX == not resolved yet */
static unsigned char page_idx = DDP_PGIDX_MAX;
56 | |||
/*
 * functions to program the pagepod in h/w
 */

/* fill in the ULP_MEM_WRITE work-request header for one pagepod write;
 * the pagepod payload follows the header in the same skb.
 */
static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
{
	struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;

	/* BYPASS opcode: the ULP_MEM_WRITE command rides in the payload */
	req->wr.wr_lo = 0;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
	/* addr is a byte offset in adapter memory, programmed in 32B units */
	req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
				   V_ULPTX_CMD(ULP_MEM_WRITE));
	/* data length in 32B units; NFLITS counts 8-byte flits incl. cmd */
	req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) |
			 V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1));
}
71 | |||
72 | static int set_ddp_map(struct cxgb3i_ddp_info *ddp, struct pagepod_hdr *hdr, | ||
73 | unsigned int idx, unsigned int npods, | ||
74 | struct cxgb3i_gather_list *gl) | ||
75 | { | ||
76 | unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit; | ||
77 | int i; | ||
78 | |||
79 | for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) { | ||
80 | struct sk_buff *skb = ddp->gl_skb[idx]; | ||
81 | struct pagepod *ppod; | ||
82 | int j, pidx; | ||
83 | |||
84 | /* hold on to the skb until we clear the ddp mapping */ | ||
85 | skb_get(skb); | ||
86 | |||
87 | ulp_mem_io_set_hdr(skb, pm_addr); | ||
88 | ppod = (struct pagepod *) | ||
89 | (skb->head + sizeof(struct ulp_mem_io)); | ||
90 | memcpy(&(ppod->hdr), hdr, sizeof(struct pagepod)); | ||
91 | for (pidx = 4 * i, j = 0; j < 5; ++j, ++pidx) | ||
92 | ppod->addr[j] = pidx < gl->nelem ? | ||
93 | cpu_to_be64(gl->phys_addr[pidx]) : 0UL; | ||
94 | |||
95 | skb->priority = CPL_PRIORITY_CONTROL; | ||
96 | cxgb3_ofld_send(ddp->tdev, skb); | ||
97 | } | ||
98 | return 0; | ||
99 | } | ||
100 | |||
/*
 * clear_ddp_map - clear npods pagepod entries starting at idx
 * @ddp: adapter ddp info
 * @tag: ddp tag being released (logging only)
 * @idx: index of the first pagepod entry
 * @npods: # of pagepod entries to clear
 *
 * Reuses the skbs held since set_ddp_map(): zeroes the pagepod payload
 * and sends each back to the adapter as a ULP_MEM_WRITE.
 */
static void clear_ddp_map(struct cxgb3i_ddp_info *ddp, unsigned int tag,
			  unsigned int idx, unsigned int npods)
{
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
	int i;

	for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
		struct sk_buff *skb = ddp->gl_skb[idx];

		if (!skb) {
			ddp_log_error("ddp tag 0x%x, 0x%x, %d/%u, skb NULL.\n",
				      tag, idx, i, npods);
			continue;
		}
		/* hand the skb (held since set_ddp_map()) to the send path */
		ddp->gl_skb[idx] = NULL;
		memset((skb->head + sizeof(struct ulp_mem_io)), 0, PPOD_SIZE);
		ulp_mem_io_set_hdr(skb, pm_addr);
		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(ddp->tdev, skb);
	}
}
122 | |||
123 | static inline int ddp_find_unused_entries(struct cxgb3i_ddp_info *ddp, | ||
124 | unsigned int start, unsigned int max, | ||
125 | unsigned int count, | ||
126 | struct cxgb3i_gather_list *gl) | ||
127 | { | ||
128 | unsigned int i, j, k; | ||
129 | |||
130 | /* not enough entries */ | ||
131 | if ((max - start) < count) | ||
132 | return -EBUSY; | ||
133 | |||
134 | max -= count; | ||
135 | spin_lock(&ddp->map_lock); | ||
136 | for (i = start; i < max;) { | ||
137 | for (j = 0, k = i; j < count; j++, k++) { | ||
138 | if (ddp->gl_map[k]) | ||
139 | break; | ||
140 | } | ||
141 | if (j == count) { | ||
142 | for (j = 0, k = i; j < count; j++, k++) | ||
143 | ddp->gl_map[k] = gl; | ||
144 | spin_unlock(&ddp->map_lock); | ||
145 | return i; | ||
146 | } | ||
147 | i += j + 1; | ||
148 | } | ||
149 | spin_unlock(&ddp->map_lock); | ||
150 | return -EBUSY; | ||
151 | } | ||
152 | |||
153 | static inline void ddp_unmark_entries(struct cxgb3i_ddp_info *ddp, | ||
154 | int start, int count) | ||
155 | { | ||
156 | spin_lock(&ddp->map_lock); | ||
157 | memset(&ddp->gl_map[start], 0, | ||
158 | count * sizeof(struct cxgb3i_gather_list *)); | ||
159 | spin_unlock(&ddp->map_lock); | ||
160 | } | ||
161 | |||
162 | static inline void ddp_free_gl_skb(struct cxgb3i_ddp_info *ddp, | ||
163 | int idx, int count) | ||
164 | { | ||
165 | int i; | ||
166 | |||
167 | for (i = 0; i < count; i++, idx++) | ||
168 | if (ddp->gl_skb[idx]) { | ||
169 | kfree_skb(ddp->gl_skb[idx]); | ||
170 | ddp->gl_skb[idx] = NULL; | ||
171 | } | ||
172 | } | ||
173 | |||
174 | static inline int ddp_alloc_gl_skb(struct cxgb3i_ddp_info *ddp, int idx, | ||
175 | int count, gfp_t gfp) | ||
176 | { | ||
177 | int i; | ||
178 | |||
179 | for (i = 0; i < count; i++) { | ||
180 | struct sk_buff *skb = alloc_skb(sizeof(struct ulp_mem_io) + | ||
181 | PPOD_SIZE, gfp); | ||
182 | if (skb) { | ||
183 | ddp->gl_skb[idx + i] = skb; | ||
184 | skb_put(skb, sizeof(struct ulp_mem_io) + PPOD_SIZE); | ||
185 | } else { | ||
186 | ddp_free_gl_skb(ddp, idx, i); | ||
187 | return -ENOMEM; | ||
188 | } | ||
189 | } | ||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | /** | ||
194 | * cxgb3i_ddp_find_page_index - return ddp page index for a given page size | ||
195 | * @pgsz: page size | ||
196 | * return the ddp page index, if no match is found return DDP_PGIDX_MAX. | ||
197 | */ | ||
198 | int cxgb3i_ddp_find_page_index(unsigned long pgsz) | ||
199 | { | ||
200 | int i; | ||
201 | |||
202 | for (i = 0; i < DDP_PGIDX_MAX; i++) { | ||
203 | if (pgsz == (1UL << ddp_page_shift[i])) | ||
204 | return i; | ||
205 | } | ||
206 | ddp_log_debug("ddp page size 0x%lx not supported.\n", pgsz); | ||
207 | return DDP_PGIDX_MAX; | ||
208 | } | ||
209 | |||
210 | /** | ||
211 | * cxgb3i_ddp_adjust_page_table - adjust page table with PAGE_SIZE | ||
212 | * return the ddp page index, if no match is found return DDP_PGIDX_MAX. | ||
213 | */ | ||
214 | int cxgb3i_ddp_adjust_page_table(void) | ||
215 | { | ||
216 | int i; | ||
217 | unsigned int base_order, order; | ||
218 | |||
219 | if (PAGE_SIZE < (1UL << ddp_page_shift[0])) { | ||
220 | ddp_log_info("PAGE_SIZE 0x%lx too small, min. 0x%lx.\n", | ||
221 | PAGE_SIZE, 1UL << ddp_page_shift[0]); | ||
222 | return -EINVAL; | ||
223 | } | ||
224 | |||
225 | base_order = get_order(1UL << ddp_page_shift[0]); | ||
226 | order = get_order(1 << PAGE_SHIFT); | ||
227 | for (i = 0; i < DDP_PGIDX_MAX; i++) { | ||
228 | /* first is the kernel page size, then just doubling the size */ | ||
229 | ddp_page_order[i] = order - base_order + i; | ||
230 | ddp_page_shift[i] = PAGE_SHIFT + i; | ||
231 | } | ||
232 | return 0; | ||
233 | } | ||
234 | |||
235 | static inline void ddp_gl_unmap(struct pci_dev *pdev, | ||
236 | struct cxgb3i_gather_list *gl) | ||
237 | { | ||
238 | int i; | ||
239 | |||
240 | for (i = 0; i < gl->nelem; i++) | ||
241 | pci_unmap_page(pdev, gl->phys_addr[i], PAGE_SIZE, | ||
242 | PCI_DMA_FROMDEVICE); | ||
243 | } | ||
244 | |||
/*
 * ddp_gl_map - dma-map all pages of a gather list for receive ddp
 * @pdev: pci device used for the mapping
 * @gl: gather list; pages[] are mapped into phys_addr[]
 *
 * Returns the number of pages mapped (== gl->nelem) on success, or
 * -ENOMEM after undoing any partial progress on failure.
 */
static inline int ddp_gl_map(struct pci_dev *pdev,
			     struct cxgb3i_gather_list *gl)
{
	int i;

	for (i = 0; i < gl->nelem; i++) {
		gl->phys_addr[i] = pci_map_page(pdev, gl->pages[i], 0,
						PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(pdev, gl->phys_addr[i])))
			goto unmap;
	}

	return i;

unmap:
	if (i) {
		/* temporarily shrink nelem so ddp_gl_unmap() only touches
		 * the i pages that were actually mapped
		 */
		unsigned int nelem = gl->nelem;

		gl->nelem = i;
		ddp_gl_unmap(pdev, gl);
		gl->nelem = nelem;
	}
	return -ENOMEM;
}
270 | |||
271 | /** | ||
272 | * cxgb3i_ddp_make_gl - build ddp page buffer list | ||
273 | * @xferlen: total buffer length | ||
274 | * @sgl: page buffer scatter-gather list | ||
275 | * @sgcnt: # of page buffers | ||
276 | * @pdev: pci_dev, used for pci map | ||
277 | * @gfp: allocation mode | ||
278 | * | ||
279 | * construct a ddp page buffer list from the scsi scattergather list. | ||
280 | * coalesce buffers as much as possible, and obtain dma addresses for | ||
281 | * each page. | ||
282 | * | ||
283 | * Return the cxgb3i_gather_list constructed from the page buffers if the | ||
284 | * memory can be used for ddp. Return NULL otherwise. | ||
285 | */ | ||
286 | struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen, | ||
287 | struct scatterlist *sgl, | ||
288 | unsigned int sgcnt, | ||
289 | struct pci_dev *pdev, | ||
290 | gfp_t gfp) | ||
291 | { | ||
292 | struct cxgb3i_gather_list *gl; | ||
293 | struct scatterlist *sg = sgl; | ||
294 | struct page *sgpage = sg_page(sg); | ||
295 | unsigned int sglen = sg->length; | ||
296 | unsigned int sgoffset = sg->offset; | ||
297 | unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >> | ||
298 | PAGE_SHIFT; | ||
299 | int i = 1, j = 0; | ||
300 | |||
301 | if (xferlen < DDP_THRESHOLD) { | ||
302 | ddp_log_debug("xfer %u < threshold %u, no ddp.\n", | ||
303 | xferlen, DDP_THRESHOLD); | ||
304 | return NULL; | ||
305 | } | ||
306 | |||
307 | gl = kzalloc(sizeof(struct cxgb3i_gather_list) + | ||
308 | npages * (sizeof(dma_addr_t) + sizeof(struct page *)), | ||
309 | gfp); | ||
310 | if (!gl) | ||
311 | return NULL; | ||
312 | |||
313 | gl->pages = (struct page **)&gl->phys_addr[npages]; | ||
314 | gl->length = xferlen; | ||
315 | gl->offset = sgoffset; | ||
316 | gl->pages[0] = sgpage; | ||
317 | |||
318 | sg = sg_next(sg); | ||
319 | while (sg) { | ||
320 | struct page *page = sg_page(sg); | ||
321 | |||
322 | if (sgpage == page && sg->offset == sgoffset + sglen) | ||
323 | sglen += sg->length; | ||
324 | else { | ||
325 | /* make sure the sgl is fit for ddp: | ||
326 | * each has the same page size, and | ||
327 | * all of the middle pages are used completely | ||
328 | */ | ||
329 | if ((j && sgoffset) || | ||
330 | ((i != sgcnt - 1) && | ||
331 | ((sglen + sgoffset) & ~PAGE_MASK))) | ||
332 | goto error_out; | ||
333 | |||
334 | j++; | ||
335 | if (j == gl->nelem || sg->offset) | ||
336 | goto error_out; | ||
337 | gl->pages[j] = page; | ||
338 | sglen = sg->length; | ||
339 | sgoffset = sg->offset; | ||
340 | sgpage = page; | ||
341 | } | ||
342 | i++; | ||
343 | sg = sg_next(sg); | ||
344 | } | ||
345 | gl->nelem = ++j; | ||
346 | |||
347 | if (ddp_gl_map(pdev, gl) < 0) | ||
348 | goto error_out; | ||
349 | |||
350 | return gl; | ||
351 | |||
352 | error_out: | ||
353 | kfree(gl); | ||
354 | return NULL; | ||
355 | } | ||
356 | |||
/**
 * cxgb3i_ddp_release_gl - release a page buffer list
 * @gl: a ddp page buffer list
 * @pdev: pci_dev used for pci_unmap
 * free a ddp page buffer list resulted from cxgb3i_ddp_make_gl():
 * dma-unmap every page first, then free the list itself.
 */
void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
			   struct pci_dev *pdev)
{
	ddp_gl_unmap(pdev, gl);
	kfree(gl);
}
369 | |||
370 | /** | ||
371 | * cxgb3i_ddp_tag_reserve - set up ddp for a data transfer | ||
372 | * @tdev: t3cdev adapter | ||
373 | * @tid: connection id | ||
374 | * @tformat: tag format | ||
375 | * @tagp: contains s/w tag initially, will be updated with ddp/hw tag | ||
376 | * @gl: the page momory list | ||
377 | * @gfp: allocation mode | ||
378 | * | ||
379 | * ddp setup for a given page buffer list and construct the ddp tag. | ||
380 | * return 0 if success, < 0 otherwise. | ||
381 | */ | ||
382 | int cxgb3i_ddp_tag_reserve(struct t3cdev *tdev, unsigned int tid, | ||
383 | struct cxgb3i_tag_format *tformat, u32 *tagp, | ||
384 | struct cxgb3i_gather_list *gl, gfp_t gfp) | ||
385 | { | ||
386 | struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi; | ||
387 | struct pagepod_hdr hdr; | ||
388 | unsigned int npods; | ||
389 | int idx = -1; | ||
390 | int err = -ENOMEM; | ||
391 | u32 sw_tag = *tagp; | ||
392 | u32 tag; | ||
393 | |||
394 | if (page_idx >= DDP_PGIDX_MAX || !ddp || !gl || !gl->nelem || | ||
395 | gl->length < DDP_THRESHOLD) { | ||
396 | ddp_log_debug("pgidx %u, xfer %u/%u, NO ddp.\n", | ||
397 | page_idx, gl->length, DDP_THRESHOLD); | ||
398 | return -EINVAL; | ||
399 | } | ||
400 | |||
401 | npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT; | ||
402 | |||
403 | if (ddp->idx_last == ddp->nppods) | ||
404 | idx = ddp_find_unused_entries(ddp, 0, ddp->nppods, npods, gl); | ||
405 | else { | ||
406 | idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1, | ||
407 | ddp->nppods, npods, gl); | ||
408 | if (idx < 0 && ddp->idx_last >= npods) { | ||
409 | idx = ddp_find_unused_entries(ddp, 0, | ||
410 | min(ddp->idx_last + npods, ddp->nppods), | ||
411 | npods, gl); | ||
412 | } | ||
413 | } | ||
414 | if (idx < 0) { | ||
415 | ddp_log_debug("xferlen %u, gl %u, npods %u NO DDP.\n", | ||
416 | gl->length, gl->nelem, npods); | ||
417 | return idx; | ||
418 | } | ||
419 | |||
420 | err = ddp_alloc_gl_skb(ddp, idx, npods, gfp); | ||
421 | if (err < 0) | ||
422 | goto unmark_entries; | ||
423 | |||
424 | tag = cxgb3i_ddp_tag_base(tformat, sw_tag); | ||
425 | tag |= idx << PPOD_IDX_SHIFT; | ||
426 | |||
427 | hdr.rsvd = 0; | ||
428 | hdr.vld_tid = htonl(F_PPOD_VALID | V_PPOD_TID(tid)); | ||
429 | hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask); | ||
430 | hdr.maxoffset = htonl(gl->length); | ||
431 | hdr.pgoffset = htonl(gl->offset); | ||
432 | |||
433 | err = set_ddp_map(ddp, &hdr, idx, npods, gl); | ||
434 | if (err < 0) | ||
435 | goto free_gl_skb; | ||
436 | |||
437 | ddp->idx_last = idx; | ||
438 | ddp_log_debug("xfer %u, gl %u,%u, tid 0x%x, 0x%x -> 0x%x(%u,%u).\n", | ||
439 | gl->length, gl->nelem, gl->offset, tid, sw_tag, tag, | ||
440 | idx, npods); | ||
441 | *tagp = tag; | ||
442 | return 0; | ||
443 | |||
444 | free_gl_skb: | ||
445 | ddp_free_gl_skb(ddp, idx, npods); | ||
446 | unmark_entries: | ||
447 | ddp_unmark_entries(ddp, idx, npods); | ||
448 | return err; | ||
449 | } | ||
450 | |||
/**
 * cxgb3i_ddp_tag_release - release a ddp tag
 * @tdev: t3cdev adapter
 * @tag: ddp tag
 * ddp cleanup for a given ddp tag and release all the resources held
 */
void cxgb3i_ddp_tag_release(struct t3cdev *tdev, u32 tag)
{
	struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
	u32 idx;

	if (!ddp) {
		ddp_log_error("release ddp tag 0x%x, ddp NULL.\n", tag);
		return;
	}

	/* recover the pagepod index embedded in the tag by tag_reserve() */
	idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
	if (idx < ddp->nppods) {
		struct cxgb3i_gather_list *gl = ddp->gl_map[idx];
		unsigned int npods;

		if (!gl || !gl->nelem) {
			ddp_log_error("release 0x%x, idx 0x%x, gl 0x%p, %u.\n",
				      tag, idx, gl, gl ? gl->nelem : 0);
			return;
		}
		npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
		ddp_log_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n",
			      tag, idx, npods);
		/* clear h/w pagepods, release map slots, then free the gl */
		clear_ddp_map(ddp, tag, idx, npods);
		ddp_unmark_entries(ddp, idx, npods);
		cxgb3i_ddp_release_gl(gl, ddp->pdev);
	} else
		ddp_log_error("ddp tag 0x%x, idx 0x%x > max 0x%x.\n",
			      tag, idx, ddp->nppods);
}
487 | |||
/*
 * setup_conn_pgidx - program a connection's ddp page-size index
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @pg_idx: ddp page-size index (out-of-range values fall back to 0)
 * @reply: non-zero to request a completion reply from h/w
 *
 * Sends a CPL_SET_TCB_FIELD updating TCB word 31.
 * Returns 0 on success, -ENOMEM if the control skb allocation fails.
 */
static int setup_conn_pgidx(struct t3cdev *tdev, unsigned int tid, int pg_idx,
			    int reply)
{
	struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;

	if (!skb)
		return -ENOMEM;

	/* set up ulp submode and page size */
	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = 0;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(reply ? 0 : 1);
	req->cpu_idx = 0;
	/* page-size index occupies bits 31:28 of TCB word 31 */
	req->word = htons(31);
	req->mask = cpu_to_be64(0xF0000000);
	req->val = cpu_to_be64(val << 28);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(tdev, skb);
	return 0;
}
514 | |||
/**
 * cxgb3i_setup_conn_host_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 * set up the ddp page size based on the host PAGE_SIZE for a connection
 * identified by tid, using the module-wide page_idx resolved at init time
 */
int cxgb3i_setup_conn_host_pagesize(struct t3cdev *tdev, unsigned int tid,
				    int reply)
{
	return setup_conn_pgidx(tdev, tid, page_idx, reply);
}
528 | |||
/**
 * cxgb3i_setup_conn_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 * @pgsz: ddp page size
 * set up the ddp page size for a connection identified by tid; an
 * unsupported @pgsz is handled by setup_conn_pgidx()'s fallback.
 */
int cxgb3i_setup_conn_pagesize(struct t3cdev *tdev, unsigned int tid,
			       int reply, unsigned long pgsz)
{
	return setup_conn_pgidx(tdev, tid,
				cxgb3i_ddp_find_page_index(pgsz), reply);
}
544 | |||
/**
 * cxgb3i_setup_conn_digest - setup conn. digest setting
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @hcrc: header digest enabled
 * @dcrc: data digest enabled
 * @reply: request reply from h/w
 * set up the iscsi digest settings for a connection identified by tid
 * returns 0 on success, -ENOMEM if the control skb allocation fails
 */
int cxgb3i_setup_conn_digest(struct t3cdev *tdev, unsigned int tid,
			     int hcrc, int dcrc, int reply)
{
	struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	/* bit 0: header crc enable, bit 1: data crc enable */
	u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);

	if (!skb)
		return -ENOMEM;

	/* set up ulp submode and page size */
	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = 0;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(reply ? 0 : 1);
	req->cpu_idx = 0;
	/* digest flags occupy bits 27:24 of TCB word 31 */
	req->word = htons(31);
	req->mask = cpu_to_be64(0x0F000000);
	req->val = cpu_to_be64(val << 24);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(tdev, skb);
	return 0;
}
580 | |||
581 | |||
582 | /** | ||
583 | * cxgb3i_adapter_ddp_info - read the adapter's ddp information | ||
584 | * @tdev: t3cdev adapter | ||
585 | * @tformat: tag format | ||
586 | * @txsz: max tx pdu payload size, filled in by this func. | ||
587 | * @rxsz: max rx pdu payload size, filled in by this func. | ||
588 | * setup the tag format for a given iscsi entity | ||
589 | */ | ||
590 | int cxgb3i_adapter_ddp_info(struct t3cdev *tdev, | ||
591 | struct cxgb3i_tag_format *tformat, | ||
592 | unsigned int *txsz, unsigned int *rxsz) | ||
593 | { | ||
594 | struct cxgb3i_ddp_info *ddp; | ||
595 | unsigned char idx_bits; | ||
596 | |||
597 | if (!tformat) | ||
598 | return -EINVAL; | ||
599 | |||
600 | if (!tdev->ulp_iscsi) | ||
601 | return -EINVAL; | ||
602 | |||
603 | ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi; | ||
604 | |||
605 | idx_bits = 32 - tformat->sw_bits; | ||
606 | tformat->rsvd_bits = ddp->idx_bits; | ||
607 | tformat->rsvd_shift = PPOD_IDX_SHIFT; | ||
608 | tformat->rsvd_mask = (1 << tformat->rsvd_bits) - 1; | ||
609 | |||
610 | ddp_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n", | ||
611 | tformat->sw_bits, tformat->rsvd_bits, | ||
612 | tformat->rsvd_shift, tformat->rsvd_mask); | ||
613 | |||
614 | *txsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, | ||
615 | ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN); | ||
616 | *rxsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, | ||
617 | ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN); | ||
618 | ddp_log_info("max payload size: %u/%u, %u/%u.\n", | ||
619 | *txsz, ddp->max_txsz, *rxsz, ddp->max_rxsz); | ||
620 | return 0; | ||
621 | } | ||
622 | |||
/**
 * cxgb3i_ddp_cleanup - release the cxgb3 adapter's ddp resource
 * @tdev: t3cdev adapter
 * release all the resource held by the ddp pagepod manager for a given
 * adapter if needed
 */

/* kref release callback: tear down the whole pagepod manager */
static void ddp_cleanup(struct kref *kref)
{
	struct cxgb3i_ddp_info *ddp = container_of(kref,
						   struct cxgb3i_ddp_info,
						   refcnt);
	int i = 0;

	ddp_log_info("kref release ddp 0x%p, t3dev 0x%p.\n", ddp, ddp->tdev);

	ddp->tdev->ulp_iscsi = NULL;
	/* walk the map, freeing any gather lists still reserved; a gl's
	 * entries occupy npods consecutive slots starting at its index
	 */
	while (i < ddp->nppods) {
		struct cxgb3i_gather_list *gl = ddp->gl_map[i];
		if (gl) {
			int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
				    >> PPOD_PAGES_SHIFT;
			ddp_log_info("t3dev 0x%p, ddp %d + %d.\n",
				     ddp->tdev, i, npods);
			/* NOTE(review): gl is kfree'd without ddp_gl_unmap();
			 * leftover dma mappings appear to be leaked here —
			 * confirm whether callers guarantee none remain.
			 */
			kfree(gl);
			ddp_free_gl_skb(ddp, i, npods);
			i += npods;
		} else
			i++;
	}
	cxgb3i_free_big_mem(ddp);
}
655 | |||
/* drop one reference on the adapter's ddp manager; the manager itself is
 * torn down by ddp_cleanup() when the last reference goes away.
 */
void cxgb3i_ddp_cleanup(struct t3cdev *tdev)
{
	struct cxgb3i_ddp_info *ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;

	ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev, ddp);
	if (ddp)
		kref_put(&ddp->refcnt, ddp_cleanup);
}
664 | |||
/**
 * ddp_init - initialize the cxgb3 adapter's ddp resource
 * @tdev: t3cdev adapter
 * initialize the ddp pagepod manager for a given adapter
 */
static void ddp_init(struct t3cdev *tdev)
{
	struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
	struct ulp_iscsi_info uinfo;
	unsigned int ppmax, bits;
	int i, err;

	/* already set up: just take another reference */
	if (ddp) {
		kref_get(&ddp->refcnt);
		ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n",
			     tdev, tdev->ulp_iscsi);
		return;
	}

	err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
	if (err < 0) {
		ddp_log_error("%s, failed to get iscsi param err=%d.\n",
			      tdev->name, err);
		return;
	}

	/* pagepod capacity of the adapter memory window [llimit, ulimit] */
	ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
	bits = __ilog2_u32(ppmax) + 1;
	if (bits > PPOD_IDX_MAX_SIZE)
		bits = PPOD_IDX_MAX_SIZE;
	/* clamp ppmax so indices fit the (possibly capped) bit budget */
	ppmax = (1 << (bits - 1)) - 1;

	/* one allocation: ddp info, then gl_map[ppmax], then gl_skb[ppmax] */
	ddp = cxgb3i_alloc_big_mem(sizeof(struct cxgb3i_ddp_info) +
				   ppmax *
				   (sizeof(struct cxgb3i_gather_list *) +
				    sizeof(struct sk_buff *)),
				   GFP_KERNEL);
	if (!ddp) {
		ddp_log_warn("%s unable to alloc ddp 0x%d, ddp disabled.\n",
			     tdev->name, ppmax);
		return;
	}
	ddp->gl_map = (struct cxgb3i_gather_list **)(ddp + 1);
	ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
					  ppmax *
					  sizeof(struct cxgb3i_gather_list *));
	spin_lock_init(&ddp->map_lock);
	kref_init(&ddp->refcnt);

	ddp->tdev = tdev;
	ddp->pdev = uinfo.pdev;
	ddp->max_txsz = min_t(unsigned int, uinfo.max_txsz, ULP2_MAX_PKT_SIZE);
	ddp->max_rxsz = min_t(unsigned int, uinfo.max_rxsz, ULP2_MAX_PKT_SIZE);
	ddp->llimit = uinfo.llimit;
	ddp->ulimit = uinfo.ulimit;
	ddp->nppods = ppmax;
	/* idx_last == nppods makes the first reservation search from 0 */
	ddp->idx_last = ppmax;
	ddp->idx_bits = bits;
	ddp->idx_mask = (1 << bits) - 1;
	ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;

	/* push the tag layout and ddp page sizes down to the adapter */
	uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
	for (i = 0; i < DDP_PGIDX_MAX; i++)
		uinfo.pgsz_factor[i] = ddp_page_order[i];
	uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);

	err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
	if (err < 0) {
		ddp_log_warn("%s unable to set iscsi param err=%d, "
			     "ddp disabled.\n", tdev->name, err);
		goto free_ddp_map;
	}

	tdev->ulp_iscsi = ddp;

	ddp_log_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u,"
		     " %u/%u.\n",
		     tdev, ppmax, ddp->idx_bits, ddp->idx_mask,
		     ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz,
		     ddp->max_rxsz, uinfo.max_rxsz);
	return;

free_ddp_map:
	cxgb3i_free_big_mem(ddp);
}
750 | |||
751 | /** | ||
752 | * cxgb3i_ddp_init - initialize ddp functions | ||
753 | */ | ||
754 | void cxgb3i_ddp_init(struct t3cdev *tdev) | ||
755 | { | ||
756 | if (page_idx == DDP_PGIDX_MAX) { | ||
757 | page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE); | ||
758 | |||
759 | if (page_idx == DDP_PGIDX_MAX) { | ||
760 | ddp_log_info("system PAGE_SIZE %lu, update hw.\n", | ||
761 | PAGE_SIZE); | ||
762 | if (cxgb3i_ddp_adjust_page_table() < 0) { | ||
763 | ddp_log_info("PAGE_SIZE %lu, ddp disabled.\n", | ||
764 | PAGE_SIZE); | ||
765 | return; | ||
766 | } | ||
767 | page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE); | ||
768 | } | ||
769 | ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n", | ||
770 | PAGE_SIZE, page_idx); | ||
771 | } | ||
772 | ddp_init(tdev); | ||
773 | } | ||