commit 53d412fce05e73dd0b25b0ebfa83c7ee94f16451
tree   3aca42d6408bfcded0d5e829a9446a1c04af9b21
parent 51cf22495ae2fe60ba28123e04059cff4ddd9461
author    Jens Axboe <jens.axboe@oracle.com>  2007-07-24 08:41:13 -0400
committer Jens Axboe <jens.axboe@oracle.com>  2007-10-16 05:20:59 -0400
infiniband: sg chaining support
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
 drivers/infiniband/hw/ipath/ipath_dma.c   | 10
 drivers/infiniband/ulp/iser/iser_memory.c | 75
 2 files changed, 47 insertions(+), 38 deletions(-)
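The whole patch is one mechanical idiom change: with sg chaining a scatterlist is no longer guaranteed to be a single flat array, so open-coded sg[i] walks become for_each_sg() walks that follow chain links while advancing. A minimal sketch of the before/after shape, where use_entry() is a hypothetical stand-in for the per-entry work:

#include <linux/scatterlist.h>

/* Old style: assumes all nents entries sit in one contiguous array. */
static void walk_sg_old(struct scatterlist *sg, int nents)
{
	int i;

	for (i = 0; i < nents; i++)
		use_entry(&sg[i]);
}

/* New style: safe for chained lists, advances via sg_next(). */
static void walk_sg_new(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		use_entry(sg);
}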
diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c
index f87f003e3ef8..22709a4f8fc8 100644
--- a/drivers/infiniband/hw/ipath/ipath_dma.c
+++ b/drivers/infiniband/hw/ipath/ipath_dma.c
@@ -30,6 +30,7 @@
  * SOFTWARE.
  */
 
+#include <linux/scatterlist.h>
 #include <rdma/ib_verbs.h>
 
 #include "ipath_verbs.h"
@@ -96,17 +97,18 @@ static void ipath_dma_unmap_page(struct ib_device *dev,
 	BUG_ON(!valid_dma_direction(direction));
 }
 
-static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents,
-			enum dma_data_direction direction)
+static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
+			int nents, enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	u64 addr;
 	int i;
 	int ret = nents;
 
 	BUG_ON(!valid_dma_direction(direction));
 
-	for (i = 0; i < nents; i++) {
-		addr = (u64) page_address(sg[i].page);
+	for_each_sg(sgl, sg, nents, i) {
+		addr = (u64) page_address(sg->page);
 		/* TODO: handle highmem pages */
 		if (!addr) {
 			ret = 0;
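For orientation, for_each_sg() is essentially a plain for statement whose increment clause hops the chain; roughly paraphrased from <linux/scatterlist.h> of this era (see the header for the authoritative definition):

#define for_each_sg(sglist, sg, nr, __i)	\
	for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))

Because the macro owns the entire init/condition/increment of the loop, extra setup such as the p = mem initialization in the iser hunks below has to move out to its own statement before the loop.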
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index e05690e3592f..f3529b6f0a33 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -124,17 +124,19 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 
 	if (cmd_dir == ISER_DIR_OUT) {
 		/* copy the unaligned sg the buffer which is used for RDMA */
-		struct scatterlist *sg = (struct scatterlist *)data->buf;
+		struct scatterlist *sgl = (struct scatterlist *)data->buf;
+		struct scatterlist *sg;
 		int i;
 		char *p, *from;
 
-		for (p = mem, i = 0; i < data->size; i++) {
-			from = kmap_atomic(sg[i].page, KM_USER0);
+		p = mem;
+		for_each_sg(sgl, sg, data->size, i) {
+			from = kmap_atomic(sg->page, KM_USER0);
 			memcpy(p,
-			       from + sg[i].offset,
-			       sg[i].length);
+			       from + sg->offset,
+			       sg->length);
 			kunmap_atomic(from, KM_USER0);
-			p += sg[i].length;
+			p += sg->length;
 		}
 	}
 
@@ -176,7 +178,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 
 	if (cmd_dir == ISER_DIR_IN) {
 		char *mem;
-		struct scatterlist *sg;
+		struct scatterlist *sgl, *sg;
 		unsigned char *p, *to;
 		unsigned int sg_size;
 		int i;
@@ -184,16 +186,17 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 		/* copy back read RDMA to unaligned sg */
 		mem = mem_copy->copy_buf;
 
-		sg = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
+		sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
 		sg_size = iser_ctask->data[ISER_DIR_IN].size;
 
-		for (p = mem, i = 0; i < sg_size; i++){
-			to = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
-			memcpy(to + sg[i].offset,
+		p = mem;
+		for_each_sg(sgl, sg, sg_size, i) {
+			to = kmap_atomic(sg->page, KM_SOFTIRQ0);
+			memcpy(to + sg->offset,
 			       p,
-			       sg[i].length);
+			       sg->length);
 			kunmap_atomic(to, KM_SOFTIRQ0);
-			p += sg[i].length;
+			p += sg->length;
 		}
 	}
 
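Both converted copy loops above now share the same linearization pattern. A self-contained sketch of that pattern, written as a hypothetical helper that is not part of the patch:

/* Copy a possibly chained SG list into a linear buffer, mapping one
 * page at a time; kmap_atomic()/KM_USER0 as in the code above. */
static void sg_copy_to_buf(struct scatterlist *sgl, int nents, char *dst)
{
	struct scatterlist *sg;
	char *from;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		from = kmap_atomic(sg->page, KM_USER0);
		memcpy(dst, from + sg->offset, sg->length);
		kunmap_atomic(from, KM_USER0);
		dst += sg->length;
	}
}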
@@ -224,7 +227,8 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 			       struct iser_page_vec *page_vec,
 			       struct ib_device *ibdev)
 {
-	struct scatterlist *sg = (struct scatterlist *)data->buf;
+	struct scatterlist *sgl = (struct scatterlist *)data->buf;
+	struct scatterlist *sg;
 	u64 first_addr, last_addr, page;
 	int end_aligned;
 	unsigned int cur_page = 0;
@@ -232,24 +236,25 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 	int i;
 
 	/* compute the offset of first element */
-	page_vec->offset = (u64) sg[0].offset & ~MASK_4K;
+	page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;
 
-	for (i = 0; i < data->dma_nents; i++) {
-		unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+	for_each_sg(sgl, sg, data->dma_nents, i) {
+		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
 
 		total_sz += dma_len;
 
-		first_addr = ib_sg_dma_address(ibdev, &sg[i]);
+		first_addr = ib_sg_dma_address(ibdev, sg);
 		last_addr = first_addr + dma_len;
 
 		end_aligned = !(last_addr & ~MASK_4K);
 
 		/* continue to collect page fragments till aligned or SG ends */
 		while (!end_aligned && (i + 1 < data->dma_nents)) {
+			sg = sg_next(sg);
 			i++;
-			dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+			dma_len = ib_sg_dma_len(ibdev, sg);
 			total_sz += dma_len;
-			last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len;
+			last_addr = ib_sg_dma_address(ibdev, sg) + dma_len;
 			end_aligned = !(last_addr & ~MASK_4K);
 		}
 
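The hunk above is the one non-mechanical spot in the patch: the inner while loop consumes extra entries itself, so it must advance with sg_next() alongside i++; pointer arithmetic such as &sg[i] could run straight past the end of a chained segment. A hypothetical illustration of the same rule, with consume_pair() as a made-up helper:

static void walk_pairs(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (i + 1 < nents) {
			struct scatterlist *next = sg_next(sg);	/* never sg + 1 */

			consume_pair(sg, next);
			sg = next;	/* keep pointer and index in step */
			i++;
		}
	}
}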
@@ -284,25 +289,26 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
 					      struct ib_device *ibdev)
 {
-	struct scatterlist *sg;
+	struct scatterlist *sgl, *sg;
 	u64 end_addr, next_addr;
 	int i, cnt;
 	unsigned int ret_len = 0;
 
-	sg = (struct scatterlist *)data->buf;
+	sgl = (struct scatterlist *)data->buf;
 
-	for (cnt = 0, i = 0; i < data->dma_nents; i++, cnt++) {
+	cnt = 0;
+	for_each_sg(sgl, sg, data->dma_nents, i) {
 		/* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
 		   "offset: %ld sz: %ld\n", i,
-		   (unsigned long)page_to_phys(sg[i].page),
-		   (unsigned long)sg[i].offset,
-		   (unsigned long)sg[i].length); */
-		end_addr = ib_sg_dma_address(ibdev, &sg[i]) +
-			   ib_sg_dma_len(ibdev, &sg[i]);
+		   (unsigned long)page_to_phys(sg->page),
+		   (unsigned long)sg->offset,
+		   (unsigned long)sg->length); */
+		end_addr = ib_sg_dma_address(ibdev, sg) +
+			   ib_sg_dma_len(ibdev, sg);
 		/* iser_dbg("Checking sg iobuf end address "
 		   "0x%08lX\n", end_addr); */
 		if (i + 1 < data->dma_nents) {
-			next_addr = ib_sg_dma_address(ibdev, &sg[i+1]);
+			next_addr = ib_sg_dma_address(ibdev, sg_next(sg));
 			/* are i, i+1 fragments of the same page? */
 			if (end_addr == next_addr)
 				continue;
@@ -322,15 +328,16 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
 static void iser_data_buf_dump(struct iser_data_buf *data,
 			       struct ib_device *ibdev)
 {
-	struct scatterlist *sg = (struct scatterlist *)data->buf;
+	struct scatterlist *sgl = (struct scatterlist *)data->buf;
+	struct scatterlist *sg;
 	int i;
 
-	for (i = 0; i < data->dma_nents; i++)
+	for_each_sg(sgl, sg, data->dma_nents, i)
 		iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
 			 "off:0x%x sz:0x%x dma_len:0x%x\n",
-			 i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]),
-			 sg[i].page, sg[i].offset,
-			 sg[i].length, ib_sg_dma_len(ibdev, &sg[i]));
+			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
+			 sg->page, sg->offset,
+			 sg->length, ib_sg_dma_len(ibdev, sg));
 }
 
 static void iser_dump_page_vec(struct iser_page_vec *page_vec)
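Why sg[i] indexing had to go, in one picture: assuming the sg_init_table()/sg_chain() helpers from the same patch series, two short arrays can be stitched into one logical list whose last slot in the first array is a chain link rather than a data entry:

struct scatterlist a[4], b[4];

sg_init_table(a, 4);
sg_init_table(b, 4);
sg_chain(a, 4, b);	/* a[3] becomes a link to b, not a data entry */

for_each_sg()/sg_next() hop from a[2] to b[0] transparently; plain indexing would hand a[3], the link marker, to the caller and then run out of bounds.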