author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>    2008-03-17 11:15:03 -0400
committer  James Bottomley <James.Bottomley@HansenPartnership.com>    2008-04-07 13:15:45 -0400
commit     b1adaf65ba0398c9a1adc8f3a274533165a4df61 (patch)
tree       70c6c5944eb978c6eff55eab1e1b1df7049477ec
parent     78b4b05db57b04b3ed17dc71259bf1402c04abfa (diff)
[SCSI] block: add sg buffer copy helper functions
This patch adds three new helper functions to copy data between an SG list and a linear buffer:

 - sg_copy_from_buffer copies data from a linear buffer to an SG list
 - sg_copy_to_buffer copies data from an SG list to a linear buffer

Both are thin wrappers around an internal sg_copy_buffer helper. When the API copies data from a linear buffer to an SG list, flush_kernel_dcache_page is called. That is not necessary for every caller, but it is a no-op on most architectures, and in general the API is not used on performance-critical paths.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
-rw-r--r--  include/linux/scatterlist.h  |   5
-rw-r--r--  lib/scatterlist.c            | 102
2 files changed, 107 insertions(+), 0 deletions(-)
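For context, a minimal usage sketch of the two exported helpers as they exist in this patch. This is an editorial illustration, not part of the commit: the function name demo_sg_copy, the buffer sizes, and the local_irq_save() bracketing (needed because this version of sg_copy_buffer() warns unless interrupts are disabled) are assumptions for the example.

#include <linux/errno.h>
#include <linux/irqflags.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int demo_sg_copy(void)
{
	struct scatterlist sg[2];
	char payload[64] = "example payload";
	char readback[64];
	unsigned long flags;
	size_t copied;
	void *a, *b;

	a = kmalloc(32, GFP_KERNEL);
	b = kmalloc(32, GFP_KERNEL);
	if (!a || !b) {
		kfree(a);
		kfree(b);
		return -ENOMEM;
	}

	/* Describe two separately allocated 32-byte chunks as one SG list. */
	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], a, 32);
	sg_set_buf(&sg[1], b, 32);

	/*
	 * The helper warns unless interrupts are disabled, so a
	 * process-context caller brackets the copies with local_irq_save().
	 */
	local_irq_save(flags);

	/* Linear buffer -> SG list; returns the number of bytes copied. */
	copied = sg_copy_from_buffer(sg, 2, payload, sizeof(payload));

	/* SG list -> linear buffer; copies the same 64 bytes back out. */
	copied = sg_copy_to_buffer(sg, 2, readback, sizeof(readback));

	local_irq_restore(flags);

	kfree(a);
	kfree(b);
	return copied == sizeof(readback) ? 0 : -EIO;
}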
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index a3d567a974e8..71fc81360048 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -213,6 +213,11 @@ int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t,
 		     sg_alloc_fn *);
 int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
 
+size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
+			   void *buf, size_t buflen);
+size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+			  void *buf, size_t buflen);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index acca4901046c..b80c21100d78 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -8,6 +8,7 @@
  */
 #include <linux/module.h>
 #include <linux/scatterlist.h>
+#include <linux/highmem.h>
 
 /**
  * sg_next - return the next scatterlist entry in a list
@@ -292,3 +293,104 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
 	return ret;
 }
 EXPORT_SYMBOL(sg_alloc_table);
+
+/**
+ * sg_copy_buffer - Copy data between a linear buffer and an SG list
+ * @sgl:		 The SG list
+ * @nents:		 Number of SG entries
+ * @buf:		 Where to copy from
+ * @buflen:		 The number of bytes to copy
+ * @to_buffer:		 transfer direction (non zero == from an sg list to a
+ *			 buffer, 0 == from a buffer to an sg list
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
+			     void *buf, size_t buflen, int to_buffer)
+{
+	struct scatterlist *sg;
+	size_t buf_off = 0;
+	int i;
+
+	WARN_ON(!irqs_disabled());
+
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page;
+		int n = 0;
+		unsigned int sg_off = sg->offset;
+		unsigned int sg_copy = sg->length;
+
+		if (sg_copy > buflen)
+			sg_copy = buflen;
+		buflen -= sg_copy;
+
+		while (sg_copy > 0) {
+			unsigned int page_copy;
+			void *p;
+
+			page_copy = PAGE_SIZE - sg_off;
+			if (page_copy > sg_copy)
+				page_copy = sg_copy;
+
+			page = nth_page(sg_page(sg), n);
+			p = kmap_atomic(page, KM_BIO_SRC_IRQ);
+
+			if (to_buffer)
+				memcpy(buf + buf_off, p + sg_off, page_copy);
+			else {
+				memcpy(p + sg_off, buf + buf_off, page_copy);
+				flush_kernel_dcache_page(page);
+			}
+
+			kunmap_atomic(p, KM_BIO_SRC_IRQ);
+
+			buf_off += page_copy;
+			sg_off += page_copy;
+			if (sg_off == PAGE_SIZE) {
+				sg_off = 0;
+				n++;
+			}
+			sg_copy -= page_copy;
+		}
+
+		if (!buflen)
+			break;
+	}
+
+	return buf_off;
+}
+
+/**
+ * sg_copy_from_buffer - Copy from a linear buffer to an SG list
+ * @sgl:		 The SG list
+ * @nents:		 Number of SG entries
+ * @buf:		 Where to copy from
+ * @buflen:		 The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
+			   void *buf, size_t buflen)
+{
+	return sg_copy_buffer(sgl, nents, buf, buflen, 0);
+}
+EXPORT_SYMBOL(sg_copy_from_buffer);
+
+/**
+ * sg_copy_to_buffer - Copy from an SG list to a linear buffer
+ * @sgl:		 The SG list
+ * @nents:		 Number of SG entries
+ * @buf:		 Where to copy to
+ * @buflen:		 The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+			 void *buf, size_t buflen)
+{
+	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
+}
+EXPORT_SYMBOL(sg_copy_to_buffer);
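The length handling in sg_copy_buffer() above is worth a small illustration: the copy is bounded both by buflen and by the combined length of the SG entries (spanning entries and pages as needed), and the return value reports how many bytes were actually moved. A hedged sketch, assuming a hypothetical fill_header() caller that writes an 8-byte header into the start of an arbitrary SG list; the names and header contents are illustrative only.

#include <linux/irqflags.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

static size_t fill_header(struct scatterlist *sgl, unsigned int nents)
{
	u8 hdr[8] = { 0 };	/* header bytes prepared by the caller */
	unsigned long flags;
	size_t copied;

	/*
	 * This version of sg_copy_buffer() warns unless interrupts are
	 * disabled, so they are disabled around the copy.
	 */
	local_irq_save(flags);

	/*
	 * Only min(sizeof(hdr), total SG length) bytes are written; the
	 * return value is the number of bytes actually copied.
	 */
	copied = sg_copy_from_buffer(sgl, nents, hdr, sizeof(hdr));

	local_irq_restore(flags);

	return copied;	/* 8 whenever the SG list holds at least 8 bytes */
}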