-rw-r--r--	drivers/scsi/scsi_lib.c		140
-rw-r--r--	drivers/scsi/scsi_tgt_lib.c	3
-rw-r--r--	include/linux/scatterlist.h	126
-rw-r--r--	include/scsi/scsi_cmnd.h	7
-rw-r--r--	lib/Makefile			2
-rw-r--r--	lib/scatterlist.c		294
6 files changed, 347 insertions(+), 225 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 4cf902efbdbf..eb4911a61641 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -739,138 +739,43 @@ static inline unsigned int scsi_sgtable_index(unsigned short nents)
 	return index;
 }
 
-struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
 {
 	struct scsi_host_sg_pool *sgp;
-	struct scatterlist *sgl, *prev, *ret;
-	unsigned int index;
-	int this, left;
 
-	BUG_ON(!cmd->use_sg);
-
-	left = cmd->use_sg;
-	ret = prev = NULL;
-	do {
-		this = left;
-		if (this > SCSI_MAX_SG_SEGMENTS) {
-			this = SCSI_MAX_SG_SEGMENTS - 1;
-			index = SG_MEMPOOL_NR - 1;
-		} else
-			index = scsi_sgtable_index(this);
-
-		left -= this;
-
-		sgp = scsi_sg_pools + index;
-
-		sgl = mempool_alloc(sgp->pool, gfp_mask);
-		if (unlikely(!sgl))
-			goto enomem;
-
-		sg_init_table(sgl, sgp->size);
-
-		/*
-		 * first loop through, set initial index and return value
-		 */
-		if (!ret)
-			ret = sgl;
-
-		/*
-		 * chain previous sglist, if any. we know the previous
-		 * sglist must be the biggest one, or we would not have
-		 * ended up doing another loop.
-		 */
-		if (prev)
-			sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);
-
-		/*
-		 * if we have nothing left, mark the last segment as
-		 * end-of-list
-		 */
-		if (!left)
-			sg_mark_end(&sgl[this - 1]);
-
-		/*
-		 * don't allow subsequent mempool allocs to sleep, it would
-		 * violate the mempool principle.
-		 */
-		gfp_mask &= ~__GFP_WAIT;
-		gfp_mask |= __GFP_HIGH;
-		prev = sgl;
-	} while (left);
-
-	/*
-	 * ->use_sg may get modified after dma mapping has potentially
-	 * shrunk the number of segments, so keep a copy of it for free.
-	 */
-	cmd->__use_sg = cmd->use_sg;
+	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
+	mempool_free(sgl, sgp->pool);
+}
+
+static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
+{
+	struct scsi_host_sg_pool *sgp;
+
+	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
+	return mempool_alloc(sgp->pool, gfp_mask);
+}
+
+int scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+{
+	int ret;
+
+	BUG_ON(!cmd->use_sg);
+
+	ret = __sg_alloc_table(&cmd->sg_table, cmd->use_sg,
+			       SCSI_MAX_SG_SEGMENTS, gfp_mask, scsi_sg_alloc);
+	if (unlikely(ret))
+		__sg_free_table(&cmd->sg_table, SCSI_MAX_SG_SEGMENTS,
+				scsi_sg_free);
+
+	cmd->request_buffer = cmd->sg_table.sgl;
 	return ret;
-enomem:
-	if (ret) {
-		/*
-		 * Free entries chained off ret. Since we were trying to
-		 * allocate another sglist, we know that all entries are of
-		 * the max size.
-		 */
-		sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
-		prev = ret;
-		ret = &ret[SCSI_MAX_SG_SEGMENTS - 1];
-
-		while ((sgl = sg_chain_ptr(ret)) != NULL) {
-			ret = &sgl[SCSI_MAX_SG_SEGMENTS - 1];
-			mempool_free(sgl, sgp->pool);
-		}
-
-		mempool_free(prev, sgp->pool);
-	}
-	return NULL;
 }
 
 EXPORT_SYMBOL(scsi_alloc_sgtable);
 
 void scsi_free_sgtable(struct scsi_cmnd *cmd)
 {
-	struct scatterlist *sgl = cmd->request_buffer;
-	struct scsi_host_sg_pool *sgp;
-
-	/*
-	 * if this is the biggest size sglist, check if we have
-	 * chained parts we need to free
-	 */
-	if (cmd->__use_sg > SCSI_MAX_SG_SEGMENTS) {
-		unsigned short this, left;
-		struct scatterlist *next;
-		unsigned int index;
-
-		left = cmd->__use_sg - (SCSI_MAX_SG_SEGMENTS - 1);
-		next = sg_chain_ptr(&sgl[SCSI_MAX_SG_SEGMENTS - 1]);
-		while (left && next) {
-			sgl = next;
-			this = left;
-			if (this > SCSI_MAX_SG_SEGMENTS) {
-				this = SCSI_MAX_SG_SEGMENTS - 1;
-				index = SG_MEMPOOL_NR - 1;
-			} else
-				index = scsi_sgtable_index(this);
-
-			left -= this;
-
-			sgp = scsi_sg_pools + index;
-
-			if (left)
-				next = sg_chain_ptr(&sgl[sgp->size - 1]);
-
-			mempool_free(sgl, sgp->pool);
-		}
-
-		/*
-		 * Restore original, will be freed below
-		 */
-		sgl = cmd->request_buffer;
-		sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
-	} else
-		sgp = scsi_sg_pools + scsi_sgtable_index(cmd->__use_sg);
-
-	mempool_free(sgl, sgp->pool);
+	__sg_free_table(&cmd->sg_table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
 }
 
 EXPORT_SYMBOL(scsi_free_sgtable);
@@ -1120,8 +1025,7 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
 	/*
 	 * If sg table allocation fails, requeue request later.
 	 */
-	cmd->request_buffer = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
-	if (unlikely(!cmd->request_buffer)) {
+	if (unlikely(scsi_alloc_sgtable(cmd, GFP_ATOMIC))) {
 		scsi_unprep_request(req);
 		return BLKPREP_DEFER;
 	}
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 93ece8f4e5de..01e03f3f6ffa 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -359,8 +359,7 @@ static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 	int count;
 
 	cmd->use_sg = rq->nr_phys_segments;
-	cmd->request_buffer = scsi_alloc_sgtable(cmd, gfp_mask);
-	if (!cmd->request_buffer)
+	if (scsi_alloc_sgtable(cmd, gfp_mask))
 		return -ENOMEM;
 
 	cmd->request_bufflen = rq->data_len;
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index e3ff21dbac53..a3d567a974e8 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -7,6 +7,12 @@
 #include <linux/string.h>
 #include <asm/io.h>
 
+struct sg_table {
+	struct scatterlist *sgl;	/* the list */
+	unsigned int nents;		/* number of mapped entries */
+	unsigned int orig_nents;	/* original size of list */
+};
+
 /*
  * Notes on SG table design.
  *
@@ -106,31 +112,6 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
 	sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
 }
 
-/**
- * sg_next - return the next scatterlist entry in a list
- * @sg:		The current sg entry
- *
- * Description:
- *   Usually the next entry will be @sg@ + 1, but if this sg element is part
- *   of a chained scatterlist, it could jump to the start of a new
- *   scatterlist array.
- *
- **/
-static inline struct scatterlist *sg_next(struct scatterlist *sg)
-{
-#ifdef CONFIG_DEBUG_SG
-	BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
-	if (sg_is_last(sg))
-		return NULL;
-
-	sg++;
-	if (unlikely(sg_is_chain(sg)))
-		sg = sg_chain_ptr(sg);
-
-	return sg;
-}
-
 /*
  * Loop over each sg element, following the pointer to a new list if necessary
  */
@@ -138,40 +119,6 @@ static inline struct scatterlist *sg_next(struct scatterlist *sg)
 	for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
 
 /**
- * sg_last - return the last scatterlist entry in a list
- * @sgl:	First entry in the scatterlist
- * @nents:	Number of entries in the scatterlist
- *
- * Description:
- *   Should only be used casually, it (currently) scan the entire list
- *   to get the last entry.
- *
- *   Note that the @sgl@ pointer passed in need not be the first one,
- *   the important bit is that @nents@ denotes the number of entries that
- *   exist from @sgl@.
- *
- **/
-static inline struct scatterlist *sg_last(struct scatterlist *sgl,
-					  unsigned int nents)
-{
-#ifndef ARCH_HAS_SG_CHAIN
-	struct scatterlist *ret = &sgl[nents - 1];
-#else
-	struct scatterlist *sg, *ret = NULL;
-	unsigned int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		ret = sg;
-
-#endif
-#ifdef CONFIG_DEBUG_SG
-	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
-	BUG_ON(!sg_is_last(ret));
-#endif
-	return ret;
-}
-
-/**
  * sg_chain - Chain two sglists together
  * @prv:	First scatterlist
  * @prv_nents:	Number of entries in prv
@@ -223,47 +170,6 @@ static inline void sg_mark_end(struct scatterlist *sg)
 }
 
 /**
- * sg_init_table - Initialize SG table
- * @sgl:	   The SG table
- * @nents:	   Number of entries in table
- *
- * Notes:
- *   If this is part of a chained sg table, sg_mark_end() should be
- *   used only on the last table part.
- *
- **/
-static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents)
-{
-	memset(sgl, 0, sizeof(*sgl) * nents);
-#ifdef CONFIG_DEBUG_SG
-	{
-		unsigned int i;
-		for (i = 0; i < nents; i++)
-			sgl[i].sg_magic = SG_MAGIC;
-	}
-#endif
-	sg_mark_end(&sgl[nents - 1]);
-}
-
-/**
- * sg_init_one - Initialize a single entry sg list
- * @sg:		 SG entry
- * @buf:	 Virtual address for IO
- * @buflen:	 IO length
- *
- * Notes:
- *   This should not be used on a single entry that is part of a larger
- *   table. Use sg_init_table() for that.
- *
- **/
-static inline void sg_init_one(struct scatterlist *sg, const void *buf,
-			       unsigned int buflen)
-{
-	sg_init_table(sg, 1);
-	sg_set_buf(sg, buf, buflen);
-}
-
-/**
  * sg_phys - Return physical address of an sg entry
  * @sg:	     SG entry
  *
@@ -293,4 +199,24 @@ static inline void *sg_virt(struct scatterlist *sg)
 	return page_address(sg_page(sg)) + sg->offset;
 }
 
+struct scatterlist *sg_next(struct scatterlist *);
+struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
+void sg_init_table(struct scatterlist *, unsigned int);
+void sg_init_one(struct scatterlist *, const void *, unsigned int);
+
+typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
+typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
+
+void __sg_free_table(struct sg_table *, unsigned int, sg_free_fn *);
+void sg_free_table(struct sg_table *);
+int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t,
+		     sg_alloc_fn *);
+int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
+
+/*
+ * Maximum number of entries that will be allocated in one piece, if
+ * a list larger than this is required then chaining will be utilized.
+ */
+#define SG_MAX_SINGLE_ALLOC		(PAGE_SIZE / sizeof(struct scatterlist))
+
 #endif /* _LINUX_SCATTERLIST_H */
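The declarations added above are the whole public surface of the new allocator: sg_alloc_table()/sg_free_table() for the common kmalloc-backed case, and the __-prefixed variants for callers that supply their own allocator. A minimal consumer might look like the sketch below; it is illustrative only, and the page array, entry count, and function name example_build_table are hypothetical:

	/*
	 * Illustrative sketch: allocate a table (chained transparently if
	 * nents > SG_MAX_SINGLE_ALLOC), populate it from an array of pages
	 * using the chain-aware for_each_sg iterator, then free it.
	 */
	static int example_build_table(struct page **pages, unsigned int nents)
	{
		struct sg_table table;
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(&table, nents, GFP_KERNEL))
			return -ENOMEM;

		for_each_sg(table.sgl, sg, table.nents, i)
			sg_set_page(sg, pages[i], PAGE_SIZE, 0);

		/* ... dma_map_sg() and perform the transfer here ... */

		sg_free_table(&table);
		return 0;
	}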
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index abd7479ff452..a457fca66f61 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -8,7 +8,6 @@
 #include <linux/scatterlist.h>
 
 struct request;
-struct scatterlist;
 struct Scsi_Host;
 struct scsi_device;
 
@@ -68,8 +67,8 @@ struct scsi_cmnd {
 	void *request_buffer;		/* Actual requested buffer */
 
 	/* These elements define the operation we ultimately want to perform */
+	struct sg_table sg_table;
 	unsigned short use_sg;	/* Number of pieces of scatter-gather */
-	unsigned short __use_sg;
 
 	unsigned underflow;	/* Return error if less than
 				   this amount is transferred */
@@ -128,14 +127,14 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
 				 size_t *offset, size_t *len);
 extern void scsi_kunmap_atomic_sg(void *virt);
 
-extern struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t);
+extern int scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t);
 extern void scsi_free_sgtable(struct scsi_cmnd *);
 
 extern int scsi_dma_map(struct scsi_cmnd *cmd);
 extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
 
 #define scsi_sg_count(cmd) ((cmd)->use_sg)
-#define scsi_sglist(cmd) ((struct scatterlist *)(cmd)->request_buffer)
+#define scsi_sglist(cmd) ((cmd)->sg_table.sgl)
 #define scsi_bufflen(cmd) ((cmd)->request_bufflen)
 
 static inline void scsi_set_resid(struct scsi_cmnd *cmd, int resid)
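Because the scsi_sglist() and scsi_sg_count() accessors are preserved, low-level drivers stay source-compatible: scsi_sglist() now expands to (cmd)->sg_table.sgl instead of casting request_buffer, but an iteration loop like the sketch below compiles unchanged either way. Illustrative only; the per-element DMA programming is elided and example_walk_sglist is a hypothetical name:

	/* Illustrative sketch of unchanged driver-side iteration. */
	static void example_walk_sglist(struct scsi_cmnd *cmd)
	{
		struct scatterlist *sg;
		int i;

		for_each_sg(scsi_sglist(cmd), sg, scsi_sg_count(cmd), i) {
			/* program one element: sg_phys(sg), sg->length */
		}
	}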
diff --git a/lib/Makefile b/lib/Makefile
index b6793ed28d84..89841dc9d91c 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -6,7 +6,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o \
 	 idr.o int_sqrt.o extable.o prio_tree.o \
 	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
-	 proportions.o prio_heap.o
+	 proportions.o prio_heap.o scatterlist.o
 
 lib-$(CONFIG_MMU)	+= ioremap.o
 lib-$(CONFIG_SMP)	+= cpumask.o
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
new file mode 100644
index 000000000000..acca4901046c
--- /dev/null
+++ b/lib/scatterlist.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
+ *
+ * Scatterlist handling helpers.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+
+/**
+ * sg_next - return the next scatterlist entry in a list
+ * @sg:		The current sg entry
+ *
+ * Description:
+ *   Usually the next entry will be @sg@ + 1, but if this sg element is part
+ *   of a chained scatterlist, it could jump to the start of a new
+ *   scatterlist array.
+ *
+ **/
+struct scatterlist *sg_next(struct scatterlist *sg)
+{
+#ifdef CONFIG_DEBUG_SG
+	BUG_ON(sg->sg_magic != SG_MAGIC);
+#endif
+	if (sg_is_last(sg))
+		return NULL;
+
+	sg++;
+	if (unlikely(sg_is_chain(sg)))
+		sg = sg_chain_ptr(sg);
+
+	return sg;
+}
+EXPORT_SYMBOL(sg_next);
+
+/**
+ * sg_last - return the last scatterlist entry in a list
+ * @sgl:	First entry in the scatterlist
+ * @nents:	Number of entries in the scatterlist
+ *
+ * Description:
+ *   Should only be used casually, it (currently) scans the entire list
+ *   to get the last entry.
+ *
+ *   Note that the @sgl@ pointer passed in need not be the first one,
+ *   the important bit is that @nents@ denotes the number of entries that
+ *   exist from @sgl@.
+ *
+ **/
+struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
+{
+#ifndef ARCH_HAS_SG_CHAIN
+	struct scatterlist *ret = &sgl[nents - 1];
+#else
+	struct scatterlist *sg, *ret = NULL;
+	unsigned int i;
+
+	for_each_sg(sgl, sg, nents, i)
+		ret = sg;
+
+#endif
+#ifdef CONFIG_DEBUG_SG
+	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
+	BUG_ON(!sg_is_last(ret));
+#endif
+	return ret;
+}
+EXPORT_SYMBOL(sg_last);
+
+/**
+ * sg_init_table - Initialize SG table
+ * @sgl:	   The SG table
+ * @nents:	   Number of entries in table
+ *
+ * Notes:
+ *   If this is part of a chained sg table, sg_mark_end() should be
+ *   used only on the last table part.
+ *
+ **/
+void sg_init_table(struct scatterlist *sgl, unsigned int nents)
+{
+	memset(sgl, 0, sizeof(*sgl) * nents);
+#ifdef CONFIG_DEBUG_SG
+	{
+		unsigned int i;
+		for (i = 0; i < nents; i++)
+			sgl[i].sg_magic = SG_MAGIC;
+	}
+#endif
+	sg_mark_end(&sgl[nents - 1]);
+}
+EXPORT_SYMBOL(sg_init_table);
+
+/**
+ * sg_init_one - Initialize a single entry sg list
+ * @sg:		 SG entry
+ * @buf:	 Virtual address for IO
+ * @buflen:	 IO length
+ *
+ **/
+void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
+{
+	sg_init_table(sg, 1);
+	sg_set_buf(sg, buf, buflen);
+}
+EXPORT_SYMBOL(sg_init_one);
+
+/*
+ * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
+ * helpers.
+ */
+static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
+{
+	if (nents == SG_MAX_SINGLE_ALLOC)
+		return (struct scatterlist *) __get_free_page(gfp_mask);
+	else
+		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
+}
+
+static void sg_kfree(struct scatterlist *sg, unsigned int nents)
+{
+	if (nents == SG_MAX_SINGLE_ALLOC)
+		free_page((unsigned long) sg);
+	else
+		kfree(sg);
+}
+
+/**
+ * __sg_free_table - Free a previously mapped sg table
+ * @table:	The sg table header to use
+ * @max_ents:	The maximum number of entries per single scatterlist
+ * @free_fn:	Free function
+ *
+ *  Description:
+ *    Free an sg table previously allocated and setup with
+ *    __sg_alloc_table(). The @max_ents value must be identical to
+ *    that previously used with __sg_alloc_table().
+ *
+ **/
+void __sg_free_table(struct sg_table *table, unsigned int max_ents,
+		     sg_free_fn *free_fn)
+{
+	struct scatterlist *sgl, *next;
+
+	if (unlikely(!table->sgl))
+		return;
+
+	sgl = table->sgl;
+	while (table->orig_nents) {
+		unsigned int alloc_size = table->orig_nents;
+		unsigned int sg_size;
+
+		/*
+		 * If we have more than max_ents segments left,
+		 * then assign 'next' to the sg table after the current one.
+		 * sg_size is then one less than alloc size, since the last
+		 * element is the chain pointer.
+		 */
+		if (alloc_size > max_ents) {
+			next = sg_chain_ptr(&sgl[max_ents - 1]);
+			alloc_size = max_ents;
+			sg_size = alloc_size - 1;
+		} else {
+			sg_size = alloc_size;
+			next = NULL;
+		}
+
+		table->orig_nents -= sg_size;
+		free_fn(sgl, alloc_size);
+		sgl = next;
+	}
+
+	table->sgl = NULL;
+}
+EXPORT_SYMBOL(__sg_free_table);
+
+/**
+ * sg_free_table - Free a previously allocated sg table
+ * @table:	The mapped sg table header
+ *
+ **/
+void sg_free_table(struct sg_table *table)
+{
+	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
+}
+EXPORT_SYMBOL(sg_free_table);
+
+/**
+ * __sg_alloc_table - Allocate and initialize an sg table with given allocator
+ * @table:	The sg table header to use
+ * @nents:	Number of entries in sg list
+ * @max_ents:	The maximum number of entries the allocator returns per call
+ * @gfp_mask:	GFP allocation mask
+ * @alloc_fn:	Allocator to use
+ *
+ * Description:
+ *   This function returns a @table @nents long. The allocator is
+ *   defined to return scatterlist chunks of maximum size @max_ents.
+ *   Thus if @nents is bigger than @max_ents, the scatterlists will be
+ *   chained in units of @max_ents.
+ *
+ * Notes:
+ *   If this function returns non-0 (eg failure), the caller must call
+ *   __sg_free_table() to cleanup any leftover allocations.
+ *
+ **/
+int __sg_alloc_table(struct sg_table *table, unsigned int nents,
+		     unsigned int max_ents, gfp_t gfp_mask,
+		     sg_alloc_fn *alloc_fn)
+{
+	struct scatterlist *sg, *prv;
+	unsigned int left;
+
+#ifndef ARCH_HAS_SG_CHAIN
+	BUG_ON(nents > max_ents);
+#endif
+
+	memset(table, 0, sizeof(*table));
+
+	left = nents;
+	prv = NULL;
+	do {
+		unsigned int sg_size, alloc_size = left;
+
+		if (alloc_size > max_ents) {
+			alloc_size = max_ents;
+			sg_size = alloc_size - 1;
+		} else
+			sg_size = alloc_size;
+
+		left -= sg_size;
+
+		sg = alloc_fn(alloc_size, gfp_mask);
+		if (unlikely(!sg))
+			return -ENOMEM;
+
+		sg_init_table(sg, alloc_size);
+		table->nents = table->orig_nents += sg_size;
+
+		/*
+		 * If this is the first mapping, assign the sg table header.
+		 * If this is not the first mapping, chain previous part.
+		 */
+		if (prv)
+			sg_chain(prv, max_ents, sg);
+		else
+			table->sgl = sg;
+
+		/*
+		 * If no more entries after this one, mark the end
+		 */
+		if (!left)
+			sg_mark_end(&sg[sg_size - 1]);
+
+		/*
+		 * only really needed for mempool backed sg allocations (like
+		 * SCSI), a possible improvement here would be to pass the
+		 * table pointer into the allocator and let that clear these
+		 * flags
+		 */
+		gfp_mask &= ~__GFP_WAIT;
+		gfp_mask |= __GFP_HIGH;
+		prv = sg;
+	} while (left);
+
+	return 0;
+}
+EXPORT_SYMBOL(__sg_alloc_table);
+
+/**
+ * sg_alloc_table - Allocate and initialize an sg table
+ * @table:	The sg table header to use
+ * @nents:	Number of entries in sg list
+ * @gfp_mask:	GFP allocation mask
+ *
+ *  Description:
+ *    Allocate and initialize an sg table. If @nents@ is larger than
+ *    SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
+ *
+ **/
+int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
+{
+	int ret;
+
+	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
+			       gfp_mask, sg_kmalloc);
+	if (unlikely(ret))
+		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
+
+	return ret;
+}
+EXPORT_SYMBOL(sg_alloc_table);
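For the default allocator above, the chaining unit follows directly from SG_MAX_SINGLE_ALLOC. A worked example, assuming a 4K page and a 32-byte struct scatterlist (typical for a 64-bit build without CONFIG_DEBUG_SG; both sizes vary by architecture and config):

	SG_MAX_SINGLE_ALLOC = PAGE_SIZE / sizeof(struct scatterlist)
	                    = 4096 / 32 = 128

A call such as sg_alloc_table(&table, 300, GFP_KERNEL) then iterates three times in __sg_alloc_table(): two full 128-entry blocks whose last slot each becomes a chain pointer (127 usable entries apiece), plus a final 46-entry block, for 127 + 127 + 46 = 300 usable entries and orig_nents == 300. The same arithmetic applies to a custom allocator pair like the SCSI scsi_sg_alloc()/scsi_sg_free() above, with SCSI_MAX_SG_SEGMENTS taking the place of SG_MAX_SINGLE_ALLOC.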