aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mmc
diff options
context:
space:
mode:
authorKyoungil Kim <ki0351.kim@samsung.com>2012-10-22 07:01:00 -0400
committerChris Ball <cjb@laptop.org>2012-12-06 13:54:43 -0500
commit968a64ea638bbd48839b41981ff50197f3412676 (patch)
treea46f677245e219b03e24d4e7c0f0ce405e6b447d /drivers/mmc
parent8fee476b219d1869762d9ef5c189a0c85e919a4d (diff)
mmc: sdio: Use multiple scatter/gather lists
Before this patch, we always used only a single sg entry for SDIO transfers. This patch switches to using multiple sg entries. In the case of dwmci, the controller supports only up to 4KB per single sg entry, so to transfer more than 4KB we previously had to send more than one command. When we tested before applying this patch, a 5K (5120-byte) transfer with the dwmci controller took around 335 us. After applying this patch, the same 5K transfer takes 242 us, so this patch gives roughly a 38% performance improvement for 5K-byte transfers. The larger the transfer size, the greater the performance improvement. Signed-off-by: Kyoungil Kim <ki0351.kim@samsung.com> Signed-off-by: Chris Ball <cjb@laptop.org>
Diffstat (limited to 'drivers/mmc')
-rw-r--r--drivers/mmc/core/sdio_io.c10
-rw-r--r--drivers/mmc/core/sdio_ops.c32
2 files changed, 31 insertions(+), 11 deletions(-)
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index 8f6f5ac131fc..78cb4d5d9d58 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -188,8 +188,7 @@ EXPORT_SYMBOL_GPL(sdio_set_block_size);
188 */ 188 */
189static inline unsigned int sdio_max_byte_size(struct sdio_func *func) 189static inline unsigned int sdio_max_byte_size(struct sdio_func *func)
190{ 190{
191 unsigned mval = min(func->card->host->max_seg_size, 191 unsigned mval = func->card->host->max_blk_size;
192 func->card->host->max_blk_size);
193 192
194 if (mmc_blksz_for_byte_mode(func->card)) 193 if (mmc_blksz_for_byte_mode(func->card))
195 mval = min(mval, func->cur_blksize); 194 mval = min(mval, func->cur_blksize);
@@ -311,11 +310,8 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
311 /* Do the bulk of the transfer using block mode (if supported). */ 310 /* Do the bulk of the transfer using block mode (if supported). */
312 if (func->card->cccr.multi_block && (size > sdio_max_byte_size(func))) { 311 if (func->card->cccr.multi_block && (size > sdio_max_byte_size(func))) {
313 /* Blocks per command is limited by host count, host transfer 312 /* Blocks per command is limited by host count, host transfer
314 * size (we only use a single sg entry) and the maximum for 313 * size and the maximum for IO_RW_EXTENDED of 511 blocks. */
315 * IO_RW_EXTENDED of 511 blocks. */ 314 max_blocks = min(func->card->host->max_blk_count, 511u);
316 max_blocks = min(func->card->host->max_blk_count,
317 func->card->host->max_seg_size / func->cur_blksize);
318 max_blocks = min(max_blocks, 511u);
319 315
320 while (remainder >= func->cur_blksize) { 316 while (remainder >= func->cur_blksize) {
321 unsigned blocks; 317 unsigned blocks;
diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c
index d29e20630eed..62508b457c4f 100644
--- a/drivers/mmc/core/sdio_ops.c
+++ b/drivers/mmc/core/sdio_ops.c
@@ -124,7 +124,10 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
124 struct mmc_request mrq = {NULL}; 124 struct mmc_request mrq = {NULL};
125 struct mmc_command cmd = {0}; 125 struct mmc_command cmd = {0};
126 struct mmc_data data = {0}; 126 struct mmc_data data = {0};
127 struct scatterlist sg; 127 struct scatterlist sg, *sg_ptr;
128 struct sg_table sgtable;
129 unsigned int nents, left_size, i;
130 unsigned int seg_size = card->host->max_seg_size;
128 131
129 BUG_ON(!card); 132 BUG_ON(!card);
130 BUG_ON(fn > 7); 133 BUG_ON(fn > 7);
@@ -152,15 +155,36 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
152 /* Code in host drivers/fwk assumes that "blocks" always is >=1 */ 155 /* Code in host drivers/fwk assumes that "blocks" always is >=1 */
153 data.blocks = blocks ? blocks : 1; 156 data.blocks = blocks ? blocks : 1;
154 data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; 157 data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
155 data.sg = &sg;
156 data.sg_len = 1;
157 158
158 sg_init_one(&sg, buf, data.blksz * data.blocks); 159 left_size = data.blksz * data.blocks;
160 nents = (left_size - 1) / seg_size + 1;
161 if (nents > 1) {
162 if (sg_alloc_table(&sgtable, nents, GFP_KERNEL))
163 return -ENOMEM;
164
165 data.sg = sgtable.sgl;
166 data.sg_len = nents;
167
168 for_each_sg(data.sg, sg_ptr, data.sg_len, i) {
169 sg_set_page(sg_ptr, virt_to_page(buf + (i * seg_size)),
170 min(seg_size, left_size),
171 offset_in_page(buf + (i * seg_size)));
172 left_size = left_size - seg_size;
173 }
174 } else {
175 data.sg = &sg;
176 data.sg_len = 1;
177
178 sg_init_one(&sg, buf, left_size);
179 }
159 180
160 mmc_set_data_timeout(&data, card); 181 mmc_set_data_timeout(&data, card);
161 182
162 mmc_wait_for_req(card->host, &mrq); 183 mmc_wait_for_req(card->host, &mrq);
163 184
185 if (nents > 1)
186 sg_free_table(&sgtable);
187
164 if (cmd.error) 188 if (cmd.error)
165 return cmd.error; 189 return cmd.error;
166 if (data.error) 190 if (data.error)