Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--  drivers/mmc/card/Kconfig     |  21
-rw-r--r--  drivers/mmc/card/Makefile    |   4
-rw-r--r--  drivers/mmc/card/block.c     | 781
-rw-r--r--  drivers/mmc/card/mmc_test.c  | 811
-rw-r--r--  drivers/mmc/card/queue.c     |  37
-rw-r--r--  drivers/mmc/card/queue.h     |   3
-rw-r--r--  drivers/mmc/card/sdio_uart.c |   4
7 files changed, 1395 insertions(+), 266 deletions(-)
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 3f2a912659af..3b1f783bf924 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -14,6 +14,24 @@ config MMC_BLOCK
 	  mount the filesystem. Almost everyone wishing MMC support
 	  should say Y or M here.
 
+config MMC_BLOCK_MINORS
+	int "Number of minors per block device"
+	depends on MMC_BLOCK
+	range 4 256
+	default 8
+	help
+	  Number of minors per block device. One is needed for every
+	  partition on the disk (plus one for the whole disk).
+
+	  Number of total MMC minors available is 256, so your number
+	  of supported block devices will be limited to 256 divided
+	  by this number.
+
+	  Default is 8 to be backwards compatible with previous
+	  hardwired device numbering.
+
+	  If unsure, say 8 here.
+
 config MMC_BLOCK_BOUNCE
 	bool "Use bounce buffer for simple hosts"
 	depends on MMC_BLOCK
@@ -40,12 +58,11 @@ config SDIO_UART
 
 config MMC_TEST
 	tristate "MMC host test driver"
-	default n
 	help
 	  Development driver that performs a series of reads and writes
 	  to a memory card in order to expose certain well known bugs
 	  in host controllers. The tests are executed by writing to the
-	  "test" file in sysfs under each card. Note that whatever is
+	  "test" file in debugfs under each card. Note that whatever is
 	  on your card will be overwritten by these tests.
 
 	  This driver is only of interest to those developing or
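
The new MMC_BLOCK_MINORS value is fixed at build time, but the "mmcblk." module
parameter prefix added in block.c below lets it be overridden without a rebuild.
A minimal usage sketch, not part of the patch itself (16 is an arbitrary example
value):

    # built-in driver: on the kernel command line
    mmcblk.perdev_minors=16

    # modular driver:
    modprobe mmc_block perdev_minors=16

The parameter is registered with mode 0444, so it is visible but read-only once
the driver is loaded.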
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
index 0d407514f67d..c73b406a06cd 100644
--- a/drivers/mmc/card/Makefile
+++ b/drivers/mmc/card/Makefile
@@ -2,10 +2,6 @@
 # Makefile for MMC/SD card drivers
 #
 
-ifeq ($(CONFIG_MMC_DEBUG),y)
-	EXTRA_CFLAGS += -DDEBUG
-endif
-
 obj-$(CONFIG_MMC_BLOCK)	+= mmc_block.o
 mmc_block-objs		:= block.o queue.o
 obj-$(CONFIG_MMC_TEST)	+= mmc_test.o
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index d545f79f6000..f85e42224559 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -29,10 +29,13 @@
 #include <linux/kdev_t.h>
 #include <linux/blkdev.h>
 #include <linux/mutex.h>
-#include <linux/smp_lock.h>
 #include <linux/scatterlist.h>
 #include <linux/string_helpers.h>
+#include <linux/delay.h>
+#include <linux/capability.h>
+#include <linux/compat.h>
 
+#include <linux/mmc/ioctl.h>
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
@@ -44,14 +47,35 @@
 #include "queue.h"
 
 MODULE_ALIAS("mmc:block");
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "mmcblk."
+
+#define INAND_CMD38_ARG_EXT_CSD  113
+#define INAND_CMD38_ARG_ERASE    0x00
+#define INAND_CMD38_ARG_TRIM     0x01
+#define INAND_CMD38_ARG_SECERASE 0x80
+#define INAND_CMD38_ARG_SECTRIM1 0x81
+#define INAND_CMD38_ARG_SECTRIM2 0x88
+
+static DEFINE_MUTEX(block_mutex);
+
+/*
+ * The defaults come from config options but can be overridden by module
+ * or bootarg options.
+ */
+static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
 
 /*
- * max 8 partitions per card
+ * We've only got one major, so number of mmcblk devices is
+ * limited to 256 / number of minors per device.
  */
-#define MMC_SHIFT	3
-#define MMC_NUM_MINORS	(256 >> MMC_SHIFT)
+static int max_devices;
 
-static DECLARE_BITMAP(dev_use, MMC_NUM_MINORS);
+/* 256 minors, so at most 256 separate devices */
+static DECLARE_BITMAP(dev_use, 256);
+static DECLARE_BITMAP(name_use, 256);
 
 /*
  * There is one mmc_blk_data per slot.
@@ -60,13 +84,31 @@ struct mmc_blk_data {
 	spinlock_t	lock;
 	struct gendisk	*disk;
 	struct mmc_queue queue;
+	struct list_head part;
+
+	unsigned int	flags;
+#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
+#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
 
 	unsigned int	usage;
 	unsigned int	read_only;
+	unsigned int	part_type;
+	unsigned int	name_idx;
+
+	/*
+	 * Only set in main mmc_blk_data associated
+	 * with mmc_card with mmc_set_drvdata, and keeps
+	 * track of the current selected device partition.
+	 */
+	unsigned int	part_curr;
+	struct device_attribute force_ro;
 };
 
 static DEFINE_MUTEX(open_lock);
 
+module_param(perdev_minors, int, 0444);
+MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
+
 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 {
 	struct mmc_blk_data *md;
69 108
109module_param(perdev_minors, int, 0444);
110MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
111
70static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) 112static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
71{ 113{
72 struct mmc_blk_data *md; 114 struct mmc_blk_data *md;
@@ -82,17 +124,22 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 	return md;
 }
 
+static inline int mmc_get_devidx(struct gendisk *disk)
+{
+	int devmaj = MAJOR(disk_devt(disk));
+	int devidx = MINOR(disk_devt(disk)) / perdev_minors;
+
+	if (!devmaj)
+		devidx = disk->first_minor / perdev_minors;
+	return devidx;
+}
+
 static void mmc_blk_put(struct mmc_blk_data *md)
 {
 	mutex_lock(&open_lock);
 	md->usage--;
 	if (md->usage == 0) {
-		int devmaj = MAJOR(disk_devt(md->disk));
-		int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;
-
-		if (!devmaj)
-			devidx = md->disk->first_minor >> MMC_SHIFT;
-
+		int devidx = mmc_get_devidx(md->disk);
 		blk_cleanup_queue(md->queue.queue);
 
 		__clear_bit(devidx, dev_use);
@@ -103,12 +150,44 @@ static void mmc_blk_put(struct mmc_blk_data *md)
 	mutex_unlock(&open_lock);
 }
 
+static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	int ret;
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+
+	ret = snprintf(buf, PAGE_SIZE, "%d",
+		       get_disk_ro(dev_to_disk(dev)) ^
+		       md->read_only);
+	mmc_blk_put(md);
+	return ret;
+}
+
+static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	int ret;
+	char *end;
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	unsigned long set = simple_strtoul(buf, &end, 0);
+	if (end == buf) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	set_disk_ro(dev_to_disk(dev), set || md->read_only);
+	ret = count;
+out:
+	mmc_blk_put(md);
+	return ret;
+}
+
 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
 {
 	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
 	int ret = -ENXIO;
 
-	lock_kernel();
+	mutex_lock(&block_mutex);
 	if (md) {
 		if (md->usage == 2)
 			check_disk_change(bdev);
@@ -119,7 +198,7 @@ static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
 			ret = -EROFS;
 		}
 	}
-	unlock_kernel();
+	mutex_unlock(&block_mutex);
 
 	return ret;
 }
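
The force_ro attribute defined above is registered per gendisk in
mmc_add_disk() later in this patch, so it appears as a sysfs file next to each
mmcblk disk. A hedged usage sketch (the device name assumes the default
numbering):

    # impose software read-only on the whole device
    echo 1 > /sys/block/mmcblk0/force_ro
    # lift it again; a hardware read-only card still stays read-only
    echo 0 > /sys/block/mmcblk0/force_ro

Reading the file reports the forced state XORed against the card's native
read-only flag, exactly as force_ro_show() computes it.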
@@ -128,9 +207,9 @@ static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
 {
 	struct mmc_blk_data *md = disk->private_data;
 
-	lock_kernel();
+	mutex_lock(&block_mutex);
 	mmc_blk_put(md);
-	unlock_kernel();
+	mutex_unlock(&block_mutex);
 	return 0;
 }
 
@@ -143,35 +222,255 @@ mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return 0;
 }
 
+struct mmc_blk_ioc_data {
+	struct mmc_ioc_cmd ic;
+	unsigned char *buf;
+	u64 buf_bytes;
+};
+
+static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
+	struct mmc_ioc_cmd __user *user)
+{
+	struct mmc_blk_ioc_data *idata;
+	int err;
+
+	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
+	if (!idata) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
+		err = -EFAULT;
+		goto idata_err;
+	}
+
+	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
+	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
+		err = -EOVERFLOW;
+		goto idata_err;
+	}
+
+	idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
+	if (!idata->buf) {
+		err = -ENOMEM;
+		goto idata_err;
+	}
+
+	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
+			   idata->ic.data_ptr, idata->buf_bytes)) {
+		err = -EFAULT;
+		goto copy_err;
+	}
+
+	return idata;
+
+copy_err:
+	kfree(idata->buf);
+idata_err:
+	kfree(idata);
+out:
+	return ERR_PTR(err);
+}
+
+static int mmc_blk_ioctl_cmd(struct block_device *bdev,
+	struct mmc_ioc_cmd __user *ic_ptr)
+{
+	struct mmc_blk_ioc_data *idata;
+	struct mmc_blk_data *md;
+	struct mmc_card *card;
+	struct mmc_command cmd = {0};
+	struct mmc_data data = {0};
+	struct mmc_request mrq = {0};
+	struct scatterlist sg;
+	int err;
+
+	/*
+	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+	 * whole block device, not on a partition.  This prevents overspray
+	 * between sibling partitions.
+	 */
+	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+		return -EPERM;
+
+	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
+	if (IS_ERR(idata))
+		return PTR_ERR(idata);
+
+	cmd.opcode = idata->ic.opcode;
+	cmd.arg = idata->ic.arg;
+	cmd.flags = idata->ic.flags;
+
+	data.sg = &sg;
+	data.sg_len = 1;
+	data.blksz = idata->ic.blksz;
+	data.blocks = idata->ic.blocks;
+
+	sg_init_one(data.sg, idata->buf, idata->buf_bytes);
+
+	if (idata->ic.write_flag)
+		data.flags = MMC_DATA_WRITE;
+	else
+		data.flags = MMC_DATA_READ;
+
+	mrq.cmd = &cmd;
+	mrq.data = &data;
+
+	md = mmc_blk_get(bdev->bd_disk);
+	if (!md) {
+		err = -EINVAL;
+		goto cmd_done;
+	}
+
+	card = md->queue.card;
+	if (IS_ERR(card)) {
+		err = PTR_ERR(card);
+		goto cmd_done;
+	}
+
+	mmc_claim_host(card->host);
+
+	if (idata->ic.is_acmd) {
+		err = mmc_app_cmd(card->host, card);
+		if (err)
+			goto cmd_rel_host;
+	}
+
+	/* data.flags must already be set before doing this. */
+	mmc_set_data_timeout(&data, card);
+	/* Allow overriding the timeout_ns for empirical tuning. */
+	if (idata->ic.data_timeout_ns)
+		data.timeout_ns = idata->ic.data_timeout_ns;
+
+	if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+		/*
+		 * Pretend this is a data transfer and rely on the host driver
+		 * to compute timeout.  When all host drivers support
+		 * cmd.cmd_timeout for R1B, this can be changed to:
+		 *
+		 *     mrq.data = NULL;
+		 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
+		 */
+		data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
+	}
+
+	mmc_wait_for_req(card->host, &mrq);
+
+	if (cmd.error) {
+		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
+			__func__, cmd.error);
+		err = cmd.error;
+		goto cmd_rel_host;
+	}
+	if (data.error) {
+		dev_err(mmc_dev(card->host), "%s: data error %d\n",
+			__func__, data.error);
+		err = data.error;
+		goto cmd_rel_host;
+	}
+
+	/*
+	 * According to the SD specs, some commands require a delay after
+	 * issuing the command.
+	 */
+	if (idata->ic.postsleep_min_us)
+		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
+
+	if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
+		err = -EFAULT;
+		goto cmd_rel_host;
+	}
+
+	if (!idata->ic.write_flag) {
+		if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
+				 idata->buf, idata->buf_bytes)) {
+			err = -EFAULT;
+			goto cmd_rel_host;
+		}
+	}
+
+cmd_rel_host:
+	mmc_release_host(card->host);
+
+cmd_done:
+	mmc_blk_put(md);
+	kfree(idata->buf);
+	kfree(idata);
+	return err;
+}
+
+static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
+	unsigned int cmd, unsigned long arg)
+{
+	int ret = -EINVAL;
+	if (cmd == MMC_IOC_CMD)
+		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
+	unsigned int cmd, unsigned long arg)
+{
+	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
 static const struct block_device_operations mmc_bdops = {
 	.open			= mmc_blk_open,
 	.release		= mmc_blk_release,
 	.getgeo			= mmc_blk_getgeo,
 	.owner			= THIS_MODULE,
+	.ioctl			= mmc_blk_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= mmc_blk_compat_ioctl,
+#endif
 };
 
 struct mmc_blk_request {
 	struct mmc_request	mrq;
+	struct mmc_command	sbc;
 	struct mmc_command	cmd;
 	struct mmc_command	stop;
 	struct mmc_data		data;
 };
 
+static inline int mmc_blk_part_switch(struct mmc_card *card,
+				      struct mmc_blk_data *md)
+{
+	int ret;
+	struct mmc_blk_data *main_md = mmc_get_drvdata(card);
+	if (main_md->part_curr == md->part_type)
+		return 0;
+
+	if (mmc_card_mmc(card)) {
+		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+		card->ext_csd.part_config |= md->part_type;
+
+		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 EXT_CSD_PART_CONFIG, card->ext_csd.part_config,
+				 card->ext_csd.part_time);
+		if (ret)
+			return ret;
+	}
+
+	main_md->part_curr = md->part_type;
+	return 0;
+}
+
 static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 {
 	int err;
 	u32 result;
 	__be32 *blocks;
 
-	struct mmc_request mrq;
-	struct mmc_command cmd;
-	struct mmc_data data;
+	struct mmc_request mrq = {0};
+	struct mmc_command cmd = {0};
+	struct mmc_data data = {0};
 	unsigned int timeout_us;
 
 	struct scatterlist sg;
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
-
 	cmd.opcode = MMC_APP_CMD;
 	cmd.arg = card->rca << 16;
 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
@@ -188,8 +487,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 	cmd.arg = 0;
 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 
-	memset(&data, 0, sizeof(struct mmc_data));
-
 	data.timeout_ns = card->csd.tacc_ns * 100;
 	data.timeout_clks = card->csd.tacc_clks * 100;
 
@@ -208,8 +505,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 	data.sg = &sg;
 	data.sg_len = 1;
 
-	memset(&mrq, 0, sizeof(struct mmc_request));
-
 	mrq.cmd = &cmd;
 	mrq.data = &data;
 
@@ -232,17 +527,16 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 
 static u32 get_card_status(struct mmc_card *card, struct request *req)
 {
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 	int err;
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
 	cmd.opcode = MMC_SEND_STATUS;
 	if (!mmc_host_is_spi(card->host))
 		cmd.arg = card->rca << 16;
 	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 	if (err)
-		printk(KERN_ERR "%s: error %d sending status comand",
+		printk(KERN_ERR "%s: error %d sending status command",
 		       req->rq_disk->disk_name, err);
 	return cmd.resp[0];
 }
@@ -254,8 +548,6 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 	unsigned int from, nr, arg;
 	int err = 0;
 
-	mmc_claim_host(card->host);
-
 	if (!mmc_can_erase(card)) {
 		err = -EOPNOTSUPP;
 		goto out;
@@ -269,14 +561,22 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 	else
 		arg = MMC_ERASE_ARG;
 
+	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 INAND_CMD38_ARG_EXT_CSD,
+				 arg == MMC_TRIM_ARG ?
+				 INAND_CMD38_ARG_TRIM :
+				 INAND_CMD38_ARG_ERASE,
+				 0);
+		if (err)
+			goto out;
+	}
 	err = mmc_erase(card, from, nr, arg);
 out:
 	spin_lock_irq(&md->lock);
 	__blk_end_request(req, err, blk_rq_bytes(req));
 	spin_unlock_irq(&md->lock);
 
-	mmc_release_host(card->host);
-
 	return err ? 0 : 1;
 }
 
@@ -288,8 +588,6 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 	unsigned int from, nr, arg;
 	int err = 0;
 
-	mmc_claim_host(card->host);
-
 	if (!mmc_can_secure_erase_trim(card)) {
 		err = -EOPNOTSUPP;
 		goto out;
@@ -303,19 +601,74 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 	else
 		arg = MMC_SECURE_ERASE_ARG;
 
+	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 INAND_CMD38_ARG_EXT_CSD,
+				 arg == MMC_SECURE_TRIM1_ARG ?
+				 INAND_CMD38_ARG_SECTRIM1 :
+				 INAND_CMD38_ARG_SECERASE,
+				 0);
+		if (err)
+			goto out;
+	}
 	err = mmc_erase(card, from, nr, arg);
-	if (!err && arg == MMC_SECURE_TRIM1_ARG)
+	if (!err && arg == MMC_SECURE_TRIM1_ARG) {
+		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+					 INAND_CMD38_ARG_EXT_CSD,
+					 INAND_CMD38_ARG_SECTRIM2,
+					 0);
+			if (err)
+				goto out;
+		}
 		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
+	}
 out:
 	spin_lock_irq(&md->lock);
 	__blk_end_request(req, err, blk_rq_bytes(req));
 	spin_unlock_irq(&md->lock);
 
-	mmc_release_host(card->host);
-
 	return err ? 0 : 1;
 }
 
+static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_blk_data *md = mq->data;
+
+	/*
+	 * No-op, only service this because we need REQ_FUA for reliable
+	 * writes.
+	 */
+	spin_lock_irq(&md->lock);
+	__blk_end_request_all(req, 0);
+	spin_unlock_irq(&md->lock);
+
+	return 1;
+}
+
+/*
+ * Reformat current write as a reliable write, supporting
+ * both legacy and the enhanced reliable write MMC cards.
+ * In each transfer we'll handle only as much as a single
+ * reliable write can handle, thus finish the request in
+ * partial completions.
+ */
+static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
+				    struct mmc_card *card,
+				    struct request *req)
+{
+	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
+		/* Legacy mode imposes restrictions on transfers. */
+		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
+			brq->data.blocks = 1;
+
+		if (brq->data.blocks > card->ext_csd.rel_sectors)
+			brq->data.blocks = card->ext_csd.rel_sectors;
+		else if (brq->data.blocks < card->ext_csd.rel_sectors)
+			brq->data.blocks = 1;
+	}
+}
+
 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->data;
@@ -323,10 +676,17 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_blk_request brq;
 	int ret = 1, disable_multi = 0;
 
-	mmc_claim_host(card->host);
+	/*
+	 * Reliable writes are used to implement Forced Unit Access and
+	 * REQ_META accesses, and are supported only on MMCs.
+	 */
+	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
+			  (req->cmd_flags & REQ_META)) &&
+			 (rq_data_dir(req) == WRITE) &&
+			 (md->flags & MMC_BLK_REL_WR);
 
 	do {
-		struct mmc_command cmd;
+		struct mmc_command cmd = {0};
 		u32 readcmd, writecmd, status = 0;
 
 		memset(&brq, 0, sizeof(struct mmc_blk_request));
@@ -359,12 +719,12 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		if (disable_multi && brq.data.blocks > 1)
 			brq.data.blocks = 1;
 
-		if (brq.data.blocks > 1) {
+		if (brq.data.blocks > 1 || do_rel_wr) {
 			/* SPI multiblock writes terminate using a special
 			 * token, not a STOP_TRANSMISSION request.
 			 */
-			if (!mmc_host_is_spi(card->host)
-			    || rq_data_dir(req) == READ)
+			if (!mmc_host_is_spi(card->host) ||
+			    rq_data_dir(req) == READ)
 				brq.mrq.stop = &brq.stop;
 			readcmd = MMC_READ_MULTIPLE_BLOCK;
 			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
@@ -373,7 +733,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 			readcmd = MMC_READ_SINGLE_BLOCK;
 			writecmd = MMC_WRITE_BLOCK;
 		}
-
 		if (rq_data_dir(req) == READ) {
 			brq.cmd.opcode = readcmd;
 			brq.data.flags |= MMC_DATA_READ;
@@ -382,6 +741,38 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 			brq.data.flags |= MMC_DATA_WRITE;
 		}
 
+		if (do_rel_wr)
+			mmc_apply_rel_rw(&brq, card, req);
+
+		/*
+		 * Pre-defined multi-block transfers are preferable to
+		 * open-ended ones (and necessary for reliable writes).
+		 * However, it is not sufficient to just send CMD23,
+		 * and avoid the final CMD12, as on an error condition
+		 * CMD12 (stop) needs to be sent anyway. This, coupled
+		 * with Auto-CMD23 enhancements provided by some
+		 * hosts, means that the complexity of dealing
+		 * with this is best left to the host. If CMD23 is
+		 * supported by card and host, we'll fill sbc in and let
+		 * the host deal with handling it correctly. This means
+		 * that for hosts that don't expose MMC_CAP_CMD23, no
+		 * change of behavior will be observed.
+		 *
+		 * N.B: Some MMC cards experience perf degradation.
+		 * We'll avoid using CMD23-bounded multiblock writes for
+		 * these, while retaining features like reliable writes.
+		 */
+
+		if ((md->flags & MMC_BLK_CMD23) &&
+		    mmc_op_multi(brq.cmd.opcode) &&
+		    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
+			brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
+			brq.sbc.arg = brq.data.blocks |
+				(do_rel_wr ? (1 << 31) : 0);
+			brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+			brq.mrq.sbc = &brq.sbc;
+		}
+
 		mmc_set_data_timeout(&brq.data, card);
 
 		brq.data.sg = mq->sg;
@@ -417,7 +808,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		 * until later as we need to wait for the card to leave
 		 * programming mode even when things go wrong.
 		 */
-		if (brq.cmd.error || brq.data.error || brq.stop.error) {
+		if (brq.sbc.error || brq.cmd.error ||
+		    brq.data.error || brq.stop.error) {
 			if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
 				/* Redo read one sector at a time */
 				printk(KERN_WARNING "%s: retrying using single "
@@ -428,6 +820,13 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 			status = get_card_status(card, req);
 		}
 
+		if (brq.sbc.error) {
+			printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT "
+			       "command, response %#x, card status %#x\n",
+			       req->rq_disk->disk_name, brq.sbc.error,
+			       brq.sbc.resp[0], status);
+		}
+
 		if (brq.cmd.error) {
 			printk(KERN_ERR "%s: error %d sending read/write "
 			       "command, response %#x, card status %#x\n",
@@ -506,8 +905,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		spin_unlock_irq(&md->lock);
 	} while (ret);
 
-	mmc_release_host(card->host);
-
 	return 1;
 
  cmd_err:
@@ -534,8 +931,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		spin_unlock_irq(&md->lock);
 	}
 
-	mmc_release_host(card->host);
-
 	spin_lock_irq(&md->lock);
 	while (ret)
 		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
@@ -546,14 +941,31 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 
 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 {
+	int ret;
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_card *card = md->queue.card;
+
+	mmc_claim_host(card->host);
+	ret = mmc_blk_part_switch(card, md);
+	if (ret) {
+		ret = 0;
+		goto out;
+	}
+
 	if (req->cmd_flags & REQ_DISCARD) {
 		if (req->cmd_flags & REQ_SECURE)
-			return mmc_blk_issue_secdiscard_rq(mq, req);
+			ret = mmc_blk_issue_secdiscard_rq(mq, req);
 		else
-			return mmc_blk_issue_discard_rq(mq, req);
+			ret = mmc_blk_issue_discard_rq(mq, req);
+	} else if (req->cmd_flags & REQ_FLUSH) {
+		ret = mmc_blk_issue_flush(mq, req);
 	} else {
-		return mmc_blk_issue_rw_rq(mq, req);
+		ret = mmc_blk_issue_rw_rq(mq, req);
 	}
+
+out:
+	mmc_release_host(card->host);
+	return ret;
 }
 
 static inline int mmc_blk_readonly(struct mmc_card *card)
@@ -562,13 +974,17 @@ static inline int mmc_blk_readonly(struct mmc_card *card)
 		!(card->csd.cmdclass & CCC_BLOCK_WRITE);
 }
 
-static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
+static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
+					      struct device *parent,
+					      sector_t size,
+					      bool default_ro,
+					      const char *subname)
 {
 	struct mmc_blk_data *md;
 	int devidx, ret;
 
-	devidx = find_first_zero_bit(dev_use, MMC_NUM_MINORS);
-	if (devidx >= MMC_NUM_MINORS)
+	devidx = find_first_zero_bit(dev_use, max_devices);
+	if (devidx >= max_devices)
 		return ERR_PTR(-ENOSPC);
 	__set_bit(devidx, dev_use);
 
@@ -578,6 +994,19 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 		goto out;
 	}
 
+	/*
+	 * !subname implies we are creating main mmc_blk_data that will be
+	 * associated with mmc_card with mmc_set_drvdata. Due to device
+	 * partitions, devidx will not coincide with a per-physical card
+	 * index anymore so we keep track of a name index.
+	 */
+	if (!subname) {
+		md->name_idx = find_first_zero_bit(name_use, max_devices);
+		__set_bit(md->name_idx, name_use);
+	}
+	else
+		md->name_idx = ((struct mmc_blk_data *)
+				dev_to_disk(parent)->private_data)->name_idx;
 
 	/*
 	 * Set the read-only status based on the supported commands
@@ -585,16 +1014,17 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 	 */
 	md->read_only = mmc_blk_readonly(card);
 
-	md->disk = alloc_disk(1 << MMC_SHIFT);
+	md->disk = alloc_disk(perdev_minors);
 	if (md->disk == NULL) {
 		ret = -ENOMEM;
 		goto err_kfree;
 	}
 
 	spin_lock_init(&md->lock);
+	INIT_LIST_HEAD(&md->part);
 	md->usage = 1;
 
-	ret = mmc_init_queue(&md->queue, card, &md->lock);
+	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
 	if (ret)
 		goto err_putdisk;
 
@@ -602,11 +1032,12 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 	md->queue.data = md;
 
 	md->disk->major	= MMC_BLOCK_MAJOR;
-	md->disk->first_minor = devidx << MMC_SHIFT;
+	md->disk->first_minor = devidx * perdev_minors;
 	md->disk->fops = &mmc_bdops;
 	md->disk->private_data = md;
 	md->disk->queue = md->queue.queue;
-	md->disk->driverfs_dev = &card->dev;
+	md->disk->driverfs_dev = parent;
+	set_disk_ro(md->disk, md->read_only || default_ro);
 
 	/*
 	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
@@ -620,65 +1051,204 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 	 * messages to tell when the card is present.
 	 */
 
-	sprintf(md->disk->disk_name, "mmcblk%d", devidx);
+	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
+		 "mmcblk%d%s", md->name_idx, subname ? subname : "");
 
 	blk_queue_logical_block_size(md->queue.queue, 512);
+	set_capacity(md->disk, size);
+
+	if (mmc_host_cmd23(card->host)) {
+		if (mmc_card_mmc(card) ||
+		    (mmc_card_sd(card) &&
+		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
+			md->flags |= MMC_BLK_CMD23;
+	}
+
+	if (mmc_card_mmc(card) &&
+	    md->flags & MMC_BLK_CMD23 &&
+	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
+	     card->ext_csd.rel_sectors)) {
+		md->flags |= MMC_BLK_REL_WR;
+		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
+	}
+
+	return md;
+
+ err_putdisk:
+	put_disk(md->disk);
+ err_kfree:
+	kfree(md);
+ out:
+	return ERR_PTR(ret);
+}
+
+static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
+{
+	sector_t size;
+	struct mmc_blk_data *md;
 
 	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
 		/*
 		 * The EXT_CSD sector count is in number or 512 byte
 		 * sectors.
 		 */
-		set_capacity(md->disk, card->ext_csd.sectors);
+		size = card->ext_csd.sectors;
 	} else {
 		/*
 		 * The CSD capacity field is in units of read_blkbits.
 		 * set_capacity takes units of 512 bytes.
 		 */
-		set_capacity(md->disk,
-			card->csd.capacity << (card->csd.read_blkbits - 9));
+		size = card->csd.capacity << (card->csd.read_blkbits - 9);
 	}
+
+	md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
 	return md;
+}
 
- err_putdisk:
-	put_disk(md->disk);
- err_kfree:
-	kfree(md);
- out:
-	return ERR_PTR(ret);
+static int mmc_blk_alloc_part(struct mmc_card *card,
+			      struct mmc_blk_data *md,
+			      unsigned int part_type,
+			      sector_t size,
+			      bool default_ro,
+			      const char *subname)
+{
+	char cap_str[10];
+	struct mmc_blk_data *part_md;
+
+	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
+				    subname);
+	if (IS_ERR(part_md))
+		return PTR_ERR(part_md);
+	part_md->part_type = part_type;
+	list_add(&part_md->part, &md->part);
+
+	string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
+			cap_str, sizeof(cap_str));
+	printk(KERN_INFO "%s: %s %s partition %u %s\n",
+	       part_md->disk->disk_name, mmc_card_id(card),
+	       mmc_card_name(card), part_md->part_type, cap_str);
+	return 0;
+}
+
+static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
+{
+	int ret = 0;
+
+	if (!mmc_card_mmc(card))
+		return 0;
+
+	if (card->ext_csd.boot_size) {
+		ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0,
+					 card->ext_csd.boot_size >> 9,
+					 true,
+					 "boot0");
+		if (ret)
+			return ret;
+		ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1,
+					 card->ext_csd.boot_size >> 9,
+					 true,
+					 "boot1");
+		if (ret)
+			return ret;
+	}
+
+	return ret;
 }
 
 static int
 mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
 {
-	struct mmc_command cmd;
 	int err;
 
-	/* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
-	if (mmc_card_blockaddr(card))
-		return 0;
-
 	mmc_claim_host(card->host);
-	cmd.opcode = MMC_SET_BLOCKLEN;
-	cmd.arg = 512;
-	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
-	err = mmc_wait_for_cmd(card->host, &cmd, 5);
+	err = mmc_set_blocklen(card, 512);
 	mmc_release_host(card->host);
 
 	if (err) {
-		printk(KERN_ERR "%s: unable to set block size to %d: %d\n",
-			md->disk->disk_name, cmd.arg, err);
+		printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
+			md->disk->disk_name, err);
 		return -EINVAL;
 	}
 
 	return 0;
 }
 
+static void mmc_blk_remove_req(struct mmc_blk_data *md)
+{
+	if (md) {
+		if (md->disk->flags & GENHD_FL_UP) {
+			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+
+			/* Stop new requests from getting into the queue */
+			del_gendisk(md->disk);
+		}
+
+		/* Then flush out any already in there */
+		mmc_cleanup_queue(&md->queue);
+		mmc_blk_put(md);
+	}
+}
+
+static void mmc_blk_remove_parts(struct mmc_card *card,
+				 struct mmc_blk_data *md)
+{
+	struct list_head *pos, *q;
+	struct mmc_blk_data *part_md;
+
+	__clear_bit(md->name_idx, name_use);
+	list_for_each_safe(pos, q, &md->part) {
+		part_md = list_entry(pos, struct mmc_blk_data, part);
+		list_del(pos);
+		mmc_blk_remove_req(part_md);
+	}
+}
+
+static int mmc_add_disk(struct mmc_blk_data *md)
+{
+	int ret;
+
+	add_disk(md->disk);
+	md->force_ro.show = force_ro_show;
+	md->force_ro.store = force_ro_store;
+	sysfs_attr_init(&md->force_ro.attr);
+	md->force_ro.attr.name = "force_ro";
+	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
+	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
+	if (ret)
+		del_gendisk(md->disk);
+
+	return ret;
+}
+
+static const struct mmc_fixup blk_fixups[] =
+{
+	MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+	MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+	MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+	MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+	MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+
+	/*
+	 * Some MMC cards experience performance degradation with CMD23
+	 * instead of CMD12-bounded multiblock transfers. For now we'll
+	 * black list what's bad...
+	 * - Certain Toshiba cards.
+	 *
+	 * N.B. This doesn't affect SD cards.
+	 */
+	MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
+	MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
+	MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
+	END_FIXUP
+};
+
 static int mmc_blk_probe(struct mmc_card *card)
 {
-	struct mmc_blk_data *md;
+	struct mmc_blk_data *md, *part_md;
 	int err;
-
 	char cap_str[10];
 
 	/*
@@ -701,14 +1271,24 @@ static int mmc_blk_probe(struct mmc_card *card)
 	       md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
 	       cap_str, md->read_only ? "(ro)" : "");
 
+	if (mmc_blk_alloc_parts(card, md))
+		goto out;
+
 	mmc_set_drvdata(card, md);
-	add_disk(md->disk);
+	mmc_fixup_device(card, blk_fixups);
+
+	if (mmc_add_disk(md))
+		goto out;
+
+	list_for_each_entry(part_md, &md->part, part) {
+		if (mmc_add_disk(part_md))
+			goto out;
+	}
 	return 0;
 
  out:
-	mmc_cleanup_queue(&md->queue);
-	mmc_blk_put(md);
-
+	mmc_blk_remove_parts(card, md);
+	mmc_blk_remove_req(md);
 	return err;
 }
 
@@ -716,36 +1296,46 @@ static void mmc_blk_remove(struct mmc_card *card)
 {
 	struct mmc_blk_data *md = mmc_get_drvdata(card);
 
-	if (md) {
-		/* Stop new requests from getting into the queue */
-		del_gendisk(md->disk);
-
-		/* Then flush out any already in there */
-		mmc_cleanup_queue(&md->queue);
-
-		mmc_blk_put(md);
-	}
+	mmc_blk_remove_parts(card, md);
+	mmc_claim_host(card->host);
+	mmc_blk_part_switch(card, md);
+	mmc_release_host(card->host);
+	mmc_blk_remove_req(md);
 	mmc_set_drvdata(card, NULL);
 }
 
 #ifdef CONFIG_PM
 static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
 {
+	struct mmc_blk_data *part_md;
 	struct mmc_blk_data *md = mmc_get_drvdata(card);
 
 	if (md) {
 		mmc_queue_suspend(&md->queue);
+		list_for_each_entry(part_md, &md->part, part) {
+			mmc_queue_suspend(&part_md->queue);
+		}
 	}
 	return 0;
 }
 
 static int mmc_blk_resume(struct mmc_card *card)
 {
+	struct mmc_blk_data *part_md;
 	struct mmc_blk_data *md = mmc_get_drvdata(card);
 
 	if (md) {
 		mmc_blk_set_blksize(md, card);
+
+		/*
+		 * Resume involves the card going into idle state,
+		 * so current partition is always the main one.
+		 */
+		md->part_curr = md->part_type;
 		mmc_queue_resume(&md->queue);
+		list_for_each_entry(part_md, &md->part, part) {
+			mmc_queue_resume(&part_md->queue);
+		}
 	}
 	return 0;
 }
@@ -768,6 +1358,11 @@ static int __init mmc_blk_init(void)
 {
 	int res;
 
+	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
+		pr_info("mmcblk: using %d minors per device\n", perdev_minors);
+
+	max_devices = 256 / perdev_minors;
+
 	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
 	if (res)
 		goto out;
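
With block.c done, the new MMC_IOC_CMD path can be exercised from userspace. A
minimal sketch, not part of this series: it reads the 512-byte EXT_CSD register
(CMD8) through the whole-device node, which is required by the CAP_SYS_RAWIO and
bd_contains checks in mmc_blk_ioctl_cmd() above. The response-flag constants are
copied by hand because the kernel does not export linux/mmc/core.h; the device
path and the EXT_CSD_REV offset (byte 192) are illustrative assumptions.

    /* mmc_ioc_demo.c - hedged sketch of the MMC_IOC_CMD ioctl added above */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/mmc/ioctl.h>

    /* Copied from the kernel's unexported mmc headers. */
    #define MMC_SEND_EXT_CSD	8
    #define MMC_RSP_PRESENT	(1 << 0)
    #define MMC_RSP_CRC	(1 << 2)
    #define MMC_RSP_OPCODE	(1 << 4)
    #define MMC_RSP_R1	(MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
    #define MMC_CMD_ADTC	(1 << 5)

    int main(void)
    {
    	uint8_t ext_csd[512];
    	struct mmc_ioc_cmd ic;
    	/* whole device, not a partition; needs CAP_SYS_RAWIO */
    	int fd = open("/dev/mmcblk0", O_RDWR);

    	if (fd < 0)
    		return 1;

    	memset(&ic, 0, sizeof(ic));
    	ic.opcode = MMC_SEND_EXT_CSD;
    	ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
    	ic.blksz = 512;
    	ic.blocks = 1;
    	ic.write_flag = 0;			/* read: data is copied back to us */
    	mmc_ioc_cmd_set_data(ic, ext_csd);	/* fills ic.data_ptr */

    	if (ioctl(fd, MMC_IOC_CMD, &ic))
    		return 1;

    	/* cmd.resp[] comes back in ic.response[]; byte 192 is EXT_CSD_REV */
    	printf("R1 0x%08x, EXT_CSD rev %u\n", ic.response[0], ext_csd[192]);
    	return 0;
    }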
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 5dd8576b5c18..233cdfae92f4 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -17,6 +17,11 @@
 
 #include <linux/scatterlist.h>
 #include <linux/swap.h>		/* For nr_free_buffer_pages() */
+#include <linux/list.h>
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
 
 #define RESULT_OK		0
 #define RESULT_FAIL		1
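
These new includes back the debugfs reporting that the Kconfig help text above
now points to. A hedged sketch of driving the test driver once it is bound to a
card; the exact debugfs directory depends on the host index and card address,
so the path below is an assumed example:

    mount -t debugfs none /sys/kernel/debug 2>/dev/null
    # run test case 6; progress goes to dmesg, results to the saved lists
    echo 6 > /sys/kernel/debug/mmc0/mmc0:0001/test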
@@ -56,7 +61,9 @@ struct mmc_test_mem {
  * struct mmc_test_area - information for performance tests.
  * @max_sz: test area size (in bytes)
  * @dev_addr: address on card at which to do performance tests
- * @max_segs: maximum segments in scatterlist @sg
+ * @max_tfr: maximum transfer size allowed by driver (in bytes)
+ * @max_segs: maximum segments allowed by driver in scatterlist @sg
+ * @max_seg_sz: maximum segment size allowed by driver
  * @blocks: number of (512 byte) blocks currently mapped by @sg
  * @sg_len: length of currently mapped scatterlist @sg
  * @mem: allocated memory
@@ -65,7 +72,9 @@ struct mmc_test_mem {
 struct mmc_test_area {
 	unsigned long max_sz;
 	unsigned int dev_addr;
+	unsigned int max_tfr;
 	unsigned int max_segs;
+	unsigned int max_seg_sz;
 	unsigned int blocks;
 	unsigned int sg_len;
 	struct mmc_test_mem *mem;
@@ -73,12 +82,59 @@ struct mmc_test_area {
 };
 
 /**
+ * struct mmc_test_transfer_result - transfer results for performance tests.
+ * @link: double-linked list
+ * @count: amount of group of sectors to check
+ * @sectors: amount of sectors to check in one group
+ * @ts: time values of transfer
+ * @rate: calculated transfer rate
+ * @iops: I/O operations per second (times 100)
+ */
+struct mmc_test_transfer_result {
+	struct list_head link;
+	unsigned int count;
+	unsigned int sectors;
+	struct timespec ts;
+	unsigned int rate;
+	unsigned int iops;
+};
+
+/**
+ * struct mmc_test_general_result - results for tests.
+ * @link: double-linked list
+ * @card: card under test
+ * @testcase: number of test case
+ * @result: result of test run
+ * @tr_lst: transfer measurements if any as mmc_test_transfer_result
+ */
+struct mmc_test_general_result {
+	struct list_head link;
+	struct mmc_card *card;
+	int testcase;
+	int result;
+	struct list_head tr_lst;
+};
+
+/**
+ * struct mmc_test_dbgfs_file - debugfs related file.
+ * @link: double-linked list
+ * @card: card under test
+ * @file: file created under debugfs
+ */
+struct mmc_test_dbgfs_file {
+	struct list_head link;
+	struct mmc_card *card;
+	struct dentry *file;
+};
+
+/**
  * struct mmc_test_card - test information.
  * @card: card under test
  * @scratch: transfer buffer
  * @buffer: transfer buffer
  * @highmem: buffer for highmem tests
  * @area: information for performance tests
+ * @gr: pointer to results of current testcase
  */
 struct mmc_test_card {
 	struct mmc_card	*card;
@@ -88,7 +144,8 @@ struct mmc_test_card {
 #ifdef CONFIG_HIGHMEM
 	struct page *highmem;
 #endif
 	struct mmc_test_area		area;
+	struct mmc_test_general_result	*gr;
 };
 
 /*******************************************************************/
@@ -100,17 +157,7 @@ struct mmc_test_card {
  */
 static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
 {
-	struct mmc_command cmd;
-	int ret;
-
-	cmd.opcode = MMC_SET_BLOCKLEN;
-	cmd.arg = size;
-	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
-	ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
-	if (ret)
-		return ret;
-
-	return 0;
+	return mmc_set_blocklen(test->card, size);
 }
 
 /*
@@ -165,7 +212,7 @@ static int mmc_test_busy(struct mmc_command *cmd)
 static int mmc_test_wait_busy(struct mmc_test_card *test)
 {
 	int ret, busy;
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 
 	busy = 0;
 	do {
@@ -181,9 +228,10 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)
 
 		if (!busy && mmc_test_busy(&cmd)) {
 			busy = 1;
-			printk(KERN_INFO "%s: Warning: Host did not "
-				"wait for busy state to end.\n",
-				mmc_hostname(test->card->host));
+			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+				printk(KERN_INFO "%s: Warning: Host did not "
+					"wait for busy state to end.\n",
+					mmc_hostname(test->card->host));
 		}
 	} while (mmc_test_busy(&cmd));
 
@@ -198,18 +246,13 @@ static int mmc_test_buffer_transfer(struct mmc_test_card *test,
 {
 	int ret;
 
-	struct mmc_request mrq;
-	struct mmc_command cmd;
-	struct mmc_command stop;
-	struct mmc_data data;
+	struct mmc_request mrq = {0};
+	struct mmc_command cmd = {0};
+	struct mmc_command stop = {0};
+	struct mmc_data data = {0};
 
 	struct scatterlist sg;
 
-	memset(&mrq, 0, sizeof(struct mmc_request));
-	memset(&cmd, 0, sizeof(struct mmc_command));
-	memset(&data, 0, sizeof(struct mmc_data));
-	memset(&stop, 0, sizeof(struct mmc_command));
-
 	mrq.cmd = &cmd;
 	mrq.data = &data;
 	mrq.stop = &stop;
@@ -244,28 +287,39 @@ static void mmc_test_free_mem(struct mmc_test_mem *mem)
 }
 
 /*
- * Allocate a lot of memory, preferrably max_sz but at least min_sz.  In case
- * there isn't much memory do not exceed 1/16th total lowmem pages.
+ * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
+ * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do
+ * not exceed a maximum number of segments and try not to make segments much
+ * bigger than maximum segment size.
  */
 static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
-					       unsigned long max_sz)
+					       unsigned long max_sz,
+					       unsigned int max_segs,
+					       unsigned int max_seg_sz)
 {
 	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
 	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
+	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
 	unsigned long page_cnt = 0;
 	unsigned long limit = nr_free_buffer_pages() >> 4;
 	struct mmc_test_mem *mem;
 
 	if (max_page_cnt > limit)
 		max_page_cnt = limit;
-	if (max_page_cnt < min_page_cnt)
-		max_page_cnt = min_page_cnt;
+	if (min_page_cnt > max_page_cnt)
+		min_page_cnt = max_page_cnt;
+
+	if (max_seg_page_cnt > max_page_cnt)
+		max_seg_page_cnt = max_page_cnt;
+
+	if (max_segs > max_page_cnt)
+		max_segs = max_page_cnt;
 
 	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
 	if (!mem)
 		return NULL;
 
-	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_page_cnt,
+	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
 			   GFP_KERNEL);
 	if (!mem->arr)
 		goto out_free;
@@ -276,7 +330,7 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
 		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
 				__GFP_NORETRY;
 
-		order = get_order(max_page_cnt << PAGE_SHIFT);
+		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
 		while (1) {
 			page = alloc_pages(flags, order);
 			if (page || !order)
@@ -295,6 +349,11 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
 			break;
 		max_page_cnt -= 1UL << order;
 		page_cnt += 1UL << order;
+		if (mem->cnt >= max_segs) {
+			if (page_cnt < min_page_cnt)
+				goto out_free;
+			break;
+		}
 	}
 
 	return mem;
@@ -310,7 +369,8 @@ out_free:
  */
 static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
 			   struct scatterlist *sglist, int repeat,
-			   unsigned int max_segs, unsigned int *sg_len)
+			   unsigned int max_segs, unsigned int max_seg_sz,
+			   unsigned int *sg_len)
 {
 	struct scatterlist *sg = NULL;
 	unsigned int i;
@@ -322,8 +382,10 @@ static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
 	for (i = 0; i < mem->cnt; i++) {
 		unsigned long len = PAGE_SIZE << mem->arr[i].order;
 
-		if (sz < len)
+		if (len > sz)
 			len = sz;
+		if (len > max_seg_sz)
+			len = max_seg_sz;
 		if (sg)
 			sg = sg_next(sg);
 		else
@@ -355,6 +417,7 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
 					       unsigned long sz,
 					       struct scatterlist *sglist,
 					       unsigned int max_segs,
+					       unsigned int max_seg_sz,
 					       unsigned int *sg_len)
 {
 	struct scatterlist *sg = NULL;
@@ -365,7 +428,7 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
 	sg_init_table(sglist, max_segs);
 
 	*sg_len = 0;
-	while (sz && i) {
+	while (sz) {
 		base = page_address(mem->arr[--i].page);
 		cnt = 1 << mem->arr[i].order;
 		while (sz && cnt) {
@@ -374,7 +437,9 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
 				continue;
 			last_addr = addr;
 			len = PAGE_SIZE;
-			if (sz < len)
+			if (len > max_seg_sz)
+				len = max_seg_sz;
+			if (len > sz)
 				len = sz;
 			if (sg)
 				sg = sg_next(sg);
@@ -386,6 +451,8 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
 			sz -= len;
 			*sg_len += 1;
 		}
+		if (i == 0)
+			i = mem->cnt;
 	}
 
 	if (sg)
@@ -421,23 +488,52 @@ static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
421} 488}
422 489
423/* 490/*
491 * Save transfer results for future usage
492 */
493static void mmc_test_save_transfer_result(struct mmc_test_card *test,
494 unsigned int count, unsigned int sectors, struct timespec ts,
495 unsigned int rate, unsigned int iops)
496{
497 struct mmc_test_transfer_result *tr;
498
499 if (!test->gr)
500 return;
501
502 tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
503 if (!tr)
504 return;
505
506 tr->count = count;
507 tr->sectors = sectors;
508 tr->ts = ts;
509 tr->rate = rate;
510 tr->iops = iops;
511
512 list_add_tail(&tr->link, &test->gr->tr_lst);
513}
514
515/*
424 * Print the transfer rate. 516 * Print the transfer rate.
425 */ 517 */
426static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes, 518static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
427 struct timespec *ts1, struct timespec *ts2) 519 struct timespec *ts1, struct timespec *ts2)
428{ 520{
429 unsigned int rate, sectors = bytes >> 9; 521 unsigned int rate, iops, sectors = bytes >> 9;
430 struct timespec ts; 522 struct timespec ts;
431 523
432 ts = timespec_sub(*ts2, *ts1); 524 ts = timespec_sub(*ts2, *ts1);
433 525
434 rate = mmc_test_rate(bytes, &ts); 526 rate = mmc_test_rate(bytes, &ts);
527 iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
435 528
436 printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu " 529 printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
437 "seconds (%u kB/s, %u KiB/s)\n", 530 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
438 mmc_hostname(test->card->host), sectors, sectors >> 1, 531 mmc_hostname(test->card->host), sectors, sectors >> 1,
439 (sectors == 1 ? ".5" : ""), (unsigned long)ts.tv_sec, 532 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
440 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024); 533 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
534 iops / 100, iops % 100);
535
536 mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
441} 537}
442 538
443/* 539/*
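
The rate helpers now also report IOPS. Since the value is computed as mmc_test_rate(count * 100, &ts), it carries two fixed decimal places that integer division recovers at print time, and the KiB column gains a ".5" suffix for any odd sector count (the earlier "sectors == 1" test missed e.g. 3 sectors = 1.5 KiB). A standalone sketch of the arithmetic, with made-up values:

        #include <stdio.h>

        int main(void)
        {
                /* 250 transfers in 2 s -> 125.00 IOPS, kept as 12500 (x100) */
                unsigned int iops = 250 * 100 / 2;
                /* 9 sectors = 4.5 KiB: sectors >> 1 drops the half KiB */
                unsigned int sectors = 9;

                printf("%u.%02u IOPS\n", iops / 100, iops % 100);
                printf("%u%s KiB\n", sectors >> 1, (sectors & 1 ? ".5" : ""));
                return 0;
        }
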
@@ -447,20 +543,24 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
447 unsigned int count, struct timespec *ts1, 543 unsigned int count, struct timespec *ts1,
448 struct timespec *ts2) 544 struct timespec *ts2)
449{ 545{
450 unsigned int rate, sectors = bytes >> 9; 546 unsigned int rate, iops, sectors = bytes >> 9;
451 uint64_t tot = bytes * count; 547 uint64_t tot = bytes * count;
452 struct timespec ts; 548 struct timespec ts;
453 549
454 ts = timespec_sub(*ts2, *ts1); 550 ts = timespec_sub(*ts2, *ts1);
455 551
456 rate = mmc_test_rate(tot, &ts); 552 rate = mmc_test_rate(tot, &ts);
553 iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
457 554
458 printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " 555 printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
459 "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n", 556 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
557 "%u.%02u IOPS)\n",
460 mmc_hostname(test->card->host), count, sectors, count, 558 mmc_hostname(test->card->host), count, sectors, count,
461 sectors >> 1, (sectors == 1 ? ".5" : ""), 559 sectors >> 1, (sectors & 1 ? ".5" : ""),
462 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec, 560 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
463 rate / 1000, rate / 1024); 561 rate / 1000, rate / 1024, iops / 100, iops % 100);
562
563 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
464} 564}
465 565
466/* 566/*
@@ -626,15 +726,10 @@ static int mmc_test_simple_transfer(struct mmc_test_card *test,
626 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, 726 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
627 unsigned blocks, unsigned blksz, int write) 727 unsigned blocks, unsigned blksz, int write)
628{ 728{
629 struct mmc_request mrq; 729 struct mmc_request mrq = {0};
630 struct mmc_command cmd; 730 struct mmc_command cmd = {0};
631 struct mmc_command stop; 731 struct mmc_command stop = {0};
632 struct mmc_data data; 732 struct mmc_data data = {0};
633
634 memset(&mrq, 0, sizeof(struct mmc_request));
635 memset(&cmd, 0, sizeof(struct mmc_command));
636 memset(&data, 0, sizeof(struct mmc_data));
637 memset(&stop, 0, sizeof(struct mmc_command));
638 733
639 mrq.cmd = &cmd; 734 mrq.cmd = &cmd;
640 mrq.data = &data; 735 mrq.data = &data;
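
Replacing the four memset() calls with "= {0}" initializers is purely a readability change: a C aggregate initializer zero-fills every member that is not explicitly named. A tiny demonstration (struct and values invented for the example):

        #include <stdio.h>
        #include <string.h>

        struct cmd {
                int opcode;
                unsigned int arg;
        };

        int main(void)
        {
                struct cmd a = {0};     /* every member zeroed */
                struct cmd b;

                memset(&b, 0, sizeof(b));
                /* prints "equal" for this padding-free struct */
                printf("%s\n", !memcmp(&a, &b, sizeof(a)) ? "equal" : "differ");
                return 0;
        }
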
@@ -656,18 +751,13 @@ static int mmc_test_simple_transfer(struct mmc_test_card *test,
656static int mmc_test_broken_transfer(struct mmc_test_card *test, 751static int mmc_test_broken_transfer(struct mmc_test_card *test,
657 unsigned blocks, unsigned blksz, int write) 752 unsigned blocks, unsigned blksz, int write)
658{ 753{
659 struct mmc_request mrq; 754 struct mmc_request mrq = {0};
660 struct mmc_command cmd; 755 struct mmc_command cmd = {0};
661 struct mmc_command stop; 756 struct mmc_command stop = {0};
662 struct mmc_data data; 757 struct mmc_data data = {0};
663 758
664 struct scatterlist sg; 759 struct scatterlist sg;
665 760
666 memset(&mrq, 0, sizeof(struct mmc_request));
667 memset(&cmd, 0, sizeof(struct mmc_command));
668 memset(&data, 0, sizeof(struct mmc_data));
669 memset(&stop, 0, sizeof(struct mmc_command));
670
671 mrq.cmd = &cmd; 761 mrq.cmd = &cmd;
672 mrq.data = &data; 762 mrq.data = &data;
673 mrq.stop = &stop; 763 mrq.stop = &stop;
@@ -1215,16 +1305,22 @@ static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1215 int max_scatter) 1305 int max_scatter)
1216{ 1306{
1217 struct mmc_test_area *t = &test->area; 1307 struct mmc_test_area *t = &test->area;
1308 int err;
1218 1309
1219 t->blocks = sz >> 9; 1310 t->blocks = sz >> 9;
1220 1311
1221 if (max_scatter) { 1312 if (max_scatter) {
1222 return mmc_test_map_sg_max_scatter(t->mem, sz, t->sg, 1313 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1223 t->max_segs, &t->sg_len); 1314 t->max_segs, t->max_seg_sz,
1224 } else {
1225 return mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1226 &t->sg_len); 1315 &t->sg_len);
1316 } else {
1317 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1318 t->max_seg_sz, &t->sg_len);
1227 } 1319 }
1320 if (err)
1321 printk(KERN_INFO "%s: Failed to map sg list\n",
1322 mmc_hostname(test->card->host));
1323 return err;
1228} 1324}
1229 1325
1230/* 1326/*
@@ -1249,6 +1345,22 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1249 struct timespec ts1, ts2; 1345 struct timespec ts1, ts2;
1250 int ret; 1346 int ret;
1251 1347
1348 /*
1349 * In the case of a maximally scattered transfer, the maximum transfer
1350 * size is further limited by using PAGE_SIZE segments.
1351 */
1352 if (max_scatter) {
1353 struct mmc_test_area *t = &test->area;
1354 unsigned long max_tfr;
1355
1356 if (t->max_seg_sz >= PAGE_SIZE)
1357 max_tfr = t->max_segs * PAGE_SIZE;
1358 else
1359 max_tfr = t->max_segs * t->max_seg_sz;
1360 if (sz > max_tfr)
1361 sz = max_tfr;
1362 }
1363
1252 ret = mmc_test_area_map(test, sz, max_scatter); 1364 ret = mmc_test_area_map(test, sz, max_scatter);
1253 if (ret) 1365 if (ret)
1254 return ret; 1366 return ret;
@@ -1274,8 +1386,9 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1274 */ 1386 */
1275static int mmc_test_area_fill(struct mmc_test_card *test) 1387static int mmc_test_area_fill(struct mmc_test_card *test)
1276{ 1388{
1277 return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr, 1389 struct mmc_test_area *t = &test->area;
1278 1, 0, 0); 1390
1391 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
1279} 1392}
1280 1393
1281/* 1394/*
@@ -1288,7 +1401,7 @@ static int mmc_test_area_erase(struct mmc_test_card *test)
1288 if (!mmc_can_erase(test->card)) 1401 if (!mmc_can_erase(test->card))
1289 return 0; 1402 return 0;
1290 1403
1291 return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9, 1404 return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
1292 MMC_ERASE_ARG); 1405 MMC_ERASE_ARG);
1293} 1406}
1294 1407
@@ -1306,38 +1419,52 @@ static int mmc_test_area_cleanup(struct mmc_test_card *test)
1306} 1419}
1307 1420
1308/* 1421/*
1309 * Initialize an area for testing large transfers. The size of the area is the 1422 * Initialize an area for testing large transfers. The test area is set to the
1310 * preferred erase size which is a good size for optimal transfer speed. Note 1423 * middle of the card because cards may have different characteristics at the
1311 * that is typically 4MiB for modern cards. The test area is set to the middle 1424 * front (for FAT file system optimization). Optionally, the area is erased
1312 * of the card because cards may have different charateristics at the front 1425 * (if the card supports it) which may improve write performance. Optionally,
1313 * (for FAT file system optimization). Optionally, the area is erased (if the 1426 * the area is filled with data for subsequent read tests.
1314 * card supports it) which may improve write performance. Optionally, the area
1315 * is filled with data for subsequent read tests.
1316 */ 1427 */
1317static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill) 1428static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1318{ 1429{
1319 struct mmc_test_area *t = &test->area; 1430 struct mmc_test_area *t = &test->area;
1320 unsigned long min_sz = 64 * 1024; 1431 unsigned long min_sz = 64 * 1024, sz;
1321 int ret; 1432 int ret;
1322 1433
1323 ret = mmc_test_set_blksize(test, 512); 1434 ret = mmc_test_set_blksize(test, 512);
1324 if (ret) 1435 if (ret)
1325 return ret; 1436 return ret;
1326 1437
1327 if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9) 1438 /* Make the test area size about 4MiB */
1328 t->max_sz = TEST_AREA_MAX_SIZE; 1439 sz = (unsigned long)test->card->pref_erase << 9;
1329 else 1440 t->max_sz = sz;
1330 t->max_sz = (unsigned long)test->card->pref_erase << 9; 1441 while (t->max_sz < 4 * 1024 * 1024)
1442 t->max_sz += sz;
1443 while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
1444 t->max_sz -= sz;
1445
1446 t->max_segs = test->card->host->max_segs;
1447 t->max_seg_sz = test->card->host->max_seg_size;
1448
1449 t->max_tfr = t->max_sz;
1450 if (t->max_tfr >> 9 > test->card->host->max_blk_count)
1451 t->max_tfr = test->card->host->max_blk_count << 9;
1452 if (t->max_tfr > test->card->host->max_req_size)
1453 t->max_tfr = test->card->host->max_req_size;
1454 if (t->max_tfr / t->max_seg_sz > t->max_segs)
1455 t->max_tfr = t->max_segs * t->max_seg_sz;
1456
1331 /* 1457 /*
1332 * Try to allocate enough memory for the whole area. Less is OK 1458 * Try to allocate enough memory for a max. sized transfer. Less is OK
1333 * because the same memory can be mapped into the scatterlist more than 1459 * because the same memory can be mapped into the scatterlist more than
1334 * once. 1460 * once. Also, take into account the limits imposed on scatterlist
1461 * segments by the host driver.
1335 */ 1462 */
1336 t->mem = mmc_test_alloc_mem(min_sz, t->max_sz); 1463 t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
1464 t->max_seg_sz);
1337 if (!t->mem) 1465 if (!t->mem)
1338 return -ENOMEM; 1466 return -ENOMEM;
1339 1467
1340 t->max_segs = DIV_ROUND_UP(t->max_sz, PAGE_SIZE);
1341 t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL); 1468 t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
1342 if (!t->sg) { 1469 if (!t->sg) {
1343 ret = -ENOMEM; 1470 ret = -ENOMEM;
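
The sizing pass above grows the test area toward 4 MiB in multiples of the card's preferred erase size, shrinks it back if it overshoots TEST_AREA_MAX_SIZE, and then derives the largest single transfer from the host's block-count, request-size and segment limits. A worked example with assumed limits (all values hypothetical):

        #include <stdio.h>

        int main(void)
        {
                /* pref_erase = 1 MiB; host: 2048 blocks/request,
                 * 1 MiB requests, 64 segments of 64 KiB each. */
                unsigned long sz = 1024 * 1024, max_sz = sz;
                unsigned long area_cap = 128 * 1024 * 1024;
                unsigned long max_tfr;
                unsigned int max_blk_count = 2048;
                unsigned int max_req_size = 1024 * 1024;
                unsigned int max_segs = 64, max_seg_sz = 64 * 1024;

                while (max_sz < 4 * 1024 * 1024)
                        max_sz += sz;                   /* -> 4 MiB */
                while (max_sz > area_cap && max_sz > sz)
                        max_sz -= sz;                   /* no-op here */

                max_tfr = max_sz;
                if (max_tfr >> 9 > max_blk_count)
                        max_tfr = max_blk_count << 9;   /* -> 1 MiB */
                if (max_tfr > max_req_size)
                        max_tfr = max_req_size;         /* still 1 MiB */
                if (max_tfr / max_seg_sz > max_segs)
                        max_tfr = max_segs * max_seg_sz; /* no-op here */

                /* prints: area 4194304, transfer 1048576 */
                printf("area %lu, transfer %lu\n", max_sz, max_tfr);
                return 0;
        }
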
@@ -1401,8 +1528,10 @@ static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
1401static int mmc_test_best_performance(struct mmc_test_card *test, int write, 1528static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1402 int max_scatter) 1529 int max_scatter)
1403{ 1530{
1404 return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr, 1531 struct mmc_test_area *t = &test->area;
1405 write, max_scatter, 1); 1532
1533 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
1534 max_scatter, 1);
1406} 1535}
1407 1536
1408/* 1537/*
@@ -1442,17 +1571,19 @@ static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
1442 */ 1571 */
1443static int mmc_test_profile_read_perf(struct mmc_test_card *test) 1572static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1444{ 1573{
1574 struct mmc_test_area *t = &test->area;
1445 unsigned long sz; 1575 unsigned long sz;
1446 unsigned int dev_addr; 1576 unsigned int dev_addr;
1447 int ret; 1577 int ret;
1448 1578
1449 for (sz = 512; sz < test->area.max_sz; sz <<= 1) { 1579 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1450 dev_addr = test->area.dev_addr + (sz >> 9); 1580 dev_addr = t->dev_addr + (sz >> 9);
1451 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); 1581 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1452 if (ret) 1582 if (ret)
1453 return ret; 1583 return ret;
1454 } 1584 }
1455 dev_addr = test->area.dev_addr; 1585 sz = t->max_tfr;
1586 dev_addr = t->dev_addr;
1456 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); 1587 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1457} 1588}
1458 1589
@@ -1461,6 +1592,7 @@ static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1461 */ 1592 */
1462static int mmc_test_profile_write_perf(struct mmc_test_card *test) 1593static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1463{ 1594{
1595 struct mmc_test_area *t = &test->area;
1464 unsigned long sz; 1596 unsigned long sz;
1465 unsigned int dev_addr; 1597 unsigned int dev_addr;
1466 int ret; 1598 int ret;
@@ -1468,8 +1600,8 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1468 ret = mmc_test_area_erase(test); 1600 ret = mmc_test_area_erase(test);
1469 if (ret) 1601 if (ret)
1470 return ret; 1602 return ret;
1471 for (sz = 512; sz < test->area.max_sz; sz <<= 1) { 1603 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1472 dev_addr = test->area.dev_addr + (sz >> 9); 1604 dev_addr = t->dev_addr + (sz >> 9);
1473 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); 1605 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1474 if (ret) 1606 if (ret)
1475 return ret; 1607 return ret;
@@ -1477,7 +1609,8 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1477 ret = mmc_test_area_erase(test); 1609 ret = mmc_test_area_erase(test);
1478 if (ret) 1610 if (ret)
1479 return ret; 1611 return ret;
1480 dev_addr = test->area.dev_addr; 1612 sz = t->max_tfr;
1613 dev_addr = t->dev_addr;
1481 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); 1614 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1482} 1615}
1483 1616
@@ -1486,6 +1619,7 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1486 */ 1619 */
1487static int mmc_test_profile_trim_perf(struct mmc_test_card *test) 1620static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1488{ 1621{
1622 struct mmc_test_area *t = &test->area;
1489 unsigned long sz; 1623 unsigned long sz;
1490 unsigned int dev_addr; 1624 unsigned int dev_addr;
1491 struct timespec ts1, ts2; 1625 struct timespec ts1, ts2;
@@ -1497,8 +1631,8 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1497 if (!mmc_can_erase(test->card)) 1631 if (!mmc_can_erase(test->card))
1498 return RESULT_UNSUP_HOST; 1632 return RESULT_UNSUP_HOST;
1499 1633
1500 for (sz = 512; sz < test->area.max_sz; sz <<= 1) { 1634 for (sz = 512; sz < t->max_sz; sz <<= 1) {
1501 dev_addr = test->area.dev_addr + (sz >> 9); 1635 dev_addr = t->dev_addr + (sz >> 9);
1502 getnstimeofday(&ts1); 1636 getnstimeofday(&ts1);
1503 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); 1637 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1504 if (ret) 1638 if (ret)
@@ -1506,7 +1640,7 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1506 getnstimeofday(&ts2); 1640 getnstimeofday(&ts2);
1507 mmc_test_print_rate(test, sz, &ts1, &ts2); 1641 mmc_test_print_rate(test, sz, &ts1, &ts2);
1508 } 1642 }
1509 dev_addr = test->area.dev_addr; 1643 dev_addr = t->dev_addr;
1510 getnstimeofday(&ts1); 1644 getnstimeofday(&ts1);
1511 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); 1645 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1512 if (ret) 1646 if (ret)
@@ -1516,29 +1650,66 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1516 return 0; 1650 return 0;
1517} 1651}
1518 1652
1653static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1654{
1655 struct mmc_test_area *t = &test->area;
1656 unsigned int dev_addr, i, cnt;
1657 struct timespec ts1, ts2;
1658 int ret;
1659
1660 cnt = t->max_sz / sz;
1661 dev_addr = t->dev_addr;
1662 getnstimeofday(&ts1);
1663 for (i = 0; i < cnt; i++) {
1664 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1665 if (ret)
1666 return ret;
1667 dev_addr += (sz >> 9);
1668 }
1669 getnstimeofday(&ts2);
1670 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1671 return 0;
1672}
1673
1519/* 1674/*
1520 * Consecutive read performance by transfer size. 1675 * Consecutive read performance by transfer size.
1521 */ 1676 */
1522static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test) 1677static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1523{ 1678{
1679 struct mmc_test_area *t = &test->area;
1524 unsigned long sz; 1680 unsigned long sz;
1681 int ret;
1682
1683 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1684 ret = mmc_test_seq_read_perf(test, sz);
1685 if (ret)
1686 return ret;
1687 }
1688 sz = t->max_tfr;
1689 return mmc_test_seq_read_perf(test, sz);
1690}
1691
1692static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1693{
1694 struct mmc_test_area *t = &test->area;
1525 unsigned int dev_addr, i, cnt; 1695 unsigned int dev_addr, i, cnt;
1526 struct timespec ts1, ts2; 1696 struct timespec ts1, ts2;
1527 int ret; 1697 int ret;
1528 1698
1529 for (sz = 512; sz <= test->area.max_sz; sz <<= 1) { 1699 ret = mmc_test_area_erase(test);
1530 cnt = test->area.max_sz / sz; 1700 if (ret)
1531 dev_addr = test->area.dev_addr; 1701 return ret;
1532 getnstimeofday(&ts1); 1702 cnt = t->max_sz / sz;
1533 for (i = 0; i < cnt; i++) { 1703 dev_addr = t->dev_addr;
1534 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0); 1704 getnstimeofday(&ts1);
1535 if (ret) 1705 for (i = 0; i < cnt; i++) {
1536 return ret; 1706 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1537 dev_addr += (sz >> 9); 1707 if (ret)
1538 } 1708 return ret;
1539 getnstimeofday(&ts2); 1709 dev_addr += (sz >> 9);
1540 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1541 } 1710 }
1711 getnstimeofday(&ts2);
1712 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1542 return 0; 1713 return 0;
1543} 1714}
1544 1715
@@ -1547,28 +1718,17 @@ static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1547 */ 1718 */
1548static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test) 1719static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1549{ 1720{
1721 struct mmc_test_area *t = &test->area;
1550 unsigned long sz; 1722 unsigned long sz;
1551 unsigned int dev_addr, i, cnt;
1552 struct timespec ts1, ts2;
1553 int ret; 1723 int ret;
1554 1724
1555 for (sz = 512; sz <= test->area.max_sz; sz <<= 1) { 1725 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1556 ret = mmc_test_area_erase(test); 1726 ret = mmc_test_seq_write_perf(test, sz);
1557 if (ret) 1727 if (ret)
1558 return ret; 1728 return ret;
1559 cnt = test->area.max_sz / sz;
1560 dev_addr = test->area.dev_addr;
1561 getnstimeofday(&ts1);
1562 for (i = 0; i < cnt; i++) {
1563 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1564 if (ret)
1565 return ret;
1566 dev_addr += (sz >> 9);
1567 }
1568 getnstimeofday(&ts2);
1569 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1570 } 1729 }
1571 return 0; 1730 sz = t->max_tfr;
1731 return mmc_test_seq_write_perf(test, sz);
1572} 1732}
1573 1733
1574/* 1734/*
@@ -1576,6 +1736,7 @@ static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1576 */ 1736 */
1577static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test) 1737static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1578{ 1738{
1739 struct mmc_test_area *t = &test->area;
1579 unsigned long sz; 1740 unsigned long sz;
1580 unsigned int dev_addr, i, cnt; 1741 unsigned int dev_addr, i, cnt;
1581 struct timespec ts1, ts2; 1742 struct timespec ts1, ts2;
@@ -1587,15 +1748,15 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1587 if (!mmc_can_erase(test->card)) 1748 if (!mmc_can_erase(test->card))
1588 return RESULT_UNSUP_HOST; 1749 return RESULT_UNSUP_HOST;
1589 1750
1590 for (sz = 512; sz <= test->area.max_sz; sz <<= 1) { 1751 for (sz = 512; sz <= t->max_sz; sz <<= 1) {
1591 ret = mmc_test_area_erase(test); 1752 ret = mmc_test_area_erase(test);
1592 if (ret) 1753 if (ret)
1593 return ret; 1754 return ret;
1594 ret = mmc_test_area_fill(test); 1755 ret = mmc_test_area_fill(test);
1595 if (ret) 1756 if (ret)
1596 return ret; 1757 return ret;
1597 cnt = test->area.max_sz / sz; 1758 cnt = t->max_sz / sz;
1598 dev_addr = test->area.dev_addr; 1759 dev_addr = t->dev_addr;
1599 getnstimeofday(&ts1); 1760 getnstimeofday(&ts1);
1600 for (i = 0; i < cnt; i++) { 1761 for (i = 0; i < cnt; i++) {
1601 ret = mmc_erase(test->card, dev_addr, sz >> 9, 1762 ret = mmc_erase(test->card, dev_addr, sz >> 9,
@@ -1610,6 +1771,189 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1610 return 0; 1771 return 0;
1611} 1772}
1612 1773
1774static unsigned int rnd_next = 1;
1775
1776static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
1777{
1778 uint64_t r;
1779
1780 rnd_next = rnd_next * 1103515245 + 12345;
1781 r = (rnd_next >> 16) & 0x7fff;
1782 return (r * rnd_cnt) >> 15;
1783}
1784
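
mmc_test_rnd_num() is the textbook linear congruential generator from the C standard's reference rand() (multiplier 1103515245, increment 12345), so the address sequence is deterministic and repeatable across runs; the top 15 bits are scaled into [0, rnd_cnt) with a multiply-and-shift rather than a modulo. A userspace copy for experimentation (renamed, not the driver's symbols):

        #include <stdio.h>
        #include <stdint.h>

        static unsigned int rnd_next = 1;       /* fixed seed: repeatable */

        static unsigned int test_rnd_num(unsigned int rnd_cnt)
        {
                uint64_t r;

                rnd_next = rnd_next * 1103515245 + 12345;
                r = (rnd_next >> 16) & 0x7fff;  /* 15 pseudo-random bits */
                return (r * rnd_cnt) >> 15;     /* scale into [0, rnd_cnt) */
        }

        int main(void)
        {
                int i;

                /* the same four values on every run */
                for (i = 0; i < 4; i++)
                        printf("%u ", test_rnd_num(1000));
                printf("\n");
                return 0;
        }
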
1785static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
1786 unsigned long sz)
1787{
1788 unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
1789 unsigned int ssz;
1790 struct timespec ts1, ts2, ts;
1791 int ret;
1792
1793 ssz = sz >> 9;
1794
1795 rnd_addr = mmc_test_capacity(test->card) / 4;
1796 range1 = rnd_addr / test->card->pref_erase;
1797 range2 = range1 / ssz;
1798
1799 getnstimeofday(&ts1);
1800 for (cnt = 0; cnt < UINT_MAX; cnt++) {
1801 getnstimeofday(&ts2);
1802 ts = timespec_sub(ts2, ts1);
1803 if (ts.tv_sec >= 10)
1804 break;
1805 ea = mmc_test_rnd_num(range1);
1806 if (ea == last_ea)
1807 ea -= 1;
1808 last_ea = ea;
1809 dev_addr = rnd_addr + test->card->pref_erase * ea +
1810 ssz * mmc_test_rnd_num(range2);
1811 ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
1812 if (ret)
1813 return ret;
1814 }
1815 if (print)
1816 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1817 return 0;
1818}
1819
1820static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1821{
1822 struct mmc_test_area *t = &test->area;
1823 unsigned int next;
1824 unsigned long sz;
1825 int ret;
1826
1827 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1828 /*
1829 * When writing, try to get more consistent results by running
1830 * the test twice with exactly the same I/O but outputting the
1831 * results only for the 2nd run.
1832 */
1833 if (write) {
1834 next = rnd_next;
1835 ret = mmc_test_rnd_perf(test, write, 0, sz);
1836 if (ret)
1837 return ret;
1838 rnd_next = next;
1839 }
1840 ret = mmc_test_rnd_perf(test, write, 1, sz);
1841 if (ret)
1842 return ret;
1843 }
1844 sz = t->max_tfr;
1845 if (write) {
1846 next = rnd_next;
1847 ret = mmc_test_rnd_perf(test, write, 0, sz);
1848 if (ret)
1849 return ret;
1850 rnd_next = next;
1851 }
1852 return mmc_test_rnd_perf(test, write, 1, sz);
1853}
1854
1855/*
1856 * Random read performance by transfer size.
1857 */
1858static int mmc_test_random_read_perf(struct mmc_test_card *test)
1859{
1860 return mmc_test_random_perf(test, 0);
1861}
1862
1863/*
1864 * Random write performance by transfer size.
1865 */
1866static int mmc_test_random_write_perf(struct mmc_test_card *test)
1867{
1868 return mmc_test_random_perf(test, 1);
1869}
1870
1871static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
1872 unsigned int tot_sz, int max_scatter)
1873{
1874 struct mmc_test_area *t = &test->area;
1875 unsigned int dev_addr, i, cnt, sz, ssz;
1876 struct timespec ts1, ts2;
1877 int ret;
1878
1879 sz = t->max_tfr;
1880
1881 /*
1882 * In the case of a maximally scattered transfer, the maximum transfer
1883 * size is further limited by using PAGE_SIZE segments.
1884 */
1885 if (max_scatter) {
1886 unsigned long max_tfr;
1887
1888 if (t->max_seg_sz >= PAGE_SIZE)
1889 max_tfr = t->max_segs * PAGE_SIZE;
1890 else
1891 max_tfr = t->max_segs * t->max_seg_sz;
1892 if (sz > max_tfr)
1893 sz = max_tfr;
1894 }
1895
1896 ssz = sz >> 9;
1897 dev_addr = mmc_test_capacity(test->card) / 4;
1898 if (tot_sz > dev_addr << 9)
1899 tot_sz = dev_addr << 9;
1900 cnt = tot_sz / sz;
1901 dev_addr &= 0xffff0000; /* Round to a 32MiB (65536-sector) boundary */
1902
1903 getnstimeofday(&ts1);
1904 for (i = 0; i < cnt; i++) {
1905 ret = mmc_test_area_io(test, sz, dev_addr, write,
1906 max_scatter, 0);
1907 if (ret)
1908 return ret;
1909 dev_addr += ssz;
1910 }
1911 getnstimeofday(&ts2);
1912
1913 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1914
1915 return 0;
1916}
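
A note on the alignment mask in mmc_test_seq_perf(): dev_addr counts 512-byte sectors, so clearing its low 16 bits rounds down to 65536 sectors, which is 32 MiB. A quick arithmetic check:

        #include <stdio.h>

        int main(void)
        {
                unsigned int dev_addr = 0x00012345; /* sector no. (example) */

                dev_addr &= 0xffff0000;             /* 65536-sector align */
                printf("0x%08x, boundary = %u MiB\n", dev_addr,
                       (65536u * 512u) >> 20);      /* prints 32 */
                return 0;
        }
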
1917
1918static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
1919{
1920 int ret, i;
1921
1922 for (i = 0; i < 10; i++) {
1923 ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
1924 if (ret)
1925 return ret;
1926 }
1927 for (i = 0; i < 5; i++) {
1928 ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
1929 if (ret)
1930 return ret;
1931 }
1932 for (i = 0; i < 3; i++) {
1933 ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
1934 if (ret)
1935 return ret;
1936 }
1937
1938 return ret;
1939}
1940
1941/*
1942 * Large sequential read performance.
1943 */
1944static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
1945{
1946 return mmc_test_large_seq_perf(test, 0);
1947}
1948
1949/*
1950 * Large sequential write performance.
1951 */
1952static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
1953{
1954 return mmc_test_large_seq_perf(test, 1);
1955}
1956
1613static const struct mmc_test_case mmc_test_cases[] = { 1957static const struct mmc_test_case mmc_test_cases[] = {
1614 { 1958 {
1615 .name = "Basic write (no data verification)", 1959 .name = "Basic write (no data verification)",
@@ -1849,10 +2193,40 @@ static const struct mmc_test_case mmc_test_cases[] = {
1849 .cleanup = mmc_test_area_cleanup, 2193 .cleanup = mmc_test_area_cleanup,
1850 }, 2194 },
1851 2195
2196 {
2197 .name = "Random read performance by transfer size",
2198 .prepare = mmc_test_area_prepare,
2199 .run = mmc_test_random_read_perf,
2200 .cleanup = mmc_test_area_cleanup,
2201 },
2202
2203 {
2204 .name = "Random write performance by transfer size",
2205 .prepare = mmc_test_area_prepare,
2206 .run = mmc_test_random_write_perf,
2207 .cleanup = mmc_test_area_cleanup,
2208 },
2209
2210 {
2211 .name = "Large sequential read into scattered pages",
2212 .prepare = mmc_test_area_prepare,
2213 .run = mmc_test_large_seq_read_perf,
2214 .cleanup = mmc_test_area_cleanup,
2215 },
2216
2217 {
2218 .name = "Large sequential write from scattered pages",
2219 .prepare = mmc_test_area_prepare,
2220 .run = mmc_test_large_seq_write_perf,
2221 .cleanup = mmc_test_area_cleanup,
2222 },
2223
1852}; 2224};
1853 2225
1854static DEFINE_MUTEX(mmc_test_lock); 2226static DEFINE_MUTEX(mmc_test_lock);
1855 2227
2228static LIST_HEAD(mmc_test_result);
2229
1856static void mmc_test_run(struct mmc_test_card *test, int testcase) 2230static void mmc_test_run(struct mmc_test_card *test, int testcase)
1857{ 2231{
1858 int i, ret; 2232 int i, ret;
@@ -1863,6 +2237,8 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
1863 mmc_claim_host(test->card->host); 2237 mmc_claim_host(test->card->host);
1864 2238
1865 for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) { 2239 for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
2240 struct mmc_test_general_result *gr;
2241
1866 if (testcase && ((i + 1) != testcase)) 2242 if (testcase && ((i + 1) != testcase))
1867 continue; 2243 continue;
1868 2244
@@ -1881,6 +2257,25 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
1881 } 2257 }
1882 } 2258 }
1883 2259
2260 gr = kzalloc(sizeof(struct mmc_test_general_result),
2261 GFP_KERNEL);
2262 if (gr) {
2263 INIT_LIST_HEAD(&gr->tr_lst);
2264
2265 /* Record the data we already know */
2266 gr->card = test->card;
2267 gr->testcase = i;
2268
2269 /* Append the container to the global result list */
2270 list_add_tail(&gr->link, &mmc_test_result);
2271
2272 /*
2273 * Save the pointer to the created container in our private
2274 * structure.
2275 */
2276 test->gr = gr;
2277 }
2278
1884 ret = mmc_test_cases[i].run(test); 2279 ret = mmc_test_cases[i].run(test);
1885 switch (ret) { 2280 switch (ret) {
1886 case RESULT_OK: 2281 case RESULT_OK:
@@ -1906,6 +2301,10 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
1906 mmc_hostname(test->card->host), ret); 2301 mmc_hostname(test->card->host), ret);
1907 } 2302 }
1908 2303
2304 /* Save the result */
2305 if (gr)
2306 gr->result = ret;
2307
1909 if (mmc_test_cases[i].cleanup) { 2308 if (mmc_test_cases[i].cleanup) {
1910 ret = mmc_test_cases[i].cleanup(test); 2309 ret = mmc_test_cases[i].cleanup(test);
1911 if (ret) { 2310 if (ret) {
@@ -1923,30 +2322,95 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
1923 mmc_hostname(test->card->host)); 2322 mmc_hostname(test->card->host));
1924} 2323}
1925 2324
1926static ssize_t mmc_test_show(struct device *dev, 2325static void mmc_test_free_result(struct mmc_card *card)
1927 struct device_attribute *attr, char *buf) 2326{
2327 struct mmc_test_general_result *gr, *grs;
2328
2329 mutex_lock(&mmc_test_lock);
2330
2331 list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
2332 struct mmc_test_transfer_result *tr, *trs;
2333
2334 if (card && gr->card != card)
2335 continue;
2336
2337 list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
2338 list_del(&tr->link);
2339 kfree(tr);
2340 }
2341
2342 list_del(&gr->link);
2343 kfree(gr);
2344 }
2345
2346 mutex_unlock(&mmc_test_lock);
2347}
2348
2349static LIST_HEAD(mmc_test_file_test);
2350
2351static int mtf_test_show(struct seq_file *sf, void *data)
1928{ 2352{
2353 struct mmc_card *card = (struct mmc_card *)sf->private;
2354 struct mmc_test_general_result *gr;
2355
1929 mutex_lock(&mmc_test_lock); 2356 mutex_lock(&mmc_test_lock);
2357
2358 list_for_each_entry(gr, &mmc_test_result, link) {
2359 struct mmc_test_transfer_result *tr;
2360
2361 if (gr->card != card)
2362 continue;
2363
2364 seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
2365
2366 list_for_each_entry(tr, &gr->tr_lst, link) {
2367 seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
2368 tr->count, tr->sectors,
2369 (unsigned long)tr->ts.tv_sec,
2370 (unsigned long)tr->ts.tv_nsec,
2371 tr->rate, tr->iops / 100, tr->iops % 100);
2372 }
2373 }
2374
1930 mutex_unlock(&mmc_test_lock); 2375 mutex_unlock(&mmc_test_lock);
1931 2376
1932 return 0; 2377 return 0;
1933} 2378}
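
Read back through the seq_file, each saved transfer therefore appears as one line of "count sectors seconds.nanoseconds rate iops" under a "Test N: result" header, where the result column is the driver's numeric RESULT_* code, rate is in bytes per second and iops still carries the x100 scaling. A hypothetical dump, 8 transfers of 1 MiB in about a second, might look like:

        Test 27: 0
        8 2048 1.004500000 8351028 7.96
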
1934 2379
1935static ssize_t mmc_test_store(struct device *dev, 2380static int mtf_test_open(struct inode *inode, struct file *file)
1936 struct device_attribute *attr, const char *buf, size_t count)
1937{ 2381{
1938 struct mmc_card *card; 2382 return single_open(file, mtf_test_show, inode->i_private);
2383}
2384
2385static ssize_t mtf_test_write(struct file *file, const char __user *buf,
2386 size_t count, loff_t *pos)
2387{
2388 struct seq_file *sf = (struct seq_file *)file->private_data;
2389 struct mmc_card *card = (struct mmc_card *)sf->private;
1939 struct mmc_test_card *test; 2390 struct mmc_test_card *test;
1940 int testcase; 2391 char lbuf[12];
2392 long testcase;
2393
2394 if (count >= sizeof(lbuf))
2395 return -EINVAL;
1941 2396
1942 card = container_of(dev, struct mmc_card, dev); 2397 if (copy_from_user(lbuf, buf, count))
2398 return -EFAULT;
2399 lbuf[count] = '\0';
1943 2400
1944 testcase = simple_strtol(buf, NULL, 10); 2401 if (strict_strtol(lbuf, 10, &testcase))
2402 return -EINVAL;
1945 2403
1946 test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL); 2404 test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
1947 if (!test) 2405 if (!test)
1948 return -ENOMEM; 2406 return -ENOMEM;
1949 2407
2408 /*
2409 * Remove all test cases associated with given card. Thus we have only
2410 * actual data of the last run.
2411 */
2412 mmc_test_free_result(card);
2413
1950 test->card = card; 2414 test->card = card;
1951 2415
1952 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL); 2416 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
@@ -1973,16 +2437,78 @@ static ssize_t mmc_test_store(struct device *dev,
1973 return count; 2437 return count;
1974} 2438}
1975 2439
1976static DEVICE_ATTR(test, S_IWUSR | S_IRUGO, mmc_test_show, mmc_test_store); 2440static const struct file_operations mmc_test_fops_test = {
2441 .open = mtf_test_open,
2442 .read = seq_read,
2443 .write = mtf_test_write,
2444 .llseek = seq_lseek,
2445 .release = single_release,
2446};
2447
2448static void mmc_test_free_file_test(struct mmc_card *card)
2449{
2450 struct mmc_test_dbgfs_file *df, *dfs;
2451
2452 mutex_lock(&mmc_test_lock);
2453
2454 list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
2455 if (card && df->card != card)
2456 continue;
2457 debugfs_remove(df->file);
2458 list_del(&df->link);
2459 kfree(df);
2460 }
2461
2462 mutex_unlock(&mmc_test_lock);
2463}
2464
2465static int mmc_test_register_file_test(struct mmc_card *card)
2466{
2467 struct dentry *file = NULL;
2468 struct mmc_test_dbgfs_file *df;
2469 int ret = 0;
2470
2471 mutex_lock(&mmc_test_lock);
2472
2473 if (card->debugfs_root)
2474 file = debugfs_create_file("test", S_IWUSR | S_IRUGO,
2475 card->debugfs_root, card, &mmc_test_fops_test);
2476
2477 if (IS_ERR_OR_NULL(file)) {
2478 dev_err(&card->dev,
2479 "Can't create file. Perhaps debugfs is disabled.\n");
2480 ret = -ENODEV;
2481 goto err;
2482 }
2483
2484 df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
2485 if (!df) {
2486 debugfs_remove(file);
2487 dev_err(&card->dev,
2488 "Can't allocate memory for internal usage.\n");
2489 ret = -ENOMEM;
2490 goto err;
2491 }
2492
2493 df->card = card;
2494 df->file = file;
2495
2496 list_add(&df->link, &mmc_test_file_test);
2497
2498err:
2499 mutex_unlock(&mmc_test_lock);
2500
2501 return ret;
2502}
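
With this in place the control file moves from sysfs to debugfs: once the driver binds, a "test" file appears under the card's debugfs directory. Writing a test number runs that test (writing 0 runs them all, since mmc_test_run() only filters on a non-zero testcase), and reading the file back returns the results saved by mmc_test_save_transfer_result(). For example (path hypothetical, it depends on host and card):

        echo 6 > /sys/kernel/debug/mmc0/mmc0:0001/test
        cat /sys/kernel/debug/mmc0/mmc0:0001/test
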
1977 2503
1978static int mmc_test_probe(struct mmc_card *card) 2504static int mmc_test_probe(struct mmc_card *card)
1979{ 2505{
1980 int ret; 2506 int ret;
1981 2507
1982 if ((card->type != MMC_TYPE_MMC) && (card->type != MMC_TYPE_SD)) 2508 if (!mmc_card_mmc(card) && !mmc_card_sd(card))
1983 return -ENODEV; 2509 return -ENODEV;
1984 2510
1985 ret = device_create_file(&card->dev, &dev_attr_test); 2511 ret = mmc_test_register_file_test(card);
1986 if (ret) 2512 if (ret)
1987 return ret; 2513 return ret;
1988 2514
@@ -1993,7 +2519,8 @@ static int mmc_test_probe(struct mmc_card *card)
1993 2519
1994static void mmc_test_remove(struct mmc_card *card) 2520static void mmc_test_remove(struct mmc_card *card)
1995{ 2521{
1996 device_remove_file(&card->dev, &dev_attr_test); 2522 mmc_test_free_result(card);
2523 mmc_test_free_file_test(card);
1997} 2524}
1998 2525
1999static struct mmc_driver mmc_driver = { 2526static struct mmc_driver mmc_driver = {
@@ -2011,6 +2538,10 @@ static int __init mmc_test_init(void)
2011 2538
2012static void __exit mmc_test_exit(void) 2539static void __exit mmc_test_exit(void)
2013{ 2540{
2541 /* Clear stale data if a card is still plugged in */
2542 mmc_test_free_result(NULL);
2543 mmc_test_free_file_test(NULL);
2544
2014 mmc_unregister_driver(&mmc_driver); 2545 mmc_unregister_driver(&mmc_driver);
2015} 2546}
2016 2547
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index e876678176be..6413afa318d2 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -55,8 +55,7 @@ static int mmc_queue_thread(void *d)
55 55
56 spin_lock_irq(q->queue_lock); 56 spin_lock_irq(q->queue_lock);
57 set_current_state(TASK_INTERRUPTIBLE); 57 set_current_state(TASK_INTERRUPTIBLE);
58 if (!blk_queue_plugged(q)) 58 req = blk_fetch_request(q);
59 req = blk_fetch_request(q);
60 mq->req = req; 59 mq->req = req;
61 spin_unlock_irq(q->queue_lock); 60 spin_unlock_irq(q->queue_lock);
62 61
@@ -107,10 +106,12 @@ static void mmc_request(struct request_queue *q)
107 * @mq: mmc queue 106 * @mq: mmc queue
108 * @card: mmc card to attach this queue 107 * @card: mmc card to attach this queue
109 * @lock: queue lock 108 * @lock: queue lock
109 * @subname: partition subname
110 * 110 *
111 * Initialise a MMC card request queue. 111 * Initialise a MMC card request queue.
112 */ 112 */
113int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock) 113int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
114 spinlock_t *lock, const char *subname)
114{ 115{
115 struct mmc_host *host = card->host; 116 struct mmc_host *host = card->host;
116 u64 limit = BLK_BOUNCE_HIGH; 117 u64 limit = BLK_BOUNCE_HIGH;
@@ -128,26 +129,20 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
128 mq->req = NULL; 129 mq->req = NULL;
129 130
130 blk_queue_prep_rq(mq->queue, mmc_prep_request); 131 blk_queue_prep_rq(mq->queue, mmc_prep_request);
131 blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN);
132 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); 132 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
133 if (mmc_can_erase(card)) { 133 if (mmc_can_erase(card)) {
134 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue); 134 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
135 mq->queue->limits.max_discard_sectors = UINT_MAX; 135 mq->queue->limits.max_discard_sectors = UINT_MAX;
136 if (card->erased_byte == 0) 136 if (card->erased_byte == 0)
137 mq->queue->limits.discard_zeroes_data = 1; 137 mq->queue->limits.discard_zeroes_data = 1;
138 if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) { 138 mq->queue->limits.discard_granularity = card->pref_erase << 9;
139 mq->queue->limits.discard_granularity =
140 card->erase_size << 9;
141 mq->queue->limits.discard_alignment =
142 card->erase_size << 9;
143 }
144 if (mmc_can_secure_erase_trim(card)) 139 if (mmc_can_secure_erase_trim(card))
145 queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, 140 queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
146 mq->queue); 141 mq->queue);
147 } 142 }
148 143
149#ifdef CONFIG_MMC_BLOCK_BOUNCE 144#ifdef CONFIG_MMC_BLOCK_BOUNCE
150 if (host->max_hw_segs == 1) { 145 if (host->max_segs == 1) {
151 unsigned int bouncesz; 146 unsigned int bouncesz;
152 147
153 bouncesz = MMC_QUEUE_BOUNCESZ; 148 bouncesz = MMC_QUEUE_BOUNCESZ;
@@ -197,21 +192,23 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
197 blk_queue_bounce_limit(mq->queue, limit); 192 blk_queue_bounce_limit(mq->queue, limit);
198 blk_queue_max_hw_sectors(mq->queue, 193 blk_queue_max_hw_sectors(mq->queue,
199 min(host->max_blk_count, host->max_req_size / 512)); 194 min(host->max_blk_count, host->max_req_size / 512));
200 blk_queue_max_segments(mq->queue, host->max_hw_segs); 195 blk_queue_max_segments(mq->queue, host->max_segs);
201 blk_queue_max_segment_size(mq->queue, host->max_seg_size); 196 blk_queue_max_segment_size(mq->queue, host->max_seg_size);
202 197
203 mq->sg = kmalloc(sizeof(struct scatterlist) * 198 mq->sg = kmalloc(sizeof(struct scatterlist) *
204 host->max_phys_segs, GFP_KERNEL); 199 host->max_segs, GFP_KERNEL);
205 if (!mq->sg) { 200 if (!mq->sg) {
206 ret = -ENOMEM; 201 ret = -ENOMEM;
207 goto cleanup_queue; 202 goto cleanup_queue;
208 } 203 }
209 sg_init_table(mq->sg, host->max_phys_segs); 204 sg_init_table(mq->sg, host->max_segs);
210 } 205 }
211 206
212 init_MUTEX(&mq->thread_sem); 207 sema_init(&mq->thread_sem, 1);
208
209 mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
210 host->index, subname ? subname : "");
213 211
214 mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
215 if (IS_ERR(mq->thread)) { 212 if (IS_ERR(mq->thread)) {
216 ret = PTR_ERR(mq->thread); 213 ret = PTR_ERR(mq->thread);
217 goto free_bounce_sg; 214 goto free_bounce_sg;
@@ -343,18 +340,14 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
343 */ 340 */
344void mmc_queue_bounce_pre(struct mmc_queue *mq) 341void mmc_queue_bounce_pre(struct mmc_queue *mq)
345{ 342{
346 unsigned long flags;
347
348 if (!mq->bounce_buf) 343 if (!mq->bounce_buf)
349 return; 344 return;
350 345
351 if (rq_data_dir(mq->req) != WRITE) 346 if (rq_data_dir(mq->req) != WRITE)
352 return; 347 return;
353 348
354 local_irq_save(flags);
355 sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len, 349 sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
356 mq->bounce_buf, mq->sg[0].length); 350 mq->bounce_buf, mq->sg[0].length);
357 local_irq_restore(flags);
358} 351}
359 352
360/* 353/*
@@ -363,17 +356,13 @@ void mmc_queue_bounce_pre(struct mmc_queue *mq)
363 */ 356 */
364void mmc_queue_bounce_post(struct mmc_queue *mq) 357void mmc_queue_bounce_post(struct mmc_queue *mq)
365{ 358{
366 unsigned long flags;
367
368 if (!mq->bounce_buf) 359 if (!mq->bounce_buf)
369 return; 360 return;
370 361
371 if (rq_data_dir(mq->req) != READ) 362 if (rq_data_dir(mq->req) != READ)
372 return; 363 return;
373 364
374 local_irq_save(flags);
375 sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len, 365 sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
376 mq->bounce_buf, mq->sg[0].length); 366 mq->bounce_buf, mq->sg[0].length);
377 local_irq_restore(flags);
378} 367}
379 368
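
Dropping the local_irq_save()/restore() pairs around the bounce-buffer copies looks safe because, by this point in the kernel, sg_copy_to_buffer()/sg_copy_from_buffer() walk the list with an atomic sg_miter (SG_MITER_ATOMIC) and handle their own atomic kmaps, so the caller no longer has to disable interrupts to protect the old per-CPU kmap_atomic slots. That, at least, appears to be the rationale; the diff itself does not state it.
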
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 64e66e0d4994..6223ef8dc9cd 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -19,7 +19,8 @@ struct mmc_queue {
19 unsigned int bounce_sg_len; 19 unsigned int bounce_sg_len;
20}; 20};
21 21
22extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *); 22extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
23 const char *);
23extern void mmc_cleanup_queue(struct mmc_queue *); 24extern void mmc_cleanup_queue(struct mmc_queue *);
24extern void mmc_queue_suspend(struct mmc_queue *); 25extern void mmc_queue_suspend(struct mmc_queue *);
25extern void mmc_queue_resume(struct mmc_queue *); 26extern void mmc_queue_resume(struct mmc_queue *);
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index a0716967b7c8..c8c9edb3d7cb 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -956,7 +956,7 @@ static int sdio_uart_break_ctl(struct tty_struct *tty, int break_state)
956 return 0; 956 return 0;
957} 957}
958 958
959static int sdio_uart_tiocmget(struct tty_struct *tty, struct file *file) 959static int sdio_uart_tiocmget(struct tty_struct *tty)
960{ 960{
961 struct sdio_uart_port *port = tty->driver_data; 961 struct sdio_uart_port *port = tty->driver_data;
962 int result; 962 int result;
@@ -970,7 +970,7 @@ static int sdio_uart_tiocmget(struct tty_struct *tty, struct file *file)
970 return result; 970 return result;
971} 971}
972 972
973static int sdio_uart_tiocmset(struct tty_struct *tty, struct file *file, 973static int sdio_uart_tiocmset(struct tty_struct *tty,
974 unsigned int set, unsigned int clear) 974 unsigned int set, unsigned int clear)
975{ 975{
976 struct sdio_uart_port *port = tty->driver_data; 976 struct sdio_uart_port *port = tty->driver_data;