Diffstat (limited to 'drivers/mmc/card/block.c')
 -rw-r--r--  drivers/mmc/card/block.c | 712
 1 file changed, 646 insertions, 66 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 61d233a7c118..71da5641e258 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -31,7 +31,11 @@
 #include <linux/mutex.h>
 #include <linux/scatterlist.h>
 #include <linux/string_helpers.h>
+#include <linux/delay.h>
+#include <linux/capability.h>
+#include <linux/compat.h>
 
+#include <linux/mmc/ioctl.h>
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
@@ -48,6 +52,13 @@ MODULE_ALIAS("mmc:block");
 #endif
 #define MODULE_PARAM_PREFIX "mmcblk."
 
+#define INAND_CMD38_ARG_EXT_CSD  113
+#define INAND_CMD38_ARG_ERASE    0x00
+#define INAND_CMD38_ARG_TRIM     0x01
+#define INAND_CMD38_ARG_SECERASE 0x80
+#define INAND_CMD38_ARG_SECTRIM1 0x81
+#define INAND_CMD38_ARG_SECTRIM2 0x88
+
 static DEFINE_MUTEX(block_mutex);
 
 /*
@@ -64,6 +75,7 @@ static int max_devices;
 
 /* 256 minors, so at most 256 separate devices */
 static DECLARE_BITMAP(dev_use, 256);
+static DECLARE_BITMAP(name_use, 256);
 
 /*
  * There is one mmc_blk_data per slot.
@@ -72,9 +84,24 @@ struct mmc_blk_data {
 	spinlock_t	lock;
 	struct gendisk	*disk;
 	struct mmc_queue queue;
+	struct list_head part;
+
+	unsigned int	flags;
+#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
+#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
 
 	unsigned int	usage;
 	unsigned int	read_only;
+	unsigned int	part_type;
+	unsigned int	name_idx;
+
+	/*
+	 * Only set in main mmc_blk_data associated
+	 * with mmc_card with mmc_set_drvdata, and keeps
+	 * track of the current selected device partition.
+	 */
+	unsigned int	part_curr;
+	struct device_attribute force_ro;
 };
 
 static DEFINE_MUTEX(open_lock);
@@ -97,17 +124,22 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 	return md;
 }
 
+static inline int mmc_get_devidx(struct gendisk *disk)
+{
+	int devmaj = MAJOR(disk_devt(disk));
+	int devidx = MINOR(disk_devt(disk)) / perdev_minors;
+
+	if (!devmaj)
+		devidx = disk->first_minor / perdev_minors;
+	return devidx;
+}
+
 static void mmc_blk_put(struct mmc_blk_data *md)
 {
 	mutex_lock(&open_lock);
 	md->usage--;
 	if (md->usage == 0) {
-		int devmaj = MAJOR(disk_devt(md->disk));
-		int devidx = MINOR(disk_devt(md->disk)) / perdev_minors;
-
-		if (!devmaj)
-			devidx = md->disk->first_minor / perdev_minors;
-
+		int devidx = mmc_get_devidx(md->disk);
 		blk_cleanup_queue(md->queue.queue);
 
 		__clear_bit(devidx, dev_use);
@@ -118,6 +150,38 @@ static void mmc_blk_put(struct mmc_blk_data *md)
 	mutex_unlock(&open_lock);
 }
 
+static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	int ret;
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+
+	ret = snprintf(buf, PAGE_SIZE, "%d",
+		       get_disk_ro(dev_to_disk(dev)) ^
+		       md->read_only);
+	mmc_blk_put(md);
+	return ret;
+}
+
+static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	int ret;
+	char *end;
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	unsigned long set = simple_strtoul(buf, &end, 0);
+	if (end == buf) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	set_disk_ro(dev_to_disk(dev), set || md->read_only);
+	ret = count;
+out:
+	mmc_blk_put(md);
+	return ret;
+}
+
 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
 {
 	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
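The force_ro attribute added in the hunk above surfaces per disk in sysfs. As a hedged illustration (not part of this patch), userspace could toggle it like so; the /sys/block layout is standard, but the helper and disk name are illustrative:

#include <stdio.h>

/* Hypothetical helper: set or clear the force_ro attribute for a disk,
 * e.g. to make a boot partition writable.  Returns 0 on success. */
static int mmcblk_set_force_ro(const char *disk, int on)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/%s/force_ro", disk);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d", on);
	return fclose(f);
}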
@@ -158,35 +222,255 @@ mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return 0;
 }
 
+struct mmc_blk_ioc_data {
+	struct mmc_ioc_cmd ic;
+	unsigned char *buf;
+	u64 buf_bytes;
+};
+
+static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
+	struct mmc_ioc_cmd __user *user)
+{
+	struct mmc_blk_ioc_data *idata;
+	int err;
+
+	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
+	if (!idata) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
+		err = -EFAULT;
+		goto idata_err;
+	}
+
+	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
+	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
+		err = -EOVERFLOW;
+		goto idata_err;
+	}
+
+	idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
+	if (!idata->buf) {
+		err = -ENOMEM;
+		goto idata_err;
+	}
+
+	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
+			   idata->ic.data_ptr, idata->buf_bytes)) {
+		err = -EFAULT;
+		goto copy_err;
+	}
+
+	return idata;
+
+copy_err:
+	kfree(idata->buf);
+idata_err:
+	kfree(idata);
+out:
+	return ERR_PTR(err);
+}
+
+static int mmc_blk_ioctl_cmd(struct block_device *bdev,
+	struct mmc_ioc_cmd __user *ic_ptr)
+{
+	struct mmc_blk_ioc_data *idata;
+	struct mmc_blk_data *md;
+	struct mmc_card *card;
+	struct mmc_command cmd = {0};
+	struct mmc_data data = {0};
+	struct mmc_request mrq = {0};
+	struct scatterlist sg;
+	int err;
+
+	/*
+	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+	 * whole block device, not on a partition.  This prevents overspray
+	 * between sibling partitions.
+	 */
+	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+		return -EPERM;
+
+	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
+	if (IS_ERR(idata))
+		return PTR_ERR(idata);
+
+	cmd.opcode = idata->ic.opcode;
+	cmd.arg = idata->ic.arg;
+	cmd.flags = idata->ic.flags;
+
+	data.sg = &sg;
+	data.sg_len = 1;
+	data.blksz = idata->ic.blksz;
+	data.blocks = idata->ic.blocks;
+
+	sg_init_one(data.sg, idata->buf, idata->buf_bytes);
+
+	if (idata->ic.write_flag)
+		data.flags = MMC_DATA_WRITE;
+	else
+		data.flags = MMC_DATA_READ;
+
+	mrq.cmd = &cmd;
+	mrq.data = &data;
+
+	md = mmc_blk_get(bdev->bd_disk);
+	if (!md) {
+		err = -EINVAL;
+		goto cmd_done;
+	}
+
+	card = md->queue.card;
+	if (IS_ERR(card)) {
+		err = PTR_ERR(card);
+		goto cmd_done;
+	}
+
+	mmc_claim_host(card->host);
+
+	if (idata->ic.is_acmd) {
+		err = mmc_app_cmd(card->host, card);
+		if (err)
+			goto cmd_rel_host;
+	}
+
+	/* data.flags must already be set before doing this. */
+	mmc_set_data_timeout(&data, card);
+	/* Allow overriding the timeout_ns for empirical tuning. */
+	if (idata->ic.data_timeout_ns)
+		data.timeout_ns = idata->ic.data_timeout_ns;
+
+	if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+		/*
+		 * Pretend this is a data transfer and rely on the host driver
+		 * to compute timeout.  When all host drivers support
+		 * cmd.cmd_timeout for R1B, this can be changed to:
+		 *
+		 * mrq.data = NULL;
+		 * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
+		 */
+		data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
+	}
+
+	mmc_wait_for_req(card->host, &mrq);
+
+	if (cmd.error) {
+		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
+			__func__, cmd.error);
+		err = cmd.error;
+		goto cmd_rel_host;
+	}
+	if (data.error) {
+		dev_err(mmc_dev(card->host), "%s: data error %d\n",
+			__func__, data.error);
+		err = data.error;
+		goto cmd_rel_host;
+	}
+
+	/*
+	 * According to the SD specs, some commands require a delay after
+	 * issuing the command.
+	 */
+	if (idata->ic.postsleep_min_us)
+		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
+
+	if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
+		err = -EFAULT;
+		goto cmd_rel_host;
+	}
+
+	if (!idata->ic.write_flag) {
+		if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
+				 idata->buf, idata->buf_bytes)) {
+			err = -EFAULT;
+			goto cmd_rel_host;
+		}
+	}
+
+cmd_rel_host:
+	mmc_release_host(card->host);
+
+cmd_done:
+	mmc_blk_put(md);
+	kfree(idata->buf);
+	kfree(idata);
+	return err;
+}
+
+static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
+	unsigned int cmd, unsigned long arg)
+{
+	int ret = -EINVAL;
+	if (cmd == MMC_IOC_CMD)
+		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
+	unsigned int cmd, unsigned long arg)
+{
+	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
 static const struct block_device_operations mmc_bdops = {
 	.open			= mmc_blk_open,
 	.release		= mmc_blk_release,
 	.getgeo			= mmc_blk_getgeo,
 	.owner			= THIS_MODULE,
+	.ioctl			= mmc_blk_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= mmc_blk_compat_ioctl,
+#endif
 };
 
 struct mmc_blk_request {
 	struct mmc_request	mrq;
+	struct mmc_command	sbc;
 	struct mmc_command	cmd;
 	struct mmc_command	stop;
 	struct mmc_data		data;
 };
 
+static inline int mmc_blk_part_switch(struct mmc_card *card,
+				      struct mmc_blk_data *md)
+{
+	int ret;
+	struct mmc_blk_data *main_md = mmc_get_drvdata(card);
+	if (main_md->part_curr == md->part_type)
+		return 0;
+
+	if (mmc_card_mmc(card)) {
+		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+		card->ext_csd.part_config |= md->part_type;
+
+		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 EXT_CSD_PART_CONFIG, card->ext_csd.part_config,
+				 card->ext_csd.part_time);
+		if (ret)
+			return ret;
+	}
+
+	main_md->part_curr = md->part_type;
+	return 0;
+}
+
 static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 {
 	int err;
 	u32 result;
 	__be32 *blocks;
 
-	struct mmc_request mrq;
-	struct mmc_command cmd;
-	struct mmc_data data;
+	struct mmc_request mrq = {0};
+	struct mmc_command cmd = {0};
+	struct mmc_data data = {0};
 	unsigned int timeout_us;
 
 	struct scatterlist sg;
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
-
 	cmd.opcode = MMC_APP_CMD;
 	cmd.arg = card->rca << 16;
 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
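The MMC_IOC_CMD path added in the hunk above is what the new <linux/mmc/ioctl.h> include exists for. As a hedged sketch (not part of this patch) of how userspace might drive it, the following reads the 512-byte EXT_CSD register via CMD8. It assumes the UAPI header introduced alongside this patch is installed; the response-flag values are local copies of the kernel's MMC_RSP_R1/MMC_CMD_ADTC encoding, which the driver passes through verbatim in cmd.flags, and the device node name is illustrative. Note the caller needs CAP_SYS_RAWIO and must open the whole device, not a partition:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/major.h>
#include <linux/mmc/ioctl.h>

#define MMC_SEND_EXT_CSD	8	/* CMD8 on MMC */
/* Local copies of the kernel's response/command type bits
 * (include/linux/mmc/core.h); cmd.flags is passed through as-is. */
#define MMC_RSP_PRESENT		(1 << 0)
#define MMC_RSP_CRC		(1 << 2)
#define MMC_RSP_OPCODE		(1 << 4)
#define MMC_CMD_ADTC		(1 << 5)
#define MMC_RSP_R1		(MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)

int main(void)
{
	uint8_t ext_csd[512];
	struct mmc_ioc_cmd ic;
	/* Whole device node, not a partition; requires CAP_SYS_RAWIO. */
	int fd = open("/dev/mmcblk0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&ic, 0, sizeof(ic));
	ic.opcode = MMC_SEND_EXT_CSD;
	ic.arg = 0;
	ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	ic.write_flag = 0;		/* read transfer */
	ic.blksz = 512;
	ic.blocks = 1;
	mmc_ioc_cmd_set_data(ic, ext_csd);	/* fills ic.data_ptr */

	if (ioctl(fd, MMC_IOC_CMD, &ic) < 0) {
		perror("MMC_IOC_CMD");
		close(fd);
		return 1;
	}
	printf("EXT_CSD revision: %u\n", ext_csd[192]);
	close(fd);
	return 0;
}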
@@ -203,8 +487,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 	cmd.arg = 0;
 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 
-	memset(&data, 0, sizeof(struct mmc_data));
-
 	data.timeout_ns = card->csd.tacc_ns * 100;
 	data.timeout_clks = card->csd.tacc_clks * 100;
 
@@ -223,8 +505,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 	data.sg = &sg;
 	data.sg_len = 1;
 
-	memset(&mrq, 0, sizeof(struct mmc_request));
-
 	mrq.cmd = &cmd;
 	mrq.data = &data;
 
@@ -247,10 +527,9 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 
 static u32 get_card_status(struct mmc_card *card, struct request *req)
 {
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 	int err;
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
 	cmd.opcode = MMC_SEND_STATUS;
 	if (!mmc_host_is_spi(card->host))
 		cmd.arg = card->rca << 16;
@@ -269,8 +548,6 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 	unsigned int from, nr, arg;
 	int err = 0;
 
-	mmc_claim_host(card->host);
-
 	if (!mmc_can_erase(card)) {
 		err = -EOPNOTSUPP;
 		goto out;
@@ -284,14 +561,22 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 	else
 		arg = MMC_ERASE_ARG;
 
+	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 INAND_CMD38_ARG_EXT_CSD,
+				 arg == MMC_TRIM_ARG ?
+				 INAND_CMD38_ARG_TRIM :
+				 INAND_CMD38_ARG_ERASE,
+				 0);
+		if (err)
+			goto out;
+	}
 	err = mmc_erase(card, from, nr, arg);
 out:
 	spin_lock_irq(&md->lock);
 	__blk_end_request(req, err, blk_rq_bytes(req));
 	spin_unlock_irq(&md->lock);
 
-	mmc_release_host(card->host);
-
 	return err ? 0 : 1;
 }
 
@@ -303,8 +588,6 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 	unsigned int from, nr, arg;
 	int err = 0;
 
-	mmc_claim_host(card->host);
-
 	if (!mmc_can_secure_erase_trim(card)) {
 		err = -EOPNOTSUPP;
 		goto out;
@@ -318,19 +601,74 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 	else
 		arg = MMC_SECURE_ERASE_ARG;
 
+	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 INAND_CMD38_ARG_EXT_CSD,
+				 arg == MMC_SECURE_TRIM1_ARG ?
+				 INAND_CMD38_ARG_SECTRIM1 :
+				 INAND_CMD38_ARG_SECERASE,
+				 0);
+		if (err)
+			goto out;
+	}
 	err = mmc_erase(card, from, nr, arg);
-	if (!err && arg == MMC_SECURE_TRIM1_ARG)
+	if (!err && arg == MMC_SECURE_TRIM1_ARG) {
+		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+					 INAND_CMD38_ARG_EXT_CSD,
+					 INAND_CMD38_ARG_SECTRIM2,
+					 0);
+			if (err)
+				goto out;
+		}
 		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
+	}
 out:
 	spin_lock_irq(&md->lock);
 	__blk_end_request(req, err, blk_rq_bytes(req));
 	spin_unlock_irq(&md->lock);
 
-	mmc_release_host(card->host);
-
 	return err ? 0 : 1;
 }
 
+static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_blk_data *md = mq->data;
+
+	/*
+	 * No-op, only service this because we need REQ_FUA for reliable
+	 * writes.
+	 */
+	spin_lock_irq(&md->lock);
+	__blk_end_request_all(req, 0);
+	spin_unlock_irq(&md->lock);
+
+	return 1;
+}
+
+/*
+ * Reformat current write as a reliable write, supporting
+ * both legacy and the enhanced reliable write MMC cards.
+ * In each transfer we'll handle only as much as a single
+ * reliable write can handle, thus finish the request in
+ * partial completions.
+ */
+static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
+				    struct mmc_card *card,
+				    struct request *req)
+{
+	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
+		/* Legacy mode imposes restrictions on transfers. */
+		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
+			brq->data.blocks = 1;
+
+		if (brq->data.blocks > card->ext_csd.rel_sectors)
+			brq->data.blocks = card->ext_csd.rel_sectors;
+		else if (brq->data.blocks < card->ext_csd.rel_sectors)
+			brq->data.blocks = 1;
+	}
+}
+
 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->data;
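To make the clamping rules in mmc_apply_rel_rw above concrete, here is a hedged standalone mirror of the legacy-mode logic (illustrative, not part of the patch). With rel_sectors == 8, an aligned 13-block write is cut down to 8 blocks, the remaining 5 then complete one block at a time in later partial transfers, and any write starting at an unaligned address degrades to a single block:

/* Hypothetical mirror of the legacy reliable-write clamping above. */
static unsigned int rel_wr_blocks(unsigned int addr, unsigned int blocks,
				  unsigned int rel_sectors)
{
	if (addr % rel_sectors)			/* unaligned start address */
		blocks = 1;

	if (blocks > rel_sectors)		/* clamp to one reliable unit */
		blocks = rel_sectors;
	else if (blocks < rel_sectors)		/* partial unit: one block */
		blocks = 1;
	return blocks;
}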
@@ -338,10 +676,17 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_blk_request brq;
 	int ret = 1, disable_multi = 0;
 
-	mmc_claim_host(card->host);
+	/*
+	 * Reliable writes are used to implement Forced Unit Access and
+	 * REQ_META accesses, and are supported only on MMCs.
+	 */
+	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
+			  (req->cmd_flags & REQ_META)) &&
+			 (rq_data_dir(req) == WRITE) &&
+			 (md->flags & MMC_BLK_REL_WR);
 
 	do {
-		struct mmc_command cmd;
+		struct mmc_command cmd = {0};
 		u32 readcmd, writecmd, status = 0;
 
 		memset(&brq, 0, sizeof(struct mmc_blk_request));
@@ -374,12 +719,12 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		if (disable_multi && brq.data.blocks > 1)
 			brq.data.blocks = 1;
 
-		if (brq.data.blocks > 1) {
+		if (brq.data.blocks > 1 || do_rel_wr) {
 			/* SPI multiblock writes terminate using a special
 			 * token, not a STOP_TRANSMISSION request.
 			 */
-			if (!mmc_host_is_spi(card->host)
-					|| rq_data_dir(req) == READ)
+			if (!mmc_host_is_spi(card->host) ||
+			    rq_data_dir(req) == READ)
 				brq.mrq.stop = &brq.stop;
 			readcmd = MMC_READ_MULTIPLE_BLOCK;
 			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
@@ -396,6 +741,38 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 			brq.data.flags |= MMC_DATA_WRITE;
 		}
 
+		if (do_rel_wr)
+			mmc_apply_rel_rw(&brq, card, req);
+
+		/*
+		 * Pre-defined multi-block transfers are preferable to
+		 * open-ended ones (and necessary for reliable writes).
+		 * However, it is not sufficient to just send CMD23,
+		 * and avoid the final CMD12, as on an error condition
+		 * CMD12 (stop) needs to be sent anyway. This, coupled
+		 * with Auto-CMD23 enhancements provided by some
+		 * hosts, means that the complexity of dealing
+		 * with this is best left to the host. If CMD23 is
+		 * supported by card and host, we'll fill sbc in and let
+		 * the host deal with handling it correctly. This means
+		 * that for hosts that don't expose MMC_CAP_CMD23, no
+		 * change of behavior will be observed.
+		 *
+		 * N.B: Some MMC cards experience perf degradation.
+		 * We'll avoid using CMD23-bounded multiblock writes for
+		 * these, while retaining features like reliable writes.
+		 */
+
+		if ((md->flags & MMC_BLK_CMD23) &&
+		    mmc_op_multi(brq.cmd.opcode) &&
+		    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
+			brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
+			brq.sbc.arg = brq.data.blocks |
+				(do_rel_wr ? (1 << 31) : 0);
+			brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+			brq.mrq.sbc = &brq.sbc;
+		}
+
 		mmc_set_data_timeout(&brq.data, card);
 
 		brq.data.sg = mq->sg;
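For reference, the SET_BLOCK_COUNT argument built above packs the block count into the low bits and, per this patch, uses bit 31 as the reliable-write request. A hypothetical helper (not part of the patch) making the encoding explicit, with the eMMC spec's 16-bit block-count field masked:

#include <stdint.h>

/* Hypothetical mirror of the CMD23 argument layout used above:
 * bits [15:0] = block count, bit 31 = reliable write request. */
static inline uint32_t mmc_cmd23_arg(unsigned int blocks, int rel_wr)
{
	return (blocks & 0xffff) | (rel_wr ? (1u << 31) : 0u);
}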
@@ -431,7 +808,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		 * until later as we need to wait for the card to leave
 		 * programming mode even when things go wrong.
 		 */
-		if (brq.cmd.error || brq.data.error || brq.stop.error) {
+		if (brq.sbc.error || brq.cmd.error ||
+		    brq.data.error || brq.stop.error) {
 			if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
 				/* Redo read one sector at a time */
 				printk(KERN_WARNING "%s: retrying using single "
@@ -442,6 +820,13 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 			status = get_card_status(card, req);
 		}
 
+		if (brq.sbc.error) {
+			printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT "
+			       "command, response %#x, card status %#x\n",
+			       req->rq_disk->disk_name, brq.sbc.error,
+			       brq.sbc.resp[0], status);
+		}
+
 		if (brq.cmd.error) {
 			printk(KERN_ERR "%s: error %d sending read/write "
 			       "command, response %#x, card status %#x\n",
@@ -520,8 +905,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		spin_unlock_irq(&md->lock);
 	} while (ret);
 
-	mmc_release_host(card->host);
-
 	return 1;
 
  cmd_err:
@@ -548,8 +931,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		spin_unlock_irq(&md->lock);
 	}
 
-	mmc_release_host(card->host);
-
 	spin_lock_irq(&md->lock);
 	while (ret)
 		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
560 941
561static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) 942static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
562{ 943{
944 int ret;
945 struct mmc_blk_data *md = mq->data;
946 struct mmc_card *card = md->queue.card;
947
948 mmc_claim_host(card->host);
949 ret = mmc_blk_part_switch(card, md);
950 if (ret) {
951 ret = 0;
952 goto out;
953 }
954
563 if (req->cmd_flags & REQ_DISCARD) { 955 if (req->cmd_flags & REQ_DISCARD) {
564 if (req->cmd_flags & REQ_SECURE) 956 if (req->cmd_flags & REQ_SECURE)
565 return mmc_blk_issue_secdiscard_rq(mq, req); 957 ret = mmc_blk_issue_secdiscard_rq(mq, req);
566 else 958 else
567 return mmc_blk_issue_discard_rq(mq, req); 959 ret = mmc_blk_issue_discard_rq(mq, req);
960 } else if (req->cmd_flags & REQ_FLUSH) {
961 ret = mmc_blk_issue_flush(mq, req);
568 } else { 962 } else {
569 return mmc_blk_issue_rw_rq(mq, req); 963 ret = mmc_blk_issue_rw_rq(mq, req);
570 } 964 }
965
966out:
967 mmc_release_host(card->host);
968 return ret;
571} 969}
572 970
573static inline int mmc_blk_readonly(struct mmc_card *card) 971static inline int mmc_blk_readonly(struct mmc_card *card)
@@ -576,7 +974,11 @@ static inline int mmc_blk_readonly(struct mmc_card *card)
 	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
 }
 
-static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
+static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
+					      struct device *parent,
+					      sector_t size,
+					      bool default_ro,
+					      const char *subname)
 {
 	struct mmc_blk_data *md;
 	int devidx, ret;
@@ -592,6 +994,19 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 		goto out;
 	}
 
+	/*
+	 * !subname implies we are creating main mmc_blk_data that will be
+	 * associated with mmc_card with mmc_set_drvdata. Due to device
+	 * partitions, devidx will not coincide with a per-physical card
+	 * index anymore so we keep track of a name index.
+	 */
+	if (!subname) {
+		md->name_idx = find_first_zero_bit(name_use, max_devices);
+		__set_bit(md->name_idx, name_use);
+	}
+	else
+		md->name_idx = ((struct mmc_blk_data *)
+				dev_to_disk(parent)->private_data)->name_idx;
+
 	/*
 	 * Set the read-only status based on the supported commands
@@ -606,6 +1021,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 	}
 
 	spin_lock_init(&md->lock);
+	INIT_LIST_HEAD(&md->part);
 	md->usage = 1;
 
 	ret = mmc_init_queue(&md->queue, card, &md->lock);
@@ -620,8 +1036,8 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 	md->disk->fops = &mmc_bdops;
 	md->disk->private_data = md;
 	md->disk->queue = md->queue.queue;
-	md->disk->driverfs_dev = &card->dev;
-	set_disk_ro(md->disk, md->read_only);
+	md->disk->driverfs_dev = parent;
+	set_disk_ro(md->disk, md->read_only || default_ro);
 
 	/*
 	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
@@ -636,32 +1052,107 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 	 */
 
 	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
-		 "mmcblk%d", devidx);
+		 "mmcblk%d%s", md->name_idx, subname ? subname : "");
 
 	blk_queue_logical_block_size(md->queue.queue, 512);
+	set_capacity(md->disk, size);
+
+	if (mmc_host_cmd23(card->host)) {
+		if (mmc_card_mmc(card) ||
+		    (mmc_card_sd(card) &&
+		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
+			md->flags |= MMC_BLK_CMD23;
+	}
+
+	if (mmc_card_mmc(card) &&
+	    md->flags & MMC_BLK_CMD23 &&
+	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
+	     card->ext_csd.rel_sectors)) {
+		md->flags |= MMC_BLK_REL_WR;
+		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
+	}
+
+	return md;
+
+ err_putdisk:
+	put_disk(md->disk);
+ err_kfree:
+	kfree(md);
+ out:
+	return ERR_PTR(ret);
+}
+
+static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
+{
+	sector_t size;
+	struct mmc_blk_data *md;
 
 	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
 		/*
 		 * The EXT_CSD sector count is in number of 512 byte
 		 * sectors.
 		 */
-		set_capacity(md->disk, card->ext_csd.sectors);
+		size = card->ext_csd.sectors;
 	} else {
 		/*
 		 * The CSD capacity field is in units of read_blkbits.
 		 * set_capacity takes units of 512 bytes.
 		 */
-		set_capacity(md->disk,
-			card->csd.capacity << (card->csd.read_blkbits - 9));
+		size = card->csd.capacity << (card->csd.read_blkbits - 9);
 	}
+
+	md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
 	return md;
+}
 
-	err_putdisk:
-	put_disk(md->disk);
-	err_kfree:
-	kfree(md);
-	out:
-	return ERR_PTR(ret);
+static int mmc_blk_alloc_part(struct mmc_card *card,
+			      struct mmc_blk_data *md,
+			      unsigned int part_type,
+			      sector_t size,
+			      bool default_ro,
+			      const char *subname)
+{
+	char cap_str[10];
+	struct mmc_blk_data *part_md;
+
+	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
+				    subname);
+	if (IS_ERR(part_md))
+		return PTR_ERR(part_md);
+	part_md->part_type = part_type;
+	list_add(&part_md->part, &md->part);
+
+	string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
+			cap_str, sizeof(cap_str));
+	printk(KERN_INFO "%s: %s %s partition %u %s\n",
+	       part_md->disk->disk_name, mmc_card_id(card),
+	       mmc_card_name(card), part_md->part_type, cap_str);
+	return 0;
+}
+
+static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
+{
+	int ret = 0;
+
+	if (!mmc_card_mmc(card))
+		return 0;
+
+	if (card->ext_csd.boot_size) {
+		ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0,
+					 card->ext_csd.boot_size >> 9,
+					 true,
+					 "boot0");
+		if (ret)
+			return ret;
+		ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1,
+					 card->ext_csd.boot_size >> 9,
+					 true,
+					 "boot1");
+		if (ret)
+			return ret;
+	}
+
+	return ret;
 }
 
 static int
@@ -682,9 +1173,81 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
 	return 0;
 }
 
+static void mmc_blk_remove_req(struct mmc_blk_data *md)
+{
+	if (md) {
+		if (md->disk->flags & GENHD_FL_UP) {
+			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+
+			/* Stop new requests from getting into the queue */
+			del_gendisk(md->disk);
+		}
+
+		/* Then flush out any already in there */
+		mmc_cleanup_queue(&md->queue);
+		mmc_blk_put(md);
+	}
+}
+
+static void mmc_blk_remove_parts(struct mmc_card *card,
+				 struct mmc_blk_data *md)
+{
+	struct list_head *pos, *q;
+	struct mmc_blk_data *part_md;
+
+	__clear_bit(md->name_idx, name_use);
+	list_for_each_safe(pos, q, &md->part) {
+		part_md = list_entry(pos, struct mmc_blk_data, part);
+		list_del(pos);
+		mmc_blk_remove_req(part_md);
+	}
+}
+
+static int mmc_add_disk(struct mmc_blk_data *md)
+{
+	int ret;
+
+	add_disk(md->disk);
+	md->force_ro.show = force_ro_show;
+	md->force_ro.store = force_ro_store;
+	sysfs_attr_init(&md->force_ro.attr);
+	md->force_ro.attr.name = "force_ro";
+	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
+	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
+	if (ret)
+		del_gendisk(md->disk);
+
+	return ret;
+}
+
+static const struct mmc_fixup blk_fixups[] =
+{
+	MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+	MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+	MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+	MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+	MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+
+	/*
+	 * Some MMC cards experience performance degradation with CMD23
+	 * instead of CMD12-bounded multiblock transfers. For now we'll
+	 * black list what's bad...
+	 * - Certain Toshiba cards.
+	 *
+	 * N.B. This doesn't affect SD cards.
+	 */
+	MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
+	MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
+	MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
+	END_FIXUP
+};
+
 static int mmc_blk_probe(struct mmc_card *card)
 {
-	struct mmc_blk_data *md;
+	struct mmc_blk_data *md, *part_md;
 	int err;
 	char cap_str[10];
 
@@ -708,14 +1271,24 @@ static int mmc_blk_probe(struct mmc_card *card)
 	       md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
 	       cap_str, md->read_only ? "(ro)" : "");
 
+	if (mmc_blk_alloc_parts(card, md))
+		goto out;
+
 	mmc_set_drvdata(card, md);
-	add_disk(md->disk);
+	mmc_fixup_device(card, blk_fixups);
+
+	if (mmc_add_disk(md))
+		goto out;
+
+	list_for_each_entry(part_md, &md->part, part) {
+		if (mmc_add_disk(part_md))
+			goto out;
+	}
 	return 0;
 
  out:
-	mmc_cleanup_queue(&md->queue);
-	mmc_blk_put(md);
-
+	mmc_blk_remove_parts(card, md);
+	mmc_blk_remove_req(md);
 	return err;
 }
 
@@ -723,36 +1296,43 @@ static void mmc_blk_remove(struct mmc_card *card)
 {
 	struct mmc_blk_data *md = mmc_get_drvdata(card);
 
-	if (md) {
-		/* Stop new requests from getting into the queue */
-		del_gendisk(md->disk);
-
-		/* Then flush out any already in there */
-		mmc_cleanup_queue(&md->queue);
-
-		mmc_blk_put(md);
-	}
+	mmc_blk_remove_parts(card, md);
+	mmc_blk_remove_req(md);
 	mmc_set_drvdata(card, NULL);
 }
 
 #ifdef CONFIG_PM
 static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
 {
+	struct mmc_blk_data *part_md;
 	struct mmc_blk_data *md = mmc_get_drvdata(card);
 
 	if (md) {
 		mmc_queue_suspend(&md->queue);
+		list_for_each_entry(part_md, &md->part, part) {
+			mmc_queue_suspend(&part_md->queue);
+		}
 	}
 	return 0;
 }
 
 static int mmc_blk_resume(struct mmc_card *card)
 {
+	struct mmc_blk_data *part_md;
 	struct mmc_blk_data *md = mmc_get_drvdata(card);
 
 	if (md) {
 		mmc_blk_set_blksize(md, card);
+
+		/*
+		 * Resume involves the card going into idle state,
+		 * so current partition is always the main one.
+		 */
+		md->part_curr = md->part_type;
 		mmc_queue_resume(&md->queue);
+		list_for_each_entry(part_md, &md->part, part) {
+			mmc_queue_resume(&part_md->queue);
+		}
 	}
 	return 0;
 }
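With all of the above applied, eMMC boot areas show up as their own disks (mmcblk0boot0-style names), read-only by default until force_ro is cleared, and the driver switches the card's active partition transparently via mmc_blk_part_switch on access. A hedged userspace sketch (not part of this patch; device name illustrative):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[512];
	int fd = open("/dev/mmcblk0boot0", O_RDONLY);

	if (fd < 0)
		return 1;
	/* The read triggers an EXT_CSD PART_CONFIG switch under the hood. */
	if (read(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
		close(fd);
		return 1;
	}
	printf("first byte of boot0: %#x\n", buf[0]);
	close(fd);
	return 0;
}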