author	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-26 12:31:09 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-26 12:31:09 -0500
commit	ed5dc2372dba46e0ecd08791b1a0399d313e5cff (patch)
tree	571319985b59a2963fb7580c24ee2aa1696359e2 /drivers/mmc/card
parent	0512c04a2b5d29a33d96d315e1d14c55f5148aa7 (diff)
parent	0e786102949d7461859c6ce9f39c2c8d28e42db3 (diff)
Merge tag 'mmc-updates-for-3.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc
Pull MMC update from Chris Ball:
 "MMC highlights for 3.9:

  Core:
   - Support for packed commands in eMMC 4.5.  (This requires a host
     capability to be turned on.  It increases write throughput by 20%+,
     but may also increase average write latency; more testing needed.)
   - Add DT bindings for capability flags.
   - Add mmc_of_parse() for shared DT parsing between drivers.

  Drivers:
   - android-goldfish: New MMC driver for the Android Goldfish emulator.
   - mvsdio: Add DT bindings, pinctrl, use slot-gpio for card detection.
   - omap_hsmmc: Fix boot hangs with RPMB partitions.
   - sdhci-bcm2835: New driver for controller used by Raspberry Pi.
   - sdhci-esdhc-imx: Add 8-bit data, auto CMD23 support, use slot-gpio.
   - sh_mmcif: Add support for eMMC DDR, bundled MMCIF IRQs.
   - tmio_mmc: Add DT bindings, support for vccq regulator"

* tag 'mmc-updates-for-3.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc: (92 commits)
  mmc: tegra: assume CONFIG_OF, remove platform data
  mmc: add DT bindings for more MMC capability flags
  mmc: tmio: add support for the VccQ regulator
  mmc: tmio: remove unused and deprecated symbols
  mmc: sh_mobile_sdhi: use managed resource allocations
  mmc: sh_mobile_sdhi: remove unused .pdata field
  mmc: tmio-mmc: parse device-tree bindings
  mmc: tmio-mmc: define device-tree bindings
  mmc: sh_mmcif: use mmc_of_parse() to parse standard MMC DT bindings
  mmc: (cosmetic) remove "extern" from function declarations
  mmc: provide a standard MMC device-tree binding parser centrally
  mmc: detailed definition of CD and WP MMC line polarities in DT
  mmc: sdhi, tmio: only check flags in tmio-mmc driver proper
  mmc: sdhci: Fix parameter of sdhci_do_start_signal_voltage_switch()
  mmc: sdhci: check voltage range only on regulators aware of voltage value
  mmc: bcm2835: set SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK
  mmc: support packed write command for eMMC4.5 devices
  mmc: add packed command feature of eMMC4.5
  mmc: rtsx: remove driving adjustment
  mmc: use regulator_can_change_voltage() instead of regulator_count_voltages
  ...
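The packed-command support called out above is opt-in per host controller. As a hedged illustration only (it is not part of the diff below), a host driver that has validated packed writes on its hardware could advertise the capability from its probe routine roughly as follows. MMC_CAP2_PACKED_WR is assumed to be the opt-in capability flag introduced by the packed-command patches in this pull, and the my_host/my_host_probe names are hypothetical:

	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/mmc/host.h>

	struct my_host {
		void __iomem *base;	/* hypothetical controller state */
	};

	static int my_host_probe(struct platform_device *pdev)
	{
		struct mmc_host *mmc;
		int ret;

		mmc = mmc_alloc_host(sizeof(struct my_host), &pdev->dev);
		if (!mmc)
			return -ENOMEM;

		/*
		 * Opt in: the core only builds packed write lists when the
		 * host advertises this capability (assumed flag name).
		 * A real driver would also fill in mmc->ops, clocks, etc.
		 */
		mmc->caps2 |= MMC_CAP2_PACKED_WR;

		ret = mmc_add_host(mmc);
		if (ret)
			mmc_free_host(mmc);
		return ret;
	}

Whether to turn it on is a per-board decision: the pull message reports a 20%+ write-throughput gain but possibly higher average write latency, so benchmarking on the target eMMC part is advisable.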
Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--  drivers/mmc/card/block.c  491
-rw-r--r--  drivers/mmc/card/queue.c  128
-rw-r--r--  drivers/mmc/card/queue.h   25
3 files changed, 612 insertions(+), 32 deletions(-)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 21056b9ef0a0..5bab73b91c20 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -59,6 +59,12 @@ MODULE_ALIAS("mmc:block");
 #define INAND_CMD38_ARG_SECTRIM2 0x88
 #define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
 
+#define mmc_req_rel_wr(req)	(((req->cmd_flags & REQ_FUA) || \
+				  (req->cmd_flags & REQ_META)) && \
+				  (rq_data_dir(req) == WRITE))
+#define PACKED_CMD_VER	0x01
+#define PACKED_CMD_WR	0x02
+
 static DEFINE_MUTEX(block_mutex);
 
 /*
@@ -89,6 +95,7 @@ struct mmc_blk_data {
 	unsigned int	flags;
 #define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
 #define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
+#define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */
 
 	unsigned int	usage;
 	unsigned int	read_only;
@@ -113,15 +120,10 @@ struct mmc_blk_data {
 
 static DEFINE_MUTEX(open_lock);
 
-enum mmc_blk_status {
-	MMC_BLK_SUCCESS = 0,
-	MMC_BLK_PARTIAL,
-	MMC_BLK_CMD_ERR,
-	MMC_BLK_RETRY,
-	MMC_BLK_ABORT,
-	MMC_BLK_DATA_ERR,
-	MMC_BLK_ECC_ERR,
-	MMC_BLK_NOMEDIUM,
+enum {
+	MMC_PACKED_NR_IDX = -1,
+	MMC_PACKED_NR_ZERO,
+	MMC_PACKED_NR_SINGLE,
 };
 
 module_param(perdev_minors, int, 0444);
@@ -131,6 +133,19 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
 					      struct mmc_blk_data *md);
 static int get_card_status(struct mmc_card *card, u32 *status, int retries);
 
+static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
+{
+	struct mmc_packed *packed = mqrq->packed;
+
+	BUG_ON(!packed);
+
+	mqrq->cmd_type = MMC_PACKED_NONE;
+	packed->nr_entries = MMC_PACKED_NR_ZERO;
+	packed->idx_failure = MMC_PACKED_NR_IDX;
+	packed->retries = 0;
+	packed->blocks = 0;
+}
+
 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 {
 	struct mmc_blk_data *md;
@@ -1148,12 +1163,78 @@ static int mmc_blk_err_check(struct mmc_card *card,
 	if (!brq->data.bytes_xfered)
 		return MMC_BLK_RETRY;
 
+	if (mmc_packed_cmd(mq_mrq->cmd_type)) {
+		if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
+			return MMC_BLK_PARTIAL;
+		else
+			return MMC_BLK_SUCCESS;
+	}
+
 	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
 		return MMC_BLK_PARTIAL;
 
 	return MMC_BLK_SUCCESS;
 }
 
+static int mmc_blk_packed_err_check(struct mmc_card *card,
+				    struct mmc_async_req *areq)
+{
+	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
+						   mmc_active);
+	struct request *req = mq_rq->req;
+	struct mmc_packed *packed = mq_rq->packed;
+	int err, check, status;
+	u8 *ext_csd;
+
+	BUG_ON(!packed);
+
+	packed->retries--;
+	check = mmc_blk_err_check(card, areq);
+	err = get_card_status(card, &status, 0);
+	if (err) {
+		pr_err("%s: error %d sending status command\n",
+		       req->rq_disk->disk_name, err);
+		return MMC_BLK_ABORT;
+	}
+
+	if (status & R1_EXCEPTION_EVENT) {
+		ext_csd = kzalloc(512, GFP_KERNEL);
+		if (!ext_csd) {
+			pr_err("%s: unable to allocate buffer for ext_csd\n",
+			       req->rq_disk->disk_name);
+			return -ENOMEM;
+		}
+
+		err = mmc_send_ext_csd(card, ext_csd);
+		if (err) {
+			pr_err("%s: error %d sending ext_csd\n",
+			       req->rq_disk->disk_name, err);
+			check = MMC_BLK_ABORT;
+			goto free;
+		}
+
+		if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
+		     EXT_CSD_PACKED_FAILURE) &&
+		    (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+		     EXT_CSD_PACKED_GENERIC_ERROR)) {
+			if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+			    EXT_CSD_PACKED_INDEXED_ERROR) {
+				packed->idx_failure =
+				  ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
+				check = MMC_BLK_PARTIAL;
+			}
+			pr_err("%s: packed cmd failed, nr %u, sectors %u, "
+			       "failure index: %d\n",
+			       req->rq_disk->disk_name, packed->nr_entries,
+			       packed->blocks, packed->idx_failure);
+		}
+free:
+		kfree(ext_csd);
+	}
+
+	return check;
+}
+
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 			       struct mmc_card *card,
 			       int disable_multi,
@@ -1308,10 +1389,221 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 	mmc_queue_bounce_pre(mqrq);
 }
 
+static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
+					  struct mmc_card *card)
+{
+	unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
+	unsigned int max_seg_sz = queue_max_segment_size(q);
+	unsigned int len, nr_segs = 0;
+
+	do {
+		len = min(hdr_sz, max_seg_sz);
+		hdr_sz -= len;
+		nr_segs++;
+	} while (hdr_sz);
+
+	return nr_segs;
+}
+
+static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
+{
+	struct request_queue *q = mq->queue;
+	struct mmc_card *card = mq->card;
+	struct request *cur = req, *next = NULL;
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_queue_req *mqrq = mq->mqrq_cur;
+	bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
+	unsigned int req_sectors = 0, phys_segments = 0;
+	unsigned int max_blk_count, max_phys_segs;
+	bool put_back = true;
+	u8 max_packed_rw = 0;
+	u8 reqs = 0;
+
+	if (!(md->flags & MMC_BLK_PACKED_CMD))
+		goto no_packed;
+
+	if ((rq_data_dir(cur) == WRITE) &&
+	    mmc_host_packed_wr(card->host))
+		max_packed_rw = card->ext_csd.max_packed_writes;
+
+	if (max_packed_rw == 0)
+		goto no_packed;
+
+	if (mmc_req_rel_wr(cur) &&
+	    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
+		goto no_packed;
+
+	if (mmc_large_sector(card) &&
+	    !IS_ALIGNED(blk_rq_sectors(cur), 8))
+		goto no_packed;
+
+	mmc_blk_clear_packed(mqrq);
+
+	max_blk_count = min(card->host->max_blk_count,
+			    card->host->max_req_size >> 9);
+	if (unlikely(max_blk_count > 0xffff))
+		max_blk_count = 0xffff;
+
+	max_phys_segs = queue_max_segments(q);
+	req_sectors += blk_rq_sectors(cur);
+	phys_segments += cur->nr_phys_segments;
+
+	if (rq_data_dir(cur) == WRITE) {
+		req_sectors += mmc_large_sector(card) ? 8 : 1;
+		phys_segments += mmc_calc_packed_hdr_segs(q, card);
+	}
+
+	do {
+		if (reqs >= max_packed_rw - 1) {
+			put_back = false;
+			break;
+		}
+
+		spin_lock_irq(q->queue_lock);
+		next = blk_fetch_request(q);
+		spin_unlock_irq(q->queue_lock);
+		if (!next) {
+			put_back = false;
+			break;
+		}
+
+		if (mmc_large_sector(card) &&
+		    !IS_ALIGNED(blk_rq_sectors(next), 8))
+			break;
+
+		if (next->cmd_flags & REQ_DISCARD ||
+		    next->cmd_flags & REQ_FLUSH)
+			break;
+
+		if (rq_data_dir(cur) != rq_data_dir(next))
+			break;
+
+		if (mmc_req_rel_wr(next) &&
+		    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
+			break;
+
+		req_sectors += blk_rq_sectors(next);
+		if (req_sectors > max_blk_count)
+			break;
+
+		phys_segments += next->nr_phys_segments;
+		if (phys_segments > max_phys_segs)
+			break;
+
+		list_add_tail(&next->queuelist, &mqrq->packed->list);
+		cur = next;
+		reqs++;
+	} while (1);
+
+	if (put_back) {
+		spin_lock_irq(q->queue_lock);
+		blk_requeue_request(q, next);
+		spin_unlock_irq(q->queue_lock);
+	}
+
+	if (reqs > 0) {
+		list_add(&req->queuelist, &mqrq->packed->list);
+		mqrq->packed->nr_entries = ++reqs;
+		mqrq->packed->retries = reqs;
+		return reqs;
+	}
+
+no_packed:
+	mqrq->cmd_type = MMC_PACKED_NONE;
+	return 0;
+}
+
+static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
+					struct mmc_card *card,
+					struct mmc_queue *mq)
+{
+	struct mmc_blk_request *brq = &mqrq->brq;
+	struct request *req = mqrq->req;
+	struct request *prq;
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_packed *packed = mqrq->packed;
+	bool do_rel_wr, do_data_tag;
+	u32 *packed_cmd_hdr;
+	u8 hdr_blocks;
+	u8 i = 1;
+
+	BUG_ON(!packed);
+
+	mqrq->cmd_type = MMC_PACKED_WRITE;
+	packed->blocks = 0;
+	packed->idx_failure = MMC_PACKED_NR_IDX;
+
+	packed_cmd_hdr = packed->cmd_hdr;
+	memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
+	packed_cmd_hdr[0] = (packed->nr_entries << 16) |
+		(PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+	hdr_blocks = mmc_large_sector(card) ? 8 : 1;
+
+	/*
+	 * Argument for each entry of packed group
+	 */
+	list_for_each_entry(prq, &packed->list, queuelist) {
+		do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
+		do_data_tag = (card->ext_csd.data_tag_unit_size) &&
+			(prq->cmd_flags & REQ_META) &&
+			(rq_data_dir(prq) == WRITE) &&
+			((brq->data.blocks * brq->data.blksz) >=
+			 card->ext_csd.data_tag_unit_size);
+		/* Argument of CMD23 */
+		packed_cmd_hdr[(i * 2)] =
+			(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
+			(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
+			blk_rq_sectors(prq);
+		/* Argument of CMD18 or CMD25 */
+		packed_cmd_hdr[((i * 2)) + 1] =
+			mmc_card_blockaddr(card) ?
+			blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+		packed->blocks += blk_rq_sectors(prq);
+		i++;
+	}
+
+	memset(brq, 0, sizeof(struct mmc_blk_request));
+	brq->mrq.cmd = &brq->cmd;
+	brq->mrq.data = &brq->data;
+	brq->mrq.sbc = &brq->sbc;
+	brq->mrq.stop = &brq->stop;
+
+	brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+	brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
+	brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+	brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
+	brq->cmd.arg = blk_rq_pos(req);
+	if (!mmc_card_blockaddr(card))
+		brq->cmd.arg <<= 9;
+	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+	brq->data.blksz = 512;
+	brq->data.blocks = packed->blocks + hdr_blocks;
+	brq->data.flags |= MMC_DATA_WRITE;
+
+	brq->stop.opcode = MMC_STOP_TRANSMISSION;
+	brq->stop.arg = 0;
+	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+
+	mmc_set_data_timeout(&brq->data, card);
+
+	brq->data.sg = mqrq->sg;
+	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+	mqrq->mmc_active.mrq = &brq->mrq;
+	mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+	mmc_queue_bounce_pre(mqrq);
+}
+
 static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
 			   struct mmc_blk_request *brq, struct request *req,
 			   int ret)
 {
+	struct mmc_queue_req *mq_rq;
+	mq_rq = container_of(brq, struct mmc_queue_req, brq);
+
 	/*
 	 * If this is an SD card and we're writing, we can first
 	 * mark the known good sectors as ok.
@@ -1328,11 +1620,84 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
 			ret = blk_end_request(req, 0, blocks << 9);
 		}
 	} else {
-		ret = blk_end_request(req, 0, brq->data.bytes_xfered);
+		if (!mmc_packed_cmd(mq_rq->cmd_type))
+			ret = blk_end_request(req, 0, brq->data.bytes_xfered);
 	}
 	return ret;
 }
 
+static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
+{
+	struct request *prq;
+	struct mmc_packed *packed = mq_rq->packed;
+	int idx = packed->idx_failure, i = 0;
+	int ret = 0;
+
+	BUG_ON(!packed);
+
+	while (!list_empty(&packed->list)) {
+		prq = list_entry_rq(packed->list.next);
+		if (idx == i) {
+			/* retry from error index */
+			packed->nr_entries -= idx;
+			mq_rq->req = prq;
+			ret = 1;
+
+			if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
+				list_del_init(&prq->queuelist);
+				mmc_blk_clear_packed(mq_rq);
+			}
+			return ret;
+		}
+		list_del_init(&prq->queuelist);
+		blk_end_request(prq, 0, blk_rq_bytes(prq));
+		i++;
+	}
+
+	mmc_blk_clear_packed(mq_rq);
+	return ret;
+}
+
+static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
+{
+	struct request *prq;
+	struct mmc_packed *packed = mq_rq->packed;
+
+	BUG_ON(!packed);
+
+	while (!list_empty(&packed->list)) {
+		prq = list_entry_rq(packed->list.next);
+		list_del_init(&prq->queuelist);
+		blk_end_request(prq, -EIO, blk_rq_bytes(prq));
+	}
+
+	mmc_blk_clear_packed(mq_rq);
+}
+
+static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
+				      struct mmc_queue_req *mq_rq)
+{
+	struct request *prq;
+	struct request_queue *q = mq->queue;
+	struct mmc_packed *packed = mq_rq->packed;
+
+	BUG_ON(!packed);
+
+	while (!list_empty(&packed->list)) {
+		prq = list_entry_rq(packed->list.prev);
+		if (prq->queuelist.prev != &packed->list) {
+			list_del_init(&prq->queuelist);
+			spin_lock_irq(q->queue_lock);
+			blk_requeue_request(mq->queue, prq);
+			spin_unlock_irq(q->queue_lock);
+		} else {
+			list_del_init(&prq->queuelist);
+		}
+	}
+
+	mmc_blk_clear_packed(mq_rq);
+}
+
 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 {
 	struct mmc_blk_data *md = mq->data;
@@ -1343,10 +1708,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 	struct mmc_queue_req *mq_rq;
 	struct request *req = rqc;
 	struct mmc_async_req *areq;
+	const u8 packed_nr = 2;
+	u8 reqs = 0;
 
 	if (!rqc && !mq->mqrq_prev->req)
 		return 0;
 
+	if (rqc)
+		reqs = mmc_blk_prep_packed_list(mq, rqc);
+
 	do {
 		if (rqc) {
 			/*
@@ -1357,15 +1727,24 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			    (card->ext_csd.data_sector_size == 4096)) {
 				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
 					req->rq_disk->disk_name);
+				mq_rq = mq->mqrq_cur;
 				goto cmd_abort;
 			}
-			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+
+			if (reqs >= packed_nr)
+				mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
+							    card, mq);
+			else
+				mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
 			areq = &mq->mqrq_cur->mmc_active;
 		} else
 			areq = NULL;
 		areq = mmc_start_req(card->host, areq, (int *) &status);
-		if (!areq)
+		if (!areq) {
+			if (status == MMC_BLK_NEW_REQUEST)
+				mq->flags |= MMC_QUEUE_NEW_REQUEST;
 			return 0;
+		}
 
 		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
 		brq = &mq_rq->brq;
@@ -1380,8 +1759,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			 * A block was successfully transferred.
 			 */
 			mmc_blk_reset_success(md, type);
-			ret = blk_end_request(req, 0,
+
+			if (mmc_packed_cmd(mq_rq->cmd_type)) {
+				ret = mmc_blk_end_packed_req(mq_rq);
+				break;
+			} else {
+				ret = blk_end_request(req, 0,
 						brq->data.bytes_xfered);
+			}
+
 			/*
 			 * If the blk_end_request function returns non-zero even
 			 * though all data has been transferred and no errors
@@ -1414,7 +1800,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			err = mmc_blk_reset(md, card->host, type);
 			if (!err)
 				break;
-			if (err == -ENODEV)
+			if (err == -ENODEV ||
+			    mmc_packed_cmd(mq_rq->cmd_type))
 				goto cmd_abort;
 			/* Fall through */
 		}
@@ -1438,30 +1825,62 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			break;
 		case MMC_BLK_NOMEDIUM:
 			goto cmd_abort;
+		default:
+			pr_err("%s: Unhandled return value (%d)",
+			       req->rq_disk->disk_name, status);
+			goto cmd_abort;
 		}
 
 		if (ret) {
-			/*
-			 * In case of a incomplete request
-			 * prepare it again and resend.
-			 */
-			mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
-			mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+			if (mmc_packed_cmd(mq_rq->cmd_type)) {
+				if (!mq_rq->packed->retries)
+					goto cmd_abort;
+				mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
+				mmc_start_req(card->host,
+					      &mq_rq->mmc_active, NULL);
+			} else {
+
+				/*
+				 * In case of a incomplete request
+				 * prepare it again and resend.
+				 */
+				mmc_blk_rw_rq_prep(mq_rq, card,
+						   disable_multi, mq);
+				mmc_start_req(card->host,
+					      &mq_rq->mmc_active, NULL);
+			}
 		}
 	} while (ret);
 
 	return 1;
 
  cmd_abort:
-	if (mmc_card_removed(card))
-		req->cmd_flags |= REQ_QUIET;
-	while (ret)
-		ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
+	if (mmc_packed_cmd(mq_rq->cmd_type)) {
+		mmc_blk_abort_packed_req(mq_rq);
+	} else {
+		if (mmc_card_removed(card))
+			req->cmd_flags |= REQ_QUIET;
+		while (ret)
+			ret = blk_end_request(req, -EIO,
+					blk_rq_cur_bytes(req));
+	}
 
  start_new_req:
 	if (rqc) {
-		mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
-		mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
+		if (mmc_card_removed(card)) {
+			rqc->cmd_flags |= REQ_QUIET;
+			blk_end_request_all(rqc, -EIO);
+		} else {
+			/*
+			 * If current request is packed, it needs to put back.
+			 */
+			if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
+				mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
+
+			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+			mmc_start_req(card->host,
+				      &mq->mqrq_cur->mmc_active, NULL);
+		}
 	}
 
 	return 0;
@@ -1472,6 +1891,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	int ret;
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
+	struct mmc_host *host = card->host;
+	unsigned long flags;
 
 	if (req && !mq->mqrq_prev->req)
 		/* claim host only for the first request */
@@ -1486,6 +1907,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		goto out;
 	}
 
+	mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
 	if (req && req->cmd_flags & REQ_DISCARD) {
 		/* complete ongoing async transfer before issuing discard */
 		if (card->host->areq)
@@ -1501,11 +1923,16 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		mmc_blk_issue_rw_rq(mq, NULL);
 		ret = mmc_blk_issue_flush(mq, req);
 	} else {
+		if (!req && host->areq) {
+			spin_lock_irqsave(&host->context_info.lock, flags);
+			host->context_info.is_waiting_last_req = true;
+			spin_unlock_irqrestore(&host->context_info.lock, flags);
+		}
 		ret = mmc_blk_issue_rw_rq(mq, req);
 	}
 
 out:
-	if (!req)
+	if (!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST))
 		/* release host only when there are no more requests */
 		mmc_release_host(card->host);
 	return ret;
@@ -1624,6 +2051,14 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
 		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
 	}
 
+	if (mmc_card_mmc(card) &&
+	    (area_type == MMC_BLK_DATA_AREA_MAIN) &&
+	    (md->flags & MMC_BLK_CMD23) &&
+	    card->ext_csd.packed_event_en) {
+		if (!mmc_packed_init(&md->queue, card))
+			md->flags |= MMC_BLK_PACKED_CMD;
+	}
+
 	return md;
 
  err_putdisk:
@@ -1732,6 +2167,8 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
 
 		/* Then flush out any already in there */
 		mmc_cleanup_queue(&md->queue);
+		if (md->flags & MMC_BLK_PACKED_CMD)
+			mmc_packed_clean(&md->queue);
 		mmc_blk_put(md);
 	}
 }
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index fadf52eb5d70..fa4e44ee7961 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -22,7 +22,8 @@
 
 #define MMC_QUEUE_BOUNCESZ	65536
 
-#define MMC_QUEUE_SUSPENDED	(1 << 0)
+
+#define MMC_REQ_SPECIAL_MASK	(REQ_DISCARD | REQ_FLUSH)
 
 /*
  * Prepare a MMC request. This just filters out odd stuff.
@@ -58,6 +59,7 @@ static int mmc_queue_thread(void *d)
 	do {
 		struct request *req = NULL;
 		struct mmc_queue_req *tmp;
+		unsigned int cmd_flags = 0;
 
 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -67,12 +69,23 @@ static int mmc_queue_thread(void *d)
 
 		if (req || mq->mqrq_prev->req) {
 			set_current_state(TASK_RUNNING);
+			cmd_flags = req ? req->cmd_flags : 0;
 			mq->issue_fn(mq, req);
+			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
+				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+				continue; /* fetch again */
+			}
 
 			/*
 			 * Current request becomes previous request
 			 * and vice versa.
+			 * In case of special requests, current request
+			 * has been finished. Do not assign it to previous
+			 * request.
 			 */
+			if (cmd_flags & MMC_REQ_SPECIAL_MASK)
+				mq->mqrq_cur->req = NULL;
+
 			mq->mqrq_prev->brq.mrq.data = NULL;
 			mq->mqrq_prev->req = NULL;
 			tmp = mq->mqrq_prev;
@@ -103,6 +116,8 @@ static void mmc_request_fn(struct request_queue *q)
 {
 	struct mmc_queue *mq = q->queuedata;
 	struct request *req;
+	unsigned long flags;
+	struct mmc_context_info *cntx;
 
 	if (!mq) {
 		while ((req = blk_fetch_request(q)) != NULL) {
@@ -112,7 +127,20 @@ static void mmc_request_fn(struct request_queue *q)
 		return;
 	}
 
-	if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
+	cntx = &mq->card->host->context_info;
+	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
+		/*
+		 * New MMC request arrived when MMC thread may be
+		 * blocked on the previous request to be complete
+		 * with no current request fetched
+		 */
+		spin_lock_irqsave(&cntx->lock, flags);
+		if (cntx->is_waiting_last_req) {
+			cntx->is_new_req = true;
+			wake_up_interruptible(&cntx->wait);
+		}
+		spin_unlock_irqrestore(&cntx->lock, flags);
+	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
 		wake_up_process(mq->thread);
 }
 
@@ -334,6 +362,49 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);
 
+int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
+{
+	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+	int ret = 0;
+
+
+	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
+	if (!mqrq_cur->packed) {
+		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
+			mmc_card_name(card));
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
+	if (!mqrq_prev->packed) {
+		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
+			mmc_card_name(card));
+		kfree(mqrq_cur->packed);
+		mqrq_cur->packed = NULL;
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	INIT_LIST_HEAD(&mqrq_cur->packed->list);
+	INIT_LIST_HEAD(&mqrq_prev->packed->list);
+
+out:
+	return ret;
+}
+
+void mmc_packed_clean(struct mmc_queue *mq)
+{
+	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+
+	kfree(mqrq_cur->packed);
+	mqrq_cur->packed = NULL;
+	kfree(mqrq_prev->packed);
+	mqrq_prev->packed = NULL;
+}
+
 /**
  * mmc_queue_suspend - suspend a MMC request queue
  * @mq: MMC queue to suspend
@@ -378,6 +449,41 @@ void mmc_queue_resume(struct mmc_queue *mq)
 	}
 }
 
+static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
+					    struct mmc_packed *packed,
+					    struct scatterlist *sg,
+					    enum mmc_packed_type cmd_type)
+{
+	struct scatterlist *__sg = sg;
+	unsigned int sg_len = 0;
+	struct request *req;
+
+	if (mmc_packed_wr(cmd_type)) {
+		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
+		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
+		unsigned int len, remain, offset = 0;
+		u8 *buf = (u8 *)packed->cmd_hdr;
+
+		remain = hdr_sz;
+		do {
+			len = min(remain, max_seg_sz);
+			sg_set_buf(__sg, buf + offset, len);
+			offset += len;
+			remain -= len;
+			(__sg++)->page_link &= ~0x02;
+			sg_len++;
+		} while (remain);
+	}
+
+	list_for_each_entry(req, &packed->list, queuelist) {
+		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
+		__sg = sg + (sg_len - 1);
+		(__sg++)->page_link &= ~0x02;
+	}
+	sg_mark_end(sg + (sg_len - 1));
+	return sg_len;
+}
+
 /*
  * Prepare the sg list(s) to be handed of to the host driver
  */
@@ -386,14 +492,26 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 	unsigned int sg_len;
 	size_t buflen;
 	struct scatterlist *sg;
+	enum mmc_packed_type cmd_type;
 	int i;
 
-	if (!mqrq->bounce_buf)
-		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+	cmd_type = mqrq->cmd_type;
+
+	if (!mqrq->bounce_buf) {
+		if (mmc_packed_cmd(cmd_type))
+			return mmc_queue_packed_map_sg(mq, mqrq->packed,
+						       mqrq->sg, cmd_type);
+		else
+			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+	}
 
 	BUG_ON(!mqrq->bounce_sg);
 
-	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
+	if (mmc_packed_cmd(cmd_type))
+		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
+						 mqrq->bounce_sg, cmd_type);
+	else
+		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
 
 	mqrq->bounce_sg_len = sg_len;
 
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index d2a1eb4b9f9f..031bf6376c99 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -12,6 +12,23 @@ struct mmc_blk_request {
 	struct mmc_data		data;
 };
 
+enum mmc_packed_type {
+	MMC_PACKED_NONE = 0,
+	MMC_PACKED_WRITE,
+};
+
+#define mmc_packed_cmd(type)	((type) != MMC_PACKED_NONE)
+#define mmc_packed_wr(type)	((type) == MMC_PACKED_WRITE)
+
+struct mmc_packed {
+	struct list_head	list;
+	u32			cmd_hdr[1024];
+	unsigned int		blocks;
+	u8			nr_entries;
+	u8			retries;
+	s16			idx_failure;
+};
+
 struct mmc_queue_req {
 	struct request		*req;
 	struct mmc_blk_request	brq;
@@ -20,6 +37,8 @@ struct mmc_queue_req {
 	struct scatterlist	*bounce_sg;
 	unsigned int		bounce_sg_len;
 	struct mmc_async_req	mmc_active;
+	enum mmc_packed_type	cmd_type;
+	struct mmc_packed	*packed;
 };
 
 struct mmc_queue {
@@ -27,6 +46,9 @@ struct mmc_queue {
 	struct task_struct	*thread;
 	struct semaphore	thread_sem;
 	unsigned int		flags;
+#define MMC_QUEUE_SUSPENDED	(1 << 0)
+#define MMC_QUEUE_NEW_REQUEST	(1 << 1)
+
 	int			(*issue_fn)(struct mmc_queue *, struct request *);
 	void			*data;
 	struct request_queue	*queue;
@@ -46,4 +68,7 @@ extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
 extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
 extern void mmc_queue_bounce_post(struct mmc_queue_req *);
 
+extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
+extern void mmc_packed_clean(struct mmc_queue *);
+
 #endif