author	Seungwon Jeon <tgih.jun@samsung.com>	2013-02-06 03:02:46 -0500
committer	Chris Ball <cjb@laptop.org>	2013-02-24 14:37:16 -0500
commit	ce39f9d17c14e56ea6772aa84393e6e0cc8499c4
tree	7b641a7f89614e3cc3f6ec2f81ee32f64ead4f0d /drivers/mmc
parent	abd9ac144947d9a604beb763339e2f77ce8bec79
mmc: support packed write command for eMMC4.5 devices
This patch supports the packed write command of eMMC4.5 devices. Several writes can be grouped in a packed command and all data of the individual commands can be sent in a single transfer on the bus. Sending a large amount of data in one transfer, rather than several small transfers, is more efficient for the eMMC device internally. As a result, packed commands improve write throughput. The following tables show the results of packed write.

Type A:
test     none  | packed
iozone   25.8  | 31
tiotest  27.6  | 31.2
lmdd     31.2  | 35.4

Type B:
test     none  | packed
iozone   44.1  | 51.1
tiotest  47.9  | 52.5
lmdd     51.6  | 59.2

Type C:
test     none  | packed
iozone   19.5  | 32
tiotest  19.9  | 34.5
lmdd     22.8  | 40.7

Signed-off-by: Seungwon Jeon <tgih.jun@samsung.com>
Reviewed-by: Maya Erez <merez@codeaurora.org>
Reviewed-by: Namjae Jeon <linkinjeon@gmail.com>
Signed-off-by: Chris Ball <cjb@laptop.org>
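For readers skimming the diff, the packed WRITE header that mmc_blk_packed_hdr_wrq_prep() assembles in block.c below has a simple layout: word 0 carries the number of entries, the R/W direction and the header version, and every packed request then contributes a pair of words, its CMD23 argument (reliable-write/data-tag flags plus sector count) and its CMD25 start address. The sketch below only illustrates that layout and is not part of the patch; the helper name build_packed_wr_hdr() and its plain-array parameters are hypothetical, whereas the real code fills the header from the queued struct request list.

/*
 * Illustrative sketch only (not part of this patch): layout of the
 * packed WRITE header. build_packed_wr_hdr() and its arguments are
 * hypothetical stand-ins for what mmc_blk_packed_hdr_wrq_prep() does.
 */
#include <stdint.h>

#define PACKED_CMD_VER	0x01
#define PACKED_CMD_WR	0x02

/*
 * hdr:       at least 2 + 2 * nr 32-bit words (one 512-byte or 4KB block)
 * nr:        number of packed write requests
 * cmd23_arg: per-entry CMD23 argument (flags | sector count)
 * addr:      per-entry CMD25 start address (sector- or byte-addressed)
 */
static void build_packed_wr_hdr(uint32_t *hdr, uint8_t nr,
				const uint32_t *cmd23_arg,
				const uint32_t *addr)
{
	unsigned int i;

	/* word 0: number of entries, direction (write), header version */
	hdr[0] = ((uint32_t)nr << 16) | (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
	hdr[1] = 0;	/* left zero by the driver's memset of the header */

	/* entry k (1-based) occupies words 2*k and 2*k + 1 */
	for (i = 0; i < nr; i++) {
		hdr[2 * (i + 1)]     = cmd23_arg[i];	/* CMD23 argument */
		hdr[2 * (i + 1) + 1] = addr[i];		/* CMD25 argument */
	}
}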
Diffstat (limited to 'drivers/mmc')
-rw-r--r--	drivers/mmc/card/block.c	455
-rw-r--r--	drivers/mmc/card/queue.c	96
-rw-r--r--	drivers/mmc/card/queue.h	22
-rw-r--r--	drivers/mmc/core/mmc_ops.c	1
4 files changed, 557 insertions(+), 17 deletions(-)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 1170afe1a596..5bab73b91c20 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -59,6 +59,12 @@ MODULE_ALIAS("mmc:block");
 #define INAND_CMD38_ARG_SECTRIM2 0x88
 #define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
 
+#define mmc_req_rel_wr(req)	(((req->cmd_flags & REQ_FUA) || \
+				  (req->cmd_flags & REQ_META)) && \
+				  (rq_data_dir(req) == WRITE))
+#define PACKED_CMD_VER	0x01
+#define PACKED_CMD_WR	0x02
+
 static DEFINE_MUTEX(block_mutex);
 
 /*
@@ -89,6 +95,7 @@ struct mmc_blk_data {
 	unsigned int	flags;
 #define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
 #define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
+#define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */
 
 	unsigned int	usage;
 	unsigned int	read_only;
@@ -113,6 +120,12 @@ struct mmc_blk_data {
 
 static DEFINE_MUTEX(open_lock);
 
+enum {
+	MMC_PACKED_NR_IDX = -1,
+	MMC_PACKED_NR_ZERO,
+	MMC_PACKED_NR_SINGLE,
+};
+
 module_param(perdev_minors, int, 0444);
 MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
 
@@ -120,6 +133,19 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
 			      struct mmc_blk_data *md);
 static int get_card_status(struct mmc_card *card, u32 *status, int retries);
 
+static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
+{
+	struct mmc_packed *packed = mqrq->packed;
+
+	BUG_ON(!packed);
+
+	mqrq->cmd_type = MMC_PACKED_NONE;
+	packed->nr_entries = MMC_PACKED_NR_ZERO;
+	packed->idx_failure = MMC_PACKED_NR_IDX;
+	packed->retries = 0;
+	packed->blocks = 0;
+}
+
 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 {
 	struct mmc_blk_data *md;
@@ -1137,12 +1163,78 @@ static int mmc_blk_err_check(struct mmc_card *card,
 	if (!brq->data.bytes_xfered)
 		return MMC_BLK_RETRY;
 
+	if (mmc_packed_cmd(mq_mrq->cmd_type)) {
+		if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
+			return MMC_BLK_PARTIAL;
+		else
+			return MMC_BLK_SUCCESS;
+	}
+
 	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
 		return MMC_BLK_PARTIAL;
 
 	return MMC_BLK_SUCCESS;
 }
 
+static int mmc_blk_packed_err_check(struct mmc_card *card,
+				    struct mmc_async_req *areq)
+{
+	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
+			mmc_active);
+	struct request *req = mq_rq->req;
+	struct mmc_packed *packed = mq_rq->packed;
+	int err, check, status;
+	u8 *ext_csd;
+
+	BUG_ON(!packed);
+
+	packed->retries--;
+	check = mmc_blk_err_check(card, areq);
+	err = get_card_status(card, &status, 0);
+	if (err) {
+		pr_err("%s: error %d sending status command\n",
+		       req->rq_disk->disk_name, err);
+		return MMC_BLK_ABORT;
+	}
+
+	if (status & R1_EXCEPTION_EVENT) {
+		ext_csd = kzalloc(512, GFP_KERNEL);
+		if (!ext_csd) {
+			pr_err("%s: unable to allocate buffer for ext_csd\n",
+			       req->rq_disk->disk_name);
+			return -ENOMEM;
+		}
+
+		err = mmc_send_ext_csd(card, ext_csd);
+		if (err) {
+			pr_err("%s: error %d sending ext_csd\n",
+			       req->rq_disk->disk_name, err);
+			check = MMC_BLK_ABORT;
+			goto free;
+		}
+
+		if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
+		     EXT_CSD_PACKED_FAILURE) &&
+		    (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+		     EXT_CSD_PACKED_GENERIC_ERROR)) {
+			if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+			    EXT_CSD_PACKED_INDEXED_ERROR) {
+				packed->idx_failure =
+				  ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
+				check = MMC_BLK_PARTIAL;
+			}
+			pr_err("%s: packed cmd failed, nr %u, sectors %u, "
+			       "failure index: %d\n",
+			       req->rq_disk->disk_name, packed->nr_entries,
+			       packed->blocks, packed->idx_failure);
+		}
+free:
+		kfree(ext_csd);
+	}
+
+	return check;
+}
+
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 			       struct mmc_card *card,
 			       int disable_multi,
@@ -1297,10 +1389,221 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 	mmc_queue_bounce_pre(mqrq);
 }
 
+static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
+					  struct mmc_card *card)
+{
+	unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
+	unsigned int max_seg_sz = queue_max_segment_size(q);
+	unsigned int len, nr_segs = 0;
+
+	do {
+		len = min(hdr_sz, max_seg_sz);
+		hdr_sz -= len;
+		nr_segs++;
+	} while (hdr_sz);
+
+	return nr_segs;
+}
+
+static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
+{
+	struct request_queue *q = mq->queue;
+	struct mmc_card *card = mq->card;
+	struct request *cur = req, *next = NULL;
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_queue_req *mqrq = mq->mqrq_cur;
+	bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
+	unsigned int req_sectors = 0, phys_segments = 0;
+	unsigned int max_blk_count, max_phys_segs;
+	bool put_back = true;
+	u8 max_packed_rw = 0;
+	u8 reqs = 0;
+
+	if (!(md->flags & MMC_BLK_PACKED_CMD))
+		goto no_packed;
+
+	if ((rq_data_dir(cur) == WRITE) &&
+	    mmc_host_packed_wr(card->host))
+		max_packed_rw = card->ext_csd.max_packed_writes;
+
+	if (max_packed_rw == 0)
+		goto no_packed;
+
+	if (mmc_req_rel_wr(cur) &&
+	    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
+		goto no_packed;
+
+	if (mmc_large_sector(card) &&
+	    !IS_ALIGNED(blk_rq_sectors(cur), 8))
+		goto no_packed;
+
+	mmc_blk_clear_packed(mqrq);
+
+	max_blk_count = min(card->host->max_blk_count,
+			    card->host->max_req_size >> 9);
+	if (unlikely(max_blk_count > 0xffff))
+		max_blk_count = 0xffff;
+
+	max_phys_segs = queue_max_segments(q);
+	req_sectors += blk_rq_sectors(cur);
+	phys_segments += cur->nr_phys_segments;
+
+	if (rq_data_dir(cur) == WRITE) {
+		req_sectors += mmc_large_sector(card) ? 8 : 1;
+		phys_segments += mmc_calc_packed_hdr_segs(q, card);
+	}
+
+	do {
+		if (reqs >= max_packed_rw - 1) {
+			put_back = false;
+			break;
+		}
+
+		spin_lock_irq(q->queue_lock);
+		next = blk_fetch_request(q);
+		spin_unlock_irq(q->queue_lock);
+		if (!next) {
+			put_back = false;
+			break;
+		}
+
+		if (mmc_large_sector(card) &&
+		    !IS_ALIGNED(blk_rq_sectors(next), 8))
+			break;
+
+		if (next->cmd_flags & REQ_DISCARD ||
+		    next->cmd_flags & REQ_FLUSH)
+			break;
+
+		if (rq_data_dir(cur) != rq_data_dir(next))
+			break;
+
+		if (mmc_req_rel_wr(next) &&
+		    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
+			break;
+
+		req_sectors += blk_rq_sectors(next);
+		if (req_sectors > max_blk_count)
+			break;
+
+		phys_segments += next->nr_phys_segments;
+		if (phys_segments > max_phys_segs)
+			break;
+
+		list_add_tail(&next->queuelist, &mqrq->packed->list);
+		cur = next;
+		reqs++;
+	} while (1);
+
+	if (put_back) {
+		spin_lock_irq(q->queue_lock);
+		blk_requeue_request(q, next);
+		spin_unlock_irq(q->queue_lock);
+	}
+
+	if (reqs > 0) {
+		list_add(&req->queuelist, &mqrq->packed->list);
+		mqrq->packed->nr_entries = ++reqs;
+		mqrq->packed->retries = reqs;
+		return reqs;
+	}
+
+no_packed:
+	mqrq->cmd_type = MMC_PACKED_NONE;
+	return 0;
+}
+
+static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
+					struct mmc_card *card,
+					struct mmc_queue *mq)
+{
+	struct mmc_blk_request *brq = &mqrq->brq;
+	struct request *req = mqrq->req;
+	struct request *prq;
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_packed *packed = mqrq->packed;
+	bool do_rel_wr, do_data_tag;
+	u32 *packed_cmd_hdr;
+	u8 hdr_blocks;
+	u8 i = 1;
+
+	BUG_ON(!packed);
+
+	mqrq->cmd_type = MMC_PACKED_WRITE;
+	packed->blocks = 0;
+	packed->idx_failure = MMC_PACKED_NR_IDX;
+
+	packed_cmd_hdr = packed->cmd_hdr;
+	memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
+	packed_cmd_hdr[0] = (packed->nr_entries << 16) |
+		(PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+	hdr_blocks = mmc_large_sector(card) ? 8 : 1;
+
+	/*
+	 * Argument for each entry of packed group
+	 */
+	list_for_each_entry(prq, &packed->list, queuelist) {
+		do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
+		do_data_tag = (card->ext_csd.data_tag_unit_size) &&
+			(prq->cmd_flags & REQ_META) &&
+			(rq_data_dir(prq) == WRITE) &&
+			((brq->data.blocks * brq->data.blksz) >=
+			 card->ext_csd.data_tag_unit_size);
+		/* Argument of CMD23 */
+		packed_cmd_hdr[(i * 2)] =
+			(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
+			(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
+			blk_rq_sectors(prq);
+		/* Argument of CMD18 or CMD25 */
+		packed_cmd_hdr[((i * 2)) + 1] =
+			mmc_card_blockaddr(card) ?
+			blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+		packed->blocks += blk_rq_sectors(prq);
+		i++;
+	}
+
+	memset(brq, 0, sizeof(struct mmc_blk_request));
+	brq->mrq.cmd = &brq->cmd;
+	brq->mrq.data = &brq->data;
+	brq->mrq.sbc = &brq->sbc;
+	brq->mrq.stop = &brq->stop;
+
+	brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+	brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
+	brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+	brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
+	brq->cmd.arg = blk_rq_pos(req);
+	if (!mmc_card_blockaddr(card))
+		brq->cmd.arg <<= 9;
+	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+	brq->data.blksz = 512;
+	brq->data.blocks = packed->blocks + hdr_blocks;
+	brq->data.flags |= MMC_DATA_WRITE;
+
+	brq->stop.opcode = MMC_STOP_TRANSMISSION;
+	brq->stop.arg = 0;
+	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+
+	mmc_set_data_timeout(&brq->data, card);
+
+	brq->data.sg = mqrq->sg;
+	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+	mqrq->mmc_active.mrq = &brq->mrq;
+	mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+	mmc_queue_bounce_pre(mqrq);
+}
+
 static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
 			   struct mmc_blk_request *brq, struct request *req,
 			   int ret)
 {
+	struct mmc_queue_req *mq_rq;
+	mq_rq = container_of(brq, struct mmc_queue_req, brq);
+
 	/*
 	 * If this is an SD card and we're writing, we can first
 	 * mark the known good sectors as ok.
@@ -1317,11 +1620,84 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
 			ret = blk_end_request(req, 0, blocks << 9);
 		}
 	} else {
-		ret = blk_end_request(req, 0, brq->data.bytes_xfered);
+		if (!mmc_packed_cmd(mq_rq->cmd_type))
+			ret = blk_end_request(req, 0, brq->data.bytes_xfered);
 	}
 	return ret;
 }
 
+static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
+{
+	struct request *prq;
+	struct mmc_packed *packed = mq_rq->packed;
+	int idx = packed->idx_failure, i = 0;
+	int ret = 0;
+
+	BUG_ON(!packed);
+
+	while (!list_empty(&packed->list)) {
+		prq = list_entry_rq(packed->list.next);
+		if (idx == i) {
+			/* retry from error index */
+			packed->nr_entries -= idx;
+			mq_rq->req = prq;
+			ret = 1;
+
+			if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
+				list_del_init(&prq->queuelist);
+				mmc_blk_clear_packed(mq_rq);
+			}
+			return ret;
+		}
+		list_del_init(&prq->queuelist);
+		blk_end_request(prq, 0, blk_rq_bytes(prq));
+		i++;
+	}
+
+	mmc_blk_clear_packed(mq_rq);
+	return ret;
+}
+
+static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
+{
+	struct request *prq;
+	struct mmc_packed *packed = mq_rq->packed;
+
+	BUG_ON(!packed);
+
+	while (!list_empty(&packed->list)) {
+		prq = list_entry_rq(packed->list.next);
+		list_del_init(&prq->queuelist);
+		blk_end_request(prq, -EIO, blk_rq_bytes(prq));
+	}
+
+	mmc_blk_clear_packed(mq_rq);
+}
+
+static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
+				      struct mmc_queue_req *mq_rq)
+{
+	struct request *prq;
+	struct request_queue *q = mq->queue;
+	struct mmc_packed *packed = mq_rq->packed;
+
+	BUG_ON(!packed);
+
+	while (!list_empty(&packed->list)) {
+		prq = list_entry_rq(packed->list.prev);
+		if (prq->queuelist.prev != &packed->list) {
+			list_del_init(&prq->queuelist);
+			spin_lock_irq(q->queue_lock);
+			blk_requeue_request(mq->queue, prq);
+			spin_unlock_irq(q->queue_lock);
+		} else {
+			list_del_init(&prq->queuelist);
+		}
+	}
+
+	mmc_blk_clear_packed(mq_rq);
+}
+
 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 {
 	struct mmc_blk_data *md = mq->data;
@@ -1332,10 +1708,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 	struct mmc_queue_req *mq_rq;
 	struct request *req = rqc;
 	struct mmc_async_req *areq;
+	const u8 packed_nr = 2;
+	u8 reqs = 0;
 
 	if (!rqc && !mq->mqrq_prev->req)
 		return 0;
 
+	if (rqc)
+		reqs = mmc_blk_prep_packed_list(mq, rqc);
+
 	do {
 		if (rqc) {
 			/*
@@ -1346,9 +1727,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			    (card->ext_csd.data_sector_size == 4096)) {
 				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
 					req->rq_disk->disk_name);
+				mq_rq = mq->mqrq_cur;
 				goto cmd_abort;
 			}
-			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+
+			if (reqs >= packed_nr)
+				mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
+							    card, mq);
+			else
+				mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
 			areq = &mq->mqrq_cur->mmc_active;
 		} else
 			areq = NULL;
@@ -1372,8 +1759,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			 * A block was successfully transferred.
 			 */
 			mmc_blk_reset_success(md, type);
-			ret = blk_end_request(req, 0,
+
+			if (mmc_packed_cmd(mq_rq->cmd_type)) {
+				ret = mmc_blk_end_packed_req(mq_rq);
+				break;
+			} else {
+				ret = blk_end_request(req, 0,
 						brq->data.bytes_xfered);
+			}
+
 			/*
 			 * If the blk_end_request function returns non-zero even
 			 * though all data has been transferred and no errors
@@ -1406,7 +1800,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			err = mmc_blk_reset(md, card->host, type);
 			if (!err)
 				break;
-			if (err == -ENODEV)
+			if (err == -ENODEV ||
+			    mmc_packed_cmd(mq_rq->cmd_type))
 				goto cmd_abort;
 			/* Fall through */
 		}
@@ -1437,22 +1832,38 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 		}
 
 		if (ret) {
-			/*
-			 * In case of a incomplete request
-			 * prepare it again and resend.
-			 */
-			mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
-			mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+			if (mmc_packed_cmd(mq_rq->cmd_type)) {
+				if (!mq_rq->packed->retries)
+					goto cmd_abort;
+				mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
+				mmc_start_req(card->host,
+					      &mq_rq->mmc_active, NULL);
+			} else {
+
+				/*
+				 * In case of a incomplete request
+				 * prepare it again and resend.
+				 */
+				mmc_blk_rw_rq_prep(mq_rq, card,
+						   disable_multi, mq);
+				mmc_start_req(card->host,
+					      &mq_rq->mmc_active, NULL);
+			}
 		}
 	} while (ret);
 
 	return 1;
 
  cmd_abort:
-	if (mmc_card_removed(card))
-		req->cmd_flags |= REQ_QUIET;
-	while (ret)
-		ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
+	if (mmc_packed_cmd(mq_rq->cmd_type)) {
+		mmc_blk_abort_packed_req(mq_rq);
+	} else {
+		if (mmc_card_removed(card))
+			req->cmd_flags |= REQ_QUIET;
+		while (ret)
+			ret = blk_end_request(req, -EIO,
+					      blk_rq_cur_bytes(req));
+	}
 
  start_new_req:
 	if (rqc) {
@@ -1460,6 +1871,12 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			rqc->cmd_flags |= REQ_QUIET;
 			blk_end_request_all(rqc, -EIO);
 		} else {
+			/*
+			 * If current request is packed, it needs to put back.
+			 */
+			if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
+				mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
+
 			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
 			mmc_start_req(card->host,
 				      &mq->mqrq_cur->mmc_active, NULL);
@@ -1634,6 +2051,14 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
 		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
 	}
 
+	if (mmc_card_mmc(card) &&
+	    (area_type == MMC_BLK_DATA_AREA_MAIN) &&
+	    (md->flags & MMC_BLK_CMD23) &&
+	    card->ext_csd.packed_event_en) {
+		if (!mmc_packed_init(&md->queue, card))
+			md->flags |= MMC_BLK_PACKED_CMD;
+	}
+
 	return md;
 
  err_putdisk:
@@ -1742,6 +2167,8 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
 
 		/* Then flush out any already in there */
 		mmc_cleanup_queue(&md->queue);
+		if (md->flags & MMC_BLK_PACKED_CMD)
+			mmc_packed_clean(&md->queue);
 		mmc_blk_put(md);
 	}
 }
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 5e0971016ac5..fa4e44ee7961 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -362,6 +362,49 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);
 
+int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
+{
+	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+	int ret = 0;
+
+
+	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
+	if (!mqrq_cur->packed) {
+		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
+			mmc_card_name(card));
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
+	if (!mqrq_prev->packed) {
+		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
+			mmc_card_name(card));
+		kfree(mqrq_cur->packed);
+		mqrq_cur->packed = NULL;
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	INIT_LIST_HEAD(&mqrq_cur->packed->list);
+	INIT_LIST_HEAD(&mqrq_prev->packed->list);
+
+out:
+	return ret;
+}
+
+void mmc_packed_clean(struct mmc_queue *mq)
+{
+	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+
+	kfree(mqrq_cur->packed);
+	mqrq_cur->packed = NULL;
+	kfree(mqrq_prev->packed);
+	mqrq_prev->packed = NULL;
+}
+
 /**
  * mmc_queue_suspend - suspend a MMC request queue
  * @mq: MMC queue to suspend
@@ -406,6 +449,41 @@ void mmc_queue_resume(struct mmc_queue *mq)
 	}
 }
 
+static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
+					    struct mmc_packed *packed,
+					    struct scatterlist *sg,
+					    enum mmc_packed_type cmd_type)
+{
+	struct scatterlist *__sg = sg;
+	unsigned int sg_len = 0;
+	struct request *req;
+
+	if (mmc_packed_wr(cmd_type)) {
+		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
+		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
+		unsigned int len, remain, offset = 0;
+		u8 *buf = (u8 *)packed->cmd_hdr;
+
+		remain = hdr_sz;
+		do {
+			len = min(remain, max_seg_sz);
+			sg_set_buf(__sg, buf + offset, len);
+			offset += len;
+			remain -= len;
+			(__sg++)->page_link &= ~0x02;
+			sg_len++;
+		} while (remain);
+	}
+
+	list_for_each_entry(req, &packed->list, queuelist) {
+		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
+		__sg = sg + (sg_len - 1);
+		(__sg++)->page_link &= ~0x02;
+	}
+	sg_mark_end(sg + (sg_len - 1));
+	return sg_len;
+}
+
 /*
  * Prepare the sg list(s) to be handed of to the host driver
  */
@@ -414,14 +492,26 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 	unsigned int sg_len;
 	size_t buflen;
 	struct scatterlist *sg;
+	enum mmc_packed_type cmd_type;
 	int i;
 
-	if (!mqrq->bounce_buf)
-		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+	cmd_type = mqrq->cmd_type;
+
+	if (!mqrq->bounce_buf) {
+		if (mmc_packed_cmd(cmd_type))
+			return mmc_queue_packed_map_sg(mq, mqrq->packed,
+						       mqrq->sg, cmd_type);
+		else
+			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+	}
 
 	BUG_ON(!mqrq->bounce_sg);
 
-	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
+	if (mmc_packed_cmd(cmd_type))
+		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
+						 mqrq->bounce_sg, cmd_type);
+	else
+		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
 
 	mqrq->bounce_sg_len = sg_len;
 
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index e20c27b2b8b4..031bf6376c99 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -12,6 +12,23 @@ struct mmc_blk_request {
 	struct mmc_data		data;
 };
 
+enum mmc_packed_type {
+	MMC_PACKED_NONE = 0,
+	MMC_PACKED_WRITE,
+};
+
+#define mmc_packed_cmd(type)	((type) != MMC_PACKED_NONE)
+#define mmc_packed_wr(type)	((type) == MMC_PACKED_WRITE)
+
+struct mmc_packed {
+	struct list_head	list;
+	u32			cmd_hdr[1024];
+	unsigned int		blocks;
+	u8			nr_entries;
+	u8			retries;
+	s16			idx_failure;
+};
+
 struct mmc_queue_req {
 	struct request		*req;
 	struct mmc_blk_request	brq;
@@ -20,6 +37,8 @@ struct mmc_queue_req {
 	struct scatterlist	*bounce_sg;
 	unsigned int		bounce_sg_len;
 	struct mmc_async_req	mmc_active;
+	enum mmc_packed_type	cmd_type;
+	struct mmc_packed	*packed;
 };
 
 struct mmc_queue {
@@ -49,4 +68,7 @@ extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
 extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
 extern void mmc_queue_bounce_post(struct mmc_queue_req *);
 
+extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
+extern void mmc_packed_clean(struct mmc_queue *);
+
 #endif
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 6d8f7012d73a..49f04bc9d0eb 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -363,6 +363,7 @@ int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
 	return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
 			ext_csd, 512);
 }
+EXPORT_SYMBOL_GPL(mmc_send_ext_csd);
 
 int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
 {