author	Seungwon Jeon <tgih.jun@samsung.com>	2013-02-06 03:02:46 -0500
committer	Chris Ball <cjb@laptop.org>	2013-02-24 14:37:16 -0500
commit	ce39f9d17c14e56ea6772aa84393e6e0cc8499c4 (patch)
tree	7b641a7f89614e3cc3f6ec2f81ee32f64ead4f0d /drivers/mmc/card/block.c
parent	abd9ac144947d9a604beb763339e2f77ce8bec79 (diff)
mmc: support packed write command for eMMC4.5 devices
This patch adds support for the packed write command of eMMC4.5 devices.
Several writes can be grouped into a single packed command, so that the data
of all the individual commands is sent in one transfer on the bus. Moving a
large amount of data in one transfer, rather than many small transfers, is
more efficient for the eMMC device internally; as a result, packed commands
improve write throughput. The following tables show the results of packed
write:

Type A:
test     none | packed
iozone   25.8 | 31
tiotest  27.6 | 31.2
lmdd     31.2 | 35.4

Type B:
test     none | packed
iozone   44.1 | 51.1
tiotest  47.9 | 52.5
lmdd     51.6 | 59.2

Type C:
test     none | packed
iozone   19.5 | 32
tiotest  19.9 | 34.5
lmdd     22.8 | 40.7

Signed-off-by: Seungwon Jeon <tgih.jun@samsung.com>
Reviewed-by: Maya Erez <merez@codeaurora.org>
Reviewed-by: Namjae Jeon <linkinjeon@gmail.com>
Signed-off-by: Chris Ball <cjb@laptop.org>
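As an illustration of the mechanism described above (not part of the patch): the change builds a 512-byte packed header that is sent ahead of the grouped write data. Word 0 of the header carries the entry count, the read/write flag, and the header version, and each following word pair carries the CMD23 (block count) and CMD25 (start address) arguments for one grouped request. The sketch below is plain C with made-up sector numbers, assuming a block-addressed card and no reliable-write or data-tag flags; it mirrors the layout produced by mmc_blk_packed_hdr_wrq_prep() in this diff.

/*
 * Illustrative sketch only -- not part of the patch. The entry values
 * below are hypothetical; a block-addressed card without reliable-write
 * or data-tag flags is assumed.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PACKED_CMD_VER	0x01	/* header version, as defined in the patch */
#define PACKED_CMD_WR	0x02	/* R/W field value for a packed write */

struct example_entry {		/* one queued write request (hypothetical) */
	uint32_t nr_sectors;	/* CMD23 argument: number of 512B blocks */
	uint32_t start_sector;	/* CMD25 argument: start address */
};

int main(void)
{
	uint32_t hdr[128];	/* 512-byte header = 128 32-bit words */
	const struct example_entry entries[] = {
		{ 8, 2048 }, { 64, 4096 }, { 16, 10240 },
	};
	unsigned int nr = sizeof(entries) / sizeof(entries[0]);
	unsigned int i;

	memset(hdr, 0, sizeof(hdr));

	/* Word 0: entry count, packed-write flag, header version. */
	hdr[0] = (nr << 16) | (PACKED_CMD_WR << 8) | PACKED_CMD_VER;

	/* Entry i occupies words 2*i+2 and 2*i+3 (word 1 stays unused). */
	for (i = 0; i < nr; i++) {
		hdr[(i + 1) * 2] = entries[i].nr_sectors;
		hdr[(i + 1) * 2 + 1] = entries[i].start_sector;
	}

	for (i = 0; i < (nr + 1) * 2; i++)
		printf("hdr[%u] = 0x%08x\n", i, hdr[i]);
	return 0;
}

On the wire, the device then receives this header block followed by the data of all grouped requests in a single write transfer of packed->blocks + hdr_blocks blocks, as set up in the patch below.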
Diffstat (limited to 'drivers/mmc/card/block.c')
-rw-r--r--	drivers/mmc/card/block.c	455
1 file changed, 441 insertions, 14 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 1170afe1a596..5bab73b91c20 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -59,6 +59,12 @@ MODULE_ALIAS("mmc:block");
 #define INAND_CMD38_ARG_SECTRIM2 0x88
 #define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
 
+#define mmc_req_rel_wr(req)	(((req->cmd_flags & REQ_FUA) || \
+				  (req->cmd_flags & REQ_META)) && \
+				  (rq_data_dir(req) == WRITE))
+#define PACKED_CMD_VER	0x01
+#define PACKED_CMD_WR	0x02
+
 static DEFINE_MUTEX(block_mutex);
 
 /*
@@ -89,6 +95,7 @@ struct mmc_blk_data {
 	unsigned int	flags;
 #define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
 #define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
+#define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */
 
 	unsigned int	usage;
 	unsigned int	read_only;
@@ -113,6 +120,12 @@ struct mmc_blk_data {
 
 static DEFINE_MUTEX(open_lock);
 
+enum {
+	MMC_PACKED_NR_IDX = -1,
+	MMC_PACKED_NR_ZERO,
+	MMC_PACKED_NR_SINGLE,
+};
+
 module_param(perdev_minors, int, 0444);
 MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
 
@@ -120,6 +133,19 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
 			      struct mmc_blk_data *md);
 static int get_card_status(struct mmc_card *card, u32 *status, int retries);
 
+static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
+{
+	struct mmc_packed *packed = mqrq->packed;
+
+	BUG_ON(!packed);
+
+	mqrq->cmd_type = MMC_PACKED_NONE;
+	packed->nr_entries = MMC_PACKED_NR_ZERO;
+	packed->idx_failure = MMC_PACKED_NR_IDX;
+	packed->retries = 0;
+	packed->blocks = 0;
+}
+
 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 {
 	struct mmc_blk_data *md;
@@ -1137,12 +1163,78 @@ static int mmc_blk_err_check(struct mmc_card *card,
 	if (!brq->data.bytes_xfered)
 		return MMC_BLK_RETRY;
 
+	if (mmc_packed_cmd(mq_mrq->cmd_type)) {
+		if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
+			return MMC_BLK_PARTIAL;
+		else
+			return MMC_BLK_SUCCESS;
+	}
+
 	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
 		return MMC_BLK_PARTIAL;
 
 	return MMC_BLK_SUCCESS;
 }
 
+static int mmc_blk_packed_err_check(struct mmc_card *card,
+				    struct mmc_async_req *areq)
+{
+	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
+						   mmc_active);
+	struct request *req = mq_rq->req;
+	struct mmc_packed *packed = mq_rq->packed;
+	int err, check, status;
+	u8 *ext_csd;
+
+	BUG_ON(!packed);
+
+	packed->retries--;
+	check = mmc_blk_err_check(card, areq);
+	err = get_card_status(card, &status, 0);
+	if (err) {
+		pr_err("%s: error %d sending status command\n",
+		       req->rq_disk->disk_name, err);
+		return MMC_BLK_ABORT;
+	}
+
+	if (status & R1_EXCEPTION_EVENT) {
+		ext_csd = kzalloc(512, GFP_KERNEL);
+		if (!ext_csd) {
+			pr_err("%s: unable to allocate buffer for ext_csd\n",
+			       req->rq_disk->disk_name);
+			return -ENOMEM;
+		}
+
+		err = mmc_send_ext_csd(card, ext_csd);
+		if (err) {
+			pr_err("%s: error %d sending ext_csd\n",
+			       req->rq_disk->disk_name, err);
+			check = MMC_BLK_ABORT;
+			goto free;
+		}
+
+		if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
+		     EXT_CSD_PACKED_FAILURE) &&
+		    (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+		     EXT_CSD_PACKED_GENERIC_ERROR)) {
+			if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+			    EXT_CSD_PACKED_INDEXED_ERROR) {
+				packed->idx_failure =
+					ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
+				check = MMC_BLK_PARTIAL;
+			}
+			pr_err("%s: packed cmd failed, nr %u, sectors %u, "
+			       "failure index: %d\n",
+			       req->rq_disk->disk_name, packed->nr_entries,
+			       packed->blocks, packed->idx_failure);
+		}
+free:
+		kfree(ext_csd);
+	}
+
+	return check;
+}
+
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 			       struct mmc_card *card,
 			       int disable_multi,
@@ -1297,10 +1389,221 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 	mmc_queue_bounce_pre(mqrq);
 }
 
+static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
+					  struct mmc_card *card)
+{
+	unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
+	unsigned int max_seg_sz = queue_max_segment_size(q);
+	unsigned int len, nr_segs = 0;
+
+	do {
+		len = min(hdr_sz, max_seg_sz);
+		hdr_sz -= len;
+		nr_segs++;
+	} while (hdr_sz);
+
+	return nr_segs;
+}
+
+static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
+{
+	struct request_queue *q = mq->queue;
+	struct mmc_card *card = mq->card;
+	struct request *cur = req, *next = NULL;
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_queue_req *mqrq = mq->mqrq_cur;
+	bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
+	unsigned int req_sectors = 0, phys_segments = 0;
+	unsigned int max_blk_count, max_phys_segs;
+	bool put_back = true;
+	u8 max_packed_rw = 0;
+	u8 reqs = 0;
+
+	if (!(md->flags & MMC_BLK_PACKED_CMD))
+		goto no_packed;
+
+	if ((rq_data_dir(cur) == WRITE) &&
+	    mmc_host_packed_wr(card->host))
+		max_packed_rw = card->ext_csd.max_packed_writes;
+
+	if (max_packed_rw == 0)
+		goto no_packed;
+
+	if (mmc_req_rel_wr(cur) &&
+	    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
+		goto no_packed;
+
+	if (mmc_large_sector(card) &&
+	    !IS_ALIGNED(blk_rq_sectors(cur), 8))
+		goto no_packed;
+
+	mmc_blk_clear_packed(mqrq);
+
+	max_blk_count = min(card->host->max_blk_count,
+			    card->host->max_req_size >> 9);
+	if (unlikely(max_blk_count > 0xffff))
+		max_blk_count = 0xffff;
+
+	max_phys_segs = queue_max_segments(q);
+	req_sectors += blk_rq_sectors(cur);
+	phys_segments += cur->nr_phys_segments;
+
+	if (rq_data_dir(cur) == WRITE) {
+		req_sectors += mmc_large_sector(card) ? 8 : 1;
+		phys_segments += mmc_calc_packed_hdr_segs(q, card);
+	}
+
+	do {
+		if (reqs >= max_packed_rw - 1) {
+			put_back = false;
+			break;
+		}
+
+		spin_lock_irq(q->queue_lock);
+		next = blk_fetch_request(q);
+		spin_unlock_irq(q->queue_lock);
+		if (!next) {
+			put_back = false;
+			break;
+		}
+
+		if (mmc_large_sector(card) &&
+		    !IS_ALIGNED(blk_rq_sectors(next), 8))
+			break;
+
+		if (next->cmd_flags & REQ_DISCARD ||
+		    next->cmd_flags & REQ_FLUSH)
+			break;
+
+		if (rq_data_dir(cur) != rq_data_dir(next))
+			break;
+
+		if (mmc_req_rel_wr(next) &&
+		    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
+			break;
+
+		req_sectors += blk_rq_sectors(next);
+		if (req_sectors > max_blk_count)
+			break;
+
+		phys_segments += next->nr_phys_segments;
+		if (phys_segments > max_phys_segs)
+			break;
+
+		list_add_tail(&next->queuelist, &mqrq->packed->list);
+		cur = next;
+		reqs++;
+	} while (1);
+
+	if (put_back) {
+		spin_lock_irq(q->queue_lock);
+		blk_requeue_request(q, next);
+		spin_unlock_irq(q->queue_lock);
+	}
+
+	if (reqs > 0) {
+		list_add(&req->queuelist, &mqrq->packed->list);
+		mqrq->packed->nr_entries = ++reqs;
+		mqrq->packed->retries = reqs;
+		return reqs;
+	}
+
+no_packed:
+	mqrq->cmd_type = MMC_PACKED_NONE;
+	return 0;
+}
+
+static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
+					struct mmc_card *card,
+					struct mmc_queue *mq)
+{
+	struct mmc_blk_request *brq = &mqrq->brq;
+	struct request *req = mqrq->req;
+	struct request *prq;
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_packed *packed = mqrq->packed;
+	bool do_rel_wr, do_data_tag;
+	u32 *packed_cmd_hdr;
+	u8 hdr_blocks;
+	u8 i = 1;
+
+	BUG_ON(!packed);
+
+	mqrq->cmd_type = MMC_PACKED_WRITE;
+	packed->blocks = 0;
+	packed->idx_failure = MMC_PACKED_NR_IDX;
+
+	packed_cmd_hdr = packed->cmd_hdr;
+	memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
+	packed_cmd_hdr[0] = (packed->nr_entries << 16) |
+		(PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+	hdr_blocks = mmc_large_sector(card) ? 8 : 1;
+
+	/*
+	 * Argument for each entry of packed group
+	 */
+	list_for_each_entry(prq, &packed->list, queuelist) {
+		do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
+		do_data_tag = (card->ext_csd.data_tag_unit_size) &&
+			(prq->cmd_flags & REQ_META) &&
+			(rq_data_dir(prq) == WRITE) &&
+			((brq->data.blocks * brq->data.blksz) >=
+			 card->ext_csd.data_tag_unit_size);
+		/* Argument of CMD23 */
+		packed_cmd_hdr[(i * 2)] =
+			(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
+			(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
+			blk_rq_sectors(prq);
+		/* Argument of CMD18 or CMD25 */
+		packed_cmd_hdr[((i * 2)) + 1] =
+			mmc_card_blockaddr(card) ?
+			blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+		packed->blocks += blk_rq_sectors(prq);
+		i++;
+	}
+
+	memset(brq, 0, sizeof(struct mmc_blk_request));
+	brq->mrq.cmd = &brq->cmd;
+	brq->mrq.data = &brq->data;
+	brq->mrq.sbc = &brq->sbc;
+	brq->mrq.stop = &brq->stop;
+
+	brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+	brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
+	brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+	brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
+	brq->cmd.arg = blk_rq_pos(req);
+	if (!mmc_card_blockaddr(card))
+		brq->cmd.arg <<= 9;
+	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+	brq->data.blksz = 512;
+	brq->data.blocks = packed->blocks + hdr_blocks;
+	brq->data.flags |= MMC_DATA_WRITE;
+
+	brq->stop.opcode = MMC_STOP_TRANSMISSION;
+	brq->stop.arg = 0;
+	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+
+	mmc_set_data_timeout(&brq->data, card);
+
+	brq->data.sg = mqrq->sg;
+	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+	mqrq->mmc_active.mrq = &brq->mrq;
+	mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+	mmc_queue_bounce_pre(mqrq);
+}
+
 static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
 			   struct mmc_blk_request *brq, struct request *req,
 			   int ret)
 {
+	struct mmc_queue_req *mq_rq;
+	mq_rq = container_of(brq, struct mmc_queue_req, brq);
+
 	/*
 	 * If this is an SD card and we're writing, we can first
 	 * mark the known good sectors as ok.
@@ -1317,11 +1620,84 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
 			ret = blk_end_request(req, 0, blocks << 9);
 		}
 	} else {
-		ret = blk_end_request(req, 0, brq->data.bytes_xfered);
+		if (!mmc_packed_cmd(mq_rq->cmd_type))
+			ret = blk_end_request(req, 0, brq->data.bytes_xfered);
 	}
 	return ret;
 }
 
+static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
+{
+	struct request *prq;
+	struct mmc_packed *packed = mq_rq->packed;
+	int idx = packed->idx_failure, i = 0;
+	int ret = 0;
+
+	BUG_ON(!packed);
+
+	while (!list_empty(&packed->list)) {
+		prq = list_entry_rq(packed->list.next);
+		if (idx == i) {
+			/* retry from error index */
+			packed->nr_entries -= idx;
+			mq_rq->req = prq;
+			ret = 1;
+
+			if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
+				list_del_init(&prq->queuelist);
+				mmc_blk_clear_packed(mq_rq);
+			}
+			return ret;
+		}
+		list_del_init(&prq->queuelist);
+		blk_end_request(prq, 0, blk_rq_bytes(prq));
+		i++;
+	}
+
+	mmc_blk_clear_packed(mq_rq);
+	return ret;
+}
+
+static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
+{
+	struct request *prq;
+	struct mmc_packed *packed = mq_rq->packed;
+
+	BUG_ON(!packed);
+
+	while (!list_empty(&packed->list)) {
+		prq = list_entry_rq(packed->list.next);
+		list_del_init(&prq->queuelist);
+		blk_end_request(prq, -EIO, blk_rq_bytes(prq));
+	}
+
+	mmc_blk_clear_packed(mq_rq);
+}
+
+static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
+				      struct mmc_queue_req *mq_rq)
+{
+	struct request *prq;
+	struct request_queue *q = mq->queue;
+	struct mmc_packed *packed = mq_rq->packed;
+
+	BUG_ON(!packed);
+
+	while (!list_empty(&packed->list)) {
+		prq = list_entry_rq(packed->list.prev);
+		if (prq->queuelist.prev != &packed->list) {
+			list_del_init(&prq->queuelist);
+			spin_lock_irq(q->queue_lock);
+			blk_requeue_request(mq->queue, prq);
+			spin_unlock_irq(q->queue_lock);
+		} else {
+			list_del_init(&prq->queuelist);
+		}
+	}
+
+	mmc_blk_clear_packed(mq_rq);
+}
+
 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 {
 	struct mmc_blk_data *md = mq->data;
@@ -1332,10 +1708,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 	struct mmc_queue_req *mq_rq;
 	struct request *req = rqc;
 	struct mmc_async_req *areq;
+	const u8 packed_nr = 2;
+	u8 reqs = 0;
 
 	if (!rqc && !mq->mqrq_prev->req)
 		return 0;
 
+	if (rqc)
+		reqs = mmc_blk_prep_packed_list(mq, rqc);
+
 	do {
 		if (rqc) {
 			/*
@@ -1346,9 +1727,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			    (card->ext_csd.data_sector_size == 4096)) {
 				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
 					req->rq_disk->disk_name);
+				mq_rq = mq->mqrq_cur;
 				goto cmd_abort;
 			}
-			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+
+			if (reqs >= packed_nr)
+				mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
+							    card, mq);
+			else
+				mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
 			areq = &mq->mqrq_cur->mmc_active;
 		} else
 			areq = NULL;
@@ -1372,8 +1759,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			 * A block was successfully transferred.
 			 */
 			mmc_blk_reset_success(md, type);
-			ret = blk_end_request(req, 0,
+
+			if (mmc_packed_cmd(mq_rq->cmd_type)) {
+				ret = mmc_blk_end_packed_req(mq_rq);
+				break;
+			} else {
+				ret = blk_end_request(req, 0,
 						brq->data.bytes_xfered);
+			}
+
 			/*
 			 * If the blk_end_request function returns non-zero even
 			 * though all data has been transferred and no errors
@@ -1406,7 +1800,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			err = mmc_blk_reset(md, card->host, type);
 			if (!err)
 				break;
-			if (err == -ENODEV)
+			if (err == -ENODEV ||
+			    mmc_packed_cmd(mq_rq->cmd_type))
 				goto cmd_abort;
 			/* Fall through */
 		}
@@ -1437,22 +1832,38 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 		}
 
 		if (ret) {
-			/*
-			 * In case of a incomplete request
-			 * prepare it again and resend.
-			 */
-			mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
-			mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+			if (mmc_packed_cmd(mq_rq->cmd_type)) {
+				if (!mq_rq->packed->retries)
+					goto cmd_abort;
+				mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
+				mmc_start_req(card->host,
+					      &mq_rq->mmc_active, NULL);
+			} else {
+
+				/*
+				 * In case of a incomplete request
+				 * prepare it again and resend.
+				 */
+				mmc_blk_rw_rq_prep(mq_rq, card,
+						   disable_multi, mq);
+				mmc_start_req(card->host,
+					      &mq_rq->mmc_active, NULL);
+			}
 		}
 	} while (ret);
 
 	return 1;
 
  cmd_abort:
-	if (mmc_card_removed(card))
-		req->cmd_flags |= REQ_QUIET;
-	while (ret)
-		ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
+	if (mmc_packed_cmd(mq_rq->cmd_type)) {
+		mmc_blk_abort_packed_req(mq_rq);
+	} else {
+		if (mmc_card_removed(card))
+			req->cmd_flags |= REQ_QUIET;
+		while (ret)
+			ret = blk_end_request(req, -EIO,
+					      blk_rq_cur_bytes(req));
+	}
 
  start_new_req:
 	if (rqc) {
@@ -1460,6 +1871,12 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			rqc->cmd_flags |= REQ_QUIET;
 			blk_end_request_all(rqc, -EIO);
 		} else {
+			/*
+			 * If current request is packed, it needs to put back.
+			 */
+			if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
+				mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
+
 			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
 			mmc_start_req(card->host,
 				      &mq->mqrq_cur->mmc_active, NULL);
@@ -1634,6 +2051,14 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
 		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
 	}
 
+	if (mmc_card_mmc(card) &&
+	    (area_type == MMC_BLK_DATA_AREA_MAIN) &&
+	    (md->flags & MMC_BLK_CMD23) &&
+	    card->ext_csd.packed_event_en) {
+		if (!mmc_packed_init(&md->queue, card))
+			md->flags |= MMC_BLK_PACKED_CMD;
+	}
+
 	return md;
 
  err_putdisk:
@@ -1742,6 +2167,8 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
 
 		/* Then flush out any already in there */
 		mmc_cleanup_queue(&md->queue);
+		if (md->flags & MMC_BLK_PACKED_CMD)
+			mmc_packed_clean(&md->queue);
 		mmc_blk_put(md);
 	}
 }