author     Linus Torvalds <torvalds@linux-foundation.org>  2013-02-26 12:31:09 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-02-26 12:31:09 -0500
commit     ed5dc2372dba46e0ecd08791b1a0399d313e5cff (patch)
tree       571319985b59a2963fb7580c24ee2aa1696359e2 /drivers/mmc
parent     0512c04a2b5d29a33d96d315e1d14c55f5148aa7 (diff)
parent     0e786102949d7461859c6ce9f39c2c8d28e42db3 (diff)
Merge tag 'mmc-updates-for-3.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc
Pull MMC update from Chris Ball:
"MMC highlights for 3.9:
Core:
- Support for packed commands in eMMC 4.5. (This requires a host
capability to be turned on. It increases write throughput by 20%+,
but may also increase average write latency; more testing needed.)
- Add DT bindings for capability flags.
- Add mmc_of_parse() for shared DT parsing between drivers.
Drivers:
- android-goldfish: New MMC driver for the Android Goldfish emulator.
- mvsdio: Add DT bindings, pinctrl, use slot-gpio for card detection.
- omap_hsmmc: Fix boot hangs with RPMB partitions.
- sdhci-bcm2835: New driver for controller used by Raspberry Pi.
- sdhci-esdhc-imx: Add 8-bit data, auto CMD23 support, use slot-gpio.
- sh_mmcif: Add support for eMMC DDR, bundled MMCIF IRQs.
- tmio_mmc: Add DT bindings, support for vccq regulator"
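As a rough illustration of the first two core highlights above: packed writes stay disabled unless the host driver opts in, and mmc_of_parse() lets a DT-based driver pick up the standard bindings in one call. The sketch below shows how a host probe might wire both up. It assumes the MMC_CAP2_PACKED_WR capability bit added by this series; the driver structure and probe function are hypothetical, so treat it as a sketch rather than a reference implementation.

#include <linux/platform_device.h>
#include <linux/mmc/host.h>

/*
 * Hypothetical host probe: parse the standard DT bindings centrally and
 * opt in to eMMC 4.5 packed writes (off by default in the core).
 */
static int my_mmc_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	int ret;

	/* "struct my_mmc_host" is made up for this example */
	mmc = mmc_alloc_host(sizeof(struct my_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	mmc_of_parse(mmc);			/* cd/wp GPIOs, bus width, caps from DT */
	mmc->caps2 |= MMC_CAP2_PACKED_WR;	/* enable packed write support */

	ret = mmc_add_host(mmc);
	if (ret)
		mmc_free_host(mmc);
	return ret;
}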
* tag 'mmc-updates-for-3.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc: (92 commits)
mmc: tegra: assume CONFIG_OF, remove platform data
mmc: add DT bindings for more MMC capability flags
mmc: tmio: add support for the VccQ regulator
mmc: tmio: remove unused and deprecated symbols
mmc: sh_mobile_sdhi: use managed resource allocations
mmc: sh_mobile_sdhi: remove unused .pdata field
mmc: tmio-mmc: parse device-tree bindings
mmc: tmio-mmc: define device-tree bindings
mmc: sh_mmcif: use mmc_of_parse() to parse standard MMC DT bindings
mmc: (cosmetic) remove "extern" from function declarations
mmc: provide a standard MMC device-tree binding parser centrally
mmc: detailed definition of CD and WP MMC line polarities in DT
mmc: sdhi, tmio: only check flags in tmio-mmc driver proper
mmc: sdhci: Fix parameter of sdhci_do_start_signal_voltage_switch()
mmc: sdhci: check voltage range only on regulators aware of voltage value
mmc: bcm2835: set SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK
mmc: support packed write command for eMMC4.5 devices
mmc: add packed command feature of eMMC4.5
mmc: rtsx: remove driving adjustment
mmc: use regulator_can_change_voltage() instead of regulator_count_voltages
...
Diffstat (limited to 'drivers/mmc')
36 files changed, 2866 insertions, 807 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 21056b9ef0a0..5bab73b91c20 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -59,6 +59,12 @@ MODULE_ALIAS("mmc:block"); | |||
59 | #define INAND_CMD38_ARG_SECTRIM2 0x88 | 59 | #define INAND_CMD38_ARG_SECTRIM2 0x88 |
60 | #define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ | 60 | #define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ |
61 | 61 | ||
62 | #define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \ | ||
63 | (req->cmd_flags & REQ_META)) && \ | ||
64 | (rq_data_dir(req) == WRITE)) | ||
65 | #define PACKED_CMD_VER 0x01 | ||
66 | #define PACKED_CMD_WR 0x02 | ||
67 | |||
62 | static DEFINE_MUTEX(block_mutex); | 68 | static DEFINE_MUTEX(block_mutex); |
63 | 69 | ||
64 | /* | 70 | /* |
@@ -89,6 +95,7 @@ struct mmc_blk_data { | |||
89 | unsigned int flags; | 95 | unsigned int flags; |
90 | #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ | 96 | #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ |
91 | #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */ | 97 | #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */ |
98 | #define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */ | ||
92 | 99 | ||
93 | unsigned int usage; | 100 | unsigned int usage; |
94 | unsigned int read_only; | 101 | unsigned int read_only; |
@@ -113,15 +120,10 @@ struct mmc_blk_data { | |||
113 | 120 | ||
114 | static DEFINE_MUTEX(open_lock); | 121 | static DEFINE_MUTEX(open_lock); |
115 | 122 | ||
116 | enum mmc_blk_status { | 123 | enum { |
117 | MMC_BLK_SUCCESS = 0, | 124 | MMC_PACKED_NR_IDX = -1, |
118 | MMC_BLK_PARTIAL, | 125 | MMC_PACKED_NR_ZERO, |
119 | MMC_BLK_CMD_ERR, | 126 | MMC_PACKED_NR_SINGLE, |
120 | MMC_BLK_RETRY, | ||
121 | MMC_BLK_ABORT, | ||
122 | MMC_BLK_DATA_ERR, | ||
123 | MMC_BLK_ECC_ERR, | ||
124 | MMC_BLK_NOMEDIUM, | ||
125 | }; | 127 | }; |
126 | 128 | ||
127 | module_param(perdev_minors, int, 0444); | 129 | module_param(perdev_minors, int, 0444); |
@@ -131,6 +133,19 @@ static inline int mmc_blk_part_switch(struct mmc_card *card, | |||
131 | struct mmc_blk_data *md); | 133 | struct mmc_blk_data *md); |
132 | static int get_card_status(struct mmc_card *card, u32 *status, int retries); | 134 | static int get_card_status(struct mmc_card *card, u32 *status, int retries); |
133 | 135 | ||
136 | static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq) | ||
137 | { | ||
138 | struct mmc_packed *packed = mqrq->packed; | ||
139 | |||
140 | BUG_ON(!packed); | ||
141 | |||
142 | mqrq->cmd_type = MMC_PACKED_NONE; | ||
143 | packed->nr_entries = MMC_PACKED_NR_ZERO; | ||
144 | packed->idx_failure = MMC_PACKED_NR_IDX; | ||
145 | packed->retries = 0; | ||
146 | packed->blocks = 0; | ||
147 | } | ||
148 | |||
134 | static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) | 149 | static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) |
135 | { | 150 | { |
136 | struct mmc_blk_data *md; | 151 | struct mmc_blk_data *md; |
@@ -1148,12 +1163,78 @@ static int mmc_blk_err_check(struct mmc_card *card, | |||
1148 | if (!brq->data.bytes_xfered) | 1163 | if (!brq->data.bytes_xfered) |
1149 | return MMC_BLK_RETRY; | 1164 | return MMC_BLK_RETRY; |
1150 | 1165 | ||
1166 | if (mmc_packed_cmd(mq_mrq->cmd_type)) { | ||
1167 | if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered)) | ||
1168 | return MMC_BLK_PARTIAL; | ||
1169 | else | ||
1170 | return MMC_BLK_SUCCESS; | ||
1171 | } | ||
1172 | |||
1151 | if (blk_rq_bytes(req) != brq->data.bytes_xfered) | 1173 | if (blk_rq_bytes(req) != brq->data.bytes_xfered) |
1152 | return MMC_BLK_PARTIAL; | 1174 | return MMC_BLK_PARTIAL; |
1153 | 1175 | ||
1154 | return MMC_BLK_SUCCESS; | 1176 | return MMC_BLK_SUCCESS; |
1155 | } | 1177 | } |
1156 | 1178 | ||
1179 | static int mmc_blk_packed_err_check(struct mmc_card *card, | ||
1180 | struct mmc_async_req *areq) | ||
1181 | { | ||
1182 | struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req, | ||
1183 | mmc_active); | ||
1184 | struct request *req = mq_rq->req; | ||
1185 | struct mmc_packed *packed = mq_rq->packed; | ||
1186 | int err, check, status; | ||
1187 | u8 *ext_csd; | ||
1188 | |||
1189 | BUG_ON(!packed); | ||
1190 | |||
1191 | packed->retries--; | ||
1192 | check = mmc_blk_err_check(card, areq); | ||
1193 | err = get_card_status(card, &status, 0); | ||
1194 | if (err) { | ||
1195 | pr_err("%s: error %d sending status command\n", | ||
1196 | req->rq_disk->disk_name, err); | ||
1197 | return MMC_BLK_ABORT; | ||
1198 | } | ||
1199 | |||
1200 | if (status & R1_EXCEPTION_EVENT) { | ||
1201 | ext_csd = kzalloc(512, GFP_KERNEL); | ||
1202 | if (!ext_csd) { | ||
1203 | pr_err("%s: unable to allocate buffer for ext_csd\n", | ||
1204 | req->rq_disk->disk_name); | ||
1205 | return -ENOMEM; | ||
1206 | } | ||
1207 | |||
1208 | err = mmc_send_ext_csd(card, ext_csd); | ||
1209 | if (err) { | ||
1210 | pr_err("%s: error %d sending ext_csd\n", | ||
1211 | req->rq_disk->disk_name, err); | ||
1212 | check = MMC_BLK_ABORT; | ||
1213 | goto free; | ||
1214 | } | ||
1215 | |||
1216 | if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] & | ||
1217 | EXT_CSD_PACKED_FAILURE) && | ||
1218 | (ext_csd[EXT_CSD_PACKED_CMD_STATUS] & | ||
1219 | EXT_CSD_PACKED_GENERIC_ERROR)) { | ||
1220 | if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] & | ||
1221 | EXT_CSD_PACKED_INDEXED_ERROR) { | ||
1222 | packed->idx_failure = | ||
1223 | ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1; | ||
1224 | check = MMC_BLK_PARTIAL; | ||
1225 | } | ||
1226 | pr_err("%s: packed cmd failed, nr %u, sectors %u, " | ||
1227 | "failure index: %d\n", | ||
1228 | req->rq_disk->disk_name, packed->nr_entries, | ||
1229 | packed->blocks, packed->idx_failure); | ||
1230 | } | ||
1231 | free: | ||
1232 | kfree(ext_csd); | ||
1233 | } | ||
1234 | |||
1235 | return check; | ||
1236 | } | ||
1237 | |||
1157 | static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, | 1238 | static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, |
1158 | struct mmc_card *card, | 1239 | struct mmc_card *card, |
1159 | int disable_multi, | 1240 | int disable_multi, |
@@ -1308,10 +1389,221 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, | |||
1308 | mmc_queue_bounce_pre(mqrq); | 1389 | mmc_queue_bounce_pre(mqrq); |
1309 | } | 1390 | } |
1310 | 1391 | ||
1392 | static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q, | ||
1393 | struct mmc_card *card) | ||
1394 | { | ||
1395 | unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512; | ||
1396 | unsigned int max_seg_sz = queue_max_segment_size(q); | ||
1397 | unsigned int len, nr_segs = 0; | ||
1398 | |||
1399 | do { | ||
1400 | len = min(hdr_sz, max_seg_sz); | ||
1401 | hdr_sz -= len; | ||
1402 | nr_segs++; | ||
1403 | } while (hdr_sz); | ||
1404 | |||
1405 | return nr_segs; | ||
1406 | } | ||
1407 | |||
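As a quick sanity check of the loop above: a large-sector card uses a 4096-byte packed header, so a host limited to, say, 1024-byte segments needs 4096/1024 = 4 header segments, while a host with the common 64 KiB segment limit fits the whole header into a single segment (the 512-byte header of a conventional card always does).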
1408 | static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) | ||
1409 | { | ||
1410 | struct request_queue *q = mq->queue; | ||
1411 | struct mmc_card *card = mq->card; | ||
1412 | struct request *cur = req, *next = NULL; | ||
1413 | struct mmc_blk_data *md = mq->data; | ||
1414 | struct mmc_queue_req *mqrq = mq->mqrq_cur; | ||
1415 | bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN; | ||
1416 | unsigned int req_sectors = 0, phys_segments = 0; | ||
1417 | unsigned int max_blk_count, max_phys_segs; | ||
1418 | bool put_back = true; | ||
1419 | u8 max_packed_rw = 0; | ||
1420 | u8 reqs = 0; | ||
1421 | |||
1422 | if (!(md->flags & MMC_BLK_PACKED_CMD)) | ||
1423 | goto no_packed; | ||
1424 | |||
1425 | if ((rq_data_dir(cur) == WRITE) && | ||
1426 | mmc_host_packed_wr(card->host)) | ||
1427 | max_packed_rw = card->ext_csd.max_packed_writes; | ||
1428 | |||
1429 | if (max_packed_rw == 0) | ||
1430 | goto no_packed; | ||
1431 | |||
1432 | if (mmc_req_rel_wr(cur) && | ||
1433 | (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) | ||
1434 | goto no_packed; | ||
1435 | |||
1436 | if (mmc_large_sector(card) && | ||
1437 | !IS_ALIGNED(blk_rq_sectors(cur), 8)) | ||
1438 | goto no_packed; | ||
1439 | |||
1440 | mmc_blk_clear_packed(mqrq); | ||
1441 | |||
1442 | max_blk_count = min(card->host->max_blk_count, | ||
1443 | card->host->max_req_size >> 9); | ||
1444 | if (unlikely(max_blk_count > 0xffff)) | ||
1445 | max_blk_count = 0xffff; | ||
1446 | |||
1447 | max_phys_segs = queue_max_segments(q); | ||
1448 | req_sectors += blk_rq_sectors(cur); | ||
1449 | phys_segments += cur->nr_phys_segments; | ||
1450 | |||
1451 | if (rq_data_dir(cur) == WRITE) { | ||
1452 | req_sectors += mmc_large_sector(card) ? 8 : 1; | ||
1453 | phys_segments += mmc_calc_packed_hdr_segs(q, card); | ||
1454 | } | ||
1455 | |||
1456 | do { | ||
1457 | if (reqs >= max_packed_rw - 1) { | ||
1458 | put_back = false; | ||
1459 | break; | ||
1460 | } | ||
1461 | |||
1462 | spin_lock_irq(q->queue_lock); | ||
1463 | next = blk_fetch_request(q); | ||
1464 | spin_unlock_irq(q->queue_lock); | ||
1465 | if (!next) { | ||
1466 | put_back = false; | ||
1467 | break; | ||
1468 | } | ||
1469 | |||
1470 | if (mmc_large_sector(card) && | ||
1471 | !IS_ALIGNED(blk_rq_sectors(next), 8)) | ||
1472 | break; | ||
1473 | |||
1474 | if (next->cmd_flags & REQ_DISCARD || | ||
1475 | next->cmd_flags & REQ_FLUSH) | ||
1476 | break; | ||
1477 | |||
1478 | if (rq_data_dir(cur) != rq_data_dir(next)) | ||
1479 | break; | ||
1480 | |||
1481 | if (mmc_req_rel_wr(next) && | ||
1482 | (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) | ||
1483 | break; | ||
1484 | |||
1485 | req_sectors += blk_rq_sectors(next); | ||
1486 | if (req_sectors > max_blk_count) | ||
1487 | break; | ||
1488 | |||
1489 | phys_segments += next->nr_phys_segments; | ||
1490 | if (phys_segments > max_phys_segs) | ||
1491 | break; | ||
1492 | |||
1493 | list_add_tail(&next->queuelist, &mqrq->packed->list); | ||
1494 | cur = next; | ||
1495 | reqs++; | ||
1496 | } while (1); | ||
1497 | |||
1498 | if (put_back) { | ||
1499 | spin_lock_irq(q->queue_lock); | ||
1500 | blk_requeue_request(q, next); | ||
1501 | spin_unlock_irq(q->queue_lock); | ||
1502 | } | ||
1503 | |||
1504 | if (reqs > 0) { | ||
1505 | list_add(&req->queuelist, &mqrq->packed->list); | ||
1506 | mqrq->packed->nr_entries = ++reqs; | ||
1507 | mqrq->packed->retries = reqs; | ||
1508 | return reqs; | ||
1509 | } | ||
1510 | |||
1511 | no_packed: | ||
1512 | mqrq->cmd_type = MMC_PACKED_NONE; | ||
1513 | return 0; | ||
1514 | } | ||
1515 | |||
1516 | static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, | ||
1517 | struct mmc_card *card, | ||
1518 | struct mmc_queue *mq) | ||
1519 | { | ||
1520 | struct mmc_blk_request *brq = &mqrq->brq; | ||
1521 | struct request *req = mqrq->req; | ||
1522 | struct request *prq; | ||
1523 | struct mmc_blk_data *md = mq->data; | ||
1524 | struct mmc_packed *packed = mqrq->packed; | ||
1525 | bool do_rel_wr, do_data_tag; | ||
1526 | u32 *packed_cmd_hdr; | ||
1527 | u8 hdr_blocks; | ||
1528 | u8 i = 1; | ||
1529 | |||
1530 | BUG_ON(!packed); | ||
1531 | |||
1532 | mqrq->cmd_type = MMC_PACKED_WRITE; | ||
1533 | packed->blocks = 0; | ||
1534 | packed->idx_failure = MMC_PACKED_NR_IDX; | ||
1535 | |||
1536 | packed_cmd_hdr = packed->cmd_hdr; | ||
1537 | memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr)); | ||
1538 | packed_cmd_hdr[0] = (packed->nr_entries << 16) | | ||
1539 | (PACKED_CMD_WR << 8) | PACKED_CMD_VER; | ||
1540 | hdr_blocks = mmc_large_sector(card) ? 8 : 1; | ||
1541 | |||
1542 | /* | ||
1543 | * Argument for each entry of packed group | ||
1544 | */ | ||
1545 | list_for_each_entry(prq, &packed->list, queuelist) { | ||
1546 | do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR); | ||
1547 | do_data_tag = (card->ext_csd.data_tag_unit_size) && | ||
1548 | (prq->cmd_flags & REQ_META) && | ||
1549 | (rq_data_dir(prq) == WRITE) && | ||
1550 | ((brq->data.blocks * brq->data.blksz) >= | ||
1551 | card->ext_csd.data_tag_unit_size); | ||
1552 | /* Argument of CMD23 */ | ||
1553 | packed_cmd_hdr[(i * 2)] = | ||
1554 | (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) | | ||
1555 | (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) | | ||
1556 | blk_rq_sectors(prq); | ||
1557 | /* Argument of CMD18 or CMD25 */ | ||
1558 | packed_cmd_hdr[((i * 2)) + 1] = | ||
1559 | mmc_card_blockaddr(card) ? | ||
1560 | blk_rq_pos(prq) : blk_rq_pos(prq) << 9; | ||
1561 | packed->blocks += blk_rq_sectors(prq); | ||
1562 | i++; | ||
1563 | } | ||
1564 | |||
1565 | memset(brq, 0, sizeof(struct mmc_blk_request)); | ||
1566 | brq->mrq.cmd = &brq->cmd; | ||
1567 | brq->mrq.data = &brq->data; | ||
1568 | brq->mrq.sbc = &brq->sbc; | ||
1569 | brq->mrq.stop = &brq->stop; | ||
1570 | |||
1571 | brq->sbc.opcode = MMC_SET_BLOCK_COUNT; | ||
1572 | brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks); | ||
1573 | brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; | ||
1574 | |||
1575 | brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK; | ||
1576 | brq->cmd.arg = blk_rq_pos(req); | ||
1577 | if (!mmc_card_blockaddr(card)) | ||
1578 | brq->cmd.arg <<= 9; | ||
1579 | brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; | ||
1580 | |||
1581 | brq->data.blksz = 512; | ||
1582 | brq->data.blocks = packed->blocks + hdr_blocks; | ||
1583 | brq->data.flags |= MMC_DATA_WRITE; | ||
1584 | |||
1585 | brq->stop.opcode = MMC_STOP_TRANSMISSION; | ||
1586 | brq->stop.arg = 0; | ||
1587 | brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; | ||
1588 | |||
1589 | mmc_set_data_timeout(&brq->data, card); | ||
1590 | |||
1591 | brq->data.sg = mqrq->sg; | ||
1592 | brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); | ||
1593 | |||
1594 | mqrq->mmc_active.mrq = &brq->mrq; | ||
1595 | mqrq->mmc_active.err_check = mmc_blk_packed_err_check; | ||
1596 | |||
1597 | mmc_queue_bounce_pre(mqrq); | ||
1598 | } | ||
1599 | |||
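For readers following the header construction in mmc_blk_packed_hdr_wrq_prep() above, here is a stripped-down sketch of the same word layout for an imaginary pack of requests. PACKED_CMD_WR and PACKED_CMD_VER come from the defines added earlier in this file; the fake_req type is invented, and the reliable-write, data-tag and byte-addressing cases are deliberately left out.

/*
 * Simplified layout of the packed write header (one 512/4096-byte block
 * sent ahead of the data): word 0 carries the entry count, the packed
 * write opcode and the version; each request then contributes one CMD23
 * argument word and one CMD25 start-address word.
 */
struct fake_req {		/* illustrative stand-in for struct request */
	u32 sectors;		/* blk_rq_sectors(prq) */
	u32 addr;		/* blk_rq_pos(prq), block-addressed card assumed */
};

static void sketch_packed_hdr(u32 *hdr, const struct fake_req *reqs, u8 nr)
{
	u8 i;

	hdr[0] = (nr << 16) | (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
	for (i = 0; i < nr; i++) {
		/* REL_WR/TAG_REQ bits omitted for brevity */
		hdr[(i + 1) * 2] = reqs[i].sectors;	/* CMD23 argument */
		hdr[(i + 1) * 2 + 1] = reqs[i].addr;	/* CMD25 argument */
	}
}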
1311 | static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, | 1600 | static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, |
1312 | struct mmc_blk_request *brq, struct request *req, | 1601 | struct mmc_blk_request *brq, struct request *req, |
1313 | int ret) | 1602 | int ret) |
1314 | { | 1603 | { |
1604 | struct mmc_queue_req *mq_rq; | ||
1605 | mq_rq = container_of(brq, struct mmc_queue_req, brq); | ||
1606 | |||
1315 | /* | 1607 | /* |
1316 | * If this is an SD card and we're writing, we can first | 1608 | * If this is an SD card and we're writing, we can first |
1317 | * mark the known good sectors as ok. | 1609 | * mark the known good sectors as ok. |
@@ -1328,11 +1620,84 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, | |||
1328 | ret = blk_end_request(req, 0, blocks << 9); | 1620 | ret = blk_end_request(req, 0, blocks << 9); |
1329 | } | 1621 | } |
1330 | } else { | 1622 | } else { |
1331 | ret = blk_end_request(req, 0, brq->data.bytes_xfered); | 1623 | if (!mmc_packed_cmd(mq_rq->cmd_type)) |
1624 | ret = blk_end_request(req, 0, brq->data.bytes_xfered); | ||
1332 | } | 1625 | } |
1333 | return ret; | 1626 | return ret; |
1334 | } | 1627 | } |
1335 | 1628 | ||
1629 | static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq) | ||
1630 | { | ||
1631 | struct request *prq; | ||
1632 | struct mmc_packed *packed = mq_rq->packed; | ||
1633 | int idx = packed->idx_failure, i = 0; | ||
1634 | int ret = 0; | ||
1635 | |||
1636 | BUG_ON(!packed); | ||
1637 | |||
1638 | while (!list_empty(&packed->list)) { | ||
1639 | prq = list_entry_rq(packed->list.next); | ||
1640 | if (idx == i) { | ||
1641 | /* retry from error index */ | ||
1642 | packed->nr_entries -= idx; | ||
1643 | mq_rq->req = prq; | ||
1644 | ret = 1; | ||
1645 | |||
1646 | if (packed->nr_entries == MMC_PACKED_NR_SINGLE) { | ||
1647 | list_del_init(&prq->queuelist); | ||
1648 | mmc_blk_clear_packed(mq_rq); | ||
1649 | } | ||
1650 | return ret; | ||
1651 | } | ||
1652 | list_del_init(&prq->queuelist); | ||
1653 | blk_end_request(prq, 0, blk_rq_bytes(prq)); | ||
1654 | i++; | ||
1655 | } | ||
1656 | |||
1657 | mmc_blk_clear_packed(mq_rq); | ||
1658 | return ret; | ||
1659 | } | ||
1660 | |||
1661 | static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq) | ||
1662 | { | ||
1663 | struct request *prq; | ||
1664 | struct mmc_packed *packed = mq_rq->packed; | ||
1665 | |||
1666 | BUG_ON(!packed); | ||
1667 | |||
1668 | while (!list_empty(&packed->list)) { | ||
1669 | prq = list_entry_rq(packed->list.next); | ||
1670 | list_del_init(&prq->queuelist); | ||
1671 | blk_end_request(prq, -EIO, blk_rq_bytes(prq)); | ||
1672 | } | ||
1673 | |||
1674 | mmc_blk_clear_packed(mq_rq); | ||
1675 | } | ||
1676 | |||
1677 | static void mmc_blk_revert_packed_req(struct mmc_queue *mq, | ||
1678 | struct mmc_queue_req *mq_rq) | ||
1679 | { | ||
1680 | struct request *prq; | ||
1681 | struct request_queue *q = mq->queue; | ||
1682 | struct mmc_packed *packed = mq_rq->packed; | ||
1683 | |||
1684 | BUG_ON(!packed); | ||
1685 | |||
1686 | while (!list_empty(&packed->list)) { | ||
1687 | prq = list_entry_rq(packed->list.prev); | ||
1688 | if (prq->queuelist.prev != &packed->list) { | ||
1689 | list_del_init(&prq->queuelist); | ||
1690 | spin_lock_irq(q->queue_lock); | ||
1691 | blk_requeue_request(mq->queue, prq); | ||
1692 | spin_unlock_irq(q->queue_lock); | ||
1693 | } else { | ||
1694 | list_del_init(&prq->queuelist); | ||
1695 | } | ||
1696 | } | ||
1697 | |||
1698 | mmc_blk_clear_packed(mq_rq); | ||
1699 | } | ||
1700 | |||
1336 | static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | 1701 | static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) |
1337 | { | 1702 | { |
1338 | struct mmc_blk_data *md = mq->data; | 1703 | struct mmc_blk_data *md = mq->data; |
@@ -1343,10 +1708,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | |||
1343 | struct mmc_queue_req *mq_rq; | 1708 | struct mmc_queue_req *mq_rq; |
1344 | struct request *req = rqc; | 1709 | struct request *req = rqc; |
1345 | struct mmc_async_req *areq; | 1710 | struct mmc_async_req *areq; |
1711 | const u8 packed_nr = 2; | ||
1712 | u8 reqs = 0; | ||
1346 | 1713 | ||
1347 | if (!rqc && !mq->mqrq_prev->req) | 1714 | if (!rqc && !mq->mqrq_prev->req) |
1348 | return 0; | 1715 | return 0; |
1349 | 1716 | ||
1717 | if (rqc) | ||
1718 | reqs = mmc_blk_prep_packed_list(mq, rqc); | ||
1719 | |||
1350 | do { | 1720 | do { |
1351 | if (rqc) { | 1721 | if (rqc) { |
1352 | /* | 1722 | /* |
@@ -1357,15 +1727,24 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | |||
1357 | (card->ext_csd.data_sector_size == 4096)) { | 1727 | (card->ext_csd.data_sector_size == 4096)) { |
1358 | pr_err("%s: Transfer size is not 4KB sector size aligned\n", | 1728 | pr_err("%s: Transfer size is not 4KB sector size aligned\n", |
1359 | req->rq_disk->disk_name); | 1729 | req->rq_disk->disk_name); |
1730 | mq_rq = mq->mqrq_cur; | ||
1360 | goto cmd_abort; | 1731 | goto cmd_abort; |
1361 | } | 1732 | } |
1362 | mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); | 1733 | |
1734 | if (reqs >= packed_nr) | ||
1735 | mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur, | ||
1736 | card, mq); | ||
1737 | else | ||
1738 | mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); | ||
1363 | areq = &mq->mqrq_cur->mmc_active; | 1739 | areq = &mq->mqrq_cur->mmc_active; |
1364 | } else | 1740 | } else |
1365 | areq = NULL; | 1741 | areq = NULL; |
1366 | areq = mmc_start_req(card->host, areq, (int *) &status); | 1742 | areq = mmc_start_req(card->host, areq, (int *) &status); |
1367 | if (!areq) | 1743 | if (!areq) { |
1744 | if (status == MMC_BLK_NEW_REQUEST) | ||
1745 | mq->flags |= MMC_QUEUE_NEW_REQUEST; | ||
1368 | return 0; | 1746 | return 0; |
1747 | } | ||
1369 | 1748 | ||
1370 | mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); | 1749 | mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); |
1371 | brq = &mq_rq->brq; | 1750 | brq = &mq_rq->brq; |
@@ -1380,8 +1759,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | |||
1380 | * A block was successfully transferred. | 1759 | * A block was successfully transferred. |
1381 | */ | 1760 | */ |
1382 | mmc_blk_reset_success(md, type); | 1761 | mmc_blk_reset_success(md, type); |
1383 | ret = blk_end_request(req, 0, | 1762 | |
1763 | if (mmc_packed_cmd(mq_rq->cmd_type)) { | ||
1764 | ret = mmc_blk_end_packed_req(mq_rq); | ||
1765 | break; | ||
1766 | } else { | ||
1767 | ret = blk_end_request(req, 0, | ||
1384 | brq->data.bytes_xfered); | 1768 | brq->data.bytes_xfered); |
1769 | } | ||
1770 | |||
1385 | /* | 1771 | /* |
1386 | * If the blk_end_request function returns non-zero even | 1772 | * If the blk_end_request function returns non-zero even |
1387 | * though all data has been transferred and no errors | 1773 | * though all data has been transferred and no errors |
@@ -1414,7 +1800,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | |||
1414 | err = mmc_blk_reset(md, card->host, type); | 1800 | err = mmc_blk_reset(md, card->host, type); |
1415 | if (!err) | 1801 | if (!err) |
1416 | break; | 1802 | break; |
1417 | if (err == -ENODEV) | 1803 | if (err == -ENODEV || |
1804 | mmc_packed_cmd(mq_rq->cmd_type)) | ||
1418 | goto cmd_abort; | 1805 | goto cmd_abort; |
1419 | /* Fall through */ | 1806 | /* Fall through */ |
1420 | } | 1807 | } |
@@ -1438,30 +1825,62 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | |||
1438 | break; | 1825 | break; |
1439 | case MMC_BLK_NOMEDIUM: | 1826 | case MMC_BLK_NOMEDIUM: |
1440 | goto cmd_abort; | 1827 | goto cmd_abort; |
1828 | default: | ||
1829 | pr_err("%s: Unhandled return value (%d)", | ||
1830 | req->rq_disk->disk_name, status); | ||
1831 | goto cmd_abort; | ||
1441 | } | 1832 | } |
1442 | 1833 | ||
1443 | if (ret) { | 1834 | if (ret) { |
1444 | /* | 1835 | if (mmc_packed_cmd(mq_rq->cmd_type)) { |
1445 | * In case of a incomplete request | 1836 | if (!mq_rq->packed->retries) |
1446 | * prepare it again and resend. | 1837 | goto cmd_abort; |
1447 | */ | 1838 | mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq); |
1448 | mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq); | 1839 | mmc_start_req(card->host, |
1449 | mmc_start_req(card->host, &mq_rq->mmc_active, NULL); | 1840 | &mq_rq->mmc_active, NULL); |
1841 | } else { | ||
1842 | |||
1843 | /* | ||
1844 | * In case of a incomplete request | ||
1845 | * prepare it again and resend. | ||
1846 | */ | ||
1847 | mmc_blk_rw_rq_prep(mq_rq, card, | ||
1848 | disable_multi, mq); | ||
1849 | mmc_start_req(card->host, | ||
1850 | &mq_rq->mmc_active, NULL); | ||
1851 | } | ||
1450 | } | 1852 | } |
1451 | } while (ret); | 1853 | } while (ret); |
1452 | 1854 | ||
1453 | return 1; | 1855 | return 1; |
1454 | 1856 | ||
1455 | cmd_abort: | 1857 | cmd_abort: |
1456 | if (mmc_card_removed(card)) | 1858 | if (mmc_packed_cmd(mq_rq->cmd_type)) { |
1457 | req->cmd_flags |= REQ_QUIET; | 1859 | mmc_blk_abort_packed_req(mq_rq); |
1458 | while (ret) | 1860 | } else { |
1459 | ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); | 1861 | if (mmc_card_removed(card)) |
1862 | req->cmd_flags |= REQ_QUIET; | ||
1863 | while (ret) | ||
1864 | ret = blk_end_request(req, -EIO, | ||
1865 | blk_rq_cur_bytes(req)); | ||
1866 | } | ||
1460 | 1867 | ||
1461 | start_new_req: | 1868 | start_new_req: |
1462 | if (rqc) { | 1869 | if (rqc) { |
1463 | mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); | 1870 | if (mmc_card_removed(card)) { |
1464 | mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL); | 1871 | rqc->cmd_flags |= REQ_QUIET; |
1872 | blk_end_request_all(rqc, -EIO); | ||
1873 | } else { | ||
1874 | /* | ||
1875 | * If the current request is packed, it needs to be put back. | ||
1876 | */ | ||
1877 | if (mmc_packed_cmd(mq->mqrq_cur->cmd_type)) | ||
1878 | mmc_blk_revert_packed_req(mq, mq->mqrq_cur); | ||
1879 | |||
1880 | mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); | ||
1881 | mmc_start_req(card->host, | ||
1882 | &mq->mqrq_cur->mmc_active, NULL); | ||
1883 | } | ||
1465 | } | 1884 | } |
1466 | 1885 | ||
1467 | return 0; | 1886 | return 0; |
@@ -1472,6 +1891,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
1472 | int ret; | 1891 | int ret; |
1473 | struct mmc_blk_data *md = mq->data; | 1892 | struct mmc_blk_data *md = mq->data; |
1474 | struct mmc_card *card = md->queue.card; | 1893 | struct mmc_card *card = md->queue.card; |
1894 | struct mmc_host *host = card->host; | ||
1895 | unsigned long flags; | ||
1475 | 1896 | ||
1476 | if (req && !mq->mqrq_prev->req) | 1897 | if (req && !mq->mqrq_prev->req) |
1477 | /* claim host only for the first request */ | 1898 | /* claim host only for the first request */ |
@@ -1486,6 +1907,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
1486 | goto out; | 1907 | goto out; |
1487 | } | 1908 | } |
1488 | 1909 | ||
1910 | mq->flags &= ~MMC_QUEUE_NEW_REQUEST; | ||
1489 | if (req && req->cmd_flags & REQ_DISCARD) { | 1911 | if (req && req->cmd_flags & REQ_DISCARD) { |
1490 | /* complete ongoing async transfer before issuing discard */ | 1912 | /* complete ongoing async transfer before issuing discard */ |
1491 | if (card->host->areq) | 1913 | if (card->host->areq) |
@@ -1501,11 +1923,16 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
1501 | mmc_blk_issue_rw_rq(mq, NULL); | 1923 | mmc_blk_issue_rw_rq(mq, NULL); |
1502 | ret = mmc_blk_issue_flush(mq, req); | 1924 | ret = mmc_blk_issue_flush(mq, req); |
1503 | } else { | 1925 | } else { |
1926 | if (!req && host->areq) { | ||
1927 | spin_lock_irqsave(&host->context_info.lock, flags); | ||
1928 | host->context_info.is_waiting_last_req = true; | ||
1929 | spin_unlock_irqrestore(&host->context_info.lock, flags); | ||
1930 | } | ||
1504 | ret = mmc_blk_issue_rw_rq(mq, req); | 1931 | ret = mmc_blk_issue_rw_rq(mq, req); |
1505 | } | 1932 | } |
1506 | 1933 | ||
1507 | out: | 1934 | out: |
1508 | if (!req) | 1935 | if (!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) |
1509 | /* release host only when there are no more requests */ | 1936 | /* release host only when there are no more requests */ |
1510 | mmc_release_host(card->host); | 1937 | mmc_release_host(card->host); |
1511 | return ret; | 1938 | return ret; |
@@ -1624,6 +2051,14 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, | |||
1624 | blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA); | 2051 | blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA); |
1625 | } | 2052 | } |
1626 | 2053 | ||
2054 | if (mmc_card_mmc(card) && | ||
2055 | (area_type == MMC_BLK_DATA_AREA_MAIN) && | ||
2056 | (md->flags & MMC_BLK_CMD23) && | ||
2057 | card->ext_csd.packed_event_en) { | ||
2058 | if (!mmc_packed_init(&md->queue, card)) | ||
2059 | md->flags |= MMC_BLK_PACKED_CMD; | ||
2060 | } | ||
2061 | |||
1627 | return md; | 2062 | return md; |
1628 | 2063 | ||
1629 | err_putdisk: | 2064 | err_putdisk: |
@@ -1732,6 +2167,8 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md) | |||
1732 | 2167 | ||
1733 | /* Then flush out any already in there */ | 2168 | /* Then flush out any already in there */ |
1734 | mmc_cleanup_queue(&md->queue); | 2169 | mmc_cleanup_queue(&md->queue); |
2170 | if (md->flags & MMC_BLK_PACKED_CMD) | ||
2171 | mmc_packed_clean(&md->queue); | ||
1735 | mmc_blk_put(md); | 2172 | mmc_blk_put(md); |
1736 | } | 2173 | } |
1737 | } | 2174 | } |
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index fadf52eb5d70..fa4e44ee7961 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -22,7 +22,8 @@ | |||
22 | 22 | ||
23 | #define MMC_QUEUE_BOUNCESZ 65536 | 23 | #define MMC_QUEUE_BOUNCESZ 65536 |
24 | 24 | ||
25 | #define MMC_QUEUE_SUSPENDED (1 << 0) | 25 | |
26 | #define MMC_REQ_SPECIAL_MASK (REQ_DISCARD | REQ_FLUSH) | ||
26 | 27 | ||
27 | /* | 28 | /* |
28 | * Prepare a MMC request. This just filters out odd stuff. | 29 | * Prepare a MMC request. This just filters out odd stuff. |
@@ -58,6 +59,7 @@ static int mmc_queue_thread(void *d) | |||
58 | do { | 59 | do { |
59 | struct request *req = NULL; | 60 | struct request *req = NULL; |
60 | struct mmc_queue_req *tmp; | 61 | struct mmc_queue_req *tmp; |
62 | unsigned int cmd_flags = 0; | ||
61 | 63 | ||
62 | spin_lock_irq(q->queue_lock); | 64 | spin_lock_irq(q->queue_lock); |
63 | set_current_state(TASK_INTERRUPTIBLE); | 65 | set_current_state(TASK_INTERRUPTIBLE); |
@@ -67,12 +69,23 @@ static int mmc_queue_thread(void *d) | |||
67 | 69 | ||
68 | if (req || mq->mqrq_prev->req) { | 70 | if (req || mq->mqrq_prev->req) { |
69 | set_current_state(TASK_RUNNING); | 71 | set_current_state(TASK_RUNNING); |
72 | cmd_flags = req ? req->cmd_flags : 0; | ||
70 | mq->issue_fn(mq, req); | 73 | mq->issue_fn(mq, req); |
74 | if (mq->flags & MMC_QUEUE_NEW_REQUEST) { | ||
75 | mq->flags &= ~MMC_QUEUE_NEW_REQUEST; | ||
76 | continue; /* fetch again */ | ||
77 | } | ||
71 | 78 | ||
72 | /* | 79 | /* |
73 | * Current request becomes previous request | 80 | * Current request becomes previous request |
74 | * and vice versa. | 81 | * and vice versa. |
82 | * In case of special requests, current request | ||
83 | * has been finished. Do not assign it to previous | ||
84 | * request. | ||
75 | */ | 85 | */ |
86 | if (cmd_flags & MMC_REQ_SPECIAL_MASK) | ||
87 | mq->mqrq_cur->req = NULL; | ||
88 | |||
76 | mq->mqrq_prev->brq.mrq.data = NULL; | 89 | mq->mqrq_prev->brq.mrq.data = NULL; |
77 | mq->mqrq_prev->req = NULL; | 90 | mq->mqrq_prev->req = NULL; |
78 | tmp = mq->mqrq_prev; | 91 | tmp = mq->mqrq_prev; |
@@ -103,6 +116,8 @@ static void mmc_request_fn(struct request_queue *q) | |||
103 | { | 116 | { |
104 | struct mmc_queue *mq = q->queuedata; | 117 | struct mmc_queue *mq = q->queuedata; |
105 | struct request *req; | 118 | struct request *req; |
119 | unsigned long flags; | ||
120 | struct mmc_context_info *cntx; | ||
106 | 121 | ||
107 | if (!mq) { | 122 | if (!mq) { |
108 | while ((req = blk_fetch_request(q)) != NULL) { | 123 | while ((req = blk_fetch_request(q)) != NULL) { |
@@ -112,7 +127,20 @@ static void mmc_request_fn(struct request_queue *q) | |||
112 | return; | 127 | return; |
113 | } | 128 | } |
114 | 129 | ||
115 | if (!mq->mqrq_cur->req && !mq->mqrq_prev->req) | 130 | cntx = &mq->card->host->context_info; |
131 | if (!mq->mqrq_cur->req && mq->mqrq_prev->req) { | ||
132 | /* | ||
133 | * New MMC request arrived when MMC thread may be | ||
134 | * blocked on the previous request to be complete | ||
135 | * with no current request fetched | ||
136 | */ | ||
137 | spin_lock_irqsave(&cntx->lock, flags); | ||
138 | if (cntx->is_waiting_last_req) { | ||
139 | cntx->is_new_req = true; | ||
140 | wake_up_interruptible(&cntx->wait); | ||
141 | } | ||
142 | spin_unlock_irqrestore(&cntx->lock, flags); | ||
143 | } else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req) | ||
116 | wake_up_process(mq->thread); | 144 | wake_up_process(mq->thread); |
117 | } | 145 | } |
118 | 146 | ||
@@ -334,6 +362,49 @@ void mmc_cleanup_queue(struct mmc_queue *mq) | |||
334 | } | 362 | } |
335 | EXPORT_SYMBOL(mmc_cleanup_queue); | 363 | EXPORT_SYMBOL(mmc_cleanup_queue); |
336 | 364 | ||
365 | int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card) | ||
366 | { | ||
367 | struct mmc_queue_req *mqrq_cur = &mq->mqrq[0]; | ||
368 | struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; | ||
369 | int ret = 0; | ||
370 | |||
371 | |||
372 | mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL); | ||
373 | if (!mqrq_cur->packed) { | ||
374 | pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n", | ||
375 | mmc_card_name(card)); | ||
376 | ret = -ENOMEM; | ||
377 | goto out; | ||
378 | } | ||
379 | |||
380 | mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL); | ||
381 | if (!mqrq_prev->packed) { | ||
382 | pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n", | ||
383 | mmc_card_name(card)); | ||
384 | kfree(mqrq_cur->packed); | ||
385 | mqrq_cur->packed = NULL; | ||
386 | ret = -ENOMEM; | ||
387 | goto out; | ||
388 | } | ||
389 | |||
390 | INIT_LIST_HEAD(&mqrq_cur->packed->list); | ||
391 | INIT_LIST_HEAD(&mqrq_prev->packed->list); | ||
392 | |||
393 | out: | ||
394 | return ret; | ||
395 | } | ||
396 | |||
397 | void mmc_packed_clean(struct mmc_queue *mq) | ||
398 | { | ||
399 | struct mmc_queue_req *mqrq_cur = &mq->mqrq[0]; | ||
400 | struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; | ||
401 | |||
402 | kfree(mqrq_cur->packed); | ||
403 | mqrq_cur->packed = NULL; | ||
404 | kfree(mqrq_prev->packed); | ||
405 | mqrq_prev->packed = NULL; | ||
406 | } | ||
407 | |||
337 | /** | 408 | /** |
338 | * mmc_queue_suspend - suspend a MMC request queue | 409 | * mmc_queue_suspend - suspend a MMC request queue |
339 | * @mq: MMC queue to suspend | 410 | * @mq: MMC queue to suspend |
@@ -378,6 +449,41 @@ void mmc_queue_resume(struct mmc_queue *mq) | |||
378 | } | 449 | } |
379 | } | 450 | } |
380 | 451 | ||
452 | static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq, | ||
453 | struct mmc_packed *packed, | ||
454 | struct scatterlist *sg, | ||
455 | enum mmc_packed_type cmd_type) | ||
456 | { | ||
457 | struct scatterlist *__sg = sg; | ||
458 | unsigned int sg_len = 0; | ||
459 | struct request *req; | ||
460 | |||
461 | if (mmc_packed_wr(cmd_type)) { | ||
462 | unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512; | ||
463 | unsigned int max_seg_sz = queue_max_segment_size(mq->queue); | ||
464 | unsigned int len, remain, offset = 0; | ||
465 | u8 *buf = (u8 *)packed->cmd_hdr; | ||
466 | |||
467 | remain = hdr_sz; | ||
468 | do { | ||
469 | len = min(remain, max_seg_sz); | ||
470 | sg_set_buf(__sg, buf + offset, len); | ||
471 | offset += len; | ||
472 | remain -= len; | ||
473 | (__sg++)->page_link &= ~0x02; | ||
474 | sg_len++; | ||
475 | } while (remain); | ||
476 | } | ||
477 | |||
478 | list_for_each_entry(req, &packed->list, queuelist) { | ||
479 | sg_len += blk_rq_map_sg(mq->queue, req, __sg); | ||
480 | __sg = sg + (sg_len - 1); | ||
481 | (__sg++)->page_link &= ~0x02; | ||
482 | } | ||
483 | sg_mark_end(sg + (sg_len - 1)); | ||
484 | return sg_len; | ||
485 | } | ||
486 | |||
381 | /* | 487 | /* |
382 | * Prepare the sg list(s) to be handed of to the host driver | 488 | * Prepare the sg list(s) to be handed of to the host driver |
383 | */ | 489 | */ |
@@ -386,14 +492,26 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq) | |||
386 | unsigned int sg_len; | 492 | unsigned int sg_len; |
387 | size_t buflen; | 493 | size_t buflen; |
388 | struct scatterlist *sg; | 494 | struct scatterlist *sg; |
495 | enum mmc_packed_type cmd_type; | ||
389 | int i; | 496 | int i; |
390 | 497 | ||
391 | if (!mqrq->bounce_buf) | 498 | cmd_type = mqrq->cmd_type; |
392 | return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg); | 499 | |
500 | if (!mqrq->bounce_buf) { | ||
501 | if (mmc_packed_cmd(cmd_type)) | ||
502 | return mmc_queue_packed_map_sg(mq, mqrq->packed, | ||
503 | mqrq->sg, cmd_type); | ||
504 | else | ||
505 | return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg); | ||
506 | } | ||
393 | 507 | ||
394 | BUG_ON(!mqrq->bounce_sg); | 508 | BUG_ON(!mqrq->bounce_sg); |
395 | 509 | ||
396 | sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg); | 510 | if (mmc_packed_cmd(cmd_type)) |
511 | sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed, | ||
512 | mqrq->bounce_sg, cmd_type); | ||
513 | else | ||
514 | sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg); | ||
397 | 515 | ||
398 | mqrq->bounce_sg_len = sg_len; | 516 | mqrq->bounce_sg_len = sg_len; |
399 | 517 | ||
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index d2a1eb4b9f9f..031bf6376c99 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -12,6 +12,23 @@ struct mmc_blk_request { | |||
12 | struct mmc_data data; | 12 | struct mmc_data data; |
13 | }; | 13 | }; |
14 | 14 | ||
15 | enum mmc_packed_type { | ||
16 | MMC_PACKED_NONE = 0, | ||
17 | MMC_PACKED_WRITE, | ||
18 | }; | ||
19 | |||
20 | #define mmc_packed_cmd(type) ((type) != MMC_PACKED_NONE) | ||
21 | #define mmc_packed_wr(type) ((type) == MMC_PACKED_WRITE) | ||
22 | |||
23 | struct mmc_packed { | ||
24 | struct list_head list; | ||
25 | u32 cmd_hdr[1024]; | ||
26 | unsigned int blocks; | ||
27 | u8 nr_entries; | ||
28 | u8 retries; | ||
29 | s16 idx_failure; | ||
30 | }; | ||
31 | |||
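A note on the cmd_hdr sizing above: 1024 u32 entries make a 4 KiB buffer, which matches the larger of the two packed-header sizes used by the block driver (512 bytes for conventional cards, 4096 bytes for large-sector cards), so the same allocation covers both cases.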
15 | struct mmc_queue_req { | 32 | struct mmc_queue_req { |
16 | struct request *req; | 33 | struct request *req; |
17 | struct mmc_blk_request brq; | 34 | struct mmc_blk_request brq; |
@@ -20,6 +37,8 @@ struct mmc_queue_req { | |||
20 | struct scatterlist *bounce_sg; | 37 | struct scatterlist *bounce_sg; |
21 | unsigned int bounce_sg_len; | 38 | unsigned int bounce_sg_len; |
22 | struct mmc_async_req mmc_active; | 39 | struct mmc_async_req mmc_active; |
40 | enum mmc_packed_type cmd_type; | ||
41 | struct mmc_packed *packed; | ||
23 | }; | 42 | }; |
24 | 43 | ||
25 | struct mmc_queue { | 44 | struct mmc_queue { |
@@ -27,6 +46,9 @@ struct mmc_queue { | |||
27 | struct task_struct *thread; | 46 | struct task_struct *thread; |
28 | struct semaphore thread_sem; | 47 | struct semaphore thread_sem; |
29 | unsigned int flags; | 48 | unsigned int flags; |
49 | #define MMC_QUEUE_SUSPENDED (1 << 0) | ||
50 | #define MMC_QUEUE_NEW_REQUEST (1 << 1) | ||
51 | |||
30 | int (*issue_fn)(struct mmc_queue *, struct request *); | 52 | int (*issue_fn)(struct mmc_queue *, struct request *); |
31 | void *data; | 53 | void *data; |
32 | struct request_queue *queue; | 54 | struct request_queue *queue; |
@@ -46,4 +68,7 @@ extern unsigned int mmc_queue_map_sg(struct mmc_queue *, | |||
46 | extern void mmc_queue_bounce_pre(struct mmc_queue_req *); | 68 | extern void mmc_queue_bounce_pre(struct mmc_queue_req *); |
47 | extern void mmc_queue_bounce_post(struct mmc_queue_req *); | 69 | extern void mmc_queue_bounce_post(struct mmc_queue_req *); |
48 | 70 | ||
71 | extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *); | ||
72 | extern void mmc_packed_clean(struct mmc_queue *); | ||
73 | |||
49 | #endif | 74 | #endif |
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 420cb6753c1e..e219c97a02a4 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -321,6 +321,7 @@ int mmc_add_card(struct mmc_card *card) | |||
321 | #ifdef CONFIG_DEBUG_FS | 321 | #ifdef CONFIG_DEBUG_FS |
322 | mmc_add_card_debugfs(card); | 322 | mmc_add_card_debugfs(card); |
323 | #endif | 323 | #endif |
324 | mmc_init_context_info(card->host); | ||
324 | 325 | ||
325 | ret = device_add(&card->dev); | 326 | ret = device_add(&card->dev); |
326 | if (ret) | 327 | if (ret) |
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index aaed7687cf09..08a3cf2a7610 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -319,11 +319,45 @@ out: | |||
319 | } | 319 | } |
320 | EXPORT_SYMBOL(mmc_start_bkops); | 320 | EXPORT_SYMBOL(mmc_start_bkops); |
321 | 321 | ||
322 | /* | ||
323 | * mmc_wait_data_done() - done callback for data request | ||
324 | * @mrq: done data request | ||
325 | * | ||
326 | * Wakes up mmc context, passed as a callback to host controller driver | ||
327 | */ | ||
328 | static void mmc_wait_data_done(struct mmc_request *mrq) | ||
329 | { | ||
330 | mrq->host->context_info.is_done_rcv = true; | ||
331 | wake_up_interruptible(&mrq->host->context_info.wait); | ||
332 | } | ||
333 | |||
322 | static void mmc_wait_done(struct mmc_request *mrq) | 334 | static void mmc_wait_done(struct mmc_request *mrq) |
323 | { | 335 | { |
324 | complete(&mrq->completion); | 336 | complete(&mrq->completion); |
325 | } | 337 | } |
326 | 338 | ||
339 | /* | ||
340 | *__mmc_start_data_req() - starts data request | ||
341 | * @host: MMC host to start the request | ||
342 | * @mrq: data request to start | ||
343 | * | ||
344 | * Sets the done callback to be called when request is completed by the card. | ||
345 | * Starts data mmc request execution | ||
346 | */ | ||
347 | static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq) | ||
348 | { | ||
349 | mrq->done = mmc_wait_data_done; | ||
350 | mrq->host = host; | ||
351 | if (mmc_card_removed(host->card)) { | ||
352 | mrq->cmd->error = -ENOMEDIUM; | ||
353 | mmc_wait_data_done(mrq); | ||
354 | return -ENOMEDIUM; | ||
355 | } | ||
356 | mmc_start_request(host, mrq); | ||
357 | |||
358 | return 0; | ||
359 | } | ||
360 | |||
327 | static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq) | 361 | static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq) |
328 | { | 362 | { |
329 | init_completion(&mrq->completion); | 363 | init_completion(&mrq->completion); |
@@ -337,6 +371,62 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq) | |||
337 | return 0; | 371 | return 0; |
338 | } | 372 | } |
339 | 373 | ||
374 | /* | ||
375 | * mmc_wait_for_data_req_done() - wait for request completed | ||
376 | * @host: MMC host to prepare the command. | ||
377 | * @mrq: MMC request to wait for | ||
378 | * | ||
379 | * Blocks the MMC context until the host controller acks the end of the data | ||
380 | * request execution or a new-request notification arrives from the block layer. | ||
381 | * Handles command retries. | ||
382 | * | ||
383 | * Returns enum mmc_blk_status after checking errors. | ||
384 | */ | ||
385 | static int mmc_wait_for_data_req_done(struct mmc_host *host, | ||
386 | struct mmc_request *mrq, | ||
387 | struct mmc_async_req *next_req) | ||
388 | { | ||
389 | struct mmc_command *cmd; | ||
390 | struct mmc_context_info *context_info = &host->context_info; | ||
391 | int err; | ||
392 | unsigned long flags; | ||
393 | |||
394 | while (1) { | ||
395 | wait_event_interruptible(context_info->wait, | ||
396 | (context_info->is_done_rcv || | ||
397 | context_info->is_new_req)); | ||
398 | spin_lock_irqsave(&context_info->lock, flags); | ||
399 | context_info->is_waiting_last_req = false; | ||
400 | spin_unlock_irqrestore(&context_info->lock, flags); | ||
401 | if (context_info->is_done_rcv) { | ||
402 | context_info->is_done_rcv = false; | ||
403 | context_info->is_new_req = false; | ||
404 | cmd = mrq->cmd; | ||
405 | if (!cmd->error || !cmd->retries || | ||
406 | mmc_card_removed(host->card)) { | ||
407 | err = host->areq->err_check(host->card, | ||
408 | host->areq); | ||
409 | break; /* return err */ | ||
410 | } else { | ||
411 | pr_info("%s: req failed (CMD%u): %d, retrying...\n", | ||
412 | mmc_hostname(host), | ||
413 | cmd->opcode, cmd->error); | ||
414 | cmd->retries--; | ||
415 | cmd->error = 0; | ||
416 | host->ops->request(host, mrq); | ||
417 | continue; /* wait for done/new event again */ | ||
418 | } | ||
419 | } else if (context_info->is_new_req) { | ||
420 | context_info->is_new_req = false; | ||
421 | if (!next_req) { | ||
422 | err = MMC_BLK_NEW_REQUEST; | ||
423 | break; /* return err */ | ||
424 | } | ||
425 | } | ||
426 | } | ||
427 | return err; | ||
428 | } | ||
429 | |||
340 | static void mmc_wait_for_req_done(struct mmc_host *host, | 430 | static void mmc_wait_for_req_done(struct mmc_host *host, |
341 | struct mmc_request *mrq) | 431 | struct mmc_request *mrq) |
342 | { | 432 | { |
@@ -426,8 +516,16 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host, | |||
426 | mmc_pre_req(host, areq->mrq, !host->areq); | 516 | mmc_pre_req(host, areq->mrq, !host->areq); |
427 | 517 | ||
428 | if (host->areq) { | 518 | if (host->areq) { |
429 | mmc_wait_for_req_done(host, host->areq->mrq); | 519 | err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq); |
430 | err = host->areq->err_check(host->card, host->areq); | 520 | if (err == MMC_BLK_NEW_REQUEST) { |
521 | if (error) | ||
522 | *error = err; | ||
523 | /* | ||
524 | * The previous request was not completed, | ||
525 | * nothing to return | ||
526 | */ | ||
527 | return NULL; | ||
528 | } | ||
431 | /* | 529 | /* |
432 | * Check BKOPS urgency for each R1 response | 530 | * Check BKOPS urgency for each R1 response |
433 | */ | 531 | */ |
@@ -439,14 +537,14 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host, | |||
439 | } | 537 | } |
440 | 538 | ||
441 | if (!err && areq) | 539 | if (!err && areq) |
442 | start_err = __mmc_start_req(host, areq->mrq); | 540 | start_err = __mmc_start_data_req(host, areq->mrq); |
443 | 541 | ||
444 | if (host->areq) | 542 | if (host->areq) |
445 | mmc_post_req(host, host->areq->mrq, 0); | 543 | mmc_post_req(host, host->areq->mrq, 0); |
446 | 544 | ||
447 | /* Cancel a prepared request if it was not started. */ | 545 | /* Cancel a prepared request if it was not started. */ |
448 | if ((err || start_err) && areq) | 546 | if ((err || start_err) && areq) |
449 | mmc_post_req(host, areq->mrq, -EINVAL); | 547 | mmc_post_req(host, areq->mrq, -EINVAL); |
450 | 548 | ||
451 | if (err) | 549 | if (err) |
452 | host->areq = NULL; | 550 | host->areq = NULL; |
@@ -1137,7 +1235,7 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc, | |||
1137 | */ | 1235 | */ |
1138 | voltage = regulator_get_voltage(supply); | 1236 | voltage = regulator_get_voltage(supply); |
1139 | 1237 | ||
1140 | if (regulator_count_voltages(supply) == 1) | 1238 | if (!regulator_can_change_voltage(supply)) |
1141 | min_uV = max_uV = voltage; | 1239 | min_uV = max_uV = voltage; |
1142 | 1240 | ||
1143 | if (voltage < 0) | 1241 | if (voltage < 0) |
@@ -1219,10 +1317,30 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) | |||
1219 | return ocr; | 1317 | return ocr; |
1220 | } | 1318 | } |
1221 | 1319 | ||
1222 | int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11) | 1320 | int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage) |
1321 | { | ||
1322 | int err = 0; | ||
1323 | int old_signal_voltage = host->ios.signal_voltage; | ||
1324 | |||
1325 | host->ios.signal_voltage = signal_voltage; | ||
1326 | if (host->ops->start_signal_voltage_switch) { | ||
1327 | mmc_host_clk_hold(host); | ||
1328 | err = host->ops->start_signal_voltage_switch(host, &host->ios); | ||
1329 | mmc_host_clk_release(host); | ||
1330 | } | ||
1331 | |||
1332 | if (err) | ||
1333 | host->ios.signal_voltage = old_signal_voltage; | ||
1334 | |||
1335 | return err; | ||
1336 | |||
1337 | } | ||
1338 | |||
1339 | int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage) | ||
1223 | { | 1340 | { |
1224 | struct mmc_command cmd = {0}; | 1341 | struct mmc_command cmd = {0}; |
1225 | int err = 0; | 1342 | int err = 0; |
1343 | u32 clock; | ||
1226 | 1344 | ||
1227 | BUG_ON(!host); | 1345 | BUG_ON(!host); |
1228 | 1346 | ||
@@ -1230,27 +1348,81 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11 | |||
1230 | * Send CMD11 only if the request is to switch the card to | 1348 | * Send CMD11 only if the request is to switch the card to |
1231 | * 1.8V signalling. | 1349 | * 1.8V signalling. |
1232 | */ | 1350 | */ |
1233 | if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) { | 1351 | if (signal_voltage == MMC_SIGNAL_VOLTAGE_330) |
1234 | cmd.opcode = SD_SWITCH_VOLTAGE; | 1352 | return __mmc_set_signal_voltage(host, signal_voltage); |
1235 | cmd.arg = 0; | ||
1236 | cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; | ||
1237 | 1353 | ||
1238 | err = mmc_wait_for_cmd(host, &cmd, 0); | 1354 | /* |
1239 | if (err) | 1355 | * If we cannot switch voltages, return failure so the caller |
1240 | return err; | 1356 | * can continue without UHS mode |
1357 | */ | ||
1358 | if (!host->ops->start_signal_voltage_switch) | ||
1359 | return -EPERM; | ||
1360 | if (!host->ops->card_busy) | ||
1361 | pr_warning("%s: cannot verify signal voltage switch\n", | ||
1362 | mmc_hostname(host)); | ||
1363 | |||
1364 | cmd.opcode = SD_SWITCH_VOLTAGE; | ||
1365 | cmd.arg = 0; | ||
1366 | cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; | ||
1241 | 1367 | ||
1242 | if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) | 1368 | err = mmc_wait_for_cmd(host, &cmd, 0); |
1243 | return -EIO; | 1369 | if (err) |
1370 | return err; | ||
1371 | |||
1372 | if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) | ||
1373 | return -EIO; | ||
1374 | |||
1375 | mmc_host_clk_hold(host); | ||
1376 | /* | ||
1377 | * The card should drive cmd and dat[0:3] low immediately | ||
1378 | * after the response of cmd11, but wait 1 ms to be sure | ||
1379 | */ | ||
1380 | mmc_delay(1); | ||
1381 | if (host->ops->card_busy && !host->ops->card_busy(host)) { | ||
1382 | err = -EAGAIN; | ||
1383 | goto power_cycle; | ||
1244 | } | 1384 | } |
1385 | /* | ||
1386 | * During a signal voltage level switch, the clock must be gated | ||
1387 | * for 5 ms according to the SD spec | ||
1388 | */ | ||
1389 | clock = host->ios.clock; | ||
1390 | host->ios.clock = 0; | ||
1391 | mmc_set_ios(host); | ||
1245 | 1392 | ||
1246 | host->ios.signal_voltage = signal_voltage; | 1393 | if (__mmc_set_signal_voltage(host, signal_voltage)) { |
1394 | /* | ||
1395 | * Voltages may not have been switched, but we've already | ||
1396 | * sent CMD11, so a power cycle is required anyway | ||
1397 | */ | ||
1398 | err = -EAGAIN; | ||
1399 | goto power_cycle; | ||
1400 | } | ||
1247 | 1401 | ||
1248 | if (host->ops->start_signal_voltage_switch) { | 1402 | /* Keep clock gated for at least 5 ms */ |
1249 | mmc_host_clk_hold(host); | 1403 | mmc_delay(5); |
1250 | err = host->ops->start_signal_voltage_switch(host, &host->ios); | 1404 | host->ios.clock = clock; |
1251 | mmc_host_clk_release(host); | 1405 | mmc_set_ios(host); |
1406 | |||
1407 | /* Wait for at least 1 ms according to spec */ | ||
1408 | mmc_delay(1); | ||
1409 | |||
1410 | /* | ||
1411 | * Failure to switch is indicated by the card holding | ||
1412 | * dat[0:3] low | ||
1413 | */ | ||
1414 | if (host->ops->card_busy && host->ops->card_busy(host)) | ||
1415 | err = -EAGAIN; | ||
1416 | |||
1417 | power_cycle: | ||
1418 | if (err) { | ||
1419 | pr_debug("%s: Signal voltage switch failed, " | ||
1420 | "power cycling card\n", mmc_hostname(host)); | ||
1421 | mmc_power_cycle(host); | ||
1252 | } | 1422 | } |
1253 | 1423 | ||
1424 | mmc_host_clk_release(host); | ||
1425 | |||
1254 | return err; | 1426 | return err; |
1255 | } | 1427 | } |
1256 | 1428 | ||
@@ -1314,7 +1486,7 @@ static void mmc_power_up(struct mmc_host *host) | |||
1314 | mmc_set_ios(host); | 1486 | mmc_set_ios(host); |
1315 | 1487 | ||
1316 | /* Set signal voltage to 3.3V */ | 1488 | /* Set signal voltage to 3.3V */ |
1317 | mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false); | 1489 | __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330); |
1318 | 1490 | ||
1319 | /* | 1491 | /* |
1320 | * This delay should be sufficient to allow the power supply | 1492 | * This delay should be sufficient to allow the power supply |
@@ -1372,6 +1544,14 @@ void mmc_power_off(struct mmc_host *host) | |||
1372 | mmc_host_clk_release(host); | 1544 | mmc_host_clk_release(host); |
1373 | } | 1545 | } |
1374 | 1546 | ||
1547 | void mmc_power_cycle(struct mmc_host *host) | ||
1548 | { | ||
1549 | mmc_power_off(host); | ||
1550 | /* Wait at least 1 ms according to SD spec */ | ||
1551 | mmc_delay(1); | ||
1552 | mmc_power_up(host); | ||
1553 | } | ||
1554 | |||
1375 | /* | 1555 | /* |
1376 | * Cleanup when the last reference to the bus operator is dropped. | 1556 | * Cleanup when the last reference to the bus operator is dropped. |
1377 | */ | 1557 | */ |
@@ -2388,6 +2568,7 @@ EXPORT_SYMBOL(mmc_flush_cache); | |||
2388 | * Turn the cache ON/OFF. | 2568 | * Turn the cache ON/OFF. |
2389 | * Turning the cache OFF shall trigger flushing of the data | 2569 | * Turning the cache OFF shall trigger flushing of the data |
2390 | * to the non-volatile storage. | 2570 | * to the non-volatile storage. |
2571 | * This function should be called with host claimed | ||
2391 | */ | 2572 | */ |
2392 | int mmc_cache_ctrl(struct mmc_host *host, u8 enable) | 2573 | int mmc_cache_ctrl(struct mmc_host *host, u8 enable) |
2393 | { | 2574 | { |
@@ -2399,7 +2580,6 @@ int mmc_cache_ctrl(struct mmc_host *host, u8 enable) | |||
2399 | mmc_card_is_removable(host)) | 2580 | mmc_card_is_removable(host)) |
2400 | return err; | 2581 | return err; |
2401 | 2582 | ||
2402 | mmc_claim_host(host); | ||
2403 | if (card && mmc_card_mmc(card) && | 2583 | if (card && mmc_card_mmc(card) && |
2404 | (card->ext_csd.cache_size > 0)) { | 2584 | (card->ext_csd.cache_size > 0)) { |
2405 | enable = !!enable; | 2585 | enable = !!enable; |
@@ -2417,7 +2597,6 @@ int mmc_cache_ctrl(struct mmc_host *host, u8 enable) | |||
2417 | card->ext_csd.cache_ctrl = enable; | 2597 | card->ext_csd.cache_ctrl = enable; |
2418 | } | 2598 | } |
2419 | } | 2599 | } |
2420 | mmc_release_host(host); | ||
2421 | 2600 | ||
2422 | return err; | 2601 | return err; |
2423 | } | 2602 | } |
@@ -2436,10 +2615,6 @@ int mmc_suspend_host(struct mmc_host *host) | |||
2436 | cancel_delayed_work(&host->detect); | 2615 | cancel_delayed_work(&host->detect); |
2437 | mmc_flush_scheduled_work(); | 2616 | mmc_flush_scheduled_work(); |
2438 | 2617 | ||
2439 | err = mmc_cache_ctrl(host, 0); | ||
2440 | if (err) | ||
2441 | goto out; | ||
2442 | |||
2443 | mmc_bus_get(host); | 2618 | mmc_bus_get(host); |
2444 | if (host->bus_ops && !host->bus_dead) { | 2619 | if (host->bus_ops && !host->bus_dead) { |
2445 | if (host->bus_ops->suspend) { | 2620 | if (host->bus_ops->suspend) { |
@@ -2581,6 +2756,23 @@ int mmc_pm_notify(struct notifier_block *notify_block, | |||
2581 | } | 2756 | } |
2582 | #endif | 2757 | #endif |
2583 | 2758 | ||
2759 | /** | ||
2760 | * mmc_init_context_info() - init synchronization context | ||
2761 | * @host: mmc host | ||
2762 | * | ||
2763 | * Init struct context_info needed to implement asynchronous | ||
2764 | * request mechanism, used by mmc core, host driver and mmc requests | ||
2765 | * supplier. | ||
2766 | */ | ||
2767 | void mmc_init_context_info(struct mmc_host *host) | ||
2768 | { | ||
2769 | spin_lock_init(&host->context_info.lock); | ||
2770 | host->context_info.is_new_req = false; | ||
2771 | host->context_info.is_done_rcv = false; | ||
2772 | host->context_info.is_waiting_last_req = false; | ||
2773 | init_waitqueue_head(&host->context_info.wait); | ||
2774 | } | ||
2775 | |||
2584 | static int __init mmc_init(void) | 2776 | static int __init mmc_init(void) |
2585 | { | 2777 | { |
2586 | int ret; | 2778 | int ret; |
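For orientation, here is a minimal hypothetical sketch of how the new context_info might be used by a waiter and a completion path; it is not taken from this series, and only reuses the field names from the initializer above together with the standard wait_event()/wake_up() primitives.

#include <linux/mmc/host.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Hypothetical waiter: block until the host driver reports the current
 * request as done, or a new request shows up. */
static void example_wait_for_req(struct mmc_host *host)
{
	wait_event(host->context_info.wait,
		   host->context_info.is_done_rcv ||
		   host->context_info.is_new_req);
}

/* Hypothetical completion path: flag the request as done under the
 * context lock, then wake the waiter. */
static void example_req_done(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->context_info.lock, flags);
	host->context_info.is_done_rcv = true;
	spin_unlock_irqrestore(&host->context_info.lock, flags);
	wake_up(&host->context_info.wait);
}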
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 3bdafbca354f..b9f18a2a8874 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h | |||
@@ -40,11 +40,12 @@ void mmc_set_ungated(struct mmc_host *host); | |||
40 | void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode); | 40 | void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode); |
41 | void mmc_set_bus_width(struct mmc_host *host, unsigned int width); | 41 | void mmc_set_bus_width(struct mmc_host *host, unsigned int width); |
42 | u32 mmc_select_voltage(struct mmc_host *host, u32 ocr); | 42 | u32 mmc_select_voltage(struct mmc_host *host, u32 ocr); |
43 | int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, | 43 | int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage); |
44 | bool cmd11); | 44 | int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage); |
45 | void mmc_set_timing(struct mmc_host *host, unsigned int timing); | 45 | void mmc_set_timing(struct mmc_host *host, unsigned int timing); |
46 | void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type); | 46 | void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type); |
47 | void mmc_power_off(struct mmc_host *host); | 47 | void mmc_power_off(struct mmc_host *host); |
48 | void mmc_power_cycle(struct mmc_host *host); | ||
48 | 49 | ||
49 | static inline void mmc_delay(unsigned int ms) | 50 | static inline void mmc_delay(unsigned int ms) |
50 | { | 51 | { |
@@ -76,5 +77,6 @@ void mmc_remove_host_debugfs(struct mmc_host *host); | |||
76 | void mmc_add_card_debugfs(struct mmc_card *card); | 77 | void mmc_add_card_debugfs(struct mmc_card *card); |
77 | void mmc_remove_card_debugfs(struct mmc_card *card); | 78 | void mmc_remove_card_debugfs(struct mmc_card *card); |
78 | 79 | ||
80 | void mmc_init_context_info(struct mmc_host *host); | ||
79 | #endif | 81 | #endif |
80 | 82 | ||
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index ee2e16b17017..821cd8224137 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c | |||
@@ -15,6 +15,8 @@ | |||
15 | #include <linux/device.h> | 15 | #include <linux/device.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/idr.h> | 17 | #include <linux/idr.h> |
18 | #include <linux/of.h> | ||
19 | #include <linux/of_gpio.h> | ||
18 | #include <linux/pagemap.h> | 20 | #include <linux/pagemap.h> |
19 | #include <linux/export.h> | 21 | #include <linux/export.h> |
20 | #include <linux/leds.h> | 22 | #include <linux/leds.h> |
@@ -23,6 +25,7 @@ | |||
23 | 25 | ||
24 | #include <linux/mmc/host.h> | 26 | #include <linux/mmc/host.h> |
25 | #include <linux/mmc/card.h> | 27 | #include <linux/mmc/card.h> |
28 | #include <linux/mmc/slot-gpio.h> | ||
26 | 29 | ||
27 | #include "core.h" | 30 | #include "core.h" |
28 | #include "host.h" | 31 | #include "host.h" |
@@ -295,6 +298,126 @@ static inline void mmc_host_clk_sysfs_init(struct mmc_host *host) | |||
295 | #endif | 298 | #endif |
296 | 299 | ||
297 | /** | 300 | /** |
301 | * mmc_of_parse() - parse host's device-tree node | ||
302 | * @host: host whose node should be parsed. | ||
303 | * | ||
304 | * To keep the rest of the MMC subsystem unaware of whether DT has been | ||
305 | * used to instantiate and configure this host instance or not, we | ||
306 | * parse the properties and set respective generic mmc-host flags and | ||
307 | * parameters. | ||
308 | */ | ||
309 | void mmc_of_parse(struct mmc_host *host) | ||
310 | { | ||
311 | struct device_node *np; | ||
312 | u32 bus_width; | ||
313 | bool explicit_inv_wp, gpio_inv_wp = false; | ||
314 | enum of_gpio_flags flags; | ||
315 | int len, ret, gpio; | ||
316 | |||
317 | if (!host->parent || !host->parent->of_node) | ||
318 | return; | ||
319 | |||
320 | np = host->parent->of_node; | ||
321 | |||
322 | /* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */ | ||
323 | if (of_property_read_u32(np, "bus-width", &bus_width) < 0) { | ||
324 | dev_dbg(host->parent, | ||
325 | "\"bus-width\" property is missing, assuming 1 bit.\n"); | ||
326 | bus_width = 1; | ||
327 | } | ||
328 | |||
329 | switch (bus_width) { | ||
330 | case 8: | ||
331 | host->caps |= MMC_CAP_8_BIT_DATA; | ||
332 | /* Hosts capable of 8-bit transfers can also do 4 bits */ | ||
333 | case 4: | ||
334 | host->caps |= MMC_CAP_4_BIT_DATA; | ||
335 | break; | ||
336 | case 1: | ||
337 | break; | ||
338 | default: | ||
339 | dev_err(host->parent, | ||
340 | "Invalid \"bus-width\" value %ud!\n", bus_width); | ||
341 | } | ||
342 | |||
343 | /* f_max is obtained from the optional "max-frequency" property */ | ||
344 | of_property_read_u32(np, "max-frequency", &host->f_max); | ||
345 | |||
346 | /* | ||
347 | * Configure CD and WP pins. They are both by default active low to | ||
348 | * match the SDHCI spec. If GPIOs are provided for CD and / or WP, the | ||
349 | * mmc-gpio helpers are used to attach, configure and use them. If | ||
350 | * polarity inversion is specified in DT, one of MMC_CAP2_CD_ACTIVE_HIGH | ||
351 | * and MMC_CAP2_RO_ACTIVE_HIGH capability-2 flags is set. If the | ||
352 | * "broken-cd" property is provided, the MMC_CAP_NEEDS_POLL capability | ||
353 | * is set. If the "non-removable" property is found, the | ||
354 | * MMC_CAP_NONREMOVABLE capability is set and no card-detection | ||
355 | * configuration is performed. | ||
356 | */ | ||
357 | |||
358 | /* Parse Card Detection */ | ||
359 | if (of_find_property(np, "non-removable", &len)) { | ||
360 | host->caps |= MMC_CAP_NONREMOVABLE; | ||
361 | } else { | ||
362 | bool explicit_inv_cd, gpio_inv_cd = false; | ||
363 | |||
364 | explicit_inv_cd = of_property_read_bool(np, "cd-inverted"); | ||
365 | |||
366 | if (of_find_property(np, "broken-cd", &len)) | ||
367 | host->caps |= MMC_CAP_NEEDS_POLL; | ||
368 | |||
369 | gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags); | ||
370 | if (gpio_is_valid(gpio)) { | ||
371 | if (!(flags & OF_GPIO_ACTIVE_LOW)) | ||
372 | gpio_inv_cd = true; | ||
373 | |||
374 | ret = mmc_gpio_request_cd(host, gpio); | ||
375 | if (ret < 0) | ||
376 | dev_err(host->parent, | ||
377 | "Failed to request CD GPIO #%d: %d!\n", | ||
378 | gpio, ret); | ||
379 | else | ||
380 | dev_info(host->parent, "Got CD GPIO #%d.\n", | ||
381 | gpio); | ||
382 | } | ||
383 | |||
384 | if (explicit_inv_cd ^ gpio_inv_cd) | ||
385 | host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; | ||
386 | } | ||
387 | |||
388 | /* Parse Write Protection */ | ||
389 | explicit_inv_wp = of_property_read_bool(np, "wp-inverted"); | ||
390 | |||
391 | gpio = of_get_named_gpio_flags(np, "wp-gpios", 0, &flags); | ||
392 | if (gpio_is_valid(gpio)) { | ||
393 | if (!(flags & OF_GPIO_ACTIVE_LOW)) | ||
394 | gpio_inv_wp = true; | ||
395 | |||
396 | ret = mmc_gpio_request_ro(host, gpio); | ||
397 | if (ret < 0) | ||
398 | dev_err(host->parent, | ||
399 | "Failed to request WP GPIO: %d!\n", ret); | ||
400 | } | ||
401 | if (explicit_inv_wp ^ gpio_inv_wp) | ||
402 | host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; | ||
403 | |||
404 | if (of_find_property(np, "cap-sd-highspeed", &len)) | ||
405 | host->caps |= MMC_CAP_SD_HIGHSPEED; | ||
406 | if (of_find_property(np, "cap-mmc-highspeed", &len)) | ||
407 | host->caps |= MMC_CAP_MMC_HIGHSPEED; | ||
408 | if (of_find_property(np, "cap-power-off-card", &len)) | ||
409 | host->caps |= MMC_CAP_POWER_OFF_CARD; | ||
410 | if (of_find_property(np, "cap-sdio-irq", &len)) | ||
411 | host->caps |= MMC_CAP_SDIO_IRQ; | ||
412 | if (of_find_property(np, "keep-power-in-suspend", &len)) | ||
413 | host->pm_caps |= MMC_PM_KEEP_POWER; | ||
414 | if (of_find_property(np, "enable-sdio-wakeup", &len)) | ||
415 | host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; | ||
416 | } | ||
417 | |||
418 | EXPORT_SYMBOL(mmc_of_parse); | ||
419 | |||
420 | /** | ||
298 | * mmc_alloc_host - initialise the per-host structure. | 421 | * mmc_alloc_host - initialise the per-host structure. |
299 | * @extra: sizeof private data structure | 422 | * @extra: sizeof private data structure |
300 | * @dev: pointer to host device model structure | 423 | * @dev: pointer to host device model structure |
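The polarity rule in the mmc_of_parse() comment above boils down to an exclusive-or of the two inversion sources: the explicit "cd-inverted"/"wp-inverted" property and the GPIO's own active-high flag. A standalone sketch of just that decision (the names are illustrative stand-ins, not kernel definitions):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the decision mmc_of_parse() makes: should the
 * MMC_CAP2_CD_ACTIVE_HIGH / MMC_CAP2_RO_ACTIVE_HIGH flag be set? */
static bool line_active_high(bool dt_inverted_property, bool gpio_flagged_active_high)
{
	/* Exactly one inversion source flips the default active-low sense. */
	return dt_inverted_property ^ gpio_flagged_active_high;
}

int main(void)
{
	for (int prop = 0; prop <= 1; prop++)
		for (int gpio = 0; gpio <= 1; gpio++)
			printf("inverted-prop=%d gpio-active-high=%d -> cap set=%d\n",
			       prop, gpio, line_active_high(prop, gpio));
	return 0;
}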
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index e6e39111e05b..c8f3d6e0684e 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
@@ -496,7 +496,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
496 | * RPMB regions are defined in multiples of 128K. | 496 | * RPMB regions are defined in multiples of 128K. |
497 | */ | 497 | */ |
498 | card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT]; | 498 | card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT]; |
499 | if (ext_csd[EXT_CSD_RPMB_MULT]) { | 499 | if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) { |
500 | mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17, | 500 | mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17, |
501 | EXT_CSD_PART_CONFIG_ACC_RPMB, | 501 | EXT_CSD_PART_CONFIG_ACC_RPMB, |
502 | "rpmb", 0, false, | 502 | "rpmb", 0, false, |
@@ -538,6 +538,11 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
538 | } else { | 538 | } else { |
539 | card->ext_csd.data_tag_unit_size = 0; | 539 | card->ext_csd.data_tag_unit_size = 0; |
540 | } | 540 | } |
541 | |||
542 | card->ext_csd.max_packed_writes = | ||
543 | ext_csd[EXT_CSD_MAX_PACKED_WRITES]; | ||
544 | card->ext_csd.max_packed_reads = | ||
545 | ext_csd[EXT_CSD_MAX_PACKED_READS]; | ||
541 | } else { | 546 | } else { |
542 | card->ext_csd.data_sector_size = 512; | 547 | card->ext_csd.data_sector_size = 512; |
543 | } | 548 | } |
@@ -769,11 +774,11 @@ static int mmc_select_hs200(struct mmc_card *card) | |||
769 | 774 | ||
770 | if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V && | 775 | if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V && |
771 | host->caps2 & MMC_CAP2_HS200_1_2V_SDR) | 776 | host->caps2 & MMC_CAP2_HS200_1_2V_SDR) |
772 | err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0); | 777 | err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120); |
773 | 778 | ||
774 | if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V && | 779 | if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V && |
775 | host->caps2 & MMC_CAP2_HS200_1_8V_SDR) | 780 | host->caps2 & MMC_CAP2_HS200_1_8V_SDR) |
776 | err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, 0); | 781 | err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180); |
777 | 782 | ||
778 | /* If fails try again during next card power cycle */ | 783 | /* If fails try again during next card power cycle */ |
779 | if (err) | 784 | if (err) |
@@ -1221,8 +1226,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
1221 | * WARNING: eMMC rules are NOT the same as SD DDR | 1226 | * WARNING: eMMC rules are NOT the same as SD DDR |
1222 | */ | 1227 | */ |
1223 | if (ddr == MMC_1_2V_DDR_MODE) { | 1228 | if (ddr == MMC_1_2V_DDR_MODE) { |
1224 | err = mmc_set_signal_voltage(host, | 1229 | err = __mmc_set_signal_voltage(host, |
1225 | MMC_SIGNAL_VOLTAGE_120, 0); | 1230 | MMC_SIGNAL_VOLTAGE_120); |
1226 | if (err) | 1231 | if (err) |
1227 | goto err; | 1232 | goto err; |
1228 | } | 1233 | } |
@@ -1275,6 +1280,29 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
1275 | } | 1280 | } |
1276 | } | 1281 | } |
1277 | 1282 | ||
1283 | /* | ||
1284 | * The mandatory minimum values are defined for packed command. | ||
1285 | * read: 5, write: 3 | ||
1286 | */ | ||
1287 | if (card->ext_csd.max_packed_writes >= 3 && | ||
1288 | card->ext_csd.max_packed_reads >= 5 && | ||
1289 | host->caps2 & MMC_CAP2_PACKED_CMD) { | ||
1290 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
1291 | EXT_CSD_EXP_EVENTS_CTRL, | ||
1292 | EXT_CSD_PACKED_EVENT_EN, | ||
1293 | card->ext_csd.generic_cmd6_time); | ||
1294 | if (err && err != -EBADMSG) | ||
1295 | goto free_card; | ||
1296 | if (err) { | ||
1297 | pr_warn("%s: Enabling packed event failed\n", | ||
1298 | mmc_hostname(card->host)); | ||
1299 | card->ext_csd.packed_event_en = 0; | ||
1300 | err = 0; | ||
1301 | } else { | ||
1302 | card->ext_csd.packed_event_en = 1; | ||
1303 | } | ||
1304 | } | ||
1305 | |||
1278 | if (!oldcard) | 1306 | if (!oldcard) |
1279 | host->card = card; | 1307 | host->card = card; |
1280 | 1308 | ||
@@ -1379,6 +1407,11 @@ static int mmc_suspend(struct mmc_host *host) | |||
1379 | BUG_ON(!host->card); | 1407 | BUG_ON(!host->card); |
1380 | 1408 | ||
1381 | mmc_claim_host(host); | 1409 | mmc_claim_host(host); |
1410 | |||
1411 | err = mmc_cache_ctrl(host, 0); | ||
1412 | if (err) | ||
1413 | goto out; | ||
1414 | |||
1382 | if (mmc_can_poweroff_notify(host->card)) | 1415 | if (mmc_can_poweroff_notify(host->card)) |
1383 | err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_SHORT); | 1416 | err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_SHORT); |
1384 | else if (mmc_card_can_sleep(host)) | 1417 | else if (mmc_card_can_sleep(host)) |
@@ -1386,8 +1419,9 @@ static int mmc_suspend(struct mmc_host *host) | |||
1386 | else if (!mmc_host_is_spi(host)) | 1419 | else if (!mmc_host_is_spi(host)) |
1387 | err = mmc_deselect_cards(host); | 1420 | err = mmc_deselect_cards(host); |
1388 | host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); | 1421 | host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); |
1389 | mmc_release_host(host); | ||
1390 | 1422 | ||
1423 | out: | ||
1424 | mmc_release_host(host); | ||
1391 | return err; | 1425 | return err; |
1392 | } | 1426 | } |
1393 | 1427 | ||
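The packed-command hunk above only enables the packed event when the card advertises at least the mandatory minimums (read 5, write 3) and the host has set MMC_CAP2_PACKED_CMD. A standalone sketch of that gate, with illustrative stand-in types:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the fields checked in mmc_init_card() above. */
struct packed_caps {
	unsigned int max_packed_reads;
	unsigned int max_packed_writes;
	bool host_supports_packed;	/* stands in for MMC_CAP2_PACKED_CMD */
};

/* Mirror of the gate in the hunk: packed events are only enabled when the
 * card meets the mandatory minimums and the host opted in. */
static bool can_enable_packed(const struct packed_caps *c)
{
	return c->max_packed_writes >= 3 &&
	       c->max_packed_reads >= 5 &&
	       c->host_supports_packed;
}

int main(void)
{
	struct packed_caps ok = { .max_packed_reads = 5, .max_packed_writes = 3,
				  .host_supports_packed = true };
	struct packed_caps no = { .max_packed_reads = 8, .max_packed_writes = 2,
				  .host_supports_packed = true };

	printf("ok: %d, no: %d\n", can_enable_packed(&ok), can_enable_packed(&no));
	return 0;
}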
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 6d8f7012d73a..49f04bc9d0eb 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c | |||
@@ -363,6 +363,7 @@ int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
363 | return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, | 363 | return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, |
364 | ext_csd, 512); | 364 | ext_csd, 512); |
365 | } | 365 | } |
366 | EXPORT_SYMBOL_GPL(mmc_send_ext_csd); | ||
366 | 367 | ||
367 | int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp) | 368 | int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp) |
368 | { | 369 | { |
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 74972c241dff..9e645e19cec6 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c | |||
@@ -444,8 +444,7 @@ static void sd_update_bus_speed_mode(struct mmc_card *card) | |||
444 | * If the host doesn't support any of the UHS-I modes, fallback on | 444 | * If the host doesn't support any of the UHS-I modes, fallback on |
445 | * default speed. | 445 | * default speed. |
446 | */ | 446 | */ |
447 | if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | | 447 | if (!mmc_host_uhs(card->host)) { |
448 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) { | ||
449 | card->sd_bus_speed = 0; | 448 | card->sd_bus_speed = 0; |
450 | return; | 449 | return; |
451 | } | 450 | } |
@@ -713,6 +712,14 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr) | |||
713 | { | 712 | { |
714 | int err; | 713 | int err; |
715 | u32 max_current; | 714 | u32 max_current; |
715 | int retries = 10; | ||
716 | |||
717 | try_again: | ||
718 | if (!retries) { | ||
719 | ocr &= ~SD_OCR_S18R; | ||
720 | pr_warning("%s: Skipping voltage switch\n", | ||
721 | mmc_hostname(host)); | ||
722 | } | ||
716 | 723 | ||
717 | /* | 724 | /* |
718 | * Since we're changing the OCR value, we seem to | 725 | * Since we're changing the OCR value, we seem to |
@@ -734,10 +741,10 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr) | |||
734 | 741 | ||
735 | /* | 742 | /* |
736 | * If the host supports one of UHS-I modes, request the card | 743 | * If the host supports one of UHS-I modes, request the card |
737 | * to switch to 1.8V signaling level. | 744 | * to switch to 1.8V signaling level. If the card has failed |
745 | * repeatedly to switch, however, skip this. | ||
738 | */ | 746 | */ |
739 | if (host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | | 747 | if (retries && mmc_host_uhs(host)) |
740 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)) | ||
741 | ocr |= SD_OCR_S18R; | 748 | ocr |= SD_OCR_S18R; |
742 | 749 | ||
743 | /* | 750 | /* |
@@ -748,7 +755,6 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr) | |||
748 | if (max_current > 150) | 755 | if (max_current > 150) |
749 | ocr |= SD_OCR_XPC; | 756 | ocr |= SD_OCR_XPC; |
750 | 757 | ||
751 | try_again: | ||
752 | err = mmc_send_app_op_cond(host, ocr, rocr); | 758 | err = mmc_send_app_op_cond(host, ocr, rocr); |
753 | if (err) | 759 | if (err) |
754 | return err; | 760 | return err; |
@@ -759,9 +765,12 @@ try_again: | |||
759 | */ | 765 | */ |
760 | if (!mmc_host_is_spi(host) && rocr && | 766 | if (!mmc_host_is_spi(host) && rocr && |
761 | ((*rocr & 0x41000000) == 0x41000000)) { | 767 | ((*rocr & 0x41000000) == 0x41000000)) { |
762 | err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, true); | 768 | err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180); |
763 | if (err) { | 769 | if (err == -EAGAIN) { |
764 | ocr &= ~SD_OCR_S18R; | 770 | retries--; |
771 | goto try_again; | ||
772 | } else if (err) { | ||
773 | retries = 0; | ||
765 | goto try_again; | 774 | goto try_again; |
766 | } | 775 | } |
767 | } | 776 | } |
@@ -960,16 +969,6 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, | |||
960 | 969 | ||
961 | /* Card is an ultra-high-speed card */ | 970 | /* Card is an ultra-high-speed card */ |
962 | mmc_card_set_uhs(card); | 971 | mmc_card_set_uhs(card); |
963 | |||
964 | /* | ||
965 | * Since initialization is now complete, enable preset | ||
966 | * value registers for UHS-I cards. | ||
967 | */ | ||
968 | if (host->ops->enable_preset_value) { | ||
969 | mmc_host_clk_hold(card->host); | ||
970 | host->ops->enable_preset_value(host, true); | ||
971 | mmc_host_clk_release(card->host); | ||
972 | } | ||
973 | } else { | 972 | } else { |
974 | /* | 973 | /* |
975 | * Attempt to change to high-speed (if supported) | 974 | * Attempt to change to high-speed (if supported) |
@@ -1148,13 +1147,6 @@ int mmc_attach_sd(struct mmc_host *host) | |||
1148 | BUG_ON(!host); | 1147 | BUG_ON(!host); |
1149 | WARN_ON(!host->claimed); | 1148 | WARN_ON(!host->claimed); |
1150 | 1149 | ||
1151 | /* Disable preset value enable if already set since last time */ | ||
1152 | if (host->ops->enable_preset_value) { | ||
1153 | mmc_host_clk_hold(host); | ||
1154 | host->ops->enable_preset_value(host, false); | ||
1155 | mmc_host_clk_release(host); | ||
1156 | } | ||
1157 | |||
1158 | err = mmc_send_app_op_cond(host, 0, &ocr); | 1150 | err = mmc_send_app_op_cond(host, 0, &ocr); |
1159 | if (err) | 1151 | if (err) |
1160 | return err; | 1152 | return err; |
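The sd.c changes above (and the similar sdio.c changes below) replace a single fallback on voltage-switch failure with a bounded retry loop that eventually gives up on 1.8V signalling. A standalone sketch of the pattern, using a made-up card model:

#include <errno.h>
#include <stdio.h>

/* Hypothetical card model: the 1.8V switch fails with -EAGAIN a few times
 * before succeeding. */
static int switch_to_1v8(int *failures_left)
{
	if (*failures_left > 0) {
		(*failures_left)--;
		return -EAGAIN;
	}
	return 0;
}

int main(void)
{
	int retries = 10;
	int failures_left = 3;

	while (retries) {
		if (switch_to_1v8(&failures_left) == -EAGAIN) {
			retries--;
			continue;
		}
		printf("switched to 1.8V signalling\n");
		return 0;
	}

	/* Out of retries: fall back to default signalling, as the hunks above
	 * do by clearing SD_OCR_S18R (and R4_18V_PRESENT for SDIO below). */
	printf("skipping voltage switch\n");
	return 1;
}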
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 2273ce6b6c1a..aa0719a4dfd1 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c | |||
@@ -157,10 +157,7 @@ static int sdio_read_cccr(struct mmc_card *card, u32 ocr) | |||
157 | if (ret) | 157 | if (ret) |
158 | goto out; | 158 | goto out; |
159 | 159 | ||
160 | if (card->host->caps & | 160 | if (mmc_host_uhs(card->host)) { |
161 | (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | | ||
162 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | | ||
163 | MMC_CAP_UHS_DDR50)) { | ||
164 | if (data & SDIO_UHS_DDR50) | 161 | if (data & SDIO_UHS_DDR50) |
165 | card->sw_caps.sd3_bus_mode | 162 | card->sw_caps.sd3_bus_mode |
166 | |= SD_MODE_UHS_DDR50; | 163 | |= SD_MODE_UHS_DDR50; |
@@ -478,8 +475,7 @@ static int sdio_set_bus_speed_mode(struct mmc_card *card) | |||
478 | * If the host doesn't support any of the UHS-I modes, fallback on | 475 | * If the host doesn't support any of the UHS-I modes, fallback on |
479 | * default speed. | 476 | * default speed. |
480 | */ | 477 | */ |
481 | if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | | 478 | if (!mmc_host_uhs(card->host)) |
482 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) | ||
483 | return 0; | 479 | return 0; |
484 | 480 | ||
485 | bus_speed = SDIO_SPEED_SDR12; | 481 | bus_speed = SDIO_SPEED_SDR12; |
@@ -489,23 +485,27 @@ static int sdio_set_bus_speed_mode(struct mmc_card *card) | |||
489 | bus_speed = SDIO_SPEED_SDR104; | 485 | bus_speed = SDIO_SPEED_SDR104; |
490 | timing = MMC_TIMING_UHS_SDR104; | 486 | timing = MMC_TIMING_UHS_SDR104; |
491 | card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR; | 487 | card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR; |
488 | card->sd_bus_speed = UHS_SDR104_BUS_SPEED; | ||
492 | } else if ((card->host->caps & MMC_CAP_UHS_DDR50) && | 489 | } else if ((card->host->caps & MMC_CAP_UHS_DDR50) && |
493 | (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) { | 490 | (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) { |
494 | bus_speed = SDIO_SPEED_DDR50; | 491 | bus_speed = SDIO_SPEED_DDR50; |
495 | timing = MMC_TIMING_UHS_DDR50; | 492 | timing = MMC_TIMING_UHS_DDR50; |
496 | card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR; | 493 | card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR; |
494 | card->sd_bus_speed = UHS_DDR50_BUS_SPEED; | ||
497 | } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | | 495 | } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | |
498 | MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode & | 496 | MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode & |
499 | SD_MODE_UHS_SDR50)) { | 497 | SD_MODE_UHS_SDR50)) { |
500 | bus_speed = SDIO_SPEED_SDR50; | 498 | bus_speed = SDIO_SPEED_SDR50; |
501 | timing = MMC_TIMING_UHS_SDR50; | 499 | timing = MMC_TIMING_UHS_SDR50; |
502 | card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR; | 500 | card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR; |
501 | card->sd_bus_speed = UHS_SDR50_BUS_SPEED; | ||
503 | } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | | 502 | } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | |
504 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) && | 503 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) && |
505 | (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) { | 504 | (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) { |
506 | bus_speed = SDIO_SPEED_SDR25; | 505 | bus_speed = SDIO_SPEED_SDR25; |
507 | timing = MMC_TIMING_UHS_SDR25; | 506 | timing = MMC_TIMING_UHS_SDR25; |
508 | card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR; | 507 | card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR; |
508 | card->sd_bus_speed = UHS_SDR25_BUS_SPEED; | ||
509 | } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | | 509 | } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | |
510 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 | | 510 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 | |
511 | MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode & | 511 | MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode & |
@@ -513,6 +513,7 @@ static int sdio_set_bus_speed_mode(struct mmc_card *card) | |||
513 | bus_speed = SDIO_SPEED_SDR12; | 513 | bus_speed = SDIO_SPEED_SDR12; |
514 | timing = MMC_TIMING_UHS_SDR12; | 514 | timing = MMC_TIMING_UHS_SDR12; |
515 | card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR; | 515 | card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR; |
516 | card->sd_bus_speed = UHS_SDR12_BUS_SPEED; | ||
516 | } | 517 | } |
517 | 518 | ||
518 | err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed); | 519 | err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed); |
@@ -583,10 +584,19 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, | |||
583 | { | 584 | { |
584 | struct mmc_card *card; | 585 | struct mmc_card *card; |
585 | int err; | 586 | int err; |
587 | int retries = 10; | ||
586 | 588 | ||
587 | BUG_ON(!host); | 589 | BUG_ON(!host); |
588 | WARN_ON(!host->claimed); | 590 | WARN_ON(!host->claimed); |
589 | 591 | ||
592 | try_again: | ||
593 | if (!retries) { | ||
594 | pr_warning("%s: Skipping voltage switch\n", | ||
595 | mmc_hostname(host)); | ||
596 | ocr &= ~R4_18V_PRESENT; | ||
597 | host->ocr &= ~R4_18V_PRESENT; | ||
598 | } | ||
599 | |||
590 | /* | 600 | /* |
591 | * Inform the card of the voltage | 601 | * Inform the card of the voltage |
592 | */ | 602 | */ |
@@ -645,14 +655,16 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, | |||
645 | * systems that claim 1.8v signalling in fact do not support | 655 | * systems that claim 1.8v signalling in fact do not support |
646 | * it. | 656 | * it. |
647 | */ | 657 | */ |
648 | if ((ocr & R4_18V_PRESENT) && | 658 | if (!powered_resume && (ocr & R4_18V_PRESENT) && mmc_host_uhs(host)) { |
649 | (host->caps & | 659 | err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180); |
650 | (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | | 660 | if (err == -EAGAIN) { |
651 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | | 661 | sdio_reset(host); |
652 | MMC_CAP_UHS_DDR50))) { | 662 | mmc_go_idle(host); |
653 | err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, | 663 | mmc_send_if_cond(host, host->ocr_avail); |
654 | true); | 664 | mmc_remove_card(card); |
655 | if (err) { | 665 | retries--; |
666 | goto try_again; | ||
667 | } else if (err) { | ||
656 | ocr &= ~R4_18V_PRESENT; | 668 | ocr &= ~R4_18V_PRESENT; |
657 | host->ocr &= ~R4_18V_PRESENT; | 669 | host->ocr &= ~R4_18V_PRESENT; |
658 | } | 670 | } |
@@ -937,10 +949,12 @@ static int mmc_sdio_resume(struct mmc_host *host) | |||
937 | mmc_claim_host(host); | 949 | mmc_claim_host(host); |
938 | 950 | ||
939 | /* No need to reinitialize powered-resumed nonremovable cards */ | 951 | /* No need to reinitialize powered-resumed nonremovable cards */ |
940 | if (mmc_card_is_removable(host) || !mmc_card_keep_power(host)) | 952 | if (mmc_card_is_removable(host) || !mmc_card_keep_power(host)) { |
953 | sdio_reset(host); | ||
954 | mmc_go_idle(host); | ||
941 | err = mmc_sdio_init_card(host, host->ocr, host->card, | 955 | err = mmc_sdio_init_card(host, host->ocr, host->card, |
942 | mmc_card_keep_power(host)); | 956 | mmc_card_keep_power(host)); |
943 | else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) { | 957 | } else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) { |
944 | /* We may have switched to 1-bit mode during suspend */ | 958 | /* We may have switched to 1-bit mode during suspend */ |
945 | err = sdio_enable_4bit_bus(host->card); | 959 | err = sdio_enable_4bit_bus(host->card); |
946 | if (err > 0) { | 960 | if (err > 0) { |
@@ -1020,6 +1034,10 @@ static int mmc_sdio_power_restore(struct mmc_host *host) | |||
1020 | goto out; | 1034 | goto out; |
1021 | } | 1035 | } |
1022 | 1036 | ||
1037 | if (mmc_host_uhs(host)) | ||
1038 | /* to query card if 1.8V signalling is supported */ | ||
1039 | host->ocr |= R4_18V_PRESENT; | ||
1040 | |||
1023 | ret = mmc_sdio_init_card(host, host->ocr, host->card, | 1041 | ret = mmc_sdio_init_card(host, host->ocr, host->card, |
1024 | mmc_card_keep_power(host)); | 1042 | mmc_card_keep_power(host)); |
1025 | if (!ret && host->sdio_irqs) | 1043 | if (!ret && host->sdio_irqs) |
@@ -1085,6 +1103,10 @@ int mmc_attach_sdio(struct mmc_host *host) | |||
1085 | /* | 1103 | /* |
1086 | * Detect and init the card. | 1104 | * Detect and init the card. |
1087 | */ | 1105 | */ |
1106 | if (mmc_host_uhs(host)) | ||
1107 | /* to query card if 1.8V signalling is supported */ | ||
1108 | host->ocr |= R4_18V_PRESENT; | ||
1109 | |||
1088 | err = mmc_sdio_init_card(host, host->ocr, NULL, 0); | 1110 | err = mmc_sdio_init_card(host, host->ocr, NULL, 0); |
1089 | if (err) { | 1111 | if (err) { |
1090 | if (err == -EAGAIN) { | 1112 | if (err == -EAGAIN) { |
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c index 16a1c0b6f264..324235105519 100644 --- a/drivers/mmc/core/slot-gpio.c +++ b/drivers/mmc/core/slot-gpio.c | |||
@@ -92,6 +92,20 @@ int mmc_gpio_get_cd(struct mmc_host *host) | |||
92 | } | 92 | } |
93 | EXPORT_SYMBOL(mmc_gpio_get_cd); | 93 | EXPORT_SYMBOL(mmc_gpio_get_cd); |
94 | 94 | ||
95 | /** | ||
96 | * mmc_gpio_request_ro - request a gpio for write-protection | ||
97 | * @host: mmc host | ||
98 | * @gpio: gpio number requested | ||
99 | * | ||
100 | * Since devm_* managed functions are used in mmc_gpio_request_ro(), client | ||
101 | * drivers do not need to call mmc_gpio_free_ro() explicitly when the GPIO is | ||
102 | * only requested at probe time and freed at unbind time. However, if a client | ||
103 | * driver does something special, such as switching write-protection at | ||
104 | * runtime, it is responsible for calling mmc_gpio_request_ro() and | ||
105 | * mmc_gpio_free_ro() as a pair on its own. | ||
106 | * | ||
107 | * Returns zero on success, else an error. | ||
108 | */ | ||
95 | int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio) | 109 | int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio) |
96 | { | 110 | { |
97 | struct mmc_gpio *ctx; | 111 | struct mmc_gpio *ctx; |
@@ -106,7 +120,8 @@ int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio) | |||
106 | 120 | ||
107 | ctx = host->slot.handler_priv; | 121 | ctx = host->slot.handler_priv; |
108 | 122 | ||
109 | ret = gpio_request_one(gpio, GPIOF_DIR_IN, ctx->ro_label); | 123 | ret = devm_gpio_request_one(&host->class_dev, gpio, GPIOF_DIR_IN, |
124 | ctx->ro_label); | ||
110 | if (ret < 0) | 125 | if (ret < 0) |
111 | return ret; | 126 | return ret; |
112 | 127 | ||
@@ -116,6 +131,20 @@ int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio) | |||
116 | } | 131 | } |
117 | EXPORT_SYMBOL(mmc_gpio_request_ro); | 132 | EXPORT_SYMBOL(mmc_gpio_request_ro); |
118 | 133 | ||
134 | /** | ||
135 | * mmc_gpio_request_cd - request a gpio for card-detection | ||
136 | * @host: mmc host | ||
137 | * @gpio: gpio number requested | ||
138 | * | ||
139 | * Since devm_* managed functions are used in mmc_gpio_request_cd(), client | ||
140 | * drivers do not need to call mmc_gpio_free_cd() explicitly when the GPIO is | ||
141 | * only requested at probe time and freed at unbind time. However, if a client | ||
142 | * driver does something special, such as switching card-detection at | ||
143 | * runtime, it is responsible for calling mmc_gpio_request_cd() and | ||
144 | * mmc_gpio_free_cd() as a pair on its own. | ||
145 | * | ||
146 | * Returns zero on success, else an error. | ||
147 | */ | ||
119 | int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio) | 148 | int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio) |
120 | { | 149 | { |
121 | struct mmc_gpio *ctx; | 150 | struct mmc_gpio *ctx; |
@@ -128,7 +157,8 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio) | |||
128 | 157 | ||
129 | ctx = host->slot.handler_priv; | 158 | ctx = host->slot.handler_priv; |
130 | 159 | ||
131 | ret = gpio_request_one(gpio, GPIOF_DIR_IN, ctx->cd_label); | 160 | ret = devm_gpio_request_one(&host->class_dev, gpio, GPIOF_DIR_IN, |
161 | ctx->cd_label); | ||
132 | if (ret < 0) | 162 | if (ret < 0) |
133 | /* | 163 | /* |
134 | * don't bother freeing memory. It might still get used by other | 164 | * don't bother freeing memory. It might still get used by other |
@@ -146,7 +176,8 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio) | |||
146 | irq = -EINVAL; | 176 | irq = -EINVAL; |
147 | 177 | ||
148 | if (irq >= 0) { | 178 | if (irq >= 0) { |
149 | ret = request_threaded_irq(irq, NULL, mmc_gpio_cd_irqt, | 179 | ret = devm_request_threaded_irq(&host->class_dev, irq, |
180 | NULL, mmc_gpio_cd_irqt, | ||
150 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, | 181 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, |
151 | ctx->cd_label, host); | 182 | ctx->cd_label, host); |
152 | if (ret < 0) | 183 | if (ret < 0) |
@@ -164,6 +195,13 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio) | |||
164 | } | 195 | } |
165 | EXPORT_SYMBOL(mmc_gpio_request_cd); | 196 | EXPORT_SYMBOL(mmc_gpio_request_cd); |
166 | 197 | ||
198 | /** | ||
199 | * mmc_gpio_free_ro - free the write-protection gpio | ||
200 | * @host: mmc host | ||
201 | * | ||
202 | * It's provided only for cases where client drivers need to manually free | ||
203 | * up the write-protection gpio requested by mmc_gpio_request_ro(). | ||
204 | */ | ||
167 | void mmc_gpio_free_ro(struct mmc_host *host) | 205 | void mmc_gpio_free_ro(struct mmc_host *host) |
168 | { | 206 | { |
169 | struct mmc_gpio *ctx = host->slot.handler_priv; | 207 | struct mmc_gpio *ctx = host->slot.handler_priv; |
@@ -175,10 +213,17 @@ void mmc_gpio_free_ro(struct mmc_host *host) | |||
175 | gpio = ctx->ro_gpio; | 213 | gpio = ctx->ro_gpio; |
176 | ctx->ro_gpio = -EINVAL; | 214 | ctx->ro_gpio = -EINVAL; |
177 | 215 | ||
178 | gpio_free(gpio); | 216 | devm_gpio_free(&host->class_dev, gpio); |
179 | } | 217 | } |
180 | EXPORT_SYMBOL(mmc_gpio_free_ro); | 218 | EXPORT_SYMBOL(mmc_gpio_free_ro); |
181 | 219 | ||
220 | /** | ||
221 | * mmc_gpio_free_cd - free the card-detection gpio | ||
222 | * @host: mmc host | ||
223 | * | ||
224 | * It's provided only for cases where client drivers need to manually free | ||
225 | * up the card-detection gpio requested by mmc_gpio_request_cd(). | ||
226 | */ | ||
182 | void mmc_gpio_free_cd(struct mmc_host *host) | 227 | void mmc_gpio_free_cd(struct mmc_host *host) |
183 | { | 228 | { |
184 | struct mmc_gpio *ctx = host->slot.handler_priv; | 229 | struct mmc_gpio *ctx = host->slot.handler_priv; |
@@ -188,13 +233,13 @@ void mmc_gpio_free_cd(struct mmc_host *host) | |||
188 | return; | 233 | return; |
189 | 234 | ||
190 | if (host->slot.cd_irq >= 0) { | 235 | if (host->slot.cd_irq >= 0) { |
191 | free_irq(host->slot.cd_irq, host); | 236 | devm_free_irq(&host->class_dev, host->slot.cd_irq, host); |
192 | host->slot.cd_irq = -EINVAL; | 237 | host->slot.cd_irq = -EINVAL; |
193 | } | 238 | } |
194 | 239 | ||
195 | gpio = ctx->cd_gpio; | 240 | gpio = ctx->cd_gpio; |
196 | ctx->cd_gpio = -EINVAL; | 241 | ctx->cd_gpio = -EINVAL; |
197 | 242 | ||
198 | gpio_free(gpio); | 243 | devm_gpio_free(&host->class_dev, gpio); |
199 | } | 244 | } |
200 | EXPORT_SYMBOL(mmc_gpio_free_cd); | 245 | EXPORT_SYMBOL(mmc_gpio_free_cd); |
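With the devm_* conversion above, a client driver that only requests the card-detect GPIO at probe time no longer needs to free it explicitly. A hypothetical driver fragment illustrating this (the platform-device wiring, error paths and GPIO number 42 are assumptions; the mmc_* calls themselves appear elsewhere in this series):

#include <linux/errno.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	int ret;

	mmc = mmc_alloc_host(0, &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	/* 42 is a made-up GPIO number, for illustration only. */
	ret = mmc_gpio_request_cd(mmc, 42);
	if (ret)
		goto err_free;

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_free;

	platform_set_drvdata(pdev, mmc);
	return 0;

err_free:
	mmc_free_host(mmc);
	return ret;
}

static int example_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	mmc_remove_host(mmc);
	/* No mmc_gpio_free_cd() here: the devm-managed GPIO and IRQ are
	 * released together with the host, per the kernel-doc above. */
	mmc_free_host(mmc);
	return 0;
}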
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 3be8b94d7914..d88219e1d86e 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig | |||
@@ -238,6 +238,17 @@ config MMC_SDHCI_S3C_DMA | |||
238 | 238 | ||
239 | YMMV. | 239 | YMMV. |
240 | 240 | ||
241 | config MMC_SDHCI_BCM2835 | ||
242 | tristate "SDHCI platform support for the BCM2835 SD/MMC Controller" | ||
243 | depends on ARCH_BCM2835 | ||
244 | depends on MMC_SDHCI_PLTFM | ||
245 | select MMC_SDHCI_IO_ACCESSORS | ||
246 | help | ||
247 | This selects the BCM2835 SD/MMC controller. If you have a BCM2835 | ||
248 | platform with SD or MMC devices, say Y or M here. | ||
249 | |||
250 | If unsure, say N. | ||
251 | |||
241 | config MMC_OMAP | 252 | config MMC_OMAP |
242 | tristate "TI OMAP Multimedia Card Interface support" | 253 | tristate "TI OMAP Multimedia Card Interface support" |
243 | depends on ARCH_OMAP | 254 | depends on ARCH_OMAP |
@@ -361,6 +372,13 @@ config MMC_DAVINCI | |||
361 | If you have an DAVINCI board with a Multimedia Card slot, | 372 | If you have an DAVINCI board with a Multimedia Card slot, |
362 | say Y or M here. If unsure, say N. | 373 | say Y or M here. If unsure, say N. |
363 | 374 | ||
375 | config MMC_GOLDFISH | ||
376 | tristate "goldfish qemu Multimedia Card Interface support" | ||
377 | depends on GOLDFISH | ||
378 | help | ||
379 | This selects the Goldfish Multimedia Card Interface emulation | ||
380 | found on the Goldfish Android virtual device emulator. | ||
381 | |||
364 | config MMC_SPI | 382 | config MMC_SPI |
365 | tristate "MMC/SD/SDIO over SPI" | 383 | tristate "MMC/SD/SDIO over SPI" |
366 | depends on SPI_MASTER && !HIGHMEM && HAS_DMA | 384 | depends on SPI_MASTER && !HIGHMEM && HAS_DMA |
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index e4e218c930bd..c380e3cf0a3b 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile | |||
@@ -23,6 +23,7 @@ obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o | |||
23 | obj-$(CONFIG_MMC_MSM) += msm_sdcc.o | 23 | obj-$(CONFIG_MMC_MSM) += msm_sdcc.o |
24 | obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o | 24 | obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o |
25 | obj-$(CONFIG_MMC_DAVINCI) += davinci_mmc.o | 25 | obj-$(CONFIG_MMC_DAVINCI) += davinci_mmc.o |
26 | obj-$(CONFIG_MMC_GOLDFISH) += android-goldfish.o | ||
26 | obj-$(CONFIG_MMC_SPI) += mmc_spi.o | 27 | obj-$(CONFIG_MMC_SPI) += mmc_spi.o |
27 | ifeq ($(CONFIG_OF),y) | 28 | ifeq ($(CONFIG_OF),y) |
28 | obj-$(CONFIG_MMC_SPI) += of_mmc_spi.o | 29 | obj-$(CONFIG_MMC_SPI) += of_mmc_spi.o |
@@ -58,6 +59,7 @@ obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o | |||
58 | obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o | 59 | obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o |
59 | obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o | 60 | obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o |
60 | obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o | 61 | obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o |
62 | obj-$(CONFIG_MMC_SDHCI_BCM2835) += sdhci-bcm2835.o | ||
61 | 63 | ||
62 | ifeq ($(CONFIG_CB710_DEBUG),y) | 64 | ifeq ($(CONFIG_CB710_DEBUG),y) |
63 | CFLAGS-cb710-mmc += -DDEBUG | 65 | CFLAGS-cb710-mmc += -DDEBUG |
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c new file mode 100644 index 000000000000..ef3aef0f376d --- /dev/null +++ b/drivers/mmc/host/android-goldfish.c | |||
@@ -0,0 +1,570 @@ | |||
1 | /* | ||
2 | * Copyright 2007, Google Inc. | ||
3 | * Copyright 2012, Intel Inc. | ||
4 | * | ||
5 | * based on omap.c driver, which was | ||
6 | * Copyright (C) 2004 Nokia Corporation | ||
7 | * Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com> | ||
8 | * Misc hacks here and there by Tony Lindgren <tony@atomide.com> | ||
9 | * Other hacks (DMA, SD, etc) by David Brownell | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | */ | ||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/major.h> | ||
19 | |||
20 | #include <linux/types.h> | ||
21 | #include <linux/pci.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | |||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/fs.h> | ||
26 | #include <linux/errno.h> | ||
27 | #include <linux/hdreg.h> | ||
28 | #include <linux/kdev_t.h> | ||
29 | #include <linux/blkdev.h> | ||
30 | #include <linux/mutex.h> | ||
31 | #include <linux/scatterlist.h> | ||
32 | #include <linux/mmc/mmc.h> | ||
33 | #include <linux/mmc/sdio.h> | ||
34 | #include <linux/mmc/host.h> | ||
35 | #include <linux/mmc/card.h> | ||
36 | |||
37 | #include <linux/moduleparam.h> | ||
38 | #include <linux/init.h> | ||
39 | #include <linux/ioport.h> | ||
40 | #include <linux/dma-mapping.h> | ||
41 | #include <linux/delay.h> | ||
42 | #include <linux/spinlock.h> | ||
43 | #include <linux/timer.h> | ||
44 | #include <linux/clk.h> | ||
45 | |||
46 | #include <asm/io.h> | ||
47 | #include <asm/irq.h> | ||
48 | #include <asm/scatterlist.h> | ||
49 | |||
50 | #include <asm/types.h> | ||
51 | #include <asm/io.h> | ||
52 | #include <asm/uaccess.h> | ||
53 | |||
54 | #define DRIVER_NAME "goldfish_mmc" | ||
55 | |||
56 | #define BUFFER_SIZE 16384 | ||
57 | |||
58 | #define GOLDFISH_MMC_READ(host, addr) (readl(host->reg_base + addr)) | ||
59 | #define GOLDFISH_MMC_WRITE(host, addr, x) (writel(x, host->reg_base + addr)) | ||
60 | |||
61 | enum { | ||
62 | /* status register */ | ||
63 | MMC_INT_STATUS = 0x00, | ||
64 | /* set this to enable IRQ */ | ||
65 | MMC_INT_ENABLE = 0x04, | ||
66 | /* set this to specify buffer address */ | ||
67 | MMC_SET_BUFFER = 0x08, | ||
68 | |||
69 | /* MMC command number */ | ||
70 | MMC_CMD = 0x0C, | ||
71 | |||
72 | /* MMC argument */ | ||
73 | MMC_ARG = 0x10, | ||
74 | |||
75 | /* MMC response (or R2 bits 0 - 31) */ | ||
76 | MMC_RESP_0 = 0x14, | ||
77 | |||
78 | /* MMC R2 response bits 32 - 63 */ | ||
79 | MMC_RESP_1 = 0x18, | ||
80 | |||
81 | /* MMC R2 response bits 64 - 95 */ | ||
82 | MMC_RESP_2 = 0x1C, | ||
83 | |||
84 | /* MMC R2 response bits 96 - 127 */ | ||
85 | MMC_RESP_3 = 0x20, | ||
86 | |||
87 | MMC_BLOCK_LENGTH = 0x24, | ||
88 | MMC_BLOCK_COUNT = 0x28, | ||
89 | |||
90 | /* MMC state flags */ | ||
91 | MMC_STATE = 0x2C, | ||
92 | |||
93 | /* MMC_INT_STATUS bits */ | ||
94 | |||
95 | MMC_STAT_END_OF_CMD = 1U << 0, | ||
96 | MMC_STAT_END_OF_DATA = 1U << 1, | ||
97 | MMC_STAT_STATE_CHANGE = 1U << 2, | ||
98 | MMC_STAT_CMD_TIMEOUT = 1U << 3, | ||
99 | |||
100 | /* MMC_STATE bits */ | ||
101 | MMC_STATE_INSERTED = 1U << 0, | ||
102 | MMC_STATE_READ_ONLY = 1U << 1, | ||
103 | }; | ||
104 | |||
105 | /* | ||
106 | * Command types | ||
107 | */ | ||
108 | #define OMAP_MMC_CMDTYPE_BC 0 | ||
109 | #define OMAP_MMC_CMDTYPE_BCR 1 | ||
110 | #define OMAP_MMC_CMDTYPE_AC 2 | ||
111 | #define OMAP_MMC_CMDTYPE_ADTC 3 | ||
112 | |||
113 | |||
114 | struct goldfish_mmc_host { | ||
115 | struct mmc_request *mrq; | ||
116 | struct mmc_command *cmd; | ||
117 | struct mmc_data *data; | ||
118 | struct mmc_host *mmc; | ||
119 | struct device *dev; | ||
120 | unsigned char id; /* 16xx chips have 2 MMC blocks */ | ||
121 | void __iomem *virt_base; | ||
122 | unsigned int phys_base; | ||
123 | int irq; | ||
124 | unsigned char bus_mode; | ||
125 | unsigned char hw_bus_mode; | ||
126 | |||
127 | unsigned int sg_len; | ||
128 | unsigned dma_done:1; | ||
129 | unsigned dma_in_use:1; | ||
130 | |||
131 | void __iomem *reg_base; | ||
132 | }; | ||
133 | |||
134 | static inline int | ||
135 | goldfish_mmc_cover_is_open(struct goldfish_mmc_host *host) | ||
136 | { | ||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | static ssize_t | ||
141 | goldfish_mmc_show_cover_switch(struct device *dev, | ||
142 | struct device_attribute *attr, char *buf) | ||
143 | { | ||
144 | struct goldfish_mmc_host *host = dev_get_drvdata(dev); | ||
145 | |||
146 | return sprintf(buf, "%s\n", goldfish_mmc_cover_is_open(host) ? "open" : | ||
147 | "closed"); | ||
148 | } | ||
149 | |||
150 | static DEVICE_ATTR(cover_switch, S_IRUGO, goldfish_mmc_show_cover_switch, NULL); | ||
151 | |||
152 | static void | ||
153 | goldfish_mmc_start_command(struct goldfish_mmc_host *host, struct mmc_command *cmd) | ||
154 | { | ||
155 | u32 cmdreg; | ||
156 | u32 resptype; | ||
157 | u32 cmdtype; | ||
158 | |||
159 | host->cmd = cmd; | ||
160 | |||
161 | resptype = 0; | ||
162 | cmdtype = 0; | ||
163 | |||
164 | /* Our hardware needs to know the exact response type */ | ||
165 | switch (mmc_resp_type(cmd)) { | ||
166 | case MMC_RSP_NONE: | ||
167 | break; | ||
168 | case MMC_RSP_R1: | ||
169 | case MMC_RSP_R1B: | ||
170 | /* resp 1, 1b, 6, 7 */ | ||
171 | resptype = 1; | ||
172 | break; | ||
173 | case MMC_RSP_R2: | ||
174 | resptype = 2; | ||
175 | break; | ||
176 | case MMC_RSP_R3: | ||
177 | resptype = 3; | ||
178 | break; | ||
179 | default: | ||
180 | dev_err(mmc_dev(host->mmc), | ||
181 | "Invalid response type: %04x\n", mmc_resp_type(cmd)); | ||
182 | break; | ||
183 | } | ||
184 | |||
185 | if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) | ||
186 | cmdtype = OMAP_MMC_CMDTYPE_ADTC; | ||
187 | else if (mmc_cmd_type(cmd) == MMC_CMD_BC) | ||
188 | cmdtype = OMAP_MMC_CMDTYPE_BC; | ||
189 | else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) | ||
190 | cmdtype = OMAP_MMC_CMDTYPE_BCR; | ||
191 | else | ||
192 | cmdtype = OMAP_MMC_CMDTYPE_AC; | ||
193 | |||
194 | cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12); | ||
195 | |||
196 | if (host->bus_mode == MMC_BUSMODE_OPENDRAIN) | ||
197 | cmdreg |= 1 << 6; | ||
198 | |||
199 | if (cmd->flags & MMC_RSP_BUSY) | ||
200 | cmdreg |= 1 << 11; | ||
201 | |||
202 | if (host->data && !(host->data->flags & MMC_DATA_WRITE)) | ||
203 | cmdreg |= 1 << 15; | ||
204 | |||
205 | GOLDFISH_MMC_WRITE(host, MMC_ARG, cmd->arg); | ||
206 | GOLDFISH_MMC_WRITE(host, MMC_CMD, cmdreg); | ||
207 | } | ||
208 | |||
209 | static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host, | ||
210 | struct mmc_data *data) | ||
211 | { | ||
212 | if (host->dma_in_use) { | ||
213 | enum dma_data_direction dma_data_dir; | ||
214 | |||
215 | if (data->flags & MMC_DATA_WRITE) | ||
216 | dma_data_dir = DMA_TO_DEVICE; | ||
217 | else | ||
218 | dma_data_dir = DMA_FROM_DEVICE; | ||
219 | |||
220 | if (dma_data_dir == DMA_FROM_DEVICE) { | ||
221 | /* | ||
222 | * We don't really have DMA, so we need | ||
223 | * to copy from our platform driver buffer | ||
224 | */ | ||
225 | uint8_t *dest = (uint8_t *)sg_virt(data->sg); | ||
226 | memcpy(dest, host->virt_base, data->sg->length); | ||
227 | } | ||
228 | host->data->bytes_xfered += data->sg->length; | ||
229 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len, | ||
230 | dma_data_dir); | ||
231 | } | ||
232 | |||
233 | host->data = NULL; | ||
234 | host->sg_len = 0; | ||
235 | |||
236 | /* | ||
237 | * NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing | ||
238 | * dozens of requests until the card finishes writing data. | ||
239 | * It'd be cheaper to just wait till an EOFB interrupt arrives... | ||
240 | */ | ||
241 | |||
242 | if (!data->stop) { | ||
243 | host->mrq = NULL; | ||
244 | mmc_request_done(host->mmc, data->mrq); | ||
245 | return; | ||
246 | } | ||
247 | |||
248 | goldfish_mmc_start_command(host, data->stop); | ||
249 | } | ||
250 | |||
251 | static void goldfish_mmc_end_of_data(struct goldfish_mmc_host *host, | ||
252 | struct mmc_data *data) | ||
253 | { | ||
254 | if (!host->dma_in_use) { | ||
255 | goldfish_mmc_xfer_done(host, data); | ||
256 | return; | ||
257 | } | ||
258 | if (host->dma_done) | ||
259 | goldfish_mmc_xfer_done(host, data); | ||
260 | } | ||
261 | |||
262 | static void goldfish_mmc_cmd_done(struct goldfish_mmc_host *host, | ||
263 | struct mmc_command *cmd) | ||
264 | { | ||
265 | host->cmd = NULL; | ||
266 | if (cmd->flags & MMC_RSP_PRESENT) { | ||
267 | if (cmd->flags & MMC_RSP_136) { | ||
268 | /* response type 2 */ | ||
269 | cmd->resp[3] = | ||
270 | GOLDFISH_MMC_READ(host, MMC_RESP_0); | ||
271 | cmd->resp[2] = | ||
272 | GOLDFISH_MMC_READ(host, MMC_RESP_1); | ||
273 | cmd->resp[1] = | ||
274 | GOLDFISH_MMC_READ(host, MMC_RESP_2); | ||
275 | cmd->resp[0] = | ||
276 | GOLDFISH_MMC_READ(host, MMC_RESP_3); | ||
277 | } else { | ||
278 | /* response types 1, 1b, 3, 4, 5, 6 */ | ||
279 | cmd->resp[0] = | ||
280 | GOLDFISH_MMC_READ(host, MMC_RESP_0); | ||
281 | } | ||
282 | } | ||
283 | |||
284 | if (host->data == NULL || cmd->error) { | ||
285 | host->mrq = NULL; | ||
286 | mmc_request_done(host->mmc, cmd->mrq); | ||
287 | } | ||
288 | } | ||
289 | |||
290 | static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id) | ||
291 | { | ||
292 | struct goldfish_mmc_host *host = (struct goldfish_mmc_host *)dev_id; | ||
293 | u16 status; | ||
294 | int end_command = 0; | ||
295 | int end_transfer = 0; | ||
296 | int transfer_error = 0; | ||
297 | int state_changed = 0; | ||
298 | int cmd_timeout = 0; | ||
299 | |||
300 | while ((status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS)) != 0) { | ||
301 | GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status); | ||
302 | |||
303 | if (status & MMC_STAT_END_OF_CMD) | ||
304 | end_command = 1; | ||
305 | |||
306 | if (status & MMC_STAT_END_OF_DATA) | ||
307 | end_transfer = 1; | ||
308 | |||
309 | if (status & MMC_STAT_STATE_CHANGE) | ||
310 | state_changed = 1; | ||
311 | |||
312 | if (status & MMC_STAT_CMD_TIMEOUT) { | ||
313 | end_command = 0; | ||
314 | cmd_timeout = 1; | ||
315 | } | ||
316 | } | ||
317 | |||
318 | if (cmd_timeout) { | ||
319 | struct mmc_request *mrq = host->mrq; | ||
320 | mrq->cmd->error = -ETIMEDOUT; | ||
321 | host->mrq = NULL; | ||
322 | mmc_request_done(host->mmc, mrq); | ||
323 | } | ||
324 | |||
325 | if (end_command) | ||
326 | goldfish_mmc_cmd_done(host, host->cmd); | ||
327 | |||
328 | if (transfer_error) | ||
329 | goldfish_mmc_xfer_done(host, host->data); | ||
330 | else if (end_transfer) { | ||
331 | host->dma_done = 1; | ||
332 | goldfish_mmc_end_of_data(host, host->data); | ||
333 | } else if (host->data != NULL) { | ||
334 | /* | ||
335 | * WORKAROUND -- after porting this driver from 2.6 to 3.4, | ||
336 | * during device initialization, cases where host->data is | ||
337 | * non-null but end_transfer is false would occur. Doing | ||
338 | * nothing in such cases results in no further interrupts, | ||
339 | * and initialization failure. | ||
340 | * TODO -- find the real cause. | ||
341 | */ | ||
342 | host->dma_done = 1; | ||
343 | goldfish_mmc_end_of_data(host, host->data); | ||
344 | } | ||
345 | |||
346 | if (state_changed) { | ||
347 | u32 state = GOLDFISH_MMC_READ(host, MMC_STATE); | ||
348 | pr_info("%s: Card detect now %d\n", __func__, | ||
349 | (state & MMC_STATE_INSERTED)); | ||
350 | mmc_detect_change(host->mmc, 0); | ||
351 | } | ||
352 | |||
353 | if (!end_command && !end_transfer && | ||
354 | !transfer_error && !state_changed && !cmd_timeout) { | ||
355 | status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS); | ||
356 | dev_info(mmc_dev(host->mmc),"spurious irq 0x%04x\n", status); | ||
357 | if (status != 0) { | ||
358 | GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status); | ||
359 | GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, 0); | ||
360 | } | ||
361 | } | ||
362 | |||
363 | return IRQ_HANDLED; | ||
364 | } | ||
365 | |||
366 | static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host, | ||
367 | struct mmc_request *req) | ||
368 | { | ||
369 | struct mmc_data *data = req->data; | ||
370 | int block_size; | ||
371 | unsigned sg_len; | ||
372 | enum dma_data_direction dma_data_dir; | ||
373 | |||
374 | host->data = data; | ||
375 | if (data == NULL) { | ||
376 | GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, 0); | ||
377 | GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, 0); | ||
378 | host->dma_in_use = 0; | ||
379 | return; | ||
380 | } | ||
381 | |||
382 | block_size = data->blksz; | ||
383 | |||
384 | GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, data->blocks - 1); | ||
385 | GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, block_size - 1); | ||
386 | |||
387 | /* | ||
388 | * Cope with calling layer confusion; it issues "single | ||
389 | * block" writes using multi-block scatterlists. | ||
390 | */ | ||
391 | sg_len = (data->blocks == 1) ? 1 : data->sg_len; | ||
392 | |||
393 | if (data->flags & MMC_DATA_WRITE) | ||
394 | dma_data_dir = DMA_TO_DEVICE; | ||
395 | else | ||
396 | dma_data_dir = DMA_FROM_DEVICE; | ||
397 | |||
398 | host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, | ||
399 | sg_len, dma_data_dir); | ||
400 | host->dma_done = 0; | ||
401 | host->dma_in_use = 1; | ||
402 | |||
403 | if (dma_data_dir == DMA_TO_DEVICE) { | ||
404 | /* | ||
405 | * We don't really have DMA, so we need to copy to our | ||
406 | * platform driver buffer | ||
407 | */ | ||
408 | const uint8_t *src = (uint8_t *)sg_virt(data->sg); | ||
409 | memcpy(host->virt_base, src, data->sg->length); | ||
410 | } | ||
411 | } | ||
412 | |||
413 | static void goldfish_mmc_request(struct mmc_host *mmc, struct mmc_request *req) | ||
414 | { | ||
415 | struct goldfish_mmc_host *host = mmc_priv(mmc); | ||
416 | |||
417 | WARN_ON(host->mrq != NULL); | ||
418 | |||
419 | host->mrq = req; | ||
420 | goldfish_mmc_prepare_data(host, req); | ||
421 | goldfish_mmc_start_command(host, req->cmd); | ||
422 | |||
423 | /* | ||
424 | * This is to avoid accidentally being detected as an SDIO card | ||
425 | * in mmc_attach_sdio(). | ||
426 | */ | ||
427 | if (req->cmd->opcode == SD_IO_SEND_OP_COND && | ||
428 | req->cmd->flags == (MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR)) | ||
429 | req->cmd->error = -EINVAL; | ||
430 | } | ||
431 | |||
432 | static void goldfish_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | ||
433 | { | ||
434 | struct goldfish_mmc_host *host = mmc_priv(mmc); | ||
435 | |||
436 | host->bus_mode = ios->bus_mode; | ||
437 | host->hw_bus_mode = host->bus_mode; | ||
438 | } | ||
439 | |||
440 | static int goldfish_mmc_get_ro(struct mmc_host *mmc) | ||
441 | { | ||
442 | uint32_t state; | ||
443 | struct goldfish_mmc_host *host = mmc_priv(mmc); | ||
444 | |||
445 | state = GOLDFISH_MMC_READ(host, MMC_STATE); | ||
446 | return ((state & MMC_STATE_READ_ONLY) != 0); | ||
447 | } | ||
448 | |||
449 | static const struct mmc_host_ops goldfish_mmc_ops = { | ||
450 | .request = goldfish_mmc_request, | ||
451 | .set_ios = goldfish_mmc_set_ios, | ||
452 | .get_ro = goldfish_mmc_get_ro, | ||
453 | }; | ||
454 | |||
455 | static int goldfish_mmc_probe(struct platform_device *pdev) | ||
456 | { | ||
457 | struct mmc_host *mmc; | ||
458 | struct goldfish_mmc_host *host = NULL; | ||
459 | struct resource *res; | ||
460 | int ret = 0; | ||
461 | int irq; | ||
462 | dma_addr_t buf_addr; | ||
463 | |||
464 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
465 | irq = platform_get_irq(pdev, 0); | ||
466 | if (res == NULL || irq < 0) | ||
467 | return -ENXIO; | ||
468 | |||
469 | mmc = mmc_alloc_host(sizeof(struct goldfish_mmc_host), &pdev->dev); | ||
470 | if (mmc == NULL) { | ||
471 | ret = -ENOMEM; | ||
472 | goto err_alloc_host_failed; | ||
473 | } | ||
474 | |||
475 | host = mmc_priv(mmc); | ||
476 | host->mmc = mmc; | ||
477 | |||
478 | pr_err("mmc: Mapping %lX to %lX\n", (long)res->start, (long)res->end); | ||
479 | host->reg_base = ioremap(res->start, res->end - res->start + 1); | ||
480 | if (host->reg_base == NULL) { | ||
481 | ret = -ENOMEM; | ||
482 | goto ioremap_failed; | ||
483 | } | ||
484 | host->virt_base = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE, | ||
485 | &buf_addr, GFP_KERNEL); | ||
486 | |||
487 | if (host->virt_base == 0) { | ||
488 | ret = -ENOMEM; | ||
489 | goto dma_alloc_failed; | ||
490 | } | ||
491 | host->phys_base = buf_addr; | ||
492 | |||
493 | host->id = pdev->id; | ||
494 | host->irq = irq; | ||
495 | |||
496 | mmc->ops = &goldfish_mmc_ops; | ||
497 | mmc->f_min = 400000; | ||
498 | mmc->f_max = 24000000; | ||
499 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | ||
500 | mmc->caps = MMC_CAP_4_BIT_DATA; | ||
501 | |||
502 | /* Use scatterlist DMA to reduce per-transfer costs. | ||
503 | * NOTE: max_seg_size assumes that small blocks aren't | ||
504 | * normally used (except e.g. for reading SD registers). | ||
505 | */ | ||
506 | mmc->max_segs = 32; | ||
507 | mmc->max_blk_size = 2048; /* MMC_BLOCK_LENGTH is 11 bits (+1) */ | ||
508 | mmc->max_blk_count = 2048; /* MMC_BLOCK_COUNT is 11 bits (+1) */ | ||
509 | mmc->max_req_size = BUFFER_SIZE; | ||
510 | mmc->max_seg_size = mmc->max_req_size; | ||
511 | |||
512 | ret = request_irq(host->irq, goldfish_mmc_irq, 0, DRIVER_NAME, host); | ||
513 | if (ret) { | ||
514 | dev_err(&pdev->dev, "Failed to request IRQ for goldfish MMC\n"); | ||
515 | goto err_request_irq_failed; | ||
516 | } | ||
517 | |||
518 | host->dev = &pdev->dev; | ||
519 | platform_set_drvdata(pdev, host); | ||
520 | |||
521 | ret = device_create_file(&pdev->dev, &dev_attr_cover_switch); | ||
522 | if (ret) | ||
523 | dev_warn(mmc_dev(host->mmc), | ||
524 | "Unable to create sysfs attributes\n"); | ||
525 | |||
526 | GOLDFISH_MMC_WRITE(host, MMC_SET_BUFFER, host->phys_base); | ||
527 | GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, | ||
528 | MMC_STAT_END_OF_CMD | MMC_STAT_END_OF_DATA | | ||
529 | MMC_STAT_STATE_CHANGE | MMC_STAT_CMD_TIMEOUT); | ||
530 | |||
531 | mmc_add_host(mmc); | ||
532 | return 0; | ||
533 | |||
534 | err_request_irq_failed: | ||
535 | dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base, | ||
536 | host->phys_base); | ||
537 | dma_alloc_failed: | ||
538 | iounmap(host->reg_base); | ||
539 | ioremap_failed: | ||
540 | mmc_free_host(host->mmc); | ||
541 | err_alloc_host_failed: | ||
542 | return ret; | ||
543 | } | ||
544 | |||
545 | static int goldfish_mmc_remove(struct platform_device *pdev) | ||
546 | { | ||
547 | struct goldfish_mmc_host *host = platform_get_drvdata(pdev); | ||
548 | |||
549 | platform_set_drvdata(pdev, NULL); | ||
550 | |||
551 | BUG_ON(host == NULL); | ||
552 | |||
553 | mmc_remove_host(host->mmc); | ||
554 | free_irq(host->irq, host); | ||
555 | dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base, host->phys_base); | ||
556 | iounmap(host->reg_base); | ||
557 | mmc_free_host(host->mmc); | ||
558 | return 0; | ||
559 | } | ||
560 | |||
561 | static struct platform_driver goldfish_mmc_driver = { | ||
562 | .probe = goldfish_mmc_probe, | ||
563 | .remove = goldfish_mmc_remove, | ||
564 | .driver = { | ||
565 | .name = DRIVER_NAME, | ||
566 | }, | ||
567 | }; | ||
568 | |||
569 | module_platform_driver(goldfish_mmc_driver); | ||
570 | MODULE_LICENSE("GPL v2"); | ||
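The goldfish driver's "we don't really have DMA" comments describe a bounce-buffer scheme: writes are staged into the coherent buffer before the command is issued, and reads are copied back out afterwards. A standalone sketch of that pattern (sizes and names are illustrative):

#include <stdio.h>
#include <string.h>

#define BUFFER_SIZE 16384	/* same staging size the driver allocates */

static unsigned char staging[BUFFER_SIZE];	/* stands in for host->virt_base */

/* Before a write: copy the caller's data into the staging buffer the
 * emulated controller reads from. */
static void stage_write(const void *src, size_t len)
{
	memcpy(staging, src, len);
}

/* After a read: copy what the emulated controller produced back out. */
static void complete_read(void *dst, size_t len)
{
	memcpy(dst, staging, len);
}

int main(void)
{
	char in[32] = "hello goldfish", out[32] = "";

	stage_write(in, sizeof(in));	 /* the driver does this in prepare_data() */
	complete_read(out, sizeof(out)); /* and this in xfer_done() */
	printf("%s\n", out);
	return 0;
}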
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c index 4d50da618166..72fd0f2c9013 100644 --- a/drivers/mmc/host/dw_mmc-exynos.c +++ b/drivers/mmc/host/dw_mmc-exynos.c | |||
@@ -175,16 +175,6 @@ static int dw_mci_exynos_setup_bus(struct dw_mci *host, | |||
175 | } | 175 | } |
176 | } | 176 | } |
177 | 177 | ||
178 | gpio = of_get_named_gpio(slot_np, "wp-gpios", 0); | ||
179 | if (gpio_is_valid(gpio)) { | ||
180 | if (devm_gpio_request(host->dev, gpio, "dw-mci-wp")) | ||
181 | dev_info(host->dev, "gpio [%d] request failed\n", | ||
182 | gpio); | ||
183 | } else { | ||
184 | dev_info(host->dev, "wp gpio not available"); | ||
185 | host->pdata->quirks |= DW_MCI_QUIRK_NO_WRITE_PROTECT; | ||
186 | } | ||
187 | |||
188 | if (host->pdata->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) | 178 | if (host->pdata->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) |
189 | return 0; | 179 | return 0; |
190 | 180 | ||
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 323c5022c2ca..60063ccb4c4b 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/regulator/consumer.h> | 34 | #include <linux/regulator/consumer.h> |
35 | #include <linux/workqueue.h> | 35 | #include <linux/workqueue.h> |
36 | #include <linux/of.h> | 36 | #include <linux/of.h> |
37 | #include <linux/of_gpio.h> | ||
37 | 38 | ||
38 | #include "dw_mmc.h" | 39 | #include "dw_mmc.h" |
39 | 40 | ||
@@ -74,6 +75,8 @@ struct idmac_desc { | |||
74 | * struct dw_mci_slot - MMC slot state | 75 | * struct dw_mci_slot - MMC slot state |
75 | * @mmc: The mmc_host representing this slot. | 76 | * @mmc: The mmc_host representing this slot. |
76 | * @host: The MMC controller this slot is using. | 77 | * @host: The MMC controller this slot is using. |
78 | * @quirks: Slot-level quirks (DW_MCI_SLOT_QUIRK_XXX) | ||
79 | * @wp_gpio: If gpio_is_valid() we'll use this to read write protect. | ||
77 | * @ctype: Card type for this slot. | 80 | * @ctype: Card type for this slot. |
78 | * @mrq: mmc_request currently being processed or waiting to be | 81 | * @mrq: mmc_request currently being processed or waiting to be |
79 | * processed, or NULL when the slot is idle. | 82 | * processed, or NULL when the slot is idle. |
@@ -88,6 +91,9 @@ struct dw_mci_slot { | |||
88 | struct mmc_host *mmc; | 91 | struct mmc_host *mmc; |
89 | struct dw_mci *host; | 92 | struct dw_mci *host; |
90 | 93 | ||
94 | int quirks; | ||
95 | int wp_gpio; | ||
96 | |||
91 | u32 ctype; | 97 | u32 ctype; |
92 | 98 | ||
93 | struct mmc_request *mrq; | 99 | struct mmc_request *mrq; |
@@ -825,10 +831,12 @@ static int dw_mci_get_ro(struct mmc_host *mmc) | |||
825 | struct dw_mci_board *brd = slot->host->pdata; | 831 | struct dw_mci_board *brd = slot->host->pdata; |
826 | 832 | ||
827 | /* Use platform get_ro function, else try on board write protect */ | 833 | /* Use platform get_ro function, else try on board write protect */ |
828 | if (brd->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT) | 834 | if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) |
829 | read_only = 0; | 835 | read_only = 0; |
830 | else if (brd->get_ro) | 836 | else if (brd->get_ro) |
831 | read_only = brd->get_ro(slot->id); | 837 | read_only = brd->get_ro(slot->id); |
838 | else if (gpio_is_valid(slot->wp_gpio)) | ||
839 | read_only = gpio_get_value(slot->wp_gpio); | ||
832 | else | 840 | else |
833 | read_only = | 841 | read_only = |
834 | mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0; | 842 | mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0; |
@@ -1785,6 +1793,30 @@ static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot) | |||
1785 | return NULL; | 1793 | return NULL; |
1786 | } | 1794 | } |
1787 | 1795 | ||
1796 | static struct dw_mci_of_slot_quirks { | ||
1797 | char *quirk; | ||
1798 | int id; | ||
1799 | } of_slot_quirks[] = { | ||
1800 | { | ||
1801 | .quirk = "disable-wp", | ||
1802 | .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT, | ||
1803 | }, | ||
1804 | }; | ||
1805 | |||
1806 | static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot) | ||
1807 | { | ||
1808 | struct device_node *np = dw_mci_of_find_slot_node(dev, slot); | ||
1809 | int quirks = 0; | ||
1810 | int idx; | ||
1811 | |||
1812 | /* get quirks */ | ||
1813 | for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++) | ||
1814 | if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) | ||
1815 | quirks |= of_slot_quirks[idx].id; | ||
1816 | |||
1817 | return quirks; | ||
1818 | } | ||
1819 | |||
1788 | /* find out bus-width for a given slot */ | 1820 | /* find out bus-width for a given slot */ |
1789 | static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot) | 1821 | static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot) |
1790 | { | 1822 | { |
@@ -1799,7 +1831,34 @@ static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot) | |||
1799 | " as 1\n"); | 1831 | " as 1\n"); |
1800 | return bus_wd; | 1832 | return bus_wd; |
1801 | } | 1833 | } |
1834 | |||
1835 | /* find the write protect gpio for a given slot; or -1 if none specified */ | ||
1836 | static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot) | ||
1837 | { | ||
1838 | struct device_node *np = dw_mci_of_find_slot_node(dev, slot); | ||
1839 | int gpio; | ||
1840 | |||
1841 | if (!np) | ||
1842 | return -EINVAL; | ||
1843 | |||
1844 | gpio = of_get_named_gpio(np, "wp-gpios", 0); | ||
1845 | |||
1846 | /* Having a missing entry is valid; return silently */ | ||
1847 | if (!gpio_is_valid(gpio)) | ||
1848 | return -EINVAL; | ||
1849 | |||
1850 | if (devm_gpio_request(dev, gpio, "dw-mci-wp")) { | ||
1851 | dev_warn(dev, "gpio [%d] request failed\n", gpio); | ||
1852 | return -EINVAL; | ||
1853 | } | ||
1854 | |||
1855 | return gpio; | ||
1856 | } | ||
1802 | #else /* CONFIG_OF */ | 1857 | #else /* CONFIG_OF */ |
1858 | static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot) | ||
1859 | { | ||
1860 | return 0; | ||
1861 | } | ||
1803 | static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot) | 1862 | static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot) |
1804 | { | 1863 | { |
1805 | return 1; | 1864 | return 1; |
@@ -1808,6 +1867,10 @@ static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot) | |||
1808 | { | 1867 | { |
1809 | return NULL; | 1868 | return NULL; |
1810 | } | 1869 | } |
1870 | static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot) | ||
1871 | { | ||
1872 | return -EINVAL; | ||
1873 | } | ||
1811 | #endif /* CONFIG_OF */ | 1874 | #endif /* CONFIG_OF */ |
1812 | 1875 | ||
1813 | static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) | 1876 | static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) |
@@ -1828,6 +1891,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) | |||
1828 | slot->host = host; | 1891 | slot->host = host; |
1829 | host->slot[id] = slot; | 1892 | host->slot[id] = slot; |
1830 | 1893 | ||
1894 | slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id); | ||
1895 | |||
1831 | mmc->ops = &dw_mci_ops; | 1896 | mmc->ops = &dw_mci_ops; |
1832 | mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510); | 1897 | mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510); |
1833 | mmc->f_max = host->bus_hz; | 1898 | mmc->f_max = host->bus_hz; |
@@ -1923,6 +1988,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) | |||
1923 | else | 1988 | else |
1924 | clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); | 1989 | clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); |
1925 | 1990 | ||
1991 | slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id); | ||
1992 | |||
1926 | mmc_add_host(mmc); | 1993 | mmc_add_host(mmc); |
1927 | 1994 | ||
1928 | #if defined(CONFIG_DEBUG_FS) | 1995 | #if defined(CONFIG_DEBUG_FS) |
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index f8dd36102949..145cdaf000d1 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c | |||
@@ -21,7 +21,11 @@ | |||
21 | #include <linux/irq.h> | 21 | #include <linux/irq.h> |
22 | #include <linux/clk.h> | 22 | #include <linux/clk.h> |
23 | #include <linux/gpio.h> | 23 | #include <linux/gpio.h> |
24 | #include <linux/of_gpio.h> | ||
25 | #include <linux/of_irq.h> | ||
24 | #include <linux/mmc/host.h> | 26 | #include <linux/mmc/host.h> |
27 | #include <linux/mmc/slot-gpio.h> | ||
28 | #include <linux/pinctrl/consumer.h> | ||
25 | 29 | ||
26 | #include <asm/sizes.h> | 30 | #include <asm/sizes.h> |
27 | #include <asm/unaligned.h> | 31 | #include <asm/unaligned.h> |
@@ -51,8 +55,6 @@ struct mvsd_host { | |||
51 | struct mmc_host *mmc; | 55 | struct mmc_host *mmc; |
52 | struct device *dev; | 56 | struct device *dev; |
53 | struct clk *clk; | 57 | struct clk *clk; |
54 | int gpio_card_detect; | ||
55 | int gpio_write_protect; | ||
56 | }; | 58 | }; |
57 | 59 | ||
58 | #define mvsd_write(offs, val) writel(val, iobase + (offs)) | 60 | #define mvsd_write(offs, val) writel(val, iobase + (offs)) |
@@ -538,13 +540,6 @@ static void mvsd_timeout_timer(unsigned long data) | |||
538 | mmc_request_done(host->mmc, mrq); | 540 | mmc_request_done(host->mmc, mrq); |
539 | } | 541 | } |
540 | 542 | ||
541 | static irqreturn_t mvsd_card_detect_irq(int irq, void *dev) | ||
542 | { | ||
543 | struct mvsd_host *host = dev; | ||
544 | mmc_detect_change(host->mmc, msecs_to_jiffies(100)); | ||
545 | return IRQ_HANDLED; | ||
546 | } | ||
547 | |||
548 | static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable) | 543 | static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable) |
549 | { | 544 | { |
550 | struct mvsd_host *host = mmc_priv(mmc); | 545 | struct mvsd_host *host = mmc_priv(mmc); |
@@ -564,20 +559,6 @@ static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable) | |||
564 | spin_unlock_irqrestore(&host->lock, flags); | 559 | spin_unlock_irqrestore(&host->lock, flags); |
565 | } | 560 | } |
566 | 561 | ||
567 | static int mvsd_get_ro(struct mmc_host *mmc) | ||
568 | { | ||
569 | struct mvsd_host *host = mmc_priv(mmc); | ||
570 | |||
571 | if (host->gpio_write_protect) | ||
572 | return gpio_get_value(host->gpio_write_protect); | ||
573 | |||
574 | /* | ||
575 | * Board doesn't support read only detection; let the mmc core | ||
576 | * decide what to do. | ||
577 | */ | ||
578 | return -ENOSYS; | ||
579 | } | ||
580 | |||
581 | static void mvsd_power_up(struct mvsd_host *host) | 562 | static void mvsd_power_up(struct mvsd_host *host) |
582 | { | 563 | { |
583 | void __iomem *iobase = host->base; | 564 | void __iomem *iobase = host->base; |
@@ -674,7 +655,7 @@ static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
674 | 655 | ||
675 | static const struct mmc_host_ops mvsd_ops = { | 656 | static const struct mmc_host_ops mvsd_ops = { |
676 | .request = mvsd_request, | 657 | .request = mvsd_request, |
677 | .get_ro = mvsd_get_ro, | 658 | .get_ro = mmc_gpio_get_ro, |
678 | .set_ios = mvsd_set_ios, | 659 | .set_ios = mvsd_set_ios, |
679 | .enable_sdio_irq = mvsd_enable_sdio_irq, | 660 | .enable_sdio_irq = mvsd_enable_sdio_irq, |
680 | }; | 661 | }; |
@@ -703,17 +684,18 @@ mv_conf_mbus_windows(struct mvsd_host *host, | |||
703 | 684 | ||
704 | static int __init mvsd_probe(struct platform_device *pdev) | 685 | static int __init mvsd_probe(struct platform_device *pdev) |
705 | { | 686 | { |
687 | struct device_node *np = pdev->dev.of_node; | ||
706 | struct mmc_host *mmc = NULL; | 688 | struct mmc_host *mmc = NULL; |
707 | struct mvsd_host *host = NULL; | 689 | struct mvsd_host *host = NULL; |
708 | const struct mvsdio_platform_data *mvsd_data; | ||
709 | const struct mbus_dram_target_info *dram; | 690 | const struct mbus_dram_target_info *dram; |
710 | struct resource *r; | 691 | struct resource *r; |
711 | int ret, irq; | 692 | int ret, irq; |
693 | int gpio_card_detect, gpio_write_protect; | ||
694 | struct pinctrl *pinctrl; | ||
712 | 695 | ||
713 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 696 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
714 | irq = platform_get_irq(pdev, 0); | 697 | irq = platform_get_irq(pdev, 0); |
715 | mvsd_data = pdev->dev.platform_data; | 698 | if (!r || irq < 0) |
716 | if (!r || irq < 0 || !mvsd_data) | ||
717 | return -ENXIO; | 699 | return -ENXIO; |
718 | 700 | ||
719 | mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev); | 701 | mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev); |
@@ -725,8 +707,43 @@ static int __init mvsd_probe(struct platform_device *pdev) | |||
725 | host = mmc_priv(mmc); | 707 | host = mmc_priv(mmc); |
726 | host->mmc = mmc; | 708 | host->mmc = mmc; |
727 | host->dev = &pdev->dev; | 709 | host->dev = &pdev->dev; |
728 | host->base_clock = mvsd_data->clock / 2; | 710 | |
729 | host->clk = ERR_PTR(-EINVAL); | 711 | pinctrl = devm_pinctrl_get_select_default(&pdev->dev); |
712 | if (IS_ERR(pinctrl)) | ||
713 | dev_warn(&pdev->dev, "no pins associated\n"); | ||
714 | |||
715 | /* | ||
716 | * Some non-DT platforms do not pass a clock, and the clock | ||
717 | * frequency is passed through platform_data. On DT platforms, | ||
718 | * a clock must always be passed, even if there is no gatable | ||
719 | * clock associated to the SDIO interface (it can simply be a | ||
720 | * fixed rate clock). | ||
721 | */ | ||
722 | host->clk = devm_clk_get(&pdev->dev, NULL); | ||
723 | if (!IS_ERR(host->clk)) | ||
724 | clk_prepare_enable(host->clk); | ||
725 | |||
726 | if (np) { | ||
727 | if (IS_ERR(host->clk)) { | ||
728 | dev_err(&pdev->dev, "DT platforms must have a clock associated\n"); | ||
729 | ret = -EINVAL; | ||
730 | goto out; | ||
731 | } | ||
732 | |||
733 | host->base_clock = clk_get_rate(host->clk) / 2; | ||
734 | gpio_card_detect = of_get_named_gpio(np, "cd-gpios", 0); | ||
735 | gpio_write_protect = of_get_named_gpio(np, "wp-gpios", 0); | ||
736 | } else { | ||
737 | const struct mvsdio_platform_data *mvsd_data; | ||
738 | mvsd_data = pdev->dev.platform_data; | ||
739 | if (!mvsd_data) { | ||
740 | ret = -ENXIO; | ||
741 | goto out; | ||
742 | } | ||
743 | host->base_clock = mvsd_data->clock / 2; | ||
744 | gpio_card_detect = mvsd_data->gpio_card_detect; | ||
745 | gpio_write_protect = mvsd_data->gpio_write_protect; | ||
746 | } | ||
730 | 747 | ||
731 | mmc->ops = &mvsd_ops; | 748 | mmc->ops = &mvsd_ops; |
732 | 749 | ||
@@ -765,43 +782,14 @@ static int __init mvsd_probe(struct platform_device *pdev) | |||
765 | goto out; | 782 | goto out; |
766 | } | 783 | } |
767 | 784 | ||
768 | /* Not all platforms can gate the clock, so it is not | 785 | if (gpio_is_valid(gpio_card_detect)) { |
769 | an error if the clock does not exists. */ | 786 | ret = mmc_gpio_request_cd(mmc, gpio_card_detect); |
770 | host->clk = devm_clk_get(&pdev->dev, NULL); | 787 | if (ret) |
771 | if (!IS_ERR(host->clk)) | 788 | goto out; |
772 | clk_prepare_enable(host->clk); | 789 | } else |
773 | |||
774 | if (mvsd_data->gpio_card_detect) { | ||
775 | ret = devm_gpio_request_one(&pdev->dev, | ||
776 | mvsd_data->gpio_card_detect, | ||
777 | GPIOF_IN, DRIVER_NAME " cd"); | ||
778 | if (ret == 0) { | ||
779 | irq = gpio_to_irq(mvsd_data->gpio_card_detect); | ||
780 | ret = devm_request_irq(&pdev->dev, irq, | ||
781 | mvsd_card_detect_irq, | ||
782 | IRQ_TYPE_EDGE_RISING | | ||
783 | IRQ_TYPE_EDGE_FALLING, | ||
784 | DRIVER_NAME " cd", host); | ||
785 | if (ret == 0) | ||
786 | host->gpio_card_detect = | ||
787 | mvsd_data->gpio_card_detect; | ||
788 | else | ||
789 | devm_gpio_free(&pdev->dev, | ||
790 | mvsd_data->gpio_card_detect); | ||
791 | } | ||
792 | } | ||
793 | if (!host->gpio_card_detect) | ||
794 | mmc->caps |= MMC_CAP_NEEDS_POLL; | 790 | mmc->caps |= MMC_CAP_NEEDS_POLL; |
795 | 791 | ||
796 | if (mvsd_data->gpio_write_protect) { | 792 | mmc_gpio_request_ro(mmc, gpio_write_protect); |
797 | ret = devm_gpio_request_one(&pdev->dev, | ||
798 | mvsd_data->gpio_write_protect, | ||
799 | GPIOF_IN, DRIVER_NAME " wp"); | ||
800 | if (ret == 0) { | ||
801 | host->gpio_write_protect = | ||
802 | mvsd_data->gpio_write_protect; | ||
803 | } | ||
804 | } | ||
805 | 793 | ||
806 | setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host); | 794 | setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host); |
807 | platform_set_drvdata(pdev, mmc); | 795 | platform_set_drvdata(pdev, mmc); |
@@ -811,15 +799,17 @@ static int __init mvsd_probe(struct platform_device *pdev) | |||
811 | 799 | ||
812 | pr_notice("%s: %s driver initialized, ", | 800 | pr_notice("%s: %s driver initialized, ", |
813 | mmc_hostname(mmc), DRIVER_NAME); | 801 | mmc_hostname(mmc), DRIVER_NAME); |
814 | if (host->gpio_card_detect) | 802 | if (!(mmc->caps & MMC_CAP_NEEDS_POLL)) |
815 | printk("using GPIO %d for card detection\n", | 803 | printk("using GPIO %d for card detection\n", |
816 | host->gpio_card_detect); | 804 | gpio_card_detect); |
817 | else | 805 | else |
818 | printk("lacking card detect (fall back to polling)\n"); | 806 | printk("lacking card detect (fall back to polling)\n"); |
819 | return 0; | 807 | return 0; |
820 | 808 | ||
821 | out: | 809 | out: |
822 | if (mmc) { | 810 | if (mmc) { |
811 | mmc_gpio_free_cd(mmc); | ||
812 | mmc_gpio_free_ro(mmc); | ||
823 | if (!IS_ERR(host->clk)) | 813 | if (!IS_ERR(host->clk)) |
824 | clk_disable_unprepare(host->clk); | 814 | clk_disable_unprepare(host->clk); |
825 | mmc_free_host(mmc); | 815 | mmc_free_host(mmc); |
@@ -834,6 +824,8 @@ static int __exit mvsd_remove(struct platform_device *pdev) | |||
834 | 824 | ||
835 | struct mvsd_host *host = mmc_priv(mmc); | 825 | struct mvsd_host *host = mmc_priv(mmc); |
836 | 826 | ||
827 | mmc_gpio_free_cd(mmc); | ||
828 | mmc_gpio_free_ro(mmc); | ||
837 | mmc_remove_host(mmc); | 829 | mmc_remove_host(mmc); |
838 | del_timer_sync(&host->timer); | 830 | del_timer_sync(&host->timer); |
839 | mvsd_power_down(host); | 831 | mvsd_power_down(host); |
@@ -873,12 +865,19 @@ static int mvsd_resume(struct platform_device *dev) | |||
873 | #define mvsd_resume NULL | 865 | #define mvsd_resume NULL |
874 | #endif | 866 | #endif |
875 | 867 | ||
868 | static const struct of_device_id mvsdio_dt_ids[] = { | ||
869 | { .compatible = "marvell,orion-sdio" }, | ||
870 | { /* sentinel */ } | ||
871 | }; | ||
872 | MODULE_DEVICE_TABLE(of, mvsdio_dt_ids); | ||
873 | |||
876 | static struct platform_driver mvsd_driver = { | 874 | static struct platform_driver mvsd_driver = { |
877 | .remove = __exit_p(mvsd_remove), | 875 | .remove = __exit_p(mvsd_remove), |
878 | .suspend = mvsd_suspend, | 876 | .suspend = mvsd_suspend, |
879 | .resume = mvsd_resume, | 877 | .resume = mvsd_resume, |
880 | .driver = { | 878 | .driver = { |
881 | .name = DRIVER_NAME, | 879 | .name = DRIVER_NAME, |
880 | .of_match_table = mvsdio_dt_ids, | ||
882 | }, | 881 | }, |
883 | }; | 882 | }; |
884 | 883 | ||
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index 5b665551a6f3..4efe3021b217 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c | |||
@@ -354,7 +354,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) | |||
354 | struct dma_async_tx_descriptor *desc; | 354 | struct dma_async_tx_descriptor *desc; |
355 | struct scatterlist *sgl = data->sg, *sg; | 355 | struct scatterlist *sgl = data->sg, *sg; |
356 | unsigned int sg_len = data->sg_len; | 356 | unsigned int sg_len = data->sg_len; |
357 | int i; | 357 | unsigned int i; |
358 | 358 | ||
359 | unsigned short dma_data_dir, timeout; | 359 | unsigned short dma_data_dir, timeout; |
360 | enum dma_transfer_direction slave_dirn; | 360 | enum dma_transfer_direction slave_dirn; |
@@ -804,3 +804,4 @@ module_platform_driver(mxs_mmc_driver); | |||
804 | MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral"); | 804 | MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral"); |
805 | MODULE_AUTHOR("Freescale Semiconductor"); | 805 | MODULE_AUTHOR("Freescale Semiconductor"); |
806 | MODULE_LICENSE("GPL"); | 806 | MODULE_LICENSE("GPL"); |
807 | MODULE_ALIAS("platform:" DRIVER_NAME); | ||
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c index 1534b582c419..d720b5e05b9c 100644 --- a/drivers/mmc/host/of_mmc_spi.c +++ b/drivers/mmc/host/of_mmc_spi.c | |||
@@ -146,7 +146,7 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi) | |||
146 | oms->pdata.get_ro = of_mmc_spi_get_ro; | 146 | oms->pdata.get_ro = of_mmc_spi_get_ro; |
147 | 147 | ||
148 | oms->detect_irq = irq_of_parse_and_map(np, 0); | 148 | oms->detect_irq = irq_of_parse_and_map(np, 0); |
149 | if (oms->detect_irq != NO_IRQ) { | 149 | if (oms->detect_irq != 0) { |
150 | oms->pdata.init = of_mmc_spi_init; | 150 | oms->pdata.init = of_mmc_spi_init; |
151 | oms->pdata.exit = of_mmc_spi_exit; | 151 | oms->pdata.exit = of_mmc_spi_exit; |
152 | } else { | 152 | } else { |
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index 468c92303167..f981f7d1f6e3 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c | |||
@@ -1097,11 +1097,6 @@ static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios) | |||
1097 | voltage = OUTPUT_1V8; | 1097 | voltage = OUTPUT_1V8; |
1098 | 1098 | ||
1099 | if (voltage == OUTPUT_1V8) { | 1099 | if (voltage == OUTPUT_1V8) { |
1100 | err = rtsx_pci_write_register(pcr, | ||
1101 | SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B); | ||
1102 | if (err < 0) | ||
1103 | goto out; | ||
1104 | |||
1105 | err = sd_wait_voltage_stable_1(host); | 1100 | err = sd_wait_voltage_stable_1(host); |
1106 | if (err < 0) | 1101 | if (err < 0) |
1107 | goto out; | 1102 | goto out; |
diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c new file mode 100644 index 000000000000..8ffea05152c6 --- /dev/null +++ b/drivers/mmc/host/sdhci-bcm2835.c | |||
@@ -0,0 +1,210 @@ | |||
1 | /* | ||
2 | * BCM2835 SDHCI | ||
3 | * Copyright (C) 2012 Stephen Warren | ||
4 | * Based on U-Boot's MMC driver for the BCM2835 by Oleksandr Tymoshenko & me | ||
5 | * Portions of the code there were obviously based on the Linux kernel at: | ||
6 | * git://github.com/raspberrypi/linux.git rpi-3.6.y | ||
7 | * commit f5b930b "Main bcm2708 linux port" signed-off-by Dom Cobley. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms and conditions of the GNU General Public License, | ||
11 | * version 2, as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
20 | */ | ||
21 | |||
22 | #include <linux/delay.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/mmc/host.h> | ||
25 | #include "sdhci-pltfm.h" | ||
26 | |||
27 | /* | ||
28 | * 400KHz is max freq for card ID etc. Use that as min card clock. We need to | ||
29 | * know the min to enable static calculation of max BCM2835_SDHCI_WRITE_DELAY. | ||
30 | */ | ||
31 | #define MIN_FREQ 400000 | ||
32 | |||
33 | /* | ||
34 | * The Arasan has a bugette whereby it may lose the content of successive | ||
35 | * writes to registers that are within two SD-card clock cycles of each other | ||
36 | * (a clock domain crossing problem). It seems, however, that the data | ||
37 | * register does not have this problem, which is just as well - otherwise we'd | ||
38 | * have to nobble the DMA engine too. | ||
39 | * | ||
40 | * This should probably be dynamically calculated based on the actual card | ||
41 | * frequency. However, this is the longest we'll have to wait, and doesn't | ||
42 | * seem to slow access down too much, so the added complexity doesn't seem | ||
43 | * worth it for now. | ||
44 | * | ||
45 | * 1/MIN_FREQ is (max) time per tick of eMMC clock. | ||
46 | * 2/MIN_FREQ is time for two ticks. | ||
47 | * Multiply by 1000000 to get uS per two ticks. | ||
48 | * *1000000 for uSecs. | ||
49 | * +1 for hack rounding. | ||
50 | */ | ||
51 | #define BCM2835_SDHCI_WRITE_DELAY (((2 * 1000000) / MIN_FREQ) + 1) | ||
52 | |||
53 | struct bcm2835_sdhci { | ||
54 | u32 shadow; | ||
55 | }; | ||
56 | |||
57 | static void bcm2835_sdhci_writel(struct sdhci_host *host, u32 val, int reg) | ||
58 | { | ||
59 | writel(val, host->ioaddr + reg); | ||
60 | |||
61 | udelay(BCM2835_SDHCI_WRITE_DELAY); | ||
62 | } | ||
63 | |||
64 | static inline u32 bcm2835_sdhci_readl(struct sdhci_host *host, int reg) | ||
65 | { | ||
66 | u32 val = readl(host->ioaddr + reg); | ||
67 | |||
68 | if (reg == SDHCI_CAPABILITIES) | ||
69 | val |= SDHCI_CAN_VDD_330; | ||
70 | |||
71 | return val; | ||
72 | } | ||
73 | |||
74 | static void bcm2835_sdhci_writew(struct sdhci_host *host, u16 val, int reg) | ||
75 | { | ||
76 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
77 | struct bcm2835_sdhci *bcm2835_host = pltfm_host->priv; | ||
78 | u32 oldval = (reg == SDHCI_COMMAND) ? bcm2835_host->shadow : | ||
79 | bcm2835_sdhci_readl(host, reg & ~3); | ||
80 | u32 word_num = (reg >> 1) & 1; | ||
81 | u32 word_shift = word_num * 16; | ||
82 | u32 mask = 0xffff << word_shift; | ||
83 | u32 newval = (oldval & ~mask) | (val << word_shift); | ||
84 | |||
85 | if (reg == SDHCI_TRANSFER_MODE) | ||
86 | bcm2835_host->shadow = newval; | ||
87 | else | ||
88 | bcm2835_sdhci_writel(host, newval, reg & ~3); | ||
89 | } | ||
90 | |||
91 | static u16 bcm2835_sdhci_readw(struct sdhci_host *host, int reg) | ||
92 | { | ||
93 | u32 val = bcm2835_sdhci_readl(host, (reg & ~3)); | ||
94 | u32 word_num = (reg >> 1) & 1; | ||
95 | u32 word_shift = word_num * 16; | ||
96 | u32 word = (val >> word_shift) & 0xffff; | ||
97 | |||
98 | return word; | ||
99 | } | ||
100 | |||
101 | static void bcm2835_sdhci_writeb(struct sdhci_host *host, u8 val, int reg) | ||
102 | { | ||
103 | u32 oldval = bcm2835_sdhci_readl(host, reg & ~3); | ||
104 | u32 byte_num = reg & 3; | ||
105 | u32 byte_shift = byte_num * 8; | ||
106 | u32 mask = 0xff << byte_shift; | ||
107 | u32 newval = (oldval & ~mask) | (val << byte_shift); | ||
108 | |||
109 | bcm2835_sdhci_writel(host, newval, reg & ~3); | ||
110 | } | ||
111 | |||
112 | static u8 bcm2835_sdhci_readb(struct sdhci_host *host, int reg) | ||
113 | { | ||
114 | u32 val = bcm2835_sdhci_readl(host, (reg & ~3)); | ||
115 | u32 byte_num = reg & 3; | ||
116 | u32 byte_shift = byte_num * 8; | ||
117 | u32 byte = (val >> byte_shift) & 0xff; | ||
118 | |||
119 | return byte; | ||
120 | } | ||
121 | |||
122 | unsigned int bcm2835_sdhci_get_min_clock(struct sdhci_host *host) | ||
123 | { | ||
124 | return MIN_FREQ; | ||
125 | } | ||
126 | |||
127 | static struct sdhci_ops bcm2835_sdhci_ops = { | ||
128 | .write_l = bcm2835_sdhci_writel, | ||
129 | .write_w = bcm2835_sdhci_writew, | ||
130 | .write_b = bcm2835_sdhci_writeb, | ||
131 | .read_l = bcm2835_sdhci_readl, | ||
132 | .read_w = bcm2835_sdhci_readw, | ||
133 | .read_b = bcm2835_sdhci_readb, | ||
134 | .get_max_clock = sdhci_pltfm_clk_get_max_clock, | ||
135 | .get_min_clock = bcm2835_sdhci_get_min_clock, | ||
136 | }; | ||
137 | |||
138 | static struct sdhci_pltfm_data bcm2835_sdhci_pdata = { | ||
139 | .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | | ||
140 | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, | ||
141 | .ops = &bcm2835_sdhci_ops, | ||
142 | }; | ||
143 | |||
144 | static int bcm2835_sdhci_probe(struct platform_device *pdev) | ||
145 | { | ||
146 | struct sdhci_host *host; | ||
147 | struct bcm2835_sdhci *bcm2835_host; | ||
148 | struct sdhci_pltfm_host *pltfm_host; | ||
149 | int ret; | ||
150 | |||
151 | host = sdhci_pltfm_init(pdev, &bcm2835_sdhci_pdata); | ||
152 | if (IS_ERR(host)) | ||
153 | return PTR_ERR(host); | ||
154 | |||
155 | bcm2835_host = devm_kzalloc(&pdev->dev, sizeof(*bcm2835_host), | ||
156 | GFP_KERNEL); | ||
157 | if (!bcm2835_host) { | ||
158 | dev_err(mmc_dev(host->mmc), | ||
159 | "failed to allocate bcm2835_sdhci\n"); | ||
160 | return -ENOMEM; | ||
161 | } | ||
162 | |||
163 | pltfm_host = sdhci_priv(host); | ||
164 | pltfm_host->priv = bcm2835_host; | ||
165 | |||
166 | pltfm_host->clk = devm_clk_get(&pdev->dev, NULL); | ||
167 | if (IS_ERR(pltfm_host->clk)) { | ||
168 | ret = PTR_ERR(pltfm_host->clk); | ||
169 | goto err; | ||
170 | } | ||
171 | |||
172 | return sdhci_add_host(host); | ||
173 | |||
174 | err: | ||
175 | sdhci_pltfm_free(pdev); | ||
176 | return ret; | ||
177 | } | ||
178 | |||
179 | static int bcm2835_sdhci_remove(struct platform_device *pdev) | ||
180 | { | ||
181 | struct sdhci_host *host = platform_get_drvdata(pdev); | ||
182 | int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); | ||
183 | |||
184 | sdhci_remove_host(host, dead); | ||
185 | sdhci_pltfm_free(pdev); | ||
186 | |||
187 | return 0; | ||
188 | } | ||
189 | |||
190 | static const struct of_device_id bcm2835_sdhci_of_match[] = { | ||
191 | { .compatible = "brcm,bcm2835-sdhci" }, | ||
192 | { } | ||
193 | }; | ||
194 | MODULE_DEVICE_TABLE(of, bcm2835_sdhci_of_match); | ||
195 | |||
196 | static struct platform_driver bcm2835_sdhci_driver = { | ||
197 | .driver = { | ||
198 | .name = "sdhci-bcm2835", | ||
199 | .owner = THIS_MODULE, | ||
200 | .of_match_table = bcm2835_sdhci_of_match, | ||
201 | .pm = SDHCI_PLTFM_PMOPS, | ||
202 | }, | ||
203 | .probe = bcm2835_sdhci_probe, | ||
204 | .remove = bcm2835_sdhci_remove, | ||
205 | }; | ||
206 | module_platform_driver(bcm2835_sdhci_driver); | ||
207 | |||
208 | MODULE_DESCRIPTION("BCM2835 SDHCI driver"); | ||
209 | MODULE_AUTHOR("Stephen Warren"); | ||
210 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index e07df812ff1e..78ac00227c1a 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/mmc/host.h> | 21 | #include <linux/mmc/host.h> |
22 | #include <linux/mmc/mmc.h> | 22 | #include <linux/mmc/mmc.h> |
23 | #include <linux/mmc/sdio.h> | 23 | #include <linux/mmc/sdio.h> |
24 | #include <linux/mmc/slot-gpio.h> | ||
24 | #include <linux/of.h> | 25 | #include <linux/of.h> |
25 | #include <linux/of_device.h> | 26 | #include <linux/of_device.h> |
26 | #include <linux/of_gpio.h> | 27 | #include <linux/of_gpio.h> |
@@ -29,12 +30,22 @@ | |||
29 | #include "sdhci-pltfm.h" | 30 | #include "sdhci-pltfm.h" |
30 | #include "sdhci-esdhc.h" | 31 | #include "sdhci-esdhc.h" |
31 | 32 | ||
32 | #define SDHCI_CTRL_D3CD 0x08 | 33 | #define ESDHC_CTRL_D3CD 0x08 |
33 | /* VENDOR SPEC register */ | 34 | /* VENDOR SPEC register */ |
34 | #define SDHCI_VENDOR_SPEC 0xC0 | 35 | #define ESDHC_VENDOR_SPEC 0xc0 |
35 | #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 | 36 | #define ESDHC_VENDOR_SPEC_SDIO_QUIRK (1 << 1) |
36 | #define SDHCI_WTMK_LVL 0x44 | 37 | #define ESDHC_WTMK_LVL 0x44 |
37 | #define SDHCI_MIX_CTRL 0x48 | 38 | #define ESDHC_MIX_CTRL 0x48 |
39 | #define ESDHC_MIX_CTRL_AC23EN (1 << 7) | ||
40 | /* Bits 3 and 6 are not SDHCI standard definitions */ | ||
41 | #define ESDHC_MIX_CTRL_SDHCI_MASK 0xb7 | ||
42 | |||
43 | /* | ||
44 | * Our interpretation of the SDHCI_HOST_CONTROL register | ||
45 | */ | ||
46 | #define ESDHC_CTRL_4BITBUS (0x1 << 1) | ||
47 | #define ESDHC_CTRL_8BITBUS (0x2 << 1) | ||
48 | #define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1) | ||
38 | 49 | ||
39 | /* | 50 | /* |
40 | * There is an INT DMA ERR mis-match between eSDHC and STD SDHC SPEC: | 51 | * There is an INT DMA ERR mis-match between eSDHC and STD SDHC SPEC: |
@@ -42,7 +53,7 @@ | |||
42 | * but bit28 is used as the INT DMA ERR in fsl eSDHC design. | 53 | * but bit28 is used as the INT DMA ERR in fsl eSDHC design. |
43 | * Define this macro DMA error INT for fsl eSDHC | 54 | * Define this macro DMA error INT for fsl eSDHC |
44 | */ | 55 | */ |
45 | #define SDHCI_INT_VENDOR_SPEC_DMA_ERR 0x10000000 | 56 | #define ESDHC_INT_VENDOR_SPEC_DMA_ERR (1 << 28) |
46 | 57 | ||
47 | /* | 58 | /* |
48 | * The CMDTYPE of the CMD register (offset 0xE) should be set to | 59 | * The CMDTYPE of the CMD register (offset 0xE) should be set to |
@@ -143,23 +154,8 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i | |||
143 | 154 | ||
144 | static u32 esdhc_readl_le(struct sdhci_host *host, int reg) | 155 | static u32 esdhc_readl_le(struct sdhci_host *host, int reg) |
145 | { | 156 | { |
146 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
147 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | ||
148 | struct esdhc_platform_data *boarddata = &imx_data->boarddata; | ||
149 | |||
150 | /* fake CARD_PRESENT flag */ | ||
151 | u32 val = readl(host->ioaddr + reg); | 157 | u32 val = readl(host->ioaddr + reg); |
152 | 158 | ||
153 | if (unlikely((reg == SDHCI_PRESENT_STATE) | ||
154 | && gpio_is_valid(boarddata->cd_gpio))) { | ||
155 | if (gpio_get_value(boarddata->cd_gpio)) | ||
156 | /* no card, if a valid gpio says so... */ | ||
157 | val &= ~SDHCI_CARD_PRESENT; | ||
158 | else | ||
159 | /* ... in all other cases assume card is present */ | ||
160 | val |= SDHCI_CARD_PRESENT; | ||
161 | } | ||
162 | |||
163 | if (unlikely(reg == SDHCI_CAPABILITIES)) { | 159 | if (unlikely(reg == SDHCI_CAPABILITIES)) { |
164 | /* In FSL esdhc IC module, only bit20 is used to indicate the | 160 | /* In FSL esdhc IC module, only bit20 is used to indicate the |
165 | * ADMA2 capability of esdhc, but this bit is messed up on | 161 | * ADMA2 capability of esdhc, but this bit is messed up on |
@@ -175,8 +171,8 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg) | |||
175 | } | 171 | } |
176 | 172 | ||
177 | if (unlikely(reg == SDHCI_INT_STATUS)) { | 173 | if (unlikely(reg == SDHCI_INT_STATUS)) { |
178 | if (val & SDHCI_INT_VENDOR_SPEC_DMA_ERR) { | 174 | if (val & ESDHC_INT_VENDOR_SPEC_DMA_ERR) { |
179 | val &= ~SDHCI_INT_VENDOR_SPEC_DMA_ERR; | 175 | val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR; |
180 | val |= SDHCI_INT_ADMA_ERROR; | 176 | val |= SDHCI_INT_ADMA_ERROR; |
181 | } | 177 | } |
182 | } | 178 | } |
@@ -188,17 +184,9 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) | |||
188 | { | 184 | { |
189 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 185 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
190 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | 186 | struct pltfm_imx_data *imx_data = pltfm_host->priv; |
191 | struct esdhc_platform_data *boarddata = &imx_data->boarddata; | ||
192 | u32 data; | 187 | u32 data; |
193 | 188 | ||
194 | if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) { | 189 | if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) { |
195 | if (boarddata->cd_type == ESDHC_CD_GPIO) | ||
196 | /* | ||
197 | * These interrupts won't work with a custom | ||
198 | * card_detect gpio (only applied to mx25/35) | ||
199 | */ | ||
200 | val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); | ||
201 | |||
202 | if (val & SDHCI_INT_CARD_INT) { | 190 | if (val & SDHCI_INT_CARD_INT) { |
203 | /* | 191 | /* |
204 | * Clear and then set D3CD bit to avoid missing the | 192 | * Clear and then set D3CD bit to avoid missing the |
@@ -209,9 +197,9 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) | |||
209 | * re-sample it by the following steps. | 197 | * re-sample it by the following steps. |
210 | */ | 198 | */ |
211 | data = readl(host->ioaddr + SDHCI_HOST_CONTROL); | 199 | data = readl(host->ioaddr + SDHCI_HOST_CONTROL); |
212 | data &= ~SDHCI_CTRL_D3CD; | 200 | data &= ~ESDHC_CTRL_D3CD; |
213 | writel(data, host->ioaddr + SDHCI_HOST_CONTROL); | 201 | writel(data, host->ioaddr + SDHCI_HOST_CONTROL); |
214 | data |= SDHCI_CTRL_D3CD; | 202 | data |= ESDHC_CTRL_D3CD; |
215 | writel(data, host->ioaddr + SDHCI_HOST_CONTROL); | 203 | writel(data, host->ioaddr + SDHCI_HOST_CONTROL); |
216 | } | 204 | } |
217 | } | 205 | } |
@@ -220,15 +208,15 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) | |||
220 | && (reg == SDHCI_INT_STATUS) | 208 | && (reg == SDHCI_INT_STATUS) |
221 | && (val & SDHCI_INT_DATA_END))) { | 209 | && (val & SDHCI_INT_DATA_END))) { |
222 | u32 v; | 210 | u32 v; |
223 | v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); | 211 | v = readl(host->ioaddr + ESDHC_VENDOR_SPEC); |
224 | v &= ~SDHCI_VENDOR_SPEC_SDIO_QUIRK; | 212 | v &= ~ESDHC_VENDOR_SPEC_SDIO_QUIRK; |
225 | writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); | 213 | writel(v, host->ioaddr + ESDHC_VENDOR_SPEC); |
226 | } | 214 | } |
227 | 215 | ||
228 | if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) { | 216 | if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) { |
229 | if (val & SDHCI_INT_ADMA_ERROR) { | 217 | if (val & SDHCI_INT_ADMA_ERROR) { |
230 | val &= ~SDHCI_INT_ADMA_ERROR; | 218 | val &= ~SDHCI_INT_ADMA_ERROR; |
231 | val |= SDHCI_INT_VENDOR_SPEC_DMA_ERR; | 219 | val |= ESDHC_INT_VENDOR_SPEC_DMA_ERR; |
232 | } | 220 | } |
233 | } | 221 | } |
234 | 222 | ||
@@ -237,15 +225,18 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) | |||
237 | 225 | ||
238 | static u16 esdhc_readw_le(struct sdhci_host *host, int reg) | 226 | static u16 esdhc_readw_le(struct sdhci_host *host, int reg) |
239 | { | 227 | { |
228 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
229 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | ||
230 | |||
240 | if (unlikely(reg == SDHCI_HOST_VERSION)) { | 231 | if (unlikely(reg == SDHCI_HOST_VERSION)) { |
241 | u16 val = readw(host->ioaddr + (reg ^ 2)); | 232 | reg ^= 2; |
242 | /* | 233 | if (is_imx6q_usdhc(imx_data)) { |
243 | * uSDHC supports SDHCI v3.0, but it's encoded as value | 234 | /* |
244 | * 0x3 in host controller version register, which violates | 235 | * The usdhc register returns a wrong host version. |
245 | * SDHCI_SPEC_300 definition. Work it around here. | 236 | * Correct it here. |
246 | */ | 237 | */ |
247 | if ((val & SDHCI_SPEC_VER_MASK) == 3) | 238 | return SDHCI_SPEC_300; |
248 | return --val; | 239 | } |
249 | } | 240 | } |
250 | 241 | ||
251 | return readw(host->ioaddr + reg); | 242 | return readw(host->ioaddr + reg); |
@@ -258,20 +249,32 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) | |||
258 | 249 | ||
259 | switch (reg) { | 250 | switch (reg) { |
260 | case SDHCI_TRANSFER_MODE: | 251 | case SDHCI_TRANSFER_MODE: |
261 | /* | ||
262 | * Postpone this write, we must do it together with a | ||
263 | * command write that is down below. | ||
264 | */ | ||
265 | if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) | 252 | if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) |
266 | && (host->cmd->opcode == SD_IO_RW_EXTENDED) | 253 | && (host->cmd->opcode == SD_IO_RW_EXTENDED) |
267 | && (host->cmd->data->blocks > 1) | 254 | && (host->cmd->data->blocks > 1) |
268 | && (host->cmd->data->flags & MMC_DATA_READ)) { | 255 | && (host->cmd->data->flags & MMC_DATA_READ)) { |
269 | u32 v; | 256 | u32 v; |
270 | v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); | 257 | v = readl(host->ioaddr + ESDHC_VENDOR_SPEC); |
271 | v |= SDHCI_VENDOR_SPEC_SDIO_QUIRK; | 258 | v |= ESDHC_VENDOR_SPEC_SDIO_QUIRK; |
272 | writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); | 259 | writel(v, host->ioaddr + ESDHC_VENDOR_SPEC); |
260 | } | ||
261 | |||
262 | if (is_imx6q_usdhc(imx_data)) { | ||
263 | u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL); | ||
264 | /* Swap AC23 bit */ | ||
265 | if (val & SDHCI_TRNS_AUTO_CMD23) { | ||
266 | val &= ~SDHCI_TRNS_AUTO_CMD23; | ||
267 | val |= ESDHC_MIX_CTRL_AC23EN; | ||
268 | } | ||
269 | m = val | (m & ~ESDHC_MIX_CTRL_SDHCI_MASK); | ||
270 | writel(m, host->ioaddr + ESDHC_MIX_CTRL); | ||
271 | } else { | ||
272 | /* | ||
273 | * Postpone this write, we must do it together with a | ||
274 | * command write that is down below. | ||
275 | */ | ||
276 | imx_data->scratchpad = val; | ||
273 | } | 277 | } |
274 | imx_data->scratchpad = val; | ||
275 | return; | 278 | return; |
276 | case SDHCI_COMMAND: | 279 | case SDHCI_COMMAND: |
277 | if ((host->cmd->opcode == MMC_STOP_TRANSMISSION || | 280 | if ((host->cmd->opcode == MMC_STOP_TRANSMISSION || |
@@ -279,16 +282,12 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) | |||
279 | (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) | 282 | (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) |
280 | val |= SDHCI_CMD_ABORTCMD; | 283 | val |= SDHCI_CMD_ABORTCMD; |
281 | 284 | ||
282 | if (is_imx6q_usdhc(imx_data)) { | 285 | if (is_imx6q_usdhc(imx_data)) |
283 | u32 m = readl(host->ioaddr + SDHCI_MIX_CTRL); | ||
284 | m = imx_data->scratchpad | (m & 0xffff0000); | ||
285 | writel(m, host->ioaddr + SDHCI_MIX_CTRL); | ||
286 | writel(val << 16, | 286 | writel(val << 16, |
287 | host->ioaddr + SDHCI_TRANSFER_MODE); | 287 | host->ioaddr + SDHCI_TRANSFER_MODE); |
288 | } else { | 288 | else |
289 | writel(val << 16 | imx_data->scratchpad, | 289 | writel(val << 16 | imx_data->scratchpad, |
290 | host->ioaddr + SDHCI_TRANSFER_MODE); | 290 | host->ioaddr + SDHCI_TRANSFER_MODE); |
291 | } | ||
292 | return; | 291 | return; |
293 | case SDHCI_BLOCK_SIZE: | 292 | case SDHCI_BLOCK_SIZE: |
294 | val &= ~SDHCI_MAKE_BLKSZ(0x7, 0); | 293 | val &= ~SDHCI_MAKE_BLKSZ(0x7, 0); |
@@ -302,6 +301,7 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) | |||
302 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 301 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
303 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | 302 | struct pltfm_imx_data *imx_data = pltfm_host->priv; |
304 | u32 new_val; | 303 | u32 new_val; |
304 | u32 mask; | ||
305 | 305 | ||
306 | switch (reg) { | 306 | switch (reg) { |
307 | case SDHCI_POWER_CONTROL: | 307 | case SDHCI_POWER_CONTROL: |
@@ -311,10 +311,8 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) | |||
311 | */ | 311 | */ |
312 | return; | 312 | return; |
313 | case SDHCI_HOST_CONTROL: | 313 | case SDHCI_HOST_CONTROL: |
314 | /* FSL messed up here, so we can just keep those three */ | 314 | /* FSL messed up here, so we need to manually compose it. */ |
315 | new_val = val & (SDHCI_CTRL_LED | \ | 315 | new_val = val & SDHCI_CTRL_LED; |
316 | SDHCI_CTRL_4BITBUS | \ | ||
317 | SDHCI_CTRL_D3CD); | ||
318 | /* ensure the endianness */ | 316 | /* ensure the endianness */ |
319 | new_val |= ESDHC_HOST_CONTROL_LE; | 317 | new_val |= ESDHC_HOST_CONTROL_LE; |
320 | /* bits 8&9 are reserved on mx25 */ | 318 | /* bits 8&9 are reserved on mx25 */ |
@@ -323,7 +321,13 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) | |||
323 | new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5; | 321 | new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5; |
324 | } | 322 | } |
325 | 323 | ||
326 | esdhc_clrset_le(host, 0xffff, new_val, reg); | 324 | /* |
325 | * Do not touch buswidth bits here. This is done in | ||
326 | * esdhc_pltfm_bus_width. | ||
327 | */ | ||
328 | mask = 0xffff & ~ESDHC_CTRL_BUSWIDTH_MASK; | ||
329 | |||
330 | esdhc_clrset_le(host, mask, new_val, reg); | ||
327 | return; | 331 | return; |
328 | } | 332 | } |
329 | esdhc_clrset_le(host, 0xff, val, reg); | 333 | esdhc_clrset_le(host, 0xff, val, reg); |
@@ -336,15 +340,15 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) | |||
336 | * circuit relies on. To work around it, we turn the clocks on back | 340 | * circuit relies on. To work around it, we turn the clocks on back |
337 | * to keep card detection circuit functional. | 341 | * to keep card detection circuit functional. |
338 | */ | 342 | */ |
339 | if ((reg == SDHCI_SOFTWARE_RESET) && (val & 1)) | 343 | if ((reg == SDHCI_SOFTWARE_RESET) && (val & 1)) { |
340 | esdhc_clrset_le(host, 0x7, 0x7, ESDHC_SYSTEM_CONTROL); | 344 | esdhc_clrset_le(host, 0x7, 0x7, ESDHC_SYSTEM_CONTROL); |
341 | } | 345 | /* |
342 | 346 | * The reset on usdhc fails to clear MIX_CTRL register. | |
343 | static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host) | 347 | * Do it manually here. |
344 | { | 348 | */ |
345 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 349 | if (is_imx6q_usdhc(imx_data)) |
346 | 350 | writel(0, host->ioaddr + ESDHC_MIX_CTRL); | |
347 | return clk_get_rate(pltfm_host->clk); | 351 | } |
348 | } | 352 | } |
349 | 353 | ||
350 | static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host) | 354 | static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host) |
@@ -362,8 +366,7 @@ static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) | |||
362 | 366 | ||
363 | switch (boarddata->wp_type) { | 367 | switch (boarddata->wp_type) { |
364 | case ESDHC_WP_GPIO: | 368 | case ESDHC_WP_GPIO: |
365 | if (gpio_is_valid(boarddata->wp_gpio)) | 369 | return mmc_gpio_get_ro(host->mmc); |
366 | return gpio_get_value(boarddata->wp_gpio); | ||
367 | case ESDHC_WP_CONTROLLER: | 370 | case ESDHC_WP_CONTROLLER: |
368 | return !(readl(host->ioaddr + SDHCI_PRESENT_STATE) & | 371 | return !(readl(host->ioaddr + SDHCI_PRESENT_STATE) & |
369 | SDHCI_WRITE_PROTECT); | 372 | SDHCI_WRITE_PROTECT); |
@@ -374,6 +377,28 @@ static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) | |||
374 | return -ENOSYS; | 377 | return -ENOSYS; |
375 | } | 378 | } |
376 | 379 | ||
380 | static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width) | ||
381 | { | ||
382 | u32 ctrl; | ||
383 | |||
384 | switch (width) { | ||
385 | case MMC_BUS_WIDTH_8: | ||
386 | ctrl = ESDHC_CTRL_8BITBUS; | ||
387 | break; | ||
388 | case MMC_BUS_WIDTH_4: | ||
389 | ctrl = ESDHC_CTRL_4BITBUS; | ||
390 | break; | ||
391 | default: | ||
392 | ctrl = 0; | ||
393 | break; | ||
394 | } | ||
395 | |||
396 | esdhc_clrset_le(host, ESDHC_CTRL_BUSWIDTH_MASK, ctrl, | ||
397 | SDHCI_HOST_CONTROL); | ||
398 | |||
399 | return 0; | ||
400 | } | ||
401 | |||
377 | static struct sdhci_ops sdhci_esdhc_ops = { | 402 | static struct sdhci_ops sdhci_esdhc_ops = { |
378 | .read_l = esdhc_readl_le, | 403 | .read_l = esdhc_readl_le, |
379 | .read_w = esdhc_readw_le, | 404 | .read_w = esdhc_readw_le, |
@@ -381,9 +406,10 @@ static struct sdhci_ops sdhci_esdhc_ops = { | |||
381 | .write_w = esdhc_writew_le, | 406 | .write_w = esdhc_writew_le, |
382 | .write_b = esdhc_writeb_le, | 407 | .write_b = esdhc_writeb_le, |
383 | .set_clock = esdhc_set_clock, | 408 | .set_clock = esdhc_set_clock, |
384 | .get_max_clock = esdhc_pltfm_get_max_clock, | 409 | .get_max_clock = sdhci_pltfm_clk_get_max_clock, |
385 | .get_min_clock = esdhc_pltfm_get_min_clock, | 410 | .get_min_clock = esdhc_pltfm_get_min_clock, |
386 | .get_ro = esdhc_pltfm_get_ro, | 411 | .get_ro = esdhc_pltfm_get_ro, |
412 | .platform_bus_width = esdhc_pltfm_bus_width, | ||
387 | }; | 413 | }; |
388 | 414 | ||
389 | static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { | 415 | static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { |
@@ -394,14 +420,6 @@ static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { | |||
394 | .ops = &sdhci_esdhc_ops, | 420 | .ops = &sdhci_esdhc_ops, |
395 | }; | 421 | }; |
396 | 422 | ||
397 | static irqreturn_t cd_irq(int irq, void *data) | ||
398 | { | ||
399 | struct sdhci_host *sdhost = (struct sdhci_host *)data; | ||
400 | |||
401 | tasklet_schedule(&sdhost->card_tasklet); | ||
402 | return IRQ_HANDLED; | ||
403 | }; | ||
404 | |||
405 | #ifdef CONFIG_OF | 423 | #ifdef CONFIG_OF |
406 | static int | 424 | static int |
407 | sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, | 425 | sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, |
@@ -429,6 +447,8 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, | |||
429 | if (gpio_is_valid(boarddata->wp_gpio)) | 447 | if (gpio_is_valid(boarddata->wp_gpio)) |
430 | boarddata->wp_type = ESDHC_WP_GPIO; | 448 | boarddata->wp_type = ESDHC_WP_GPIO; |
431 | 449 | ||
450 | of_property_read_u32(np, "bus-width", &boarddata->max_bus_width); | ||
451 | |||
432 | return 0; | 452 | return 0; |
433 | } | 453 | } |
434 | #else | 454 | #else |
@@ -512,7 +532,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) | |||
512 | * to something insane. Change it back here. | 532 | * to something insane. Change it back here. |
513 | */ | 533 | */ |
514 | if (is_imx6q_usdhc(imx_data)) | 534 | if (is_imx6q_usdhc(imx_data)) |
515 | writel(0x08100810, host->ioaddr + SDHCI_WTMK_LVL); | 535 | writel(0x08100810, host->ioaddr + ESDHC_WTMK_LVL); |
516 | 536 | ||
517 | boarddata = &imx_data->boarddata; | 537 | boarddata = &imx_data->boarddata; |
518 | if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) { | 538 | if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) { |
@@ -527,37 +547,22 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) | |||
527 | 547 | ||
528 | /* write_protect */ | 548 | /* write_protect */ |
529 | if (boarddata->wp_type == ESDHC_WP_GPIO) { | 549 | if (boarddata->wp_type == ESDHC_WP_GPIO) { |
530 | err = devm_gpio_request_one(&pdev->dev, boarddata->wp_gpio, | 550 | err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio); |
531 | GPIOF_IN, "ESDHC_WP"); | ||
532 | if (err) { | 551 | if (err) { |
533 | dev_warn(mmc_dev(host->mmc), | 552 | dev_err(mmc_dev(host->mmc), |
534 | "no write-protect pin available!\n"); | 553 | "failed to request write-protect gpio!\n"); |
535 | boarddata->wp_gpio = -EINVAL; | 554 | goto disable_clk; |
536 | } | 555 | } |
537 | } else { | 556 | host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; |
538 | boarddata->wp_gpio = -EINVAL; | ||
539 | } | 557 | } |
540 | 558 | ||
541 | /* card_detect */ | 559 | /* card_detect */ |
542 | if (boarddata->cd_type != ESDHC_CD_GPIO) | ||
543 | boarddata->cd_gpio = -EINVAL; | ||
544 | |||
545 | switch (boarddata->cd_type) { | 560 | switch (boarddata->cd_type) { |
546 | case ESDHC_CD_GPIO: | 561 | case ESDHC_CD_GPIO: |
547 | err = devm_gpio_request_one(&pdev->dev, boarddata->cd_gpio, | 562 | err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio); |
548 | GPIOF_IN, "ESDHC_CD"); | ||
549 | if (err) { | 563 | if (err) { |
550 | dev_err(mmc_dev(host->mmc), | 564 | dev_err(mmc_dev(host->mmc), |
551 | "no card-detect pin available!\n"); | 565 | "failed to request card-detect gpio!\n"); |
552 | goto disable_clk; | ||
553 | } | ||
554 | |||
555 | err = devm_request_irq(&pdev->dev, | ||
556 | gpio_to_irq(boarddata->cd_gpio), cd_irq, | ||
557 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, | ||
558 | mmc_hostname(host->mmc), host); | ||
559 | if (err) { | ||
560 | dev_err(mmc_dev(host->mmc), "request irq error\n"); | ||
561 | goto disable_clk; | 566 | goto disable_clk; |
562 | } | 567 | } |
563 | /* fall through */ | 568 | /* fall through */ |
@@ -575,6 +580,19 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) | |||
575 | break; | 580 | break; |
576 | } | 581 | } |
577 | 582 | ||
583 | switch (boarddata->max_bus_width) { | ||
584 | case 8: | ||
585 | host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA; | ||
586 | break; | ||
587 | case 4: | ||
588 | host->mmc->caps |= MMC_CAP_4_BIT_DATA; | ||
589 | break; | ||
590 | case 1: | ||
591 | default: | ||
592 | host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; | ||
593 | break; | ||
594 | } | ||
595 | |||
578 | err = sdhci_add_host(host); | 596 | err = sdhci_add_host(host); |
579 | if (err) | 597 | if (err) |
580 | goto disable_clk; | 598 | goto disable_clk; |
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index c7dd0cbc99de..c7ccf3034dad 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c | |||
@@ -935,7 +935,7 @@ static int sdhci_pci_enable_dma(struct sdhci_host *host) | |||
935 | return 0; | 935 | return 0; |
936 | } | 936 | } |
937 | 937 | ||
938 | static int sdhci_pci_8bit_width(struct sdhci_host *host, int width) | 938 | static int sdhci_pci_bus_width(struct sdhci_host *host, int width) |
939 | { | 939 | { |
940 | u8 ctrl; | 940 | u8 ctrl; |
941 | 941 | ||
@@ -977,7 +977,7 @@ static void sdhci_pci_hw_reset(struct sdhci_host *host) | |||
977 | 977 | ||
978 | static struct sdhci_ops sdhci_pci_ops = { | 978 | static struct sdhci_ops sdhci_pci_ops = { |
979 | .enable_dma = sdhci_pci_enable_dma, | 979 | .enable_dma = sdhci_pci_enable_dma, |
980 | .platform_8bit_width = sdhci_pci_8bit_width, | 980 | .platform_bus_width = sdhci_pci_bus_width, |
981 | .hw_reset = sdhci_pci_hw_reset, | 981 | .hw_reset = sdhci_pci_hw_reset, |
982 | }; | 982 | }; |
983 | 983 | ||
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c index d4283ef5917a..3145a780b035 100644 --- a/drivers/mmc/host/sdhci-pltfm.c +++ b/drivers/mmc/host/sdhci-pltfm.c | |||
@@ -36,6 +36,14 @@ | |||
36 | #endif | 36 | #endif |
37 | #include "sdhci-pltfm.h" | 37 | #include "sdhci-pltfm.h" |
38 | 38 | ||
39 | unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host) | ||
40 | { | ||
41 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
42 | |||
43 | return clk_get_rate(pltfm_host->clk); | ||
44 | } | ||
45 | EXPORT_SYMBOL_GPL(sdhci_pltfm_clk_get_max_clock); | ||
46 | |||
39 | static struct sdhci_ops sdhci_pltfm_ops = { | 47 | static struct sdhci_ops sdhci_pltfm_ops = { |
40 | }; | 48 | }; |
41 | 49 | ||
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h index 37e0e184a0bb..153b6c509ebe 100644 --- a/drivers/mmc/host/sdhci-pltfm.h +++ b/drivers/mmc/host/sdhci-pltfm.h | |||
@@ -98,6 +98,8 @@ extern int sdhci_pltfm_register(struct platform_device *pdev, | |||
98 | struct sdhci_pltfm_data *pdata); | 98 | struct sdhci_pltfm_data *pdata); |
99 | extern int sdhci_pltfm_unregister(struct platform_device *pdev); | 99 | extern int sdhci_pltfm_unregister(struct platform_device *pdev); |
100 | 100 | ||
101 | extern unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host); | ||
102 | |||
101 | #ifdef CONFIG_PM | 103 | #ifdef CONFIG_PM |
102 | extern const struct dev_pm_ops sdhci_pltfm_pmops; | 104 | extern const struct dev_pm_ops sdhci_pltfm_pmops; |
103 | #define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops) | 105 | #define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops) |
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c index ac854aa192a8..eeb7d439db1d 100644 --- a/drivers/mmc/host/sdhci-pxav2.c +++ b/drivers/mmc/host/sdhci-pxav2.c | |||
@@ -111,17 +111,10 @@ static int pxav2_mmc_set_width(struct sdhci_host *host, int width) | |||
111 | return 0; | 111 | return 0; |
112 | } | 112 | } |
113 | 113 | ||
114 | static u32 pxav2_get_max_clock(struct sdhci_host *host) | ||
115 | { | ||
116 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
117 | |||
118 | return clk_get_rate(pltfm_host->clk); | ||
119 | } | ||
120 | |||
121 | static struct sdhci_ops pxav2_sdhci_ops = { | 114 | static struct sdhci_ops pxav2_sdhci_ops = { |
122 | .get_max_clock = pxav2_get_max_clock, | 115 | .get_max_clock = sdhci_pltfm_clk_get_max_clock, |
123 | .platform_reset_exit = pxav2_set_private_registers, | 116 | .platform_reset_exit = pxav2_set_private_registers, |
124 | .platform_8bit_width = pxav2_mmc_set_width, | 117 | .platform_bus_width = pxav2_mmc_set_width, |
125 | }; | 118 | }; |
126 | 119 | ||
127 | #ifdef CONFIG_OF | 120 | #ifdef CONFIG_OF |
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index fad0966427fd..a0cdbc570a83 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c | |||
@@ -32,10 +32,14 @@ | |||
32 | #include <linux/of.h> | 32 | #include <linux/of.h> |
33 | #include <linux/of_device.h> | 33 | #include <linux/of_device.h> |
34 | #include <linux/of_gpio.h> | 34 | #include <linux/of_gpio.h> |
35 | #include <linux/pm.h> | ||
36 | #include <linux/pm_runtime.h> | ||
35 | 37 | ||
36 | #include "sdhci.h" | 38 | #include "sdhci.h" |
37 | #include "sdhci-pltfm.h" | 39 | #include "sdhci-pltfm.h" |
38 | 40 | ||
41 | #define PXAV3_RPM_DELAY_MS 50 | ||
42 | |||
39 | #define SD_CLOCK_BURST_SIZE_SETUP 0x10A | 43 | #define SD_CLOCK_BURST_SIZE_SETUP 0x10A |
40 | #define SDCLK_SEL 0x100 | 44 | #define SDCLK_SEL 0x100 |
41 | #define SDCLK_DELAY_SHIFT 9 | 45 | #define SDCLK_DELAY_SHIFT 9 |
@@ -163,18 +167,11 @@ static int pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) | |||
163 | return 0; | 167 | return 0; |
164 | } | 168 | } |
165 | 169 | ||
166 | static u32 pxav3_get_max_clock(struct sdhci_host *host) | ||
167 | { | ||
168 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
169 | |||
170 | return clk_get_rate(pltfm_host->clk); | ||
171 | } | ||
172 | |||
173 | static struct sdhci_ops pxav3_sdhci_ops = { | 170 | static struct sdhci_ops pxav3_sdhci_ops = { |
174 | .platform_reset_exit = pxav3_set_private_registers, | 171 | .platform_reset_exit = pxav3_set_private_registers, |
175 | .set_uhs_signaling = pxav3_set_uhs_signaling, | 172 | .set_uhs_signaling = pxav3_set_uhs_signaling, |
176 | .platform_send_init_74_clocks = pxav3_gen_init_74_clocks, | 173 | .platform_send_init_74_clocks = pxav3_gen_init_74_clocks, |
177 | .get_max_clock = pxav3_get_max_clock, | 174 | .get_max_clock = sdhci_pltfm_clk_get_max_clock, |
178 | }; | 175 | }; |
179 | 176 | ||
180 | #ifdef CONFIG_OF | 177 | #ifdef CONFIG_OF |
@@ -303,20 +300,37 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) | |||
303 | 300 | ||
304 | sdhci_get_of_property(pdev); | 301 | sdhci_get_of_property(pdev); |
305 | 302 | ||
303 | pm_runtime_set_active(&pdev->dev); | ||
304 | pm_runtime_enable(&pdev->dev); | ||
305 | pm_runtime_set_autosuspend_delay(&pdev->dev, PXAV3_RPM_DELAY_MS); | ||
306 | pm_runtime_use_autosuspend(&pdev->dev); | ||
307 | pm_suspend_ignore_children(&pdev->dev, 1); | ||
308 | pm_runtime_get_noresume(&pdev->dev); | ||
309 | |||
306 | ret = sdhci_add_host(host); | 310 | ret = sdhci_add_host(host); |
307 | if (ret) { | 311 | if (ret) { |
308 | dev_err(&pdev->dev, "failed to add host\n"); | 312 | dev_err(&pdev->dev, "failed to add host\n"); |
313 | pm_runtime_forbid(&pdev->dev); | ||
314 | pm_runtime_disable(&pdev->dev); | ||
309 | goto err_add_host; | 315 | goto err_add_host; |
310 | } | 316 | } |
311 | 317 | ||
312 | platform_set_drvdata(pdev, host); | 318 | platform_set_drvdata(pdev, host); |
313 | 319 | ||
320 | if (pdata->pm_caps & MMC_PM_KEEP_POWER) { | ||
321 | device_init_wakeup(&pdev->dev, 1); | ||
322 | host->mmc->pm_flags |= MMC_PM_WAKE_SDIO_IRQ; | ||
323 | } else { | ||
324 | device_init_wakeup(&pdev->dev, 0); | ||
325 | } | ||
326 | |||
327 | pm_runtime_put_autosuspend(&pdev->dev); | ||
328 | |||
314 | return 0; | 329 | return 0; |
315 | 330 | ||
316 | err_add_host: | 331 | err_add_host: |
317 | clk_disable_unprepare(clk); | 332 | clk_disable_unprepare(clk); |
318 | clk_put(clk); | 333 | clk_put(clk); |
319 | mmc_gpio_free_cd(host->mmc); | ||
320 | err_cd_req: | 334 | err_cd_req: |
321 | err_clk_get: | 335 | err_clk_get: |
322 | sdhci_pltfm_free(pdev); | 336 | sdhci_pltfm_free(pdev); |
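The probe changes above follow the usual runtime-PM bring-up order: mark the device active, enable runtime PM, configure autosuspend, then hold a noresume reference across sdhci_add_host() that is dropped with pm_runtime_put_autosuspend() once registration succeeds (and undone with pm_runtime_forbid()/pm_runtime_disable() on failure). A condensed sketch of that sequence, with the rationale as comments:

```c
/* Condensed sketch of the runtime-PM bring-up used in sdhci_pxav3_probe(). */
static void example_rpm_setup(struct device *dev)
{
	pm_runtime_set_active(dev);		/* hardware is already powered by probe */
	pm_runtime_enable(dev);
	pm_runtime_set_autosuspend_delay(dev, 50 /* PXAV3_RPM_DELAY_MS */);
	pm_runtime_use_autosuspend(dev);	/* idle for 50 ms before suspending */
	pm_suspend_ignore_children(dev, 1);	/* card devices don't block suspend */
	pm_runtime_get_noresume(dev);		/* keep it active while registering */
}
```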
@@ -329,16 +343,14 @@ static int sdhci_pxav3_remove(struct platform_device *pdev) | |||
329 | struct sdhci_host *host = platform_get_drvdata(pdev); | 343 | struct sdhci_host *host = platform_get_drvdata(pdev); |
330 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 344 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
331 | struct sdhci_pxa *pxa = pltfm_host->priv; | 345 | struct sdhci_pxa *pxa = pltfm_host->priv; |
332 | struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; | ||
333 | 346 | ||
347 | pm_runtime_get_sync(&pdev->dev); | ||
334 | sdhci_remove_host(host, 1); | 348 | sdhci_remove_host(host, 1); |
349 | pm_runtime_disable(&pdev->dev); | ||
335 | 350 | ||
336 | clk_disable_unprepare(pltfm_host->clk); | 351 | clk_disable_unprepare(pltfm_host->clk); |
337 | clk_put(pltfm_host->clk); | 352 | clk_put(pltfm_host->clk); |
338 | 353 | ||
339 | if (gpio_is_valid(pdata->ext_cd_gpio)) | ||
340 | mmc_gpio_free_cd(host->mmc); | ||
341 | |||
342 | sdhci_pltfm_free(pdev); | 354 | sdhci_pltfm_free(pdev); |
343 | kfree(pxa); | 355 | kfree(pxa); |
344 | 356 | ||
@@ -347,6 +359,83 @@ static int sdhci_pxav3_remove(struct platform_device *pdev) | |||
347 | return 0; | 359 | return 0; |
348 | } | 360 | } |
349 | 361 | ||
362 | #ifdef CONFIG_PM_SLEEP | ||
363 | static int sdhci_pxav3_suspend(struct device *dev) | ||
364 | { | ||
365 | int ret; | ||
366 | struct sdhci_host *host = dev_get_drvdata(dev); | ||
367 | |||
368 | pm_runtime_get_sync(dev); | ||
369 | ret = sdhci_suspend_host(host); | ||
370 | pm_runtime_mark_last_busy(dev); | ||
371 | pm_runtime_put_autosuspend(dev); | ||
372 | |||
373 | return ret; | ||
374 | } | ||
375 | |||
376 | static int sdhci_pxav3_resume(struct device *dev) | ||
377 | { | ||
378 | int ret; | ||
379 | struct sdhci_host *host = dev_get_drvdata(dev); | ||
380 | |||
381 | pm_runtime_get_sync(dev); | ||
382 | ret = sdhci_resume_host(host); | ||
383 | pm_runtime_mark_last_busy(dev); | ||
384 | pm_runtime_put_autosuspend(dev); | ||
385 | |||
386 | return ret; | ||
387 | } | ||
388 | #endif | ||
389 | |||
390 | #ifdef CONFIG_PM_RUNTIME | ||
391 | static int sdhci_pxav3_runtime_suspend(struct device *dev) | ||
392 | { | ||
393 | struct sdhci_host *host = dev_get_drvdata(dev); | ||
394 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
395 | unsigned long flags; | ||
396 | |||
397 | if (pltfm_host->clk) { | ||
398 | spin_lock_irqsave(&host->lock, flags); | ||
399 | host->runtime_suspended = true; | ||
400 | spin_unlock_irqrestore(&host->lock, flags); | ||
401 | |||
402 | clk_disable_unprepare(pltfm_host->clk); | ||
403 | } | ||
404 | |||
405 | return 0; | ||
406 | } | ||
407 | |||
408 | static int sdhci_pxav3_runtime_resume(struct device *dev) | ||
409 | { | ||
410 | struct sdhci_host *host = dev_get_drvdata(dev); | ||
411 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
412 | unsigned long flags; | ||
413 | |||
414 | if (pltfm_host->clk) { | ||
415 | clk_prepare_enable(pltfm_host->clk); | ||
416 | |||
417 | spin_lock_irqsave(&host->lock, flags); | ||
418 | host->runtime_suspended = false; | ||
419 | spin_unlock_irqrestore(&host->lock, flags); | ||
420 | } | ||
421 | |||
422 | return 0; | ||
423 | } | ||
424 | #endif | ||
425 | |||
426 | #ifdef CONFIG_PM | ||
427 | static const struct dev_pm_ops sdhci_pxav3_pmops = { | ||
428 | SET_SYSTEM_SLEEP_PM_OPS(sdhci_pxav3_suspend, sdhci_pxav3_resume) | ||
429 | SET_RUNTIME_PM_OPS(sdhci_pxav3_runtime_suspend, | ||
430 | sdhci_pxav3_runtime_resume, NULL) | ||
431 | }; | ||
432 | |||
433 | #define SDHCI_PXAV3_PMOPS (&sdhci_pxav3_pmops) | ||
434 | |||
435 | #else | ||
436 | #define SDHCI_PXAV3_PMOPS NULL | ||
437 | #endif | ||
438 | |||
350 | static struct platform_driver sdhci_pxav3_driver = { | 439 | static struct platform_driver sdhci_pxav3_driver = { |
351 | .driver = { | 440 | .driver = { |
352 | .name = "sdhci-pxav3", | 441 | .name = "sdhci-pxav3", |
@@ -354,7 +443,7 @@ static struct platform_driver sdhci_pxav3_driver = { | |||
354 | .of_match_table = sdhci_pxav3_of_match, | 443 | .of_match_table = sdhci_pxav3_of_match, |
355 | #endif | 444 | #endif |
356 | .owner = THIS_MODULE, | 445 | .owner = THIS_MODULE, |
357 | .pm = SDHCI_PLTFM_PMOPS, | 446 | .pm = SDHCI_PXAV3_PMOPS, |
358 | }, | 447 | }, |
359 | .probe = sdhci_pxav3_probe, | 448 | .probe = sdhci_pxav3_probe, |
360 | .remove = sdhci_pxav3_remove, | 449 | .remove = sdhci_pxav3_remove, |
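For reference, SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS() only populate the corresponding dev_pm_ops callbacks when the matching Kconfig option is enabled, which is why the whole block is wrapped in CONFIG_PM and falls back to a NULL .pm pointer otherwise. Roughly (a sketch, not the exact kernel definitions; see include/linux/pm.h):

```c
/* Rough sketch of the helper macros used above (simplified). */
#ifdef CONFIG_PM_SLEEP
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)	\
	.suspend  = suspend_fn, .resume  = resume_fn,	\
	.freeze   = suspend_fn, .thaw    = resume_fn,	\
	.poweroff = suspend_fn, .restore = resume_fn,
#else
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif

#ifdef CONFIG_PM_RUNTIME
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)	\
	.runtime_suspend = suspend_fn,			\
	.runtime_resume  = resume_fn,			\
	.runtime_idle    = idle_fn,
#else
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#endif
```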
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index a0c621421ee8..7363efe72287 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c | |||
@@ -332,14 +332,14 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock) | |||
332 | } | 332 | } |
333 | 333 | ||
334 | /** | 334 | /** |
335 | * sdhci_s3c_platform_8bit_width - support 8bit buswidth | 335 | * sdhci_s3c_platform_bus_width - support 8bit buswidth |
336 | * @host: The SDHCI host being queried | 336 | * @host: The SDHCI host being queried |
337 | * @width: MMC_BUS_WIDTH_ macro for the bus width being requested | 337 | * @width: MMC_BUS_WIDTH_ macro for the bus width being requested |
338 | * | 338 | * |
339 | * We have 8-bit width support but is not a v3 controller. | 339 | * We have 8-bit width support but is not a v3 controller. |
340 | * So we add platform_8bit_width() and support 8bit width. | 340 | * So we add platform_bus_width() and support 8bit width. |
341 | */ | 341 | */ |
342 | static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width) | 342 | static int sdhci_s3c_platform_bus_width(struct sdhci_host *host, int width) |
343 | { | 343 | { |
344 | u8 ctrl; | 344 | u8 ctrl; |
345 | 345 | ||
@@ -369,7 +369,7 @@ static struct sdhci_ops sdhci_s3c_ops = { | |||
369 | .get_max_clock = sdhci_s3c_get_max_clk, | 369 | .get_max_clock = sdhci_s3c_get_max_clk, |
370 | .set_clock = sdhci_s3c_set_clock, | 370 | .set_clock = sdhci_s3c_set_clock, |
371 | .get_min_clock = sdhci_s3c_get_min_clock, | 371 | .get_min_clock = sdhci_s3c_get_min_clock, |
372 | .platform_8bit_width = sdhci_s3c_platform_8bit_width, | 372 | .platform_bus_width = sdhci_s3c_platform_bus_width, |
373 | }; | 373 | }; |
374 | 374 | ||
375 | static void sdhci_s3c_notify_change(struct platform_device *dev, int state) | 375 | static void sdhci_s3c_notify_change(struct platform_device *dev, int state) |
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index 3695b2e0cbd2..08b06e9a3a21 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c | |||
@@ -27,8 +27,6 @@ | |||
27 | 27 | ||
28 | #include <asm/gpio.h> | 28 | #include <asm/gpio.h> |
29 | 29 | ||
30 | #include <linux/platform_data/mmc-sdhci-tegra.h> | ||
31 | |||
32 | #include "sdhci-pltfm.h" | 30 | #include "sdhci-pltfm.h" |
33 | 31 | ||
34 | /* Tegra SDHOST controller vendor register definitions */ | 32 | /* Tegra SDHOST controller vendor register definitions */ |
@@ -45,8 +43,11 @@ struct sdhci_tegra_soc_data { | |||
45 | }; | 43 | }; |
46 | 44 | ||
47 | struct sdhci_tegra { | 45 | struct sdhci_tegra { |
48 | const struct tegra_sdhci_platform_data *plat; | ||
49 | const struct sdhci_tegra_soc_data *soc_data; | 46 | const struct sdhci_tegra_soc_data *soc_data; |
47 | int cd_gpio; | ||
48 | int wp_gpio; | ||
49 | int power_gpio; | ||
50 | int is_8bit; | ||
50 | }; | 51 | }; |
51 | 52 | ||
52 | static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg) | 53 | static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg) |
@@ -108,12 +109,11 @@ static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host) | |||
108 | { | 109 | { |
109 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 110 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
110 | struct sdhci_tegra *tegra_host = pltfm_host->priv; | 111 | struct sdhci_tegra *tegra_host = pltfm_host->priv; |
111 | const struct tegra_sdhci_platform_data *plat = tegra_host->plat; | ||
112 | 112 | ||
113 | if (!gpio_is_valid(plat->wp_gpio)) | 113 | if (!gpio_is_valid(tegra_host->wp_gpio)) |
114 | return -1; | 114 | return -1; |
115 | 115 | ||
116 | return gpio_get_value(plat->wp_gpio); | 116 | return gpio_get_value(tegra_host->wp_gpio); |
117 | } | 117 | } |
118 | 118 | ||
119 | static irqreturn_t carddetect_irq(int irq, void *data) | 119 | static irqreturn_t carddetect_irq(int irq, void *data) |
@@ -143,15 +143,14 @@ static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask) | |||
143 | } | 143 | } |
144 | } | 144 | } |
145 | 145 | ||
146 | static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width) | 146 | static int tegra_sdhci_buswidth(struct sdhci_host *host, int bus_width) |
147 | { | 147 | { |
148 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 148 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
149 | struct sdhci_tegra *tegra_host = pltfm_host->priv; | 149 | struct sdhci_tegra *tegra_host = pltfm_host->priv; |
150 | const struct tegra_sdhci_platform_data *plat = tegra_host->plat; | ||
151 | u32 ctrl; | 150 | u32 ctrl; |
152 | 151 | ||
153 | ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); | 152 | ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); |
154 | if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) { | 153 | if (tegra_host->is_8bit && bus_width == MMC_BUS_WIDTH_8) { |
155 | ctrl &= ~SDHCI_CTRL_4BITBUS; | 154 | ctrl &= ~SDHCI_CTRL_4BITBUS; |
156 | ctrl |= SDHCI_CTRL_8BITBUS; | 155 | ctrl |= SDHCI_CTRL_8BITBUS; |
157 | } else { | 156 | } else { |
@@ -170,7 +169,7 @@ static struct sdhci_ops tegra_sdhci_ops = { | |||
170 | .read_l = tegra_sdhci_readl, | 169 | .read_l = tegra_sdhci_readl, |
171 | .read_w = tegra_sdhci_readw, | 170 | .read_w = tegra_sdhci_readw, |
172 | .write_l = tegra_sdhci_writel, | 171 | .write_l = tegra_sdhci_writel, |
173 | .platform_8bit_width = tegra_sdhci_8bit, | 172 | .platform_bus_width = tegra_sdhci_buswidth, |
174 | .platform_reset_exit = tegra_sdhci_reset_exit, | 173 | .platform_reset_exit = tegra_sdhci_reset_exit, |
175 | }; | 174 | }; |
176 | 175 | ||
@@ -217,31 +216,19 @@ static const struct of_device_id sdhci_tegra_dt_match[] = { | |||
217 | }; | 216 | }; |
218 | MODULE_DEVICE_TABLE(of, sdhci_dt_ids); | 217 | MODULE_DEVICE_TABLE(of, sdhci_dt_ids); |
219 | 218 | ||
220 | static struct tegra_sdhci_platform_data *sdhci_tegra_dt_parse_pdata( | 219 | static void sdhci_tegra_parse_dt(struct device *dev, |
221 | struct platform_device *pdev) | 220 | struct sdhci_tegra *tegra_host) |
222 | { | 221 | { |
223 | struct tegra_sdhci_platform_data *plat; | 222 | struct device_node *np = dev->of_node; |
224 | struct device_node *np = pdev->dev.of_node; | ||
225 | u32 bus_width; | 223 | u32 bus_width; |
226 | 224 | ||
227 | if (!np) | 225 | tegra_host->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0); |
228 | return NULL; | 226 | tegra_host->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0); |
229 | 227 | tegra_host->power_gpio = of_get_named_gpio(np, "power-gpios", 0); | |
230 | plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); | ||
231 | if (!plat) { | ||
232 | dev_err(&pdev->dev, "Can't allocate platform data\n"); | ||
233 | return NULL; | ||
234 | } | ||
235 | |||
236 | plat->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0); | ||
237 | plat->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0); | ||
238 | plat->power_gpio = of_get_named_gpio(np, "power-gpios", 0); | ||
239 | 228 | ||
240 | if (of_property_read_u32(np, "bus-width", &bus_width) == 0 && | 229 | if (of_property_read_u32(np, "bus-width", &bus_width) == 0 && |
241 | bus_width == 8) | 230 | bus_width == 8) |
242 | plat->is_8bit = 1; | 231 | tegra_host->is_8bit = 1; |
243 | |||
244 | return plat; | ||
245 | } | 232 | } |
246 | 233 | ||
247 | static int sdhci_tegra_probe(struct platform_device *pdev) | 234 | static int sdhci_tegra_probe(struct platform_device *pdev) |
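Worth noting for the DT parsing above: of_get_named_gpio() returns a negative errno when a property is absent, so the gpio_is_valid() checks in probe/remove below quietly skip any GPIO the device tree does not provide. A minimal sketch of that optional-GPIO pattern, using a hypothetical "foo-gpios" property and the managed request variant:

```c
/* Minimal sketch: optional GPIO from DT ("foo-gpios" is a hypothetical property). */
static int example_request_optional_gpio(struct device *dev)
{
	int gpio = of_get_named_gpio(dev->of_node, "foo-gpios", 0);

	if (!gpio_is_valid(gpio))	/* property missing or malformed: just skip it */
		return 0;

	return devm_gpio_request_one(dev, gpio, GPIOF_IN, "example-foo");
}
```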
@@ -250,7 +237,6 @@ static int sdhci_tegra_probe(struct platform_device *pdev) | |||
250 | const struct sdhci_tegra_soc_data *soc_data; | 237 | const struct sdhci_tegra_soc_data *soc_data; |
251 | struct sdhci_host *host; | 238 | struct sdhci_host *host; |
252 | struct sdhci_pltfm_host *pltfm_host; | 239 | struct sdhci_pltfm_host *pltfm_host; |
253 | struct tegra_sdhci_platform_data *plat; | ||
254 | struct sdhci_tegra *tegra_host; | 240 | struct sdhci_tegra *tegra_host; |
255 | struct clk *clk; | 241 | struct clk *clk; |
256 | int rc; | 242 | int rc; |
@@ -263,52 +249,40 @@ static int sdhci_tegra_probe(struct platform_device *pdev) | |||
263 | host = sdhci_pltfm_init(pdev, soc_data->pdata); | 249 | host = sdhci_pltfm_init(pdev, soc_data->pdata); |
264 | if (IS_ERR(host)) | 250 | if (IS_ERR(host)) |
265 | return PTR_ERR(host); | 251 | return PTR_ERR(host); |
266 | |||
267 | pltfm_host = sdhci_priv(host); | 252 | pltfm_host = sdhci_priv(host); |
268 | 253 | ||
269 | plat = pdev->dev.platform_data; | ||
270 | |||
271 | if (plat == NULL) | ||
272 | plat = sdhci_tegra_dt_parse_pdata(pdev); | ||
273 | |||
274 | if (plat == NULL) { | ||
275 | dev_err(mmc_dev(host->mmc), "missing platform data\n"); | ||
276 | rc = -ENXIO; | ||
277 | goto err_no_plat; | ||
278 | } | ||
279 | |||
280 | tegra_host = devm_kzalloc(&pdev->dev, sizeof(*tegra_host), GFP_KERNEL); | 254 | tegra_host = devm_kzalloc(&pdev->dev, sizeof(*tegra_host), GFP_KERNEL); |
281 | if (!tegra_host) { | 255 | if (!tegra_host) { |
282 | dev_err(mmc_dev(host->mmc), "failed to allocate tegra_host\n"); | 256 | dev_err(mmc_dev(host->mmc), "failed to allocate tegra_host\n"); |
283 | rc = -ENOMEM; | 257 | rc = -ENOMEM; |
284 | goto err_no_plat; | 258 | goto err_alloc_tegra_host; |
285 | } | 259 | } |
286 | |||
287 | tegra_host->plat = plat; | ||
288 | tegra_host->soc_data = soc_data; | 260 | tegra_host->soc_data = soc_data; |
289 | |||
290 | pltfm_host->priv = tegra_host; | 261 | pltfm_host->priv = tegra_host; |
291 | 262 | ||
292 | if (gpio_is_valid(plat->power_gpio)) { | 263 | sdhci_tegra_parse_dt(&pdev->dev, tegra_host); |
293 | rc = gpio_request(plat->power_gpio, "sdhci_power"); | 264 | |
265 | if (gpio_is_valid(tegra_host->power_gpio)) { | ||
266 | rc = gpio_request(tegra_host->power_gpio, "sdhci_power"); | ||
294 | if (rc) { | 267 | if (rc) { |
295 | dev_err(mmc_dev(host->mmc), | 268 | dev_err(mmc_dev(host->mmc), |
296 | "failed to allocate power gpio\n"); | 269 | "failed to allocate power gpio\n"); |
297 | goto err_power_req; | 270 | goto err_power_req; |
298 | } | 271 | } |
299 | gpio_direction_output(plat->power_gpio, 1); | 272 | gpio_direction_output(tegra_host->power_gpio, 1); |
300 | } | 273 | } |
301 | 274 | ||
302 | if (gpio_is_valid(plat->cd_gpio)) { | 275 | if (gpio_is_valid(tegra_host->cd_gpio)) { |
303 | rc = gpio_request(plat->cd_gpio, "sdhci_cd"); | 276 | rc = gpio_request(tegra_host->cd_gpio, "sdhci_cd"); |
304 | if (rc) { | 277 | if (rc) { |
305 | dev_err(mmc_dev(host->mmc), | 278 | dev_err(mmc_dev(host->mmc), |
306 | "failed to allocate cd gpio\n"); | 279 | "failed to allocate cd gpio\n"); |
307 | goto err_cd_req; | 280 | goto err_cd_req; |
308 | } | 281 | } |
309 | gpio_direction_input(plat->cd_gpio); | 282 | gpio_direction_input(tegra_host->cd_gpio); |
310 | 283 | ||
311 | rc = request_irq(gpio_to_irq(plat->cd_gpio), carddetect_irq, | 284 | rc = request_irq(gpio_to_irq(tegra_host->cd_gpio), |
285 | carddetect_irq, | ||
312 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, | 286 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, |
313 | mmc_hostname(host->mmc), host); | 287 | mmc_hostname(host->mmc), host); |
314 | 288 | ||
@@ -319,14 +293,14 @@ static int sdhci_tegra_probe(struct platform_device *pdev) | |||
319 | 293 | ||
320 | } | 294 | } |
321 | 295 | ||
322 | if (gpio_is_valid(plat->wp_gpio)) { | 296 | if (gpio_is_valid(tegra_host->wp_gpio)) { |
323 | rc = gpio_request(plat->wp_gpio, "sdhci_wp"); | 297 | rc = gpio_request(tegra_host->wp_gpio, "sdhci_wp"); |
324 | if (rc) { | 298 | if (rc) { |
325 | dev_err(mmc_dev(host->mmc), | 299 | dev_err(mmc_dev(host->mmc), |
326 | "failed to allocate wp gpio\n"); | 300 | "failed to allocate wp gpio\n"); |
327 | goto err_wp_req; | 301 | goto err_wp_req; |
328 | } | 302 | } |
329 | gpio_direction_input(plat->wp_gpio); | 303 | gpio_direction_input(tegra_host->wp_gpio); |
330 | } | 304 | } |
331 | 305 | ||
332 | clk = clk_get(mmc_dev(host->mmc), NULL); | 306 | clk = clk_get(mmc_dev(host->mmc), NULL); |
@@ -338,9 +312,7 @@ static int sdhci_tegra_probe(struct platform_device *pdev) | |||
338 | clk_prepare_enable(clk); | 312 | clk_prepare_enable(clk); |
339 | pltfm_host->clk = clk; | 313 | pltfm_host->clk = clk; |
340 | 314 | ||
341 | host->mmc->pm_caps = plat->pm_flags; | 315 | if (tegra_host->is_8bit) |
342 | |||
343 | if (plat->is_8bit) | ||
344 | host->mmc->caps |= MMC_CAP_8_BIT_DATA; | 316 | host->mmc->caps |= MMC_CAP_8_BIT_DATA; |
345 | 317 | ||
346 | rc = sdhci_add_host(host); | 318 | rc = sdhci_add_host(host); |
@@ -353,19 +325,19 @@ err_add_host: | |||
353 | clk_disable_unprepare(pltfm_host->clk); | 325 | clk_disable_unprepare(pltfm_host->clk); |
354 | clk_put(pltfm_host->clk); | 326 | clk_put(pltfm_host->clk); |
355 | err_clk_get: | 327 | err_clk_get: |
356 | if (gpio_is_valid(plat->wp_gpio)) | 328 | if (gpio_is_valid(tegra_host->wp_gpio)) |
357 | gpio_free(plat->wp_gpio); | 329 | gpio_free(tegra_host->wp_gpio); |
358 | err_wp_req: | 330 | err_wp_req: |
359 | if (gpio_is_valid(plat->cd_gpio)) | 331 | if (gpio_is_valid(tegra_host->cd_gpio)) |
360 | free_irq(gpio_to_irq(plat->cd_gpio), host); | 332 | free_irq(gpio_to_irq(tegra_host->cd_gpio), host); |
361 | err_cd_irq_req: | 333 | err_cd_irq_req: |
362 | if (gpio_is_valid(plat->cd_gpio)) | 334 | if (gpio_is_valid(tegra_host->cd_gpio)) |
363 | gpio_free(plat->cd_gpio); | 335 | gpio_free(tegra_host->cd_gpio); |
364 | err_cd_req: | 336 | err_cd_req: |
365 | if (gpio_is_valid(plat->power_gpio)) | 337 | if (gpio_is_valid(tegra_host->power_gpio)) |
366 | gpio_free(plat->power_gpio); | 338 | gpio_free(tegra_host->power_gpio); |
367 | err_power_req: | 339 | err_power_req: |
368 | err_no_plat: | 340 | err_alloc_tegra_host: |
369 | sdhci_pltfm_free(pdev); | 341 | sdhci_pltfm_free(pdev); |
370 | return rc; | 342 | return rc; |
371 | } | 343 | } |
@@ -375,21 +347,20 @@ static int sdhci_tegra_remove(struct platform_device *pdev) | |||
375 | struct sdhci_host *host = platform_get_drvdata(pdev); | 347 | struct sdhci_host *host = platform_get_drvdata(pdev); |
376 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 348 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
377 | struct sdhci_tegra *tegra_host = pltfm_host->priv; | 349 | struct sdhci_tegra *tegra_host = pltfm_host->priv; |
378 | const struct tegra_sdhci_platform_data *plat = tegra_host->plat; | ||
379 | int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); | 350 | int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); |
380 | 351 | ||
381 | sdhci_remove_host(host, dead); | 352 | sdhci_remove_host(host, dead); |
382 | 353 | ||
383 | if (gpio_is_valid(plat->wp_gpio)) | 354 | if (gpio_is_valid(tegra_host->wp_gpio)) |
384 | gpio_free(plat->wp_gpio); | 355 | gpio_free(tegra_host->wp_gpio); |
385 | 356 | ||
386 | if (gpio_is_valid(plat->cd_gpio)) { | 357 | if (gpio_is_valid(tegra_host->cd_gpio)) { |
387 | free_irq(gpio_to_irq(plat->cd_gpio), host); | 358 | free_irq(gpio_to_irq(tegra_host->cd_gpio), host); |
388 | gpio_free(plat->cd_gpio); | 359 | gpio_free(tegra_host->cd_gpio); |
389 | } | 360 | } |
390 | 361 | ||
391 | if (gpio_is_valid(plat->power_gpio)) | 362 | if (gpio_is_valid(tegra_host->power_gpio)) |
392 | gpio_free(plat->power_gpio); | 363 | gpio_free(tegra_host->power_gpio); |
393 | 364 | ||
394 | clk_disable_unprepare(pltfm_host->clk); | 365 | clk_disable_unprepare(pltfm_host->clk); |
395 | clk_put(pltfm_host->clk); | 366 | clk_put(pltfm_host->clk); |
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 6f0bfc0c8c9c..51bbba486f38 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
@@ -53,6 +53,7 @@ static void sdhci_send_command(struct sdhci_host *, struct mmc_command *); | |||
53 | static void sdhci_finish_command(struct sdhci_host *); | 53 | static void sdhci_finish_command(struct sdhci_host *); |
54 | static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); | 54 | static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); |
55 | static void sdhci_tuning_timer(unsigned long data); | 55 | static void sdhci_tuning_timer(unsigned long data); |
56 | static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable); | ||
56 | 57 | ||
57 | #ifdef CONFIG_PM_RUNTIME | 58 | #ifdef CONFIG_PM_RUNTIME |
58 | static int sdhci_runtime_pm_get(struct sdhci_host *host); | 59 | static int sdhci_runtime_pm_get(struct sdhci_host *host); |
@@ -1082,6 +1083,37 @@ static void sdhci_finish_command(struct sdhci_host *host) | |||
1082 | } | 1083 | } |
1083 | } | 1084 | } |
1084 | 1085 | ||
1086 | static u16 sdhci_get_preset_value(struct sdhci_host *host) | ||
1087 | { | ||
1088 | u16 ctrl, preset = 0; | ||
1089 | |||
1090 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); | ||
1091 | |||
1092 | switch (ctrl & SDHCI_CTRL_UHS_MASK) { | ||
1093 | case SDHCI_CTRL_UHS_SDR12: | ||
1094 | preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); | ||
1095 | break; | ||
1096 | case SDHCI_CTRL_UHS_SDR25: | ||
1097 | preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25); | ||
1098 | break; | ||
1099 | case SDHCI_CTRL_UHS_SDR50: | ||
1100 | preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50); | ||
1101 | break; | ||
1102 | case SDHCI_CTRL_UHS_SDR104: | ||
1103 | preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104); | ||
1104 | break; | ||
1105 | case SDHCI_CTRL_UHS_DDR50: | ||
1106 | preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50); | ||
1107 | break; | ||
1108 | default: | ||
1109 | pr_warn("%s: Invalid UHS-I mode selected\n", | ||
1110 | mmc_hostname(host->mmc)); | ||
1111 | preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); | ||
1112 | break; | ||
1113 | } | ||
1114 | return preset; | ||
1115 | } | ||
1116 | |||
1085 | static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) | 1117 | static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) |
1086 | { | 1118 | { |
1087 | int div = 0; /* Initialized for compiler warning */ | 1119 | int div = 0; /* Initialized for compiler warning */ |
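sdhci_get_preset_value() picks the SDHCI 3.00 preset register matching the current UHS mode; the caller in sdhci_set_clock() then splits the returned word with the SDHCI_PRESET_* masks added to sdhci.h later in this diff. In divided-clock mode the 10-bit frequency-select field is the divider N (SDCLK = base / 2N), hence the `div << 1` and the max_t() guard for N = 0 in the hunk below. A worked sketch of the decode:

```c
/* Sketch: decoding a preset word with the masks added to sdhci.h in this update. */
static void example_decode_preset(u16 preset)
{
	unsigned int freq_sel = (preset & SDHCI_PRESET_SDCLK_FREQ_MASK)
					>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
	unsigned int clkgen   = (preset & SDHCI_PRESET_CLKGEN_SEL_MASK)
					>> SDHCI_PRESET_CLKGEN_SEL_SHIFT;
	unsigned int drv      = (preset & SDHCI_PRESET_DRV_MASK)
					>> SDHCI_PRESET_DRV_SHIFT;

	pr_debug("preset: freq_sel=%u clkgen_sel=%u drv_type=%u\n",
		 freq_sel, clkgen, drv);
}
```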
@@ -1106,35 +1138,43 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) | |||
1106 | goto out; | 1138 | goto out; |
1107 | 1139 | ||
1108 | if (host->version >= SDHCI_SPEC_300) { | 1140 | if (host->version >= SDHCI_SPEC_300) { |
1141 | if (sdhci_readw(host, SDHCI_HOST_CONTROL2) & | ||
1142 | SDHCI_CTRL_PRESET_VAL_ENABLE) { | ||
1143 | u16 pre_val; | ||
1144 | |||
1145 | clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); | ||
1146 | pre_val = sdhci_get_preset_value(host); | ||
1147 | div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK) | ||
1148 | >> SDHCI_PRESET_SDCLK_FREQ_SHIFT; | ||
1149 | if (host->clk_mul && | ||
1150 | (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) { | ||
1151 | clk = SDHCI_PROG_CLOCK_MODE; | ||
1152 | real_div = div + 1; | ||
1153 | clk_mul = host->clk_mul; | ||
1154 | } else { | ||
1155 | real_div = max_t(int, 1, div << 1); | ||
1156 | } | ||
1157 | goto clock_set; | ||
1158 | } | ||
1159 | |||
1109 | /* | 1160 | /* |
1110 | * Check if the Host Controller supports Programmable Clock | 1161 | * Check if the Host Controller supports Programmable Clock |
1111 | * Mode. | 1162 | * Mode. |
1112 | */ | 1163 | */ |
1113 | if (host->clk_mul) { | 1164 | if (host->clk_mul) { |
1114 | u16 ctrl; | 1165 | for (div = 1; div <= 1024; div++) { |
1115 | 1166 | if ((host->max_clk * host->clk_mul / div) | |
1167 | <= clock) | ||
1168 | break; | ||
1169 | } | ||
1116 | /* | 1170 | /* |
1117 | * We need to figure out whether the Host Driver needs | 1171 | * Set Programmable Clock Mode in the Clock |
1118 | * to select Programmable Clock Mode, or the value can | 1172 | * Control register. |
1119 | * be set automatically by the Host Controller based on | ||
1120 | * the Preset Value registers. | ||
1121 | */ | 1173 | */ |
1122 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); | 1174 | clk = SDHCI_PROG_CLOCK_MODE; |
1123 | if (!(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { | 1175 | real_div = div; |
1124 | for (div = 1; div <= 1024; div++) { | 1176 | clk_mul = host->clk_mul; |
1125 | if (((host->max_clk * host->clk_mul) / | 1177 | div--; |
1126 | div) <= clock) | ||
1127 | break; | ||
1128 | } | ||
1129 | /* | ||
1130 | * Set Programmable Clock Mode in the Clock | ||
1131 | * Control register. | ||
1132 | */ | ||
1133 | clk = SDHCI_PROG_CLOCK_MODE; | ||
1134 | real_div = div; | ||
1135 | clk_mul = host->clk_mul; | ||
1136 | div--; | ||
1137 | } | ||
1138 | } else { | 1178 | } else { |
1139 | /* Version 3.00 divisors must be a multiple of 2. */ | 1179 | /* Version 3.00 divisors must be a multiple of 2. */ |
1140 | if (host->max_clk <= clock) | 1180 | if (host->max_clk <= clock) |
@@ -1159,6 +1199,7 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) | |||
1159 | div >>= 1; | 1199 | div >>= 1; |
1160 | } | 1200 | } |
1161 | 1201 | ||
1202 | clock_set: | ||
1162 | if (real_div) | 1203 | if (real_div) |
1163 | host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div; | 1204 | host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div; |
1164 | 1205 | ||
@@ -1189,6 +1230,15 @@ out: | |||
1189 | host->clock = clock; | 1230 | host->clock = clock; |
1190 | } | 1231 | } |
1191 | 1232 | ||
1233 | static inline void sdhci_update_clock(struct sdhci_host *host) | ||
1234 | { | ||
1235 | unsigned int clock; | ||
1236 | |||
1237 | clock = host->clock; | ||
1238 | host->clock = 0; | ||
1239 | sdhci_set_clock(host, clock); | ||
1240 | } | ||
1241 | |||
1192 | static int sdhci_set_power(struct sdhci_host *host, unsigned short power) | 1242 | static int sdhci_set_power(struct sdhci_host *host, unsigned short power) |
1193 | { | 1243 | { |
1194 | u8 pwr = 0; | 1244 | u8 pwr = 0; |
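sdhci_update_clock() captures the "force a clock reprogram" idiom that several call sites previously open-coded: host->clock is zeroed first so that sdhci_set_clock() does not bail out early when asked for the frequency it believes is already set. Each converted call site (see the hunks below) now amounts to:

```c
/* What each converted call site now does; the old open-coded form is kept as a comment. */
static void example_force_clock_reprogram(struct sdhci_host *host)
{
	/* Previously:
	 *	clock = host->clock;
	 *	host->clock = 0;
	 *	sdhci_set_clock(host, clock);
	 */
	sdhci_update_clock(host);
}
```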
@@ -1258,7 +1308,7 @@ static int sdhci_set_power(struct sdhci_host *host, unsigned short power) | |||
1258 | static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) | 1308 | static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) |
1259 | { | 1309 | { |
1260 | struct sdhci_host *host; | 1310 | struct sdhci_host *host; |
1261 | bool present; | 1311 | int present; |
1262 | unsigned long flags; | 1312 | unsigned long flags; |
1263 | u32 tuning_opcode; | 1313 | u32 tuning_opcode; |
1264 | 1314 | ||
@@ -1287,18 +1337,21 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
1287 | 1337 | ||
1288 | host->mrq = mrq; | 1338 | host->mrq = mrq; |
1289 | 1339 | ||
1290 | /* If polling, assume that the card is always present. */ | 1340 | /* |
1291 | if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) | 1341 | * Firstly check card presence from cd-gpio. The return could |
1292 | present = true; | 1342 | * be one of the following possibilities: |
1293 | else | 1343 | * negative: cd-gpio is not available |
1294 | present = sdhci_readl(host, SDHCI_PRESENT_STATE) & | 1344 | * zero: cd-gpio is used, and card is removed |
1295 | SDHCI_CARD_PRESENT; | 1345 | * one: cd-gpio is used, and card is present |
1296 | 1346 | */ | |
1297 | /* If we're using a cd-gpio, testing the presence bit might fail. */ | 1347 | present = mmc_gpio_get_cd(host->mmc); |
1298 | if (!present) { | 1348 | if (present < 0) { |
1299 | int ret = mmc_gpio_get_cd(host->mmc); | 1349 | /* If polling, assume that the card is always present. */ |
1300 | if (ret > 0) | 1350 | if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) |
1301 | present = true; | 1351 | present = 1; |
1352 | else | ||
1353 | present = sdhci_readl(host, SDHCI_PRESENT_STATE) & | ||
1354 | SDHCI_CARD_PRESENT; | ||
1302 | } | 1355 | } |
1303 | 1356 | ||
1304 | if (!present || host->flags & SDHCI_DEVICE_DEAD) { | 1357 | if (!present || host->flags & SDHCI_DEVICE_DEAD) { |
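The reordering above makes the CD GPIO authoritative when one exists; the BROKEN_CARD_DETECTION quirk and the PRESENT_STATE register are consulted only when mmc_gpio_get_cd() reports that no GPIO is wired up (negative return). An illustrative restatement of that precedence:

```c
/* Illustrative restatement of the card-detect precedence used in sdhci_request(). */
static int example_card_present(struct sdhci_host *host)
{
	int present = mmc_gpio_get_cd(host->mmc);	/* <0: no CD GPIO available */

	if (present >= 0)
		return present;				/* GPIO result is authoritative */

	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;				/* polling: assume card present */

	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
```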
@@ -1364,6 +1417,10 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) | |||
1364 | sdhci_reinit(host); | 1417 | sdhci_reinit(host); |
1365 | } | 1418 | } |
1366 | 1419 | ||
1420 | if (host->version >= SDHCI_SPEC_300 && | ||
1421 | (ios->power_mode == MMC_POWER_UP)) | ||
1422 | sdhci_enable_preset_value(host, false); | ||
1423 | |||
1367 | sdhci_set_clock(host, ios->clock); | 1424 | sdhci_set_clock(host, ios->clock); |
1368 | 1425 | ||
1369 | if (ios->power_mode == MMC_POWER_OFF) | 1426 | if (ios->power_mode == MMC_POWER_OFF) |
@@ -1383,11 +1440,11 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) | |||
1383 | /* | 1440 | /* |
1384 | * If your platform has 8-bit width support but is not a v3 controller, | 1441 | * If your platform has 8-bit width support but is not a v3 controller, |
1385 | * or if it requires special setup code, you should implement that in | 1442 | * or if it requires special setup code, you should implement that in |
1386 | * platform_8bit_width(). | 1443 | * platform_bus_width(). |
1387 | */ | 1444 | */ |
1388 | if (host->ops->platform_8bit_width) | 1445 | if (host->ops->platform_bus_width) { |
1389 | host->ops->platform_8bit_width(host, ios->bus_width); | 1446 | host->ops->platform_bus_width(host, ios->bus_width); |
1390 | else { | 1447 | } else { |
1391 | ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); | 1448 | ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); |
1392 | if (ios->bus_width == MMC_BUS_WIDTH_8) { | 1449 | if (ios->bus_width == MMC_BUS_WIDTH_8) { |
1393 | ctrl &= ~SDHCI_CTRL_4BITBUS; | 1450 | ctrl &= ~SDHCI_CTRL_4BITBUS; |
@@ -1415,7 +1472,6 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) | |||
1415 | 1472 | ||
1416 | if (host->version >= SDHCI_SPEC_300) { | 1473 | if (host->version >= SDHCI_SPEC_300) { |
1417 | u16 clk, ctrl_2; | 1474 | u16 clk, ctrl_2; |
1418 | unsigned int clock; | ||
1419 | 1475 | ||
1420 | /* In case of UHS-I modes, set High Speed Enable */ | 1476 | /* In case of UHS-I modes, set High Speed Enable */ |
1421 | if ((ios->timing == MMC_TIMING_MMC_HS200) || | 1477 | if ((ios->timing == MMC_TIMING_MMC_HS200) || |
@@ -1455,9 +1511,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) | |||
1455 | sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); | 1511 | sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); |
1456 | 1512 | ||
1457 | /* Re-enable SD Clock */ | 1513 | /* Re-enable SD Clock */ |
1458 | clock = host->clock; | 1514 | sdhci_update_clock(host); |
1459 | host->clock = 0; | ||
1460 | sdhci_set_clock(host, clock); | ||
1461 | } | 1515 | } |
1462 | 1516 | ||
1463 | 1517 | ||
@@ -1487,10 +1541,22 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) | |||
1487 | sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); | 1541 | sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); |
1488 | } | 1542 | } |
1489 | 1543 | ||
1544 | if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) && | ||
1545 | ((ios->timing == MMC_TIMING_UHS_SDR12) || | ||
1546 | (ios->timing == MMC_TIMING_UHS_SDR25) || | ||
1547 | (ios->timing == MMC_TIMING_UHS_SDR50) || | ||
1548 | (ios->timing == MMC_TIMING_UHS_SDR104) || | ||
1549 | (ios->timing == MMC_TIMING_UHS_DDR50))) { | ||
1550 | u16 preset; | ||
1551 | |||
1552 | sdhci_enable_preset_value(host, true); | ||
1553 | preset = sdhci_get_preset_value(host); | ||
1554 | ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK) | ||
1555 | >> SDHCI_PRESET_DRV_SHIFT; | ||
1556 | } | ||
1557 | |||
1490 | /* Re-enable SD Clock */ | 1558 | /* Re-enable SD Clock */ |
1491 | clock = host->clock; | 1559 | sdhci_update_clock(host); |
1492 | host->clock = 0; | ||
1493 | sdhci_set_clock(host, clock); | ||
1494 | } else | 1560 | } else |
1495 | sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); | 1561 | sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); |
1496 | 1562 | ||
@@ -1608,141 +1674,91 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) | |||
1608 | spin_unlock_irqrestore(&host->lock, flags); | 1674 | spin_unlock_irqrestore(&host->lock, flags); |
1609 | } | 1675 | } |
1610 | 1676 | ||
1611 | static int sdhci_do_3_3v_signal_voltage_switch(struct sdhci_host *host, | 1677 | static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, |
1612 | u16 ctrl) | 1678 | struct mmc_ios *ios) |
1613 | { | 1679 | { |
1680 | u16 ctrl; | ||
1614 | int ret; | 1681 | int ret; |
1615 | 1682 | ||
1616 | /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ | 1683 | /* |
1617 | ctrl &= ~SDHCI_CTRL_VDD_180; | 1684 | * Signal Voltage Switching is only applicable for Host Controllers |
1618 | sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); | 1685 | * v3.00 and above. |
1619 | 1686 | */ | |
1620 | if (host->vqmmc) { | 1687 | if (host->version < SDHCI_SPEC_300) |
1621 | ret = regulator_set_voltage(host->vqmmc, 2700000, 3600000); | 1688 | return 0; |
1622 | if (ret) { | ||
1623 | pr_warning("%s: Switching to 3.3V signalling voltage " | ||
1624 | " failed\n", mmc_hostname(host->mmc)); | ||
1625 | return -EIO; | ||
1626 | } | ||
1627 | } | ||
1628 | /* Wait for 5ms */ | ||
1629 | usleep_range(5000, 5500); | ||
1630 | 1689 | ||
1631 | /* 3.3V regulator output should be stable within 5 ms */ | ||
1632 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); | 1690 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); |
1633 | if (!(ctrl & SDHCI_CTRL_VDD_180)) | ||
1634 | return 0; | ||
1635 | 1691 | ||
1636 | pr_warning("%s: 3.3V regulator output did not became stable\n", | 1692 | switch (ios->signal_voltage) { |
1637 | mmc_hostname(host->mmc)); | 1693 | case MMC_SIGNAL_VOLTAGE_330: |
1694 | /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ | ||
1695 | ctrl &= ~SDHCI_CTRL_VDD_180; | ||
1696 | sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); | ||
1638 | 1697 | ||
1639 | return -EIO; | 1698 | if (host->vqmmc) { |
1640 | } | 1699 | ret = regulator_set_voltage(host->vqmmc, 2700000, 3600000); |
1700 | if (ret) { | ||
1701 | pr_warning("%s: Switching to 3.3V signalling voltage " | ||
1702 | " failed\n", mmc_hostname(host->mmc)); | ||
1703 | return -EIO; | ||
1704 | } | ||
1705 | } | ||
1706 | /* Wait for 5ms */ | ||
1707 | usleep_range(5000, 5500); | ||
1641 | 1708 | ||
1642 | static int sdhci_do_1_8v_signal_voltage_switch(struct sdhci_host *host, | 1709 | /* 3.3V regulator output should be stable within 5 ms */ |
1643 | u16 ctrl) | 1710 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); |
1644 | { | 1711 | if (!(ctrl & SDHCI_CTRL_VDD_180)) |
1645 | u8 pwr; | 1712 | return 0; |
1646 | u16 clk; | ||
1647 | u32 present_state; | ||
1648 | int ret; | ||
1649 | 1713 | ||
1650 | /* Stop SDCLK */ | 1714 | pr_warning("%s: 3.3V regulator output did not became stable\n", |
1651 | clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); | 1715 | mmc_hostname(host->mmc)); |
1652 | clk &= ~SDHCI_CLOCK_CARD_EN; | 1716 | |
1653 | sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); | 1717 | return -EAGAIN; |
1718 | case MMC_SIGNAL_VOLTAGE_180: | ||
1719 | if (host->vqmmc) { | ||
1720 | ret = regulator_set_voltage(host->vqmmc, | ||
1721 | 1700000, 1950000); | ||
1722 | if (ret) { | ||
1723 | pr_warning("%s: Switching to 1.8V signalling voltage " | ||
1724 | " failed\n", mmc_hostname(host->mmc)); | ||
1725 | return -EIO; | ||
1726 | } | ||
1727 | } | ||
1654 | 1728 | ||
1655 | /* Check whether DAT[3:0] is 0000 */ | ||
1656 | present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); | ||
1657 | if (!((present_state & SDHCI_DATA_LVL_MASK) >> | ||
1658 | SDHCI_DATA_LVL_SHIFT)) { | ||
1659 | /* | 1729 | /* |
1660 | * Enable 1.8V Signal Enable in the Host Control2 | 1730 | * Enable 1.8V Signal Enable in the Host Control2 |
1661 | * register | 1731 | * register |
1662 | */ | 1732 | */ |
1663 | if (host->vqmmc) | 1733 | ctrl |= SDHCI_CTRL_VDD_180; |
1664 | ret = regulator_set_voltage(host->vqmmc, | 1734 | sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); |
1665 | 1700000, 1950000); | ||
1666 | else | ||
1667 | ret = 0; | ||
1668 | 1735 | ||
1669 | if (!ret) { | 1736 | /* Wait for 5ms */ |
1670 | ctrl |= SDHCI_CTRL_VDD_180; | 1737 | usleep_range(5000, 5500); |
1671 | sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); | ||
1672 | 1738 | ||
1673 | /* Wait for 5ms */ | 1739 | /* 1.8V regulator output should be stable within 5 ms */ |
1674 | usleep_range(5000, 5500); | 1740 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); |
1741 | if (ctrl & SDHCI_CTRL_VDD_180) | ||
1742 | return 0; | ||
1675 | 1743 | ||
1676 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); | 1744 | pr_warning("%s: 1.8V regulator output did not became stable\n", |
1677 | if (ctrl & SDHCI_CTRL_VDD_180) { | 1745 | mmc_hostname(host->mmc)); |
1678 | /* Provide SDCLK again and wait for 1ms */ | ||
1679 | clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); | ||
1680 | clk |= SDHCI_CLOCK_CARD_EN; | ||
1681 | sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); | ||
1682 | usleep_range(1000, 1500); | ||
1683 | 1746 | ||
1684 | /* | 1747 | return -EAGAIN; |
1685 | * If DAT[3:0] level is 1111b, then the card | 1748 | case MMC_SIGNAL_VOLTAGE_120: |
1686 | * was successfully switched to 1.8V signaling. | 1749 | if (host->vqmmc) { |
1687 | */ | 1750 | ret = regulator_set_voltage(host->vqmmc, 1100000, 1300000); |
1688 | present_state = sdhci_readl(host, | 1751 | if (ret) { |
1689 | SDHCI_PRESENT_STATE); | 1752 | pr_warning("%s: Switching to 1.2V signalling voltage " |
1690 | if ((present_state & SDHCI_DATA_LVL_MASK) == | 1753 | " failed\n", mmc_hostname(host->mmc)); |
1691 | SDHCI_DATA_LVL_MASK) | 1754 | return -EIO; |
1692 | return 0; | ||
1693 | } | 1755 | } |
1694 | } | 1756 | } |
1695 | } | ||
1696 | |||
1697 | /* | ||
1698 | * If we are here, that means the switch to 1.8V signaling | ||
1699 | * failed. We power cycle the card, and retry initialization | ||
1700 | * sequence by setting S18R to 0. | ||
1701 | */ | ||
1702 | pwr = sdhci_readb(host, SDHCI_POWER_CONTROL); | ||
1703 | pwr &= ~SDHCI_POWER_ON; | ||
1704 | sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); | ||
1705 | if (host->vmmc) | ||
1706 | regulator_disable(host->vmmc); | ||
1707 | |||
1708 | /* Wait for 1ms as per the spec */ | ||
1709 | usleep_range(1000, 1500); | ||
1710 | pwr |= SDHCI_POWER_ON; | ||
1711 | sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); | ||
1712 | if (host->vmmc) | ||
1713 | regulator_enable(host->vmmc); | ||
1714 | |||
1715 | pr_warning("%s: Switching to 1.8V signalling voltage failed, " | ||
1716 | "retrying with S18R set to 0\n", mmc_hostname(host->mmc)); | ||
1717 | |||
1718 | return -EAGAIN; | ||
1719 | } | ||
1720 | |||
1721 | static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, | ||
1722 | struct mmc_ios *ios) | ||
1723 | { | ||
1724 | u16 ctrl; | ||
1725 | |||
1726 | /* | ||
1727 | * Signal Voltage Switching is only applicable for Host Controllers | ||
1728 | * v3.00 and above. | ||
1729 | */ | ||
1730 | if (host->version < SDHCI_SPEC_300) | ||
1731 | return 0; | 1757 | return 0; |
1732 | 1758 | default: | |
1733 | /* | ||
1734 | * We first check whether the request is to set signalling voltage | ||
1735 | * to 3.3V. If so, we change the voltage to 3.3V and return quickly. | ||
1736 | */ | ||
1737 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); | ||
1738 | if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) | ||
1739 | return sdhci_do_3_3v_signal_voltage_switch(host, ctrl); | ||
1740 | else if (!(ctrl & SDHCI_CTRL_VDD_180) && | ||
1741 | (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)) | ||
1742 | return sdhci_do_1_8v_signal_voltage_switch(host, ctrl); | ||
1743 | else | ||
1744 | /* No signal voltage switch required */ | 1759 | /* No signal voltage switch required */ |
1745 | return 0; | 1760 | return 0; |
1761 | } | ||
1746 | } | 1762 | } |
1747 | 1763 | ||
1748 | static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, | 1764 | static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, |
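The rewrite above folds the separate 3.3 V and 1.8 V helpers (plus a new 1.2 V case) into one switch on ios->signal_voltage; the DAT[3:0] polling and power-cycle retry that used to live in the 1.8 V path are gone from the host driver, presumably because the core can now observe the data lines through the .card_busy callback added just below. As a quick reference, the regulator windows requested for each signalling level in the new code are:

```c
/* Quick reference: the vqmmc windows requested above for each signalling level. */
static int example_vqmmc_window(int signal_voltage, int *min_uV, int *max_uV)
{
	switch (signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		*min_uV = 2700000; *max_uV = 3600000;
		return 0;
	case MMC_SIGNAL_VOLTAGE_180:
		*min_uV = 1700000; *max_uV = 1950000;
		return 0;
	case MMC_SIGNAL_VOLTAGE_120:
		*min_uV = 1100000; *max_uV = 1300000;
		return 0;
	default:
		return -EINVAL;
	}
}
```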
@@ -1759,6 +1775,19 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, | |||
1759 | return err; | 1775 | return err; |
1760 | } | 1776 | } |
1761 | 1777 | ||
1778 | static int sdhci_card_busy(struct mmc_host *mmc) | ||
1779 | { | ||
1780 | struct sdhci_host *host = mmc_priv(mmc); | ||
1781 | u32 present_state; | ||
1782 | |||
1783 | sdhci_runtime_pm_get(host); | ||
1784 | /* Check whether DAT[3:0] is 0000 */ | ||
1785 | present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); | ||
1786 | sdhci_runtime_pm_put(host); | ||
1787 | |||
1788 | return !(present_state & SDHCI_DATA_LVL_MASK); | ||
1789 | } | ||
1790 | |||
1762 | static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) | 1791 | static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) |
1763 | { | 1792 | { |
1764 | struct sdhci_host *host; | 1793 | struct sdhci_host *host; |
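The new .card_busy callback reports whether the card is still driving DAT[3:0] low, which is exactly the condition the removed 1.8 V switch helper polled inline. A hedged illustration of how a caller might use it (the real consumer is the mmc core's voltage-switch sequence, not shown in this diff):

```c
/* Illustrative only: bounded wait on the new ->card_busy() callback. */
static int example_wait_while_busy(struct mmc_host *mmc, unsigned int timeout_ms)
{
	while (timeout_ms--) {
		if (!mmc->ops->card_busy(mmc))
			return 0;
		usleep_range(1000, 2000);
	}
	return -ETIMEDOUT;
}
```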
@@ -1955,17 +1984,15 @@ out: | |||
1955 | return err; | 1984 | return err; |
1956 | } | 1985 | } |
1957 | 1986 | ||
1958 | static void sdhci_do_enable_preset_value(struct sdhci_host *host, bool enable) | 1987 | |
1988 | static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) | ||
1959 | { | 1989 | { |
1960 | u16 ctrl; | 1990 | u16 ctrl; |
1961 | unsigned long flags; | ||
1962 | 1991 | ||
1963 | /* Host Controller v3.00 defines preset value registers */ | 1992 | /* Host Controller v3.00 defines preset value registers */ |
1964 | if (host->version < SDHCI_SPEC_300) | 1993 | if (host->version < SDHCI_SPEC_300) |
1965 | return; | 1994 | return; |
1966 | 1995 | ||
1967 | spin_lock_irqsave(&host->lock, flags); | ||
1968 | |||
1969 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); | 1996 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); |
1970 | 1997 | ||
1971 | /* | 1998 | /* |
@@ -1981,17 +2008,6 @@ static void sdhci_do_enable_preset_value(struct sdhci_host *host, bool enable) | |||
1981 | sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); | 2008 | sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); |
1982 | host->flags &= ~SDHCI_PV_ENABLED; | 2009 | host->flags &= ~SDHCI_PV_ENABLED; |
1983 | } | 2010 | } |
1984 | |||
1985 | spin_unlock_irqrestore(&host->lock, flags); | ||
1986 | } | ||
1987 | |||
1988 | static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable) | ||
1989 | { | ||
1990 | struct sdhci_host *host = mmc_priv(mmc); | ||
1991 | |||
1992 | sdhci_runtime_pm_get(host); | ||
1993 | sdhci_do_enable_preset_value(host, enable); | ||
1994 | sdhci_runtime_pm_put(host); | ||
1995 | } | 2011 | } |
1996 | 2012 | ||
1997 | static void sdhci_card_event(struct mmc_host *mmc) | 2013 | static void sdhci_card_event(struct mmc_host *mmc) |
@@ -2027,8 +2043,8 @@ static const struct mmc_host_ops sdhci_ops = { | |||
2027 | .enable_sdio_irq = sdhci_enable_sdio_irq, | 2043 | .enable_sdio_irq = sdhci_enable_sdio_irq, |
2028 | .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, | 2044 | .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, |
2029 | .execute_tuning = sdhci_execute_tuning, | 2045 | .execute_tuning = sdhci_execute_tuning, |
2030 | .enable_preset_value = sdhci_enable_preset_value, | ||
2031 | .card_event = sdhci_card_event, | 2046 | .card_event = sdhci_card_event, |
2047 | .card_busy = sdhci_card_busy, | ||
2032 | }; | 2048 | }; |
2033 | 2049 | ||
2034 | /*****************************************************************************\ | 2050 | /*****************************************************************************\ |
@@ -2080,14 +2096,9 @@ static void sdhci_tasklet_finish(unsigned long param) | |||
2080 | (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { | 2096 | (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { |
2081 | 2097 | ||
2082 | /* Some controllers need this kick or reset won't work here */ | 2098 | /* Some controllers need this kick or reset won't work here */ |
2083 | if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) { | 2099 | if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) |
2084 | unsigned int clock; | ||
2085 | |||
2086 | /* This is to force an update */ | 2100 | /* This is to force an update */ |
2087 | clock = host->clock; | 2101 | sdhci_update_clock(host); |
2088 | host->clock = 0; | ||
2089 | sdhci_set_clock(host, clock); | ||
2090 | } | ||
2091 | 2102 | ||
2092 | /* Spec says we should do both at the same time, but Ricoh | 2103 | /* Spec says we should do both at the same time, but Ricoh |
2093 | controllers do not like that. */ | 2104 | controllers do not like that. */ |
@@ -2455,6 +2466,32 @@ out: | |||
2455 | \*****************************************************************************/ | 2466 | \*****************************************************************************/ |
2456 | 2467 | ||
2457 | #ifdef CONFIG_PM | 2468 | #ifdef CONFIG_PM |
2469 | void sdhci_enable_irq_wakeups(struct sdhci_host *host) | ||
2470 | { | ||
2471 | u8 val; | ||
2472 | u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE | ||
2473 | | SDHCI_WAKE_ON_INT; | ||
2474 | |||
2475 | val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); | ||
2476 | val |= mask ; | ||
2477 | /* Avoid fake wake up */ | ||
2478 | if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) | ||
2479 | val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE); | ||
2480 | sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); | ||
2481 | } | ||
2482 | EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups); | ||
2483 | |||
2484 | void sdhci_disable_irq_wakeups(struct sdhci_host *host) | ||
2485 | { | ||
2486 | u8 val; | ||
2487 | u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE | ||
2488 | | SDHCI_WAKE_ON_INT; | ||
2489 | |||
2490 | val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); | ||
2491 | val &= ~mask; | ||
2492 | sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); | ||
2493 | } | ||
2494 | EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups); | ||
2458 | 2495 | ||
2459 | int sdhci_suspend_host(struct sdhci_host *host) | 2496 | int sdhci_suspend_host(struct sdhci_host *host) |
2460 | { | 2497 | { |
@@ -2484,8 +2521,13 @@ int sdhci_suspend_host(struct sdhci_host *host) | |||
2484 | return ret; | 2521 | return ret; |
2485 | } | 2522 | } |
2486 | 2523 | ||
2487 | free_irq(host->irq, host); | 2524 | if (!device_may_wakeup(mmc_dev(host->mmc))) { |
2488 | 2525 | sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK); | |
2526 | free_irq(host->irq, host); | ||
2527 | } else { | ||
2528 | sdhci_enable_irq_wakeups(host); | ||
2529 | enable_irq_wake(host->irq); | ||
2530 | } | ||
2489 | return ret; | 2531 | return ret; |
2490 | } | 2532 | } |
2491 | 2533 | ||
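Whether sdhci_suspend_host() keeps the interrupt armed as a wake source now hinges on device_may_wakeup(); platform glue opts in with device_init_wakeup(), as the sdhci-pxav3 probe change earlier in this diff does for MMC_PM_KEEP_POWER slots. A minimal sketch of that opt-in, assuming a hypothetical wakeup-capable platform probe:

```c
/* Minimal sketch: opting a slot into IRQ wakeups so that sdhci_suspend_host()
 * takes the sdhci_enable_irq_wakeups()/enable_irq_wake() path above. */
static void example_enable_sdio_wakeup(struct platform_device *pdev,
				       struct sdhci_host *host)
{
	device_init_wakeup(&pdev->dev, 1);
	host->mmc->pm_flags |= MMC_PM_WAKE_SDIO_IRQ;
}
```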
@@ -2500,10 +2542,15 @@ int sdhci_resume_host(struct sdhci_host *host) | |||
2500 | host->ops->enable_dma(host); | 2542 | host->ops->enable_dma(host); |
2501 | } | 2543 | } |
2502 | 2544 | ||
2503 | ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, | 2545 | if (!device_may_wakeup(mmc_dev(host->mmc))) { |
2504 | mmc_hostname(host->mmc), host); | 2546 | ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, |
2505 | if (ret) | 2547 | mmc_hostname(host->mmc), host); |
2506 | return ret; | 2548 | if (ret) |
2549 | return ret; | ||
2550 | } else { | ||
2551 | sdhci_disable_irq_wakeups(host); | ||
2552 | disable_irq_wake(host->irq); | ||
2553 | } | ||
2507 | 2554 | ||
2508 | if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) && | 2555 | if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) && |
2509 | (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { | 2556 | (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { |
@@ -2531,17 +2578,6 @@ int sdhci_resume_host(struct sdhci_host *host) | |||
2531 | } | 2578 | } |
2532 | 2579 | ||
2533 | EXPORT_SYMBOL_GPL(sdhci_resume_host); | 2580 | EXPORT_SYMBOL_GPL(sdhci_resume_host); |
2534 | |||
2535 | void sdhci_enable_irq_wakeups(struct sdhci_host *host) | ||
2536 | { | ||
2537 | u8 val; | ||
2538 | val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); | ||
2539 | val |= SDHCI_WAKE_ON_INT; | ||
2540 | sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); | ||
2541 | } | ||
2542 | |||
2543 | EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups); | ||
2544 | |||
2545 | #endif /* CONFIG_PM */ | 2581 | #endif /* CONFIG_PM */ |
2546 | 2582 | ||
2547 | #ifdef CONFIG_PM_RUNTIME | 2583 | #ifdef CONFIG_PM_RUNTIME |
@@ -2600,8 +2636,12 @@ int sdhci_runtime_resume_host(struct sdhci_host *host) | |||
2600 | sdhci_do_set_ios(host, &host->mmc->ios); | 2636 | sdhci_do_set_ios(host, &host->mmc->ios); |
2601 | 2637 | ||
2602 | sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios); | 2638 | sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios); |
2603 | if (host_flags & SDHCI_PV_ENABLED) | 2639 | if ((host_flags & SDHCI_PV_ENABLED) && |
2604 | sdhci_do_enable_preset_value(host, true); | 2640 | !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) { |
2641 | spin_lock_irqsave(&host->lock, flags); | ||
2642 | sdhci_enable_preset_value(host, true); | ||
2643 | spin_unlock_irqrestore(&host->lock, flags); | ||
2644 | } | ||
2605 | 2645 | ||
2606 | /* Set the re-tuning expiration flag */ | 2646 | /* Set the re-tuning expiration flag */ |
2607 | if (host->flags & SDHCI_USING_RETUNING_TIMER) | 2647 | if (host->flags & SDHCI_USING_RETUNING_TIMER) |
@@ -2936,7 +2976,11 @@ int sdhci_add_host(struct sdhci_host *host) | |||
2936 | } | 2976 | } |
2937 | 2977 | ||
2938 | #ifdef CONFIG_REGULATOR | 2978 | #ifdef CONFIG_REGULATOR |
2939 | if (host->vmmc) { | 2979 | /* |
2980 | * Voltage range check makes sense only if regulator reports | ||
2981 | * any voltage value. | ||
2982 | */ | ||
2983 | if (host->vmmc && regulator_get_voltage(host->vmmc) > 0) { | ||
2940 | ret = regulator_is_supported_voltage(host->vmmc, 2700000, | 2984 | ret = regulator_is_supported_voltage(host->vmmc, 2700000, |
2941 | 3600000); | 2985 | 3600000); |
2942 | if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_330))) | 2986 | if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_330))) |
@@ -3139,6 +3183,7 @@ int sdhci_add_host(struct sdhci_host *host) | |||
3139 | #ifdef SDHCI_USE_LEDS_CLASS | 3183 | #ifdef SDHCI_USE_LEDS_CLASS |
3140 | reset: | 3184 | reset: |
3141 | sdhci_reset(host, SDHCI_RESET_ALL); | 3185 | sdhci_reset(host, SDHCI_RESET_ALL); |
3186 | sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK); | ||
3142 | free_irq(host->irq, host); | 3187 | free_irq(host->irq, host); |
3143 | #endif | 3188 | #endif |
3144 | untasklet: | 3189 | untasklet: |
@@ -3181,6 +3226,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) | |||
3181 | if (!dead) | 3226 | if (!dead) |
3182 | sdhci_reset(host, SDHCI_RESET_ALL); | 3227 | sdhci_reset(host, SDHCI_RESET_ALL); |
3183 | 3228 | ||
3229 | sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK); | ||
3184 | free_irq(host->irq, host); | 3230 | free_irq(host->irq, host); |
3185 | 3231 | ||
3186 | del_timer_sync(&host->timer); | 3232 | del_timer_sync(&host->timer); |
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index a6d69b7bdea2..379e09d9f3c1 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h | |||
@@ -229,6 +229,18 @@ | |||
229 | 229 | ||
230 | /* 60-FB reserved */ | 230 | /* 60-FB reserved */ |
231 | 231 | ||
232 | #define SDHCI_PRESET_FOR_SDR12 0x66 | ||
233 | #define SDHCI_PRESET_FOR_SDR25 0x68 | ||
234 | #define SDHCI_PRESET_FOR_SDR50 0x6A | ||
235 | #define SDHCI_PRESET_FOR_SDR104 0x6C | ||
236 | #define SDHCI_PRESET_FOR_DDR50 0x6E | ||
237 | #define SDHCI_PRESET_DRV_MASK 0xC000 | ||
238 | #define SDHCI_PRESET_DRV_SHIFT 14 | ||
239 | #define SDHCI_PRESET_CLKGEN_SEL_MASK 0x400 | ||
240 | #define SDHCI_PRESET_CLKGEN_SEL_SHIFT 10 | ||
241 | #define SDHCI_PRESET_SDCLK_FREQ_MASK 0x3FF | ||
242 | #define SDHCI_PRESET_SDCLK_FREQ_SHIFT 0 | ||
243 | |||
232 | #define SDHCI_SLOT_INT_STATUS 0xFC | 244 | #define SDHCI_SLOT_INT_STATUS 0xFC |
233 | 245 | ||
234 | #define SDHCI_HOST_VERSION 0xFE | 246 | #define SDHCI_HOST_VERSION 0xFE |
@@ -269,7 +281,7 @@ struct sdhci_ops { | |||
269 | unsigned int (*get_max_clock)(struct sdhci_host *host); | 281 | unsigned int (*get_max_clock)(struct sdhci_host *host); |
270 | unsigned int (*get_min_clock)(struct sdhci_host *host); | 282 | unsigned int (*get_min_clock)(struct sdhci_host *host); |
271 | unsigned int (*get_timeout_clock)(struct sdhci_host *host); | 283 | unsigned int (*get_timeout_clock)(struct sdhci_host *host); |
272 | int (*platform_8bit_width)(struct sdhci_host *host, | 284 | int (*platform_bus_width)(struct sdhci_host *host, |
273 | int width); | 285 | int width); |
274 | void (*platform_send_init_74_clocks)(struct sdhci_host *host, | 286 | void (*platform_send_init_74_clocks)(struct sdhci_host *host, |
275 | u8 power_mode); | 287 | u8 power_mode); |
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 9a4c151067dd..ba76a532ae30 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c | |||
@@ -56,6 +56,7 @@ | |||
56 | #include <linux/mmc/sh_mmcif.h> | 56 | #include <linux/mmc/sh_mmcif.h> |
57 | #include <linux/mmc/slot-gpio.h> | 57 | #include <linux/mmc/slot-gpio.h> |
58 | #include <linux/mod_devicetable.h> | 58 | #include <linux/mod_devicetable.h> |
59 | #include <linux/mutex.h> | ||
59 | #include <linux/pagemap.h> | 60 | #include <linux/pagemap.h> |
60 | #include <linux/platform_device.h> | 61 | #include <linux/platform_device.h> |
61 | #include <linux/pm_qos.h> | 62 | #include <linux/pm_qos.h> |
@@ -88,6 +89,7 @@ | |||
88 | #define CMD_SET_TBIT (1 << 7) /* 1: tran mission bit "Low" */ | 89 | #define CMD_SET_TBIT (1 << 7) /* 1: tran mission bit "Low" */ |
89 | #define CMD_SET_OPDM (1 << 6) /* 1: open/drain */ | 90 | #define CMD_SET_OPDM (1 << 6) /* 1: open/drain */ |
90 | #define CMD_SET_CCSH (1 << 5) | 91 | #define CMD_SET_CCSH (1 << 5) |
92 | #define CMD_SET_DARS (1 << 2) /* Dual Data Rate */ | ||
91 | #define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */ | 93 | #define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */ |
92 | #define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */ | 94 | #define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */ |
93 | #define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */ | 95 | #define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */ |
@@ -127,6 +129,10 @@ | |||
127 | INT_CCSTO | INT_CRCSTO | INT_WDATTO | \ | 129 | INT_CCSTO | INT_CRCSTO | INT_WDATTO | \ |
128 | INT_RDATTO | INT_RBSYTO | INT_RSPTO) | 130 | INT_RDATTO | INT_RBSYTO | INT_RSPTO) |
129 | 131 | ||
132 | #define INT_ALL (INT_RBSYE | INT_CRSPE | INT_BUFREN | \ | ||
133 | INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \ | ||
134 | INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE) | ||
135 | |||
130 | /* CE_INT_MASK */ | 136 | /* CE_INT_MASK */ |
131 | #define MASK_ALL 0x00000000 | 137 | #define MASK_ALL 0x00000000 |
132 | #define MASK_MCCSDE (1 << 29) | 138 | #define MASK_MCCSDE (1 << 29) |
@@ -158,6 +164,11 @@ | |||
158 | MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \ | 164 | MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \ |
159 | MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO) | 165 | MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO) |
160 | 166 | ||
167 | #define MASK_CLEAN (INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE | \ | ||
168 | MASK_MBUFREN | MASK_MBUFWEN | \ | ||
169 | MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE | \ | ||
170 | MASK_MCMD12RBE | MASK_MCMD12CRE) | ||
171 | |||
161 | /* CE_HOST_STS1 */ | 172 | /* CE_HOST_STS1 */ |
162 | #define STS1_CMDSEQ (1 << 31) | 173 | #define STS1_CMDSEQ (1 << 31) |
163 | 174 | ||
@@ -195,6 +206,7 @@ enum mmcif_state { | |||
195 | STATE_IDLE, | 206 | STATE_IDLE, |
196 | STATE_REQUEST, | 207 | STATE_REQUEST, |
197 | STATE_IOS, | 208 | STATE_IOS, |
209 | STATE_TIMEOUT, | ||
198 | }; | 210 | }; |
199 | 211 | ||
200 | enum mmcif_wait_for { | 212 | enum mmcif_wait_for { |
@@ -216,6 +228,7 @@ struct sh_mmcif_host { | |||
216 | struct clk *hclk; | 228 | struct clk *hclk; |
217 | unsigned int clk; | 229 | unsigned int clk; |
218 | int bus_width; | 230 | int bus_width; |
231 | unsigned char timing; | ||
219 | bool sd_error; | 232 | bool sd_error; |
220 | bool dying; | 233 | bool dying; |
221 | long timeout; | 234 | long timeout; |
@@ -230,6 +243,7 @@ struct sh_mmcif_host { | |||
230 | int sg_blkidx; | 243 | int sg_blkidx; |
231 | bool power; | 244 | bool power; |
232 | bool card_present; | 245 | bool card_present; |
246 | struct mutex thread_lock; | ||
233 | 247 | ||
234 | /* DMA support */ | 248 | /* DMA support */ |
235 | struct dma_chan *chan_rx; | 249 | struct dma_chan *chan_rx; |
@@ -253,23 +267,14 @@ static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host, | |||
253 | static void mmcif_dma_complete(void *arg) | 267 | static void mmcif_dma_complete(void *arg) |
254 | { | 268 | { |
255 | struct sh_mmcif_host *host = arg; | 269 | struct sh_mmcif_host *host = arg; |
256 | struct mmc_data *data = host->mrq->data; | 270 | struct mmc_request *mrq = host->mrq; |
257 | 271 | ||
258 | dev_dbg(&host->pd->dev, "Command completed\n"); | 272 | dev_dbg(&host->pd->dev, "Command completed\n"); |
259 | 273 | ||
260 | if (WARN(!data, "%s: NULL data in DMA completion!\n", | 274 | if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n", |
261 | dev_name(&host->pd->dev))) | 275 | dev_name(&host->pd->dev))) |
262 | return; | 276 | return; |
263 | 277 | ||
264 | if (data->flags & MMC_DATA_READ) | ||
265 | dma_unmap_sg(host->chan_rx->device->dev, | ||
266 | data->sg, data->sg_len, | ||
267 | DMA_FROM_DEVICE); | ||
268 | else | ||
269 | dma_unmap_sg(host->chan_tx->device->dev, | ||
270 | data->sg, data->sg_len, | ||
271 | DMA_TO_DEVICE); | ||
272 | |||
273 | complete(&host->dma_complete); | 278 | complete(&host->dma_complete); |
274 | } | 279 | } |
275 | 280 | ||
@@ -423,8 +428,6 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host, | |||
423 | if (ret < 0) | 428 | if (ret < 0) |
424 | goto ecfgrx; | 429 | goto ecfgrx; |
425 | 430 | ||
426 | init_completion(&host->dma_complete); | ||
427 | |||
428 | return; | 431 | return; |
429 | 432 | ||
430 | ecfgrx: | 433 | ecfgrx: |
@@ -520,13 +523,16 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host) | |||
520 | } | 523 | } |
521 | 524 | ||
522 | if (state2 & STS2_CRC_ERR) { | 525 | if (state2 & STS2_CRC_ERR) { |
523 | dev_dbg(&host->pd->dev, ": CRC error\n"); | 526 | dev_err(&host->pd->dev, " CRC error: state %u, wait %u\n", |
527 | host->state, host->wait_for); | ||
524 | ret = -EIO; | 528 | ret = -EIO; |
525 | } else if (state2 & STS2_TIMEOUT_ERR) { | 529 | } else if (state2 & STS2_TIMEOUT_ERR) { |
526 | dev_dbg(&host->pd->dev, ": Timeout\n"); | 530 | dev_err(&host->pd->dev, " Timeout: state %u, wait %u\n", |
531 | host->state, host->wait_for); | ||
527 | ret = -ETIMEDOUT; | 532 | ret = -ETIMEDOUT; |
528 | } else { | 533 | } else { |
529 | dev_dbg(&host->pd->dev, ": End/Index error\n"); | 534 | dev_dbg(&host->pd->dev, " End/Index error: state %u, wait %u\n", |
535 | host->state, host->wait_for); | ||
530 | ret = -EIO; | 536 | ret = -EIO; |
531 | } | 537 | } |
532 | return ret; | 538 | return ret; |
@@ -549,10 +555,7 @@ static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p) | |||
549 | host->pio_ptr = p; | 555 | host->pio_ptr = p; |
550 | } | 556 | } |
551 | 557 | ||
552 | if (host->sg_idx == data->sg_len) | 558 | return host->sg_idx != data->sg_len; |
553 | return false; | ||
554 | |||
555 | return true; | ||
556 | } | 559 | } |
557 | 560 | ||
558 | static void sh_mmcif_single_read(struct sh_mmcif_host *host, | 561 | static void sh_mmcif_single_read(struct sh_mmcif_host *host, |
@@ -562,7 +565,6 @@ static void sh_mmcif_single_read(struct sh_mmcif_host *host, | |||
562 | BLOCK_SIZE_MASK) + 3; | 565 | BLOCK_SIZE_MASK) + 3; |
563 | 566 | ||
564 | host->wait_for = MMCIF_WAIT_FOR_READ; | 567 | host->wait_for = MMCIF_WAIT_FOR_READ; |
565 | schedule_delayed_work(&host->timeout_work, host->timeout); | ||
566 | 568 | ||
567 | /* buf read enable */ | 569 | /* buf read enable */ |
568 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); | 570 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); |
@@ -576,6 +578,7 @@ static bool sh_mmcif_read_block(struct sh_mmcif_host *host) | |||
576 | 578 | ||
577 | if (host->sd_error) { | 579 | if (host->sd_error) { |
578 | data->error = sh_mmcif_error_manage(host); | 580 | data->error = sh_mmcif_error_manage(host); |
581 | dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); | ||
579 | return false; | 582 | return false; |
580 | } | 583 | } |
581 | 584 | ||
@@ -604,7 +607,7 @@ static void sh_mmcif_multi_read(struct sh_mmcif_host *host, | |||
604 | host->sg_idx = 0; | 607 | host->sg_idx = 0; |
605 | host->sg_blkidx = 0; | 608 | host->sg_blkidx = 0; |
606 | host->pio_ptr = sg_virt(data->sg); | 609 | host->pio_ptr = sg_virt(data->sg); |
607 | schedule_delayed_work(&host->timeout_work, host->timeout); | 610 | |
608 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); | 611 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); |
609 | } | 612 | } |
610 | 613 | ||
@@ -616,6 +619,7 @@ static bool sh_mmcif_mread_block(struct sh_mmcif_host *host) | |||
616 | 619 | ||
617 | if (host->sd_error) { | 620 | if (host->sd_error) { |
618 | data->error = sh_mmcif_error_manage(host); | 621 | data->error = sh_mmcif_error_manage(host); |
622 | dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); | ||
619 | return false; | 623 | return false; |
620 | } | 624 | } |
621 | 625 | ||
@@ -627,7 +631,6 @@ static bool sh_mmcif_mread_block(struct sh_mmcif_host *host) | |||
627 | if (!sh_mmcif_next_block(host, p)) | 631 | if (!sh_mmcif_next_block(host, p)) |
628 | return false; | 632 | return false; |
629 | 633 | ||
630 | schedule_delayed_work(&host->timeout_work, host->timeout); | ||
631 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); | 634 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); |
632 | 635 | ||
633 | return true; | 636 | return true; |
@@ -640,7 +643,6 @@ static void sh_mmcif_single_write(struct sh_mmcif_host *host, | |||
640 | BLOCK_SIZE_MASK) + 3; | 643 | BLOCK_SIZE_MASK) + 3; |
641 | 644 | ||
642 | host->wait_for = MMCIF_WAIT_FOR_WRITE; | 645 | host->wait_for = MMCIF_WAIT_FOR_WRITE; |
643 | schedule_delayed_work(&host->timeout_work, host->timeout); | ||
644 | 646 | ||
645 | /* buf write enable */ | 647 | /* buf write enable */ |
646 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); | 648 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); |
@@ -654,6 +656,7 @@ static bool sh_mmcif_write_block(struct sh_mmcif_host *host) | |||
654 | 656 | ||
655 | if (host->sd_error) { | 657 | if (host->sd_error) { |
656 | data->error = sh_mmcif_error_manage(host); | 658 | data->error = sh_mmcif_error_manage(host); |
659 | dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); | ||
657 | return false; | 660 | return false; |
658 | } | 661 | } |
659 | 662 | ||
@@ -682,7 +685,7 @@ static void sh_mmcif_multi_write(struct sh_mmcif_host *host, | |||
682 | host->sg_idx = 0; | 685 | host->sg_idx = 0; |
683 | host->sg_blkidx = 0; | 686 | host->sg_blkidx = 0; |
684 | host->pio_ptr = sg_virt(data->sg); | 687 | host->pio_ptr = sg_virt(data->sg); |
685 | schedule_delayed_work(&host->timeout_work, host->timeout); | 688 | |
686 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); | 689 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); |
687 | } | 690 | } |
688 | 691 | ||
@@ -694,6 +697,7 @@ static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host) | |||
694 | 697 | ||
695 | if (host->sd_error) { | 698 | if (host->sd_error) { |
696 | data->error = sh_mmcif_error_manage(host); | 699 | data->error = sh_mmcif_error_manage(host); |
700 | dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); | ||
697 | return false; | 701 | return false; |
698 | } | 702 | } |
699 | 703 | ||
@@ -705,7 +709,6 @@ static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host) | |||
705 | if (!sh_mmcif_next_block(host, p)) | 709 | if (!sh_mmcif_next_block(host, p)) |
706 | return false; | 710 | return false; |
707 | 711 | ||
708 | schedule_delayed_work(&host->timeout_work, host->timeout); | ||
709 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); | 712 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); |
710 | 713 | ||
711 | return true; | 714 | return true; |
@@ -756,6 +759,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, | |||
756 | } | 759 | } |
757 | switch (opc) { | 760 | switch (opc) { |
758 | /* RBSY */ | 761 | /* RBSY */ |
762 | case MMC_SLEEP_AWAKE: | ||
759 | case MMC_SWITCH: | 763 | case MMC_SWITCH: |
760 | case MMC_STOP_TRANSMISSION: | 764 | case MMC_STOP_TRANSMISSION: |
761 | case MMC_SET_WRITE_PROT: | 765 | case MMC_SET_WRITE_PROT: |
@@ -781,6 +785,17 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, | |||
781 | dev_err(&host->pd->dev, "Unsupported bus width.\n"); | 785 | dev_err(&host->pd->dev, "Unsupported bus width.\n"); |
782 | break; | 786 | break; |
783 | } | 787 | } |
788 | switch (host->timing) { | ||
789 | case MMC_TIMING_UHS_DDR50: | ||
790 | /* | ||
791 | * MMC core will only set this timing if the host | ||
792 | * advertises the MMC_CAP_UHS_DDR50 capability. MMCIF | ||
793 | * implementations with this capability, e.g. sh73a0, | ||
794 | * will have to set it in their platform data. | ||
795 | */ | ||
796 | tmp |= CMD_SET_DARS; | ||
797 | break; | ||
798 | } | ||
784 | } | 799 | } |
785 | /* DWEN */ | 800 | /* DWEN */ |
786 | if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) | 801 | if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) |
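A board that wants this new DDR path has to opt in, as the in-line comment notes: the core only selects MMC_TIMING_UHS_DDR50 when the host advertises MMC_CAP_UHS_DDR50, and for MMCIF that means platform data (the driver ORs pd->caps into mmc->caps at probe). A minimal, hypothetical board-file fragment follows; only the .caps field usage is taken from this series, everything else is illustrative.

#include <linux/mmc/host.h>
#include <linux/mmc/sh_mmcif.h>

/* Hypothetical platform data for an sh73a0-style MMCIF: advertising
 * MMC_CAP_UHS_DDR50 lets the core pick DDR timing, which in turn makes
 * sh_mmcif_set_cmd() set CMD_SET_DARS. */
static struct sh_mmcif_plat_data example_mmcif_pdata = {
	.caps	= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
		  MMC_CAP_UHS_DDR50,
};

The structure would then be attached as the MMCIF platform device's dev.platform_data in the board's device registration.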
@@ -824,7 +839,7 @@ static int sh_mmcif_data_trans(struct sh_mmcif_host *host, | |||
824 | sh_mmcif_single_read(host, mrq); | 839 | sh_mmcif_single_read(host, mrq); |
825 | return 0; | 840 | return 0; |
826 | default: | 841 | default: |
827 | dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc); | 842 | dev_err(&host->pd->dev, "Unsupported CMD%d\n", opc); |
828 | return -EINVAL; | 843 | return -EINVAL; |
829 | } | 844 | } |
830 | } | 845 | } |
@@ -838,6 +853,7 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host, | |||
838 | 853 | ||
839 | switch (opc) { | 854 | switch (opc) { |
840 | /* response busy check */ | 855 | /* response busy check */ |
856 | case MMC_SLEEP_AWAKE: | ||
841 | case MMC_SWITCH: | 857 | case MMC_SWITCH: |
842 | case MMC_STOP_TRANSMISSION: | 858 | case MMC_STOP_TRANSMISSION: |
843 | case MMC_SET_WRITE_PROT: | 859 | case MMC_SET_WRITE_PROT: |
@@ -885,7 +901,6 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host, | |||
885 | } | 901 | } |
886 | 902 | ||
887 | host->wait_for = MMCIF_WAIT_FOR_STOP; | 903 | host->wait_for = MMCIF_WAIT_FOR_STOP; |
888 | schedule_delayed_work(&host->timeout_work, host->timeout); | ||
889 | } | 904 | } |
890 | 905 | ||
891 | static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) | 906 | static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) |
@@ -895,6 +910,7 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
895 | 910 | ||
896 | spin_lock_irqsave(&host->lock, flags); | 911 | spin_lock_irqsave(&host->lock, flags); |
897 | if (host->state != STATE_IDLE) { | 912 | if (host->state != STATE_IDLE) { |
913 | dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state); | ||
898 | spin_unlock_irqrestore(&host->lock, flags); | 914 | spin_unlock_irqrestore(&host->lock, flags); |
899 | mrq->cmd->error = -EAGAIN; | 915 | mrq->cmd->error = -EAGAIN; |
900 | mmc_request_done(mmc, mrq); | 916 | mmc_request_done(mmc, mrq); |
@@ -911,6 +927,7 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
911 | if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR) | 927 | if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR) |
912 | break; | 928 | break; |
913 | case MMC_APP_CMD: | 929 | case MMC_APP_CMD: |
930 | case SD_IO_RW_DIRECT: | ||
914 | host->state = STATE_IDLE; | 931 | host->state = STATE_IDLE; |
915 | mrq->cmd->error = -ETIMEDOUT; | 932 | mrq->cmd->error = -ETIMEDOUT; |
916 | mmc_request_done(mmc, mrq); | 933 | mmc_request_done(mmc, mrq); |
@@ -957,6 +974,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
957 | 974 | ||
958 | spin_lock_irqsave(&host->lock, flags); | 975 | spin_lock_irqsave(&host->lock, flags); |
959 | if (host->state != STATE_IDLE) { | 976 | if (host->state != STATE_IDLE) { |
977 | dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state); | ||
960 | spin_unlock_irqrestore(&host->lock, flags); | 978 | spin_unlock_irqrestore(&host->lock, flags); |
961 | return; | 979 | return; |
962 | } | 980 | } |
@@ -981,7 +999,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
981 | } | 999 | } |
982 | } | 1000 | } |
983 | if (host->power) { | 1001 | if (host->power) { |
984 | pm_runtime_put(&host->pd->dev); | 1002 | pm_runtime_put_sync(&host->pd->dev); |
985 | clk_disable(host->hclk); | 1003 | clk_disable(host->hclk); |
986 | host->power = false; | 1004 | host->power = false; |
987 | if (ios->power_mode == MMC_POWER_OFF) | 1005 | if (ios->power_mode == MMC_POWER_OFF) |
@@ -1001,6 +1019,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
1001 | sh_mmcif_clock_control(host, ios->clock); | 1019 | sh_mmcif_clock_control(host, ios->clock); |
1002 | } | 1020 | } |
1003 | 1021 | ||
1022 | host->timing = ios->timing; | ||
1004 | host->bus_width = ios->bus_width; | 1023 | host->bus_width = ios->bus_width; |
1005 | host->state = STATE_IDLE; | 1024 | host->state = STATE_IDLE; |
1006 | } | 1025 | } |
@@ -1038,14 +1057,14 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host) | |||
1038 | case MMC_SELECT_CARD: | 1057 | case MMC_SELECT_CARD: |
1039 | case MMC_APP_CMD: | 1058 | case MMC_APP_CMD: |
1040 | cmd->error = -ETIMEDOUT; | 1059 | cmd->error = -ETIMEDOUT; |
1041 | host->sd_error = false; | ||
1042 | break; | 1060 | break; |
1043 | default: | 1061 | default: |
1044 | cmd->error = sh_mmcif_error_manage(host); | 1062 | cmd->error = sh_mmcif_error_manage(host); |
1045 | dev_dbg(&host->pd->dev, "Cmd(d'%d) error %d\n", | ||
1046 | cmd->opcode, cmd->error); | ||
1047 | break; | 1063 | break; |
1048 | } | 1064 | } |
1065 | dev_dbg(&host->pd->dev, "CMD%d error %d\n", | ||
1066 | cmd->opcode, cmd->error); | ||
1067 | host->sd_error = false; | ||
1049 | return false; | 1068 | return false; |
1050 | } | 1069 | } |
1051 | if (!(cmd->flags & MMC_RSP_PRESENT)) { | 1070 | if (!(cmd->flags & MMC_RSP_PRESENT)) { |
@@ -1058,6 +1077,12 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host) | |||
1058 | if (!data) | 1077 | if (!data) |
1059 | return false; | 1078 | return false; |
1060 | 1079 | ||
1080 | /* | ||
1081 | * Completion can be signalled from the DMA callback and from the error | ||
1082 | * path, so it has to be reset here, before setting .dma_active | ||
1083 | */ | ||
1084 | init_completion(&host->dma_complete); | ||
1085 | |||
1061 | if (data->flags & MMC_DATA_READ) { | 1086 | if (data->flags & MMC_DATA_READ) { |
1062 | if (host->chan_rx) | 1087 | if (host->chan_rx) |
1063 | sh_mmcif_start_dma_rx(host); | 1088 | sh_mmcif_start_dma_rx(host); |
@@ -1068,34 +1093,47 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host) | |||
1068 | 1093 | ||
1069 | if (!host->dma_active) { | 1094 | if (!host->dma_active) { |
1070 | data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode); | 1095 | data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode); |
1071 | if (!data->error) | 1096 | return !data->error; |
1072 | return true; | ||
1073 | return false; | ||
1074 | } | 1097 | } |
1075 | 1098 | ||
1076 | /* Running in the IRQ thread, can sleep */ | 1099 | /* Running in the IRQ thread, can sleep */ |
1077 | time = wait_for_completion_interruptible_timeout(&host->dma_complete, | 1100 | time = wait_for_completion_interruptible_timeout(&host->dma_complete, |
1078 | host->timeout); | 1101 | host->timeout); |
1102 | |||
1103 | if (data->flags & MMC_DATA_READ) | ||
1104 | dma_unmap_sg(host->chan_rx->device->dev, | ||
1105 | data->sg, data->sg_len, | ||
1106 | DMA_FROM_DEVICE); | ||
1107 | else | ||
1108 | dma_unmap_sg(host->chan_tx->device->dev, | ||
1109 | data->sg, data->sg_len, | ||
1110 | DMA_TO_DEVICE); | ||
1111 | |||
1079 | if (host->sd_error) { | 1112 | if (host->sd_error) { |
1080 | dev_err(host->mmc->parent, | 1113 | dev_err(host->mmc->parent, |
1081 | "Error IRQ while waiting for DMA completion!\n"); | 1114 | "Error IRQ while waiting for DMA completion!\n"); |
1082 | /* Woken up by an error IRQ: abort DMA */ | 1115 | /* Woken up by an error IRQ: abort DMA */ |
1083 | if (data->flags & MMC_DATA_READ) | ||
1084 | dmaengine_terminate_all(host->chan_rx); | ||
1085 | else | ||
1086 | dmaengine_terminate_all(host->chan_tx); | ||
1087 | data->error = sh_mmcif_error_manage(host); | 1116 | data->error = sh_mmcif_error_manage(host); |
1088 | } else if (!time) { | 1117 | } else if (!time) { |
1118 | dev_err(host->mmc->parent, "DMA timeout!\n"); | ||
1089 | data->error = -ETIMEDOUT; | 1119 | data->error = -ETIMEDOUT; |
1090 | } else if (time < 0) { | 1120 | } else if (time < 0) { |
1121 | dev_err(host->mmc->parent, | ||
1122 | "wait_for_completion_...() error %ld!\n", time); | ||
1091 | data->error = time; | 1123 | data->error = time; |
1092 | } | 1124 | } |
1093 | sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, | 1125 | sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, |
1094 | BUF_ACC_DMAREN | BUF_ACC_DMAWEN); | 1126 | BUF_ACC_DMAREN | BUF_ACC_DMAWEN); |
1095 | host->dma_active = false; | 1127 | host->dma_active = false; |
1096 | 1128 | ||
1097 | if (data->error) | 1129 | if (data->error) { |
1098 | data->bytes_xfered = 0; | 1130 | data->bytes_xfered = 0; |
1131 | /* Abort DMA */ | ||
1132 | if (data->flags & MMC_DATA_READ) | ||
1133 | dmaengine_terminate_all(host->chan_rx); | ||
1134 | else | ||
1135 | dmaengine_terminate_all(host->chan_tx); | ||
1136 | } | ||
1099 | 1137 | ||
1100 | return false; | 1138 | return false; |
1101 | } | 1139 | } |
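The re-initialisation comment is the key to this hunk: complete() may come from the normal DMA callback or from the error handling, so arming the completion once in sh_mmcif_request_dma() (as before) would let a completion left over from a previous request satisfy the next wait. A generic toy model of the pattern, with invented names:

#include <linux/completion.h>

struct xfer_ctx {
	struct completion done;
};

/* May be called from a DMA callback or from an error handler. */
static void xfer_complete(struct xfer_ctx *ctx)
{
	complete(&ctx->done);
}

static long xfer_run(struct xfer_ctx *ctx, unsigned long timeout)
{
	/* Re-arm per transfer so a completion already signalled for an
	 * earlier transfer cannot satisfy this wait. */
	init_completion(&ctx->done);
	/* ... submit the transfer with xfer_complete() as its callback ... */
	return wait_for_completion_interruptible_timeout(&ctx->done, timeout);
}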
@@ -1103,10 +1141,21 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host) | |||
1103 | static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) | 1141 | static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) |
1104 | { | 1142 | { |
1105 | struct sh_mmcif_host *host = dev_id; | 1143 | struct sh_mmcif_host *host = dev_id; |
1106 | struct mmc_request *mrq = host->mrq; | 1144 | struct mmc_request *mrq; |
1145 | bool wait = false; | ||
1107 | 1146 | ||
1108 | cancel_delayed_work_sync(&host->timeout_work); | 1147 | cancel_delayed_work_sync(&host->timeout_work); |
1109 | 1148 | ||
1149 | mutex_lock(&host->thread_lock); | ||
1150 | |||
1151 | mrq = host->mrq; | ||
1152 | if (!mrq) { | ||
1153 | dev_dbg(&host->pd->dev, "IRQ thread state %u, wait %u: NULL mrq!\n", | ||
1154 | host->state, host->wait_for); | ||
1155 | mutex_unlock(&host->thread_lock); | ||
1156 | return IRQ_HANDLED; | ||
1157 | } | ||
1158 | |||
1110 | /* | 1159 | /* |
1111 | * All handlers return true, if processing continues, and false, if the | 1160 | * All handlers return true, if processing continues, and false, if the |
1112 | * request has to be completed - successfully or not | 1161 | * request has to be completed - successfully or not |
@@ -1114,35 +1163,32 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) | |||
1114 | switch (host->wait_for) { | 1163 | switch (host->wait_for) { |
1115 | case MMCIF_WAIT_FOR_REQUEST: | 1164 | case MMCIF_WAIT_FOR_REQUEST: |
1116 | /* We're too late, the timeout has already kicked in */ | 1165 | /* We're too late, the timeout has already kicked in */ |
1166 | mutex_unlock(&host->thread_lock); | ||
1117 | return IRQ_HANDLED; | 1167 | return IRQ_HANDLED; |
1118 | case MMCIF_WAIT_FOR_CMD: | 1168 | case MMCIF_WAIT_FOR_CMD: |
1119 | if (sh_mmcif_end_cmd(host)) | 1169 | /* Wait for data? */ |
1120 | /* Wait for data */ | 1170 | wait = sh_mmcif_end_cmd(host); |
1121 | return IRQ_HANDLED; | ||
1122 | break; | 1171 | break; |
1123 | case MMCIF_WAIT_FOR_MREAD: | 1172 | case MMCIF_WAIT_FOR_MREAD: |
1124 | if (sh_mmcif_mread_block(host)) | 1173 | /* Wait for more data? */ |
1125 | /* Wait for more data */ | 1174 | wait = sh_mmcif_mread_block(host); |
1126 | return IRQ_HANDLED; | ||
1127 | break; | 1175 | break; |
1128 | case MMCIF_WAIT_FOR_READ: | 1176 | case MMCIF_WAIT_FOR_READ: |
1129 | if (sh_mmcif_read_block(host)) | 1177 | /* Wait for data end? */ |
1130 | /* Wait for data end */ | 1178 | wait = sh_mmcif_read_block(host); |
1131 | return IRQ_HANDLED; | ||
1132 | break; | 1179 | break; |
1133 | case MMCIF_WAIT_FOR_MWRITE: | 1180 | /* Wait for data to write? */ |
1134 | if (sh_mmcif_mwrite_block(host)) | 1181 | /* Wait data to write? */ |
1135 | /* Wait data to write */ | 1182 | wait = sh_mmcif_mwrite_block(host); |
1136 | return IRQ_HANDLED; | ||
1137 | break; | 1183 | break; |
1138 | case MMCIF_WAIT_FOR_WRITE: | 1184 | case MMCIF_WAIT_FOR_WRITE: |
1139 | if (sh_mmcif_write_block(host)) | 1185 | /* Wait for data end? */ |
1140 | /* Wait for data end */ | 1186 | wait = sh_mmcif_write_block(host); |
1141 | return IRQ_HANDLED; | ||
1142 | break; | 1187 | break; |
1143 | case MMCIF_WAIT_FOR_STOP: | 1188 | case MMCIF_WAIT_FOR_STOP: |
1144 | if (host->sd_error) { | 1189 | if (host->sd_error) { |
1145 | mrq->stop->error = sh_mmcif_error_manage(host); | 1190 | mrq->stop->error = sh_mmcif_error_manage(host); |
1191 | dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->stop->error); | ||
1146 | break; | 1192 | break; |
1147 | } | 1193 | } |
1148 | sh_mmcif_get_cmd12response(host, mrq->stop); | 1194 | sh_mmcif_get_cmd12response(host, mrq->stop); |
@@ -1150,13 +1196,22 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) | |||
1150 | break; | 1196 | break; |
1151 | case MMCIF_WAIT_FOR_READ_END: | 1197 | case MMCIF_WAIT_FOR_READ_END: |
1152 | case MMCIF_WAIT_FOR_WRITE_END: | 1198 | case MMCIF_WAIT_FOR_WRITE_END: |
1153 | if (host->sd_error) | 1199 | if (host->sd_error) { |
1154 | mrq->data->error = sh_mmcif_error_manage(host); | 1200 | mrq->data->error = sh_mmcif_error_manage(host); |
1201 | dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->data->error); | ||
1202 | } | ||
1155 | break; | 1203 | break; |
1156 | default: | 1204 | default: |
1157 | BUG(); | 1205 | BUG(); |
1158 | } | 1206 | } |
1159 | 1207 | ||
1208 | if (wait) { | ||
1209 | schedule_delayed_work(&host->timeout_work, host->timeout); | ||
1210 | /* Wait for more data */ | ||
1211 | mutex_unlock(&host->thread_lock); | ||
1212 | return IRQ_HANDLED; | ||
1213 | } | ||
1214 | |||
1160 | if (host->wait_for != MMCIF_WAIT_FOR_STOP) { | 1215 | if (host->wait_for != MMCIF_WAIT_FOR_STOP) { |
1161 | struct mmc_data *data = mrq->data; | 1216 | struct mmc_data *data = mrq->data; |
1162 | if (!mrq->cmd->error && data && !data->error) | 1217 | if (!mrq->cmd->error && data && !data->error) |
@@ -1165,8 +1220,11 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) | |||
1165 | 1220 | ||
1166 | if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) { | 1221 | if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) { |
1167 | sh_mmcif_stop_cmd(host, mrq); | 1222 | sh_mmcif_stop_cmd(host, mrq); |
1168 | if (!mrq->stop->error) | 1223 | if (!mrq->stop->error) { |
1224 | schedule_delayed_work(&host->timeout_work, host->timeout); | ||
1225 | mutex_unlock(&host->thread_lock); | ||
1169 | return IRQ_HANDLED; | 1226 | return IRQ_HANDLED; |
1227 | } | ||
1170 | } | 1228 | } |
1171 | } | 1229 | } |
1172 | 1230 | ||
@@ -1175,6 +1233,8 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) | |||
1175 | host->mrq = NULL; | 1233 | host->mrq = NULL; |
1176 | mmc_request_done(host->mmc, mrq); | 1234 | mmc_request_done(host->mmc, mrq); |
1177 | 1235 | ||
1236 | mutex_unlock(&host->thread_lock); | ||
1237 | |||
1178 | return IRQ_HANDLED; | 1238 | return IRQ_HANDLED; |
1179 | } | 1239 | } |
1180 | 1240 | ||
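The new thread_lock is what makes the rework above safe: the threaded handler, which may sleep, now serializes against the timeout worker, and both re-check host->mrq under the lock before touching the request. A stripped-down sketch of the hard-IRQ / threaded-IRQ split with invented names:

#include <linux/interrupt.h>
#include <linux/mutex.h>

struct example_host {
	struct mutex thread_lock;
	void *mrq;			/* current request, NULL when idle */
};

static irqreturn_t example_hardirq(int irq, void *dev_id)
{
	/* acknowledge/mask the hardware here, then defer the real work */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t example_thread(int irq, void *dev_id)
{
	struct example_host *host = dev_id;

	mutex_lock(&host->thread_lock);
	if (host->mrq) {
		/* process and complete the request; may sleep */
	}
	mutex_unlock(&host->thread_lock);
	return IRQ_HANDLED;
}

/* at probe time:
 *	mutex_init(&host->thread_lock);
 *	request_threaded_irq(irq, example_hardirq, example_thread, 0,
 *			     "example", host);
 */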
@@ -1182,56 +1242,22 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id) | |||
1182 | { | 1242 | { |
1183 | struct sh_mmcif_host *host = dev_id; | 1243 | struct sh_mmcif_host *host = dev_id; |
1184 | u32 state; | 1244 | u32 state; |
1185 | int err = 0; | ||
1186 | 1245 | ||
1187 | state = sh_mmcif_readl(host->addr, MMCIF_CE_INT); | 1246 | state = sh_mmcif_readl(host->addr, MMCIF_CE_INT); |
1247 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); | ||
1248 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN); | ||
1188 | 1249 | ||
1189 | if (state & INT_ERR_STS) { | 1250 | if (state & ~MASK_CLEAN) |
1190 | /* error interrupts - process first */ | 1251 | dev_dbg(&host->pd->dev, "IRQ state = 0x%08x incompletely cleared\n", |
1191 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); | 1252 | state); |
1192 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); | 1253 | |
1193 | err = 1; | 1254 | if (state & INT_ERR_STS || state & ~INT_ALL) { |
1194 | } else if (state & INT_RBSYE) { | ||
1195 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, | ||
1196 | ~(INT_RBSYE | INT_CRSPE)); | ||
1197 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE); | ||
1198 | } else if (state & INT_CRSPE) { | ||
1199 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_CRSPE); | ||
1200 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE); | ||
1201 | } else if (state & INT_BUFREN) { | ||
1202 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFREN); | ||
1203 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); | ||
1204 | } else if (state & INT_BUFWEN) { | ||
1205 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFWEN); | ||
1206 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); | ||
1207 | } else if (state & INT_CMD12DRE) { | ||
1208 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, | ||
1209 | ~(INT_CMD12DRE | INT_CMD12RBE | | ||
1210 | INT_CMD12CRE | INT_BUFRE)); | ||
1211 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE); | ||
1212 | } else if (state & INT_BUFRE) { | ||
1213 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE); | ||
1214 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); | ||
1215 | } else if (state & INT_DTRANE) { | ||
1216 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, | ||
1217 | ~(INT_CMD12DRE | INT_CMD12RBE | | ||
1218 | INT_CMD12CRE | INT_DTRANE)); | ||
1219 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); | ||
1220 | } else if (state & INT_CMD12RBE) { | ||
1221 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, | ||
1222 | ~(INT_CMD12RBE | INT_CMD12CRE)); | ||
1223 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); | ||
1224 | } else { | ||
1225 | dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state); | ||
1226 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); | ||
1227 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); | ||
1228 | err = 1; | ||
1229 | } | ||
1230 | if (err) { | ||
1231 | host->sd_error = true; | 1255 | host->sd_error = true; |
1232 | dev_dbg(&host->pd->dev, "int err state = %08x\n", state); | 1256 | dev_dbg(&host->pd->dev, "int err state = 0x%08x\n", state); |
1233 | } | 1257 | } |
1234 | if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) { | 1258 | if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) { |
1259 | if (!host->mrq) | ||
1260 | dev_dbg(&host->pd->dev, "NULL IRQ state = 0x%08x\n", state); | ||
1235 | if (!host->dma_active) | 1261 | if (!host->dma_active) |
1236 | return IRQ_WAKE_THREAD; | 1262 | return IRQ_WAKE_THREAD; |
1237 | else if (host->sd_error) | 1263 | else if (host->sd_error) |
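The rewritten hard handler replaces the per-bit if/else cascade with a read / acknowledge / mask sequence: read CE_INT once, write back the complement so every bit that fired is cleared (the acknowledge-by-writing-0 behaviour is inferred from the driver's own idiom, not from a datasheet), and mask exactly the handled sources until the IRQ thread has run. A pure-logic sketch of that plan, with a placeholder for the MASK_CLEAN set:

#include <linux/types.h>

#define EXAMPLE_HANDLED_SOURCES	0x0000ffffu	/* placeholder for a MASK_CLEAN-style set */

/* Given the raw status, compute what to write back to acknowledge it
 * (write 0 to each bit that fired) and which sources to mask off until
 * the threaded handler has processed them. */
static void example_ack_plan(u32 state, u32 *ack, u32 *mask_off)
{
	*ack = ~state;				/* clears exactly the bits set in state */
	*mask_off = state & EXAMPLE_HANDLED_SOURCES;
}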
@@ -1248,11 +1274,24 @@ static void mmcif_timeout_work(struct work_struct *work) | |||
1248 | struct delayed_work *d = container_of(work, struct delayed_work, work); | 1274 | struct delayed_work *d = container_of(work, struct delayed_work, work); |
1249 | struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work); | 1275 | struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work); |
1250 | struct mmc_request *mrq = host->mrq; | 1276 | struct mmc_request *mrq = host->mrq; |
1277 | unsigned long flags; | ||
1251 | 1278 | ||
1252 | if (host->dying) | 1279 | if (host->dying) |
1253 | /* Don't run after mmc_remove_host() */ | 1280 | /* Don't run after mmc_remove_host() */ |
1254 | return; | 1281 | return; |
1255 | 1282 | ||
1283 | dev_err(&host->pd->dev, "Timeout waiting for %u on CMD%u\n", | ||
1284 | host->wait_for, mrq->cmd->opcode); | ||
1285 | |||
1286 | spin_lock_irqsave(&host->lock, flags); | ||
1287 | if (host->state == STATE_IDLE) { | ||
1288 | spin_unlock_irqrestore(&host->lock, flags); | ||
1289 | return; | ||
1290 | } | ||
1291 | |||
1292 | host->state = STATE_TIMEOUT; | ||
1293 | spin_unlock_irqrestore(&host->lock, flags); | ||
1294 | |||
1256 | /* | 1295 | /* |
1257 | * Handle races with cancel_delayed_work(), unless | 1296 | * Handle races with cancel_delayed_work(), unless |
1258 | * cancel_delayed_work_sync() is used | 1297 | * cancel_delayed_work_sync() is used |
@@ -1306,10 +1345,11 @@ static int sh_mmcif_probe(struct platform_device *pdev) | |||
1306 | struct sh_mmcif_plat_data *pd = pdev->dev.platform_data; | 1345 | struct sh_mmcif_plat_data *pd = pdev->dev.platform_data; |
1307 | struct resource *res; | 1346 | struct resource *res; |
1308 | void __iomem *reg; | 1347 | void __iomem *reg; |
1348 | const char *name; | ||
1309 | 1349 | ||
1310 | irq[0] = platform_get_irq(pdev, 0); | 1350 | irq[0] = platform_get_irq(pdev, 0); |
1311 | irq[1] = platform_get_irq(pdev, 1); | 1351 | irq[1] = platform_get_irq(pdev, 1); |
1312 | if (irq[0] < 0 || irq[1] < 0) { | 1352 | if (irq[0] < 0) { |
1313 | dev_err(&pdev->dev, "Get irq error\n"); | 1353 | dev_err(&pdev->dev, "Get irq error\n"); |
1314 | return -ENXIO; | 1354 | return -ENXIO; |
1315 | } | 1355 | } |
@@ -1329,10 +1369,11 @@ static int sh_mmcif_probe(struct platform_device *pdev) | |||
1329 | ret = -ENOMEM; | 1369 | ret = -ENOMEM; |
1330 | goto ealloch; | 1370 | goto ealloch; |
1331 | } | 1371 | } |
1372 | mmc_of_parse(mmc); | ||
1332 | host = mmc_priv(mmc); | 1373 | host = mmc_priv(mmc); |
1333 | host->mmc = mmc; | 1374 | host->mmc = mmc; |
1334 | host->addr = reg; | 1375 | host->addr = reg; |
1335 | host->timeout = 1000; | 1376 | host->timeout = msecs_to_jiffies(1000); |
1336 | 1377 | ||
1337 | host->pd = pdev; | 1378 | host->pd = pdev; |
1338 | 1379 | ||
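The msecs_to_jiffies() change is a unit fix rather than a behavioural tweak: host->timeout feeds schedule_delayed_work() and wait_for_completion_interruptible_timeout(), both of which take jiffies, so a literal 1000 was HZ-dependent. A small self-contained illustration:

#include <linux/jiffies.h>

/* A raw 1000 means 1000 ticks: 10 s at HZ=100, 4 s at HZ=250, and only
 * coincidentally one second at HZ=1000. */
static unsigned long example_request_timeout(void)
{
	return msecs_to_jiffies(1000);	/* one second, independent of HZ */
}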
@@ -1341,7 +1382,7 @@ static int sh_mmcif_probe(struct platform_device *pdev) | |||
1341 | mmc->ops = &sh_mmcif_ops; | 1382 | mmc->ops = &sh_mmcif_ops; |
1342 | sh_mmcif_init_ocr(host); | 1383 | sh_mmcif_init_ocr(host); |
1343 | 1384 | ||
1344 | mmc->caps = MMC_CAP_MMC_HIGHSPEED; | 1385 | mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY; |
1345 | if (pd && pd->caps) | 1386 | if (pd && pd->caps) |
1346 | mmc->caps |= pd->caps; | 1387 | mmc->caps |= pd->caps; |
1347 | mmc->max_segs = 32; | 1388 | mmc->max_segs = 32; |
@@ -1374,15 +1415,19 @@ static int sh_mmcif_probe(struct platform_device *pdev) | |||
1374 | sh_mmcif_sync_reset(host); | 1415 | sh_mmcif_sync_reset(host); |
1375 | sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); | 1416 | sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); |
1376 | 1417 | ||
1377 | ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host); | 1418 | name = irq[1] < 0 ? dev_name(&pdev->dev) : "sh_mmc:error"; |
1419 | ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, name, host); | ||
1378 | if (ret) { | 1420 | if (ret) { |
1379 | dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n"); | 1421 | dev_err(&pdev->dev, "request_irq error (%s)\n", name); |
1380 | goto ereqirq0; | 1422 | goto ereqirq0; |
1381 | } | 1423 | } |
1382 | ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host); | 1424 | if (irq[1] >= 0) { |
1383 | if (ret) { | 1425 | ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, |
1384 | dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n"); | 1426 | 0, "sh_mmc:int", host); |
1385 | goto ereqirq1; | 1427 | if (ret) { |
1428 | dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n"); | ||
1429 | goto ereqirq1; | ||
1430 | } | ||
1386 | } | 1431 | } |
1387 | 1432 | ||
1388 | if (pd && pd->use_cd_gpio) { | 1433 | if (pd && pd->use_cd_gpio) { |
@@ -1391,6 +1436,8 @@ static int sh_mmcif_probe(struct platform_device *pdev) | |||
1391 | goto erqcd; | 1436 | goto erqcd; |
1392 | } | 1437 | } |
1393 | 1438 | ||
1439 | mutex_init(&host->thread_lock); | ||
1440 | |||
1394 | clk_disable(host->hclk); | 1441 | clk_disable(host->hclk); |
1395 | ret = mmc_add_host(mmc); | 1442 | ret = mmc_add_host(mmc); |
1396 | if (ret < 0) | 1443 | if (ret < 0) |
@@ -1404,10 +1451,9 @@ static int sh_mmcif_probe(struct platform_device *pdev) | |||
1404 | return ret; | 1451 | return ret; |
1405 | 1452 | ||
1406 | emmcaddh: | 1453 | emmcaddh: |
1407 | if (pd && pd->use_cd_gpio) | ||
1408 | mmc_gpio_free_cd(mmc); | ||
1409 | erqcd: | 1454 | erqcd: |
1410 | free_irq(irq[1], host); | 1455 | if (irq[1] >= 0) |
1456 | free_irq(irq[1], host); | ||
1411 | ereqirq1: | 1457 | ereqirq1: |
1412 | free_irq(irq[0], host); | 1458 | free_irq(irq[0], host); |
1413 | ereqirq0: | 1459 | ereqirq0: |
@@ -1427,7 +1473,6 @@ ealloch: | |||
1427 | static int sh_mmcif_remove(struct platform_device *pdev) | 1473 | static int sh_mmcif_remove(struct platform_device *pdev) |
1428 | { | 1474 | { |
1429 | struct sh_mmcif_host *host = platform_get_drvdata(pdev); | 1475 | struct sh_mmcif_host *host = platform_get_drvdata(pdev); |
1430 | struct sh_mmcif_plat_data *pd = pdev->dev.platform_data; | ||
1431 | int irq[2]; | 1476 | int irq[2]; |
1432 | 1477 | ||
1433 | host->dying = true; | 1478 | host->dying = true; |
@@ -1436,9 +1481,6 @@ static int sh_mmcif_remove(struct platform_device *pdev) | |||
1436 | 1481 | ||
1437 | dev_pm_qos_hide_latency_limit(&pdev->dev); | 1482 | dev_pm_qos_hide_latency_limit(&pdev->dev); |
1438 | 1483 | ||
1439 | if (pd && pd->use_cd_gpio) | ||
1440 | mmc_gpio_free_cd(host->mmc); | ||
1441 | |||
1442 | mmc_remove_host(host->mmc); | 1484 | mmc_remove_host(host->mmc); |
1443 | sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); | 1485 | sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); |
1444 | 1486 | ||
@@ -1456,7 +1498,8 @@ static int sh_mmcif_remove(struct platform_device *pdev) | |||
1456 | irq[1] = platform_get_irq(pdev, 1); | 1498 | irq[1] = platform_get_irq(pdev, 1); |
1457 | 1499 | ||
1458 | free_irq(irq[0], host); | 1500 | free_irq(irq[0], host); |
1459 | free_irq(irq[1], host); | 1501 | if (irq[1] >= 0) |
1502 | free_irq(irq[1], host); | ||
1460 | 1503 | ||
1461 | platform_set_drvdata(pdev, NULL); | 1504 | platform_set_drvdata(pdev, NULL); |
1462 | 1505 | ||
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c index 524a7f773820..fe90853900b4 100644 --- a/drivers/mmc/host/sh_mobile_sdhi.c +++ b/drivers/mmc/host/sh_mobile_sdhi.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/mod_devicetable.h> | 24 | #include <linux/mod_devicetable.h> |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/of_device.h> | ||
26 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
27 | #include <linux/mmc/host.h> | 28 | #include <linux/mmc/host.h> |
28 | #include <linux/mmc/sh_mobile_sdhi.h> | 29 | #include <linux/mmc/sh_mobile_sdhi.h> |
@@ -32,6 +33,16 @@ | |||
32 | 33 | ||
33 | #include "tmio_mmc.h" | 34 | #include "tmio_mmc.h" |
34 | 35 | ||
36 | struct sh_mobile_sdhi_of_data { | ||
37 | unsigned long tmio_flags; | ||
38 | }; | ||
39 | |||
40 | static const struct sh_mobile_sdhi_of_data sh_mobile_sdhi_of_cfg[] = { | ||
41 | { | ||
42 | .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT, | ||
43 | }, | ||
44 | }; | ||
45 | |||
35 | struct sh_mobile_sdhi { | 46 | struct sh_mobile_sdhi { |
36 | struct clk *clk; | 47 | struct clk *clk; |
37 | struct tmio_mmc_data mmc_data; | 48 | struct tmio_mmc_data mmc_data; |
@@ -117,8 +128,18 @@ static const struct sh_mobile_sdhi_ops sdhi_ops = { | |||
117 | .cd_wakeup = sh_mobile_sdhi_cd_wakeup, | 128 | .cd_wakeup = sh_mobile_sdhi_cd_wakeup, |
118 | }; | 129 | }; |
119 | 130 | ||
131 | static const struct of_device_id sh_mobile_sdhi_of_match[] = { | ||
132 | { .compatible = "renesas,shmobile-sdhi" }, | ||
133 | { .compatible = "renesas,sh7372-sdhi" }, | ||
134 | { .compatible = "renesas,r8a7740-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, | ||
135 | {}, | ||
136 | }; | ||
137 | MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match); | ||
138 | |||
120 | static int sh_mobile_sdhi_probe(struct platform_device *pdev) | 139 | static int sh_mobile_sdhi_probe(struct platform_device *pdev) |
121 | { | 140 | { |
141 | const struct of_device_id *of_id = | ||
142 | of_match_device(sh_mobile_sdhi_of_match, &pdev->dev); | ||
122 | struct sh_mobile_sdhi *priv; | 143 | struct sh_mobile_sdhi *priv; |
123 | struct tmio_mmc_data *mmc_data; | 144 | struct tmio_mmc_data *mmc_data; |
124 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; | 145 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; |
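The .data pointer in the match table is how per-SoC quirks travel from DT matching into tmio flags: probe() fetches the matched entry with of_match_device() and ORs its tmio_flags into mmc_data->flags, as a hunk further down shows. A standalone sketch of the same pattern; the second compatible string is invented purely for illustration:

#include <linux/mfd/tmio.h>
#include <linux/mod_devicetable.h>
#include <linux/of_device.h>

struct example_of_data {
	unsigned long tmio_flags;
};

static const struct example_of_data example_idle_wait_cfg = {
	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
};

static const struct of_device_id example_sdhi_of_match[] = {
	{ .compatible = "renesas,r8a7740-sdhi", .data = &example_idle_wait_cfg, },
	/* hypothetical future SoC reusing the same idle-wait configuration */
	{ .compatible = "renesas,example-sdhi", .data = &example_idle_wait_cfg, },
	{},
};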
@@ -126,7 +147,7 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
126 | int irq, ret, i = 0; | 147 | int irq, ret, i = 0; |
127 | bool multiplexed_isr = true; | 148 | bool multiplexed_isr = true; |
128 | 149 | ||
129 | priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL); | 150 | priv = devm_kzalloc(&pdev->dev, sizeof(struct sh_mobile_sdhi), GFP_KERNEL); |
130 | if (priv == NULL) { | 151 | if (priv == NULL) { |
131 | dev_err(&pdev->dev, "kzalloc failed\n"); | 152 | dev_err(&pdev->dev, "kzalloc failed\n"); |
132 | return -ENOMEM; | 153 | return -ENOMEM; |
@@ -135,15 +156,14 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
135 | mmc_data = &priv->mmc_data; | 156 | mmc_data = &priv->mmc_data; |
136 | 157 | ||
137 | if (p) { | 158 | if (p) { |
138 | p->pdata = mmc_data; | ||
139 | if (p->init) { | 159 | if (p->init) { |
140 | ret = p->init(pdev, &sdhi_ops); | 160 | ret = p->init(pdev, &sdhi_ops); |
141 | if (ret) | 161 | if (ret) |
142 | goto einit; | 162 | return ret; |
143 | } | 163 | } |
144 | } | 164 | } |
145 | 165 | ||
146 | priv->clk = clk_get(&pdev->dev, NULL); | 166 | priv->clk = devm_clk_get(&pdev->dev, NULL); |
147 | if (IS_ERR(priv->clk)) { | 167 | if (IS_ERR(priv->clk)) { |
148 | ret = PTR_ERR(priv->clk); | 168 | ret = PTR_ERR(priv->clk); |
149 | dev_err(&pdev->dev, "cannot get clock: %d\n", ret); | 169 | dev_err(&pdev->dev, "cannot get clock: %d\n", ret); |
@@ -153,10 +173,9 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
153 | mmc_data->clk_enable = sh_mobile_sdhi_clk_enable; | 173 | mmc_data->clk_enable = sh_mobile_sdhi_clk_enable; |
154 | mmc_data->clk_disable = sh_mobile_sdhi_clk_disable; | 174 | mmc_data->clk_disable = sh_mobile_sdhi_clk_disable; |
155 | mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; | 175 | mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; |
176 | mmc_data->write16_hook = sh_mobile_sdhi_write16_hook; | ||
156 | if (p) { | 177 | if (p) { |
157 | mmc_data->flags = p->tmio_flags; | 178 | mmc_data->flags = p->tmio_flags; |
158 | if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT) | ||
159 | mmc_data->write16_hook = sh_mobile_sdhi_write16_hook; | ||
160 | mmc_data->ocr_mask = p->tmio_ocr_mask; | 179 | mmc_data->ocr_mask = p->tmio_ocr_mask; |
161 | mmc_data->capabilities |= p->tmio_caps; | 180 | mmc_data->capabilities |= p->tmio_caps; |
162 | mmc_data->capabilities2 |= p->tmio_caps2; | 181 | mmc_data->capabilities2 |= p->tmio_caps2; |
@@ -187,6 +206,11 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
187 | */ | 206 | */ |
188 | mmc_data->flags |= TMIO_MMC_SDIO_IRQ; | 207 | mmc_data->flags |= TMIO_MMC_SDIO_IRQ; |
189 | 208 | ||
209 | if (of_id && of_id->data) { | ||
210 | const struct sh_mobile_sdhi_of_data *of_data = of_id->data; | ||
211 | mmc_data->flags |= of_data->tmio_flags; | ||
212 | } | ||
213 | |||
190 | ret = tmio_mmc_host_probe(&host, pdev, mmc_data); | 214 | ret = tmio_mmc_host_probe(&host, pdev, mmc_data); |
191 | if (ret < 0) | 215 | if (ret < 0) |
192 | goto eprobe; | 216 | goto eprobe; |
@@ -199,33 +223,33 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
199 | irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT); | 223 | irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT); |
200 | if (irq >= 0) { | 224 | if (irq >= 0) { |
201 | multiplexed_isr = false; | 225 | multiplexed_isr = false; |
202 | ret = request_irq(irq, tmio_mmc_card_detect_irq, 0, | 226 | ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_card_detect_irq, 0, |
203 | dev_name(&pdev->dev), host); | 227 | dev_name(&pdev->dev), host); |
204 | if (ret) | 228 | if (ret) |
205 | goto eirq_card_detect; | 229 | goto eirq; |
206 | } | 230 | } |
207 | 231 | ||
208 | irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO); | 232 | irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO); |
209 | if (irq >= 0) { | 233 | if (irq >= 0) { |
210 | multiplexed_isr = false; | 234 | multiplexed_isr = false; |
211 | ret = request_irq(irq, tmio_mmc_sdio_irq, 0, | 235 | ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_sdio_irq, 0, |
212 | dev_name(&pdev->dev), host); | 236 | dev_name(&pdev->dev), host); |
213 | if (ret) | 237 | if (ret) |
214 | goto eirq_sdio; | 238 | goto eirq; |
215 | } | 239 | } |
216 | 240 | ||
217 | irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDCARD); | 241 | irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDCARD); |
218 | if (irq >= 0) { | 242 | if (irq >= 0) { |
219 | multiplexed_isr = false; | 243 | multiplexed_isr = false; |
220 | ret = request_irq(irq, tmio_mmc_sdcard_irq, 0, | 244 | ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_sdcard_irq, 0, |
221 | dev_name(&pdev->dev), host); | 245 | dev_name(&pdev->dev), host); |
222 | if (ret) | 246 | if (ret) |
223 | goto eirq_sdcard; | 247 | goto eirq; |
224 | } else if (!multiplexed_isr) { | 248 | } else if (!multiplexed_isr) { |
225 | dev_err(&pdev->dev, | 249 | dev_err(&pdev->dev, |
226 | "Principal SD-card IRQ is missing among named interrupts\n"); | 250 | "Principal SD-card IRQ is missing among named interrupts\n"); |
227 | ret = irq; | 251 | ret = irq; |
228 | goto eirq_sdcard; | 252 | goto eirq; |
229 | } | 253 | } |
230 | 254 | ||
231 | if (multiplexed_isr) { | 255 | if (multiplexed_isr) { |
@@ -234,15 +258,15 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
234 | if (irq < 0) | 258 | if (irq < 0) |
235 | break; | 259 | break; |
236 | i++; | 260 | i++; |
237 | ret = request_irq(irq, tmio_mmc_irq, 0, | 261 | ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0, |
238 | dev_name(&pdev->dev), host); | 262 | dev_name(&pdev->dev), host); |
239 | if (ret) | 263 | if (ret) |
240 | goto eirq_multiplexed; | 264 | goto eirq; |
241 | } | 265 | } |
242 | 266 | ||
243 | /* There must be at least one IRQ source */ | 267 | /* There must be at least one IRQ source */ |
244 | if (!i) | 268 | if (!i) |
245 | goto eirq_multiplexed; | 269 | goto eirq; |
246 | } | 270 | } |
247 | 271 | ||
248 | dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n", | 272 | dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n", |
@@ -252,28 +276,12 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
252 | 276 | ||
253 | return ret; | 277 | return ret; |
254 | 278 | ||
255 | eirq_multiplexed: | 279 | eirq: |
256 | while (i--) { | ||
257 | irq = platform_get_irq(pdev, i); | ||
258 | free_irq(irq, host); | ||
259 | } | ||
260 | eirq_sdcard: | ||
261 | irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO); | ||
262 | if (irq >= 0) | ||
263 | free_irq(irq, host); | ||
264 | eirq_sdio: | ||
265 | irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT); | ||
266 | if (irq >= 0) | ||
267 | free_irq(irq, host); | ||
268 | eirq_card_detect: | ||
269 | tmio_mmc_host_remove(host); | 280 | tmio_mmc_host_remove(host); |
270 | eprobe: | 281 | eprobe: |
271 | clk_put(priv->clk); | ||
272 | eclkget: | 282 | eclkget: |
273 | if (p && p->cleanup) | 283 | if (p && p->cleanup) |
274 | p->cleanup(pdev); | 284 | p->cleanup(pdev); |
275 | einit: | ||
276 | kfree(priv); | ||
277 | return ret; | 285 | return ret; |
278 | } | 286 | } |
279 | 287 | ||
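The shrinking error path above falls straight out of the devm_* conversions earlier in the hunk: memory, the clock and the IRQs are bound to the device's lifetime, so probe() no longer needs matching free_irq()/clk_put()/kfree() unwinding and remove() loses its IRQ loop. A minimal sketch of the managed-resource pattern with invented names:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_priv {
	struct clk *clk;
};

static irqreturn_t example_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;
	int irq, ret;

	/* all three acquisitions are released automatically on unbind or on
	 * any later probe failure */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, example_irq, 0,
			       dev_name(&pdev->dev), priv);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, priv);
	return 0;
}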
@@ -281,29 +289,13 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev) | |||
281 | { | 289 | { |
282 | struct mmc_host *mmc = platform_get_drvdata(pdev); | 290 | struct mmc_host *mmc = platform_get_drvdata(pdev); |
283 | struct tmio_mmc_host *host = mmc_priv(mmc); | 291 | struct tmio_mmc_host *host = mmc_priv(mmc); |
284 | struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); | ||
285 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; | 292 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; |
286 | int i = 0, irq; | ||
287 | |||
288 | if (p) | ||
289 | p->pdata = NULL; | ||
290 | 293 | ||
291 | tmio_mmc_host_remove(host); | 294 | tmio_mmc_host_remove(host); |
292 | 295 | ||
293 | while (1) { | ||
294 | irq = platform_get_irq(pdev, i++); | ||
295 | if (irq < 0) | ||
296 | break; | ||
297 | free_irq(irq, host); | ||
298 | } | ||
299 | |||
300 | clk_put(priv->clk); | ||
301 | |||
302 | if (p && p->cleanup) | 296 | if (p && p->cleanup) |
303 | p->cleanup(pdev); | 297 | p->cleanup(pdev); |
304 | 298 | ||
305 | kfree(priv); | ||
306 | |||
307 | return 0; | 299 | return 0; |
308 | } | 300 | } |
309 | 301 | ||
@@ -314,12 +306,6 @@ static const struct dev_pm_ops tmio_mmc_dev_pm_ops = { | |||
314 | .runtime_resume = tmio_mmc_host_runtime_resume, | 306 | .runtime_resume = tmio_mmc_host_runtime_resume, |
315 | }; | 307 | }; |
316 | 308 | ||
317 | static const struct of_device_id sh_mobile_sdhi_of_match[] = { | ||
318 | { .compatible = "renesas,shmobile-sdhi" }, | ||
319 | { } | ||
320 | }; | ||
321 | MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match); | ||
322 | |||
323 | static struct platform_driver sh_mobile_sdhi_driver = { | 309 | static struct platform_driver sh_mobile_sdhi_driver = { |
324 | .driver = { | 310 | .driver = { |
325 | .name = "sh_mobile_sdhi", | 311 | .name = "sh_mobile_sdhi", |
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c index 50bf495a988b..f508ecb5b8a7 100644 --- a/drivers/mmc/host/tmio_mmc_pio.c +++ b/drivers/mmc/host/tmio_mmc_pio.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <linux/platform_device.h> | 43 | #include <linux/platform_device.h> |
44 | #include <linux/pm_qos.h> | 44 | #include <linux/pm_qos.h> |
45 | #include <linux/pm_runtime.h> | 45 | #include <linux/pm_runtime.h> |
46 | #include <linux/regulator/consumer.h> | ||
46 | #include <linux/scatterlist.h> | 47 | #include <linux/scatterlist.h> |
47 | #include <linux/spinlock.h> | 48 | #include <linux/spinlock.h> |
48 | #include <linux/workqueue.h> | 49 | #include <linux/workqueue.h> |
@@ -155,6 +156,7 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) | |||
155 | host->set_clk_div(host->pdev, (clk>>22) & 1); | 156 | host->set_clk_div(host->pdev, (clk>>22) & 1); |
156 | 157 | ||
157 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); | 158 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); |
159 | msleep(10); | ||
158 | } | 160 | } |
159 | 161 | ||
160 | static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) | 162 | static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) |
@@ -768,16 +770,48 @@ static int tmio_mmc_clk_update(struct mmc_host *mmc) | |||
768 | return ret; | 770 | return ret; |
769 | } | 771 | } |
770 | 772 | ||
771 | static void tmio_mmc_set_power(struct tmio_mmc_host *host, struct mmc_ios *ios) | 773 | static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd) |
772 | { | 774 | { |
773 | struct mmc_host *mmc = host->mmc; | 775 | struct mmc_host *mmc = host->mmc; |
776 | int ret = 0; | ||
777 | |||
778 | /* .set_ios() returns void, so there is no chance to report an error */ | ||
774 | 779 | ||
775 | if (host->set_pwr) | 780 | if (host->set_pwr) |
776 | host->set_pwr(host->pdev, ios->power_mode != MMC_POWER_OFF); | 781 | host->set_pwr(host->pdev, 1); |
782 | |||
783 | if (!IS_ERR(mmc->supply.vmmc)) { | ||
784 | ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); | ||
785 | /* | ||
786 | * Attention: empirical value. With a b43 WiFi SDIO card this | ||
787 | * delay proved necessary for reliable card-insertion probing. | ||
788 | * 100us was not enough. Is this the same 140us delay as in | ||
789 | * tmio_mmc_set_ios()? | ||
790 | */ | ||
791 | udelay(200); | ||
792 | } | ||
793 | /* | ||
794 | * It seems VccQ should be switched on after Vcc; this is also what the | ||
795 | * omap_hsmmc.c driver does. | ||
796 | */ | ||
797 | if (!IS_ERR(mmc->supply.vqmmc) && !ret) { | ||
798 | regulator_enable(mmc->supply.vqmmc); | ||
799 | udelay(200); | ||
800 | } | ||
801 | } | ||
802 | |||
803 | static void tmio_mmc_power_off(struct tmio_mmc_host *host) | ||
804 | { | ||
805 | struct mmc_host *mmc = host->mmc; | ||
806 | |||
807 | if (!IS_ERR(mmc->supply.vqmmc)) | ||
808 | regulator_disable(mmc->supply.vqmmc); | ||
809 | |||
777 | if (!IS_ERR(mmc->supply.vmmc)) | 810 | if (!IS_ERR(mmc->supply.vmmc)) |
778 | /* Errors ignored... */ | 811 | mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); |
779 | mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, | 812 | |
780 | ios->power_mode ? ios->vdd : 0); | 813 | if (host->set_pwr) |
814 | host->set_pwr(host->pdev, 0); | ||
781 | } | 815 | } |
782 | 816 | ||
783 | /* Set MMC clock / power. | 817 | /* Set MMC clock / power. |
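Both power helpers guard each regulator with IS_ERR() because the supplies are optional: when no regulator is described, mmc->supply.vmmc/vqmmc stay as error pointers and the power-on path falls back to the legacy set_pwr() hook alone, while the Vcc-before-VccQ ordering follows the comment above. A hedged sketch of the supply lookup; the assumption is that it is performed once at OCR setup time (as tmio_mmc_init_ocr() is expected to do) rather than per power cycle:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mmc/host.h>

/* Populate mmc->supply.vmmc/vqmmc from the standard "vmmc"/"vqmmc"
 * supplies; absent regulators remain ERR_PTR(), matching the IS_ERR()
 * checks in tmio_mmc_power_on()/tmio_mmc_power_off() above. */
static void example_get_supplies(struct mmc_host *mmc)
{
	mmc_regulator_get_supply(mmc);

	if (IS_ERR(mmc->supply.vqmmc))
		dev_dbg(mmc_dev(mmc), "no VccQ regulator, Vcc-only power sequencing\n");
}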
@@ -828,18 +862,20 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
828 | if (!host->power) { | 862 | if (!host->power) { |
829 | tmio_mmc_clk_update(mmc); | 863 | tmio_mmc_clk_update(mmc); |
830 | pm_runtime_get_sync(dev); | 864 | pm_runtime_get_sync(dev); |
831 | host->power = true; | ||
832 | } | 865 | } |
833 | tmio_mmc_set_clock(host, ios->clock); | 866 | tmio_mmc_set_clock(host, ios->clock); |
834 | /* power up SD bus */ | 867 | if (!host->power) { |
835 | tmio_mmc_set_power(host, ios); | 868 | /* power up SD card and the bus */ |
869 | tmio_mmc_power_on(host, ios->vdd); | ||
870 | host->power = true; | ||
871 | } | ||
836 | /* start bus clock */ | 872 | /* start bus clock */ |
837 | tmio_mmc_clk_start(host); | 873 | tmio_mmc_clk_start(host); |
838 | } else if (ios->power_mode != MMC_POWER_UP) { | 874 | } else if (ios->power_mode != MMC_POWER_UP) { |
839 | if (ios->power_mode == MMC_POWER_OFF) | ||
840 | tmio_mmc_set_power(host, ios); | ||
841 | if (host->power) { | 875 | if (host->power) { |
842 | struct tmio_mmc_data *pdata = host->pdata; | 876 | struct tmio_mmc_data *pdata = host->pdata; |
877 | if (ios->power_mode == MMC_POWER_OFF) | ||
878 | tmio_mmc_power_off(host); | ||
843 | tmio_mmc_clk_stop(host); | 879 | tmio_mmc_clk_stop(host); |
844 | host->power = false; | 880 | host->power = false; |
845 | pm_runtime_put(dev); | 881 | pm_runtime_put(dev); |
@@ -918,6 +954,17 @@ static void tmio_mmc_init_ocr(struct tmio_mmc_host *host) | |||
918 | dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n"); | 954 | dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n"); |
919 | } | 955 | } |
920 | 956 | ||
957 | static void tmio_mmc_of_parse(struct platform_device *pdev, | ||
958 | struct tmio_mmc_data *pdata) | ||
959 | { | ||
960 | const struct device_node *np = pdev->dev.of_node; | ||
961 | if (!np) | ||
962 | return; | ||
963 | |||
964 | if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL)) | ||
965 | pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE; | ||
966 | } | ||
967 | |||
921 | int tmio_mmc_host_probe(struct tmio_mmc_host **host, | 968 | int tmio_mmc_host_probe(struct tmio_mmc_host **host, |
922 | struct platform_device *pdev, | 969 | struct platform_device *pdev, |
923 | struct tmio_mmc_data *pdata) | 970 | struct tmio_mmc_data *pdata) |
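tmio_mmc_of_parse() keeps controller-specific bindings out of the shared mmc_of_parse() path: only properties that a generic SD/MMC binding cannot express end up here, and only "toshiba,mmc-wrprotect-disable" is defined by this series. Further boolean quirks would follow the same shape; the property name and flag below are placeholders invented for illustration:

#include <linux/bitops.h>
#include <linux/mfd/tmio.h>
#include <linux/of.h>

#define TMIO_MMC_EXAMPLE_QUIRK	BIT(31)	/* placeholder, not a real tmio flag */

static void example_of_parse_extra(const struct device_node *np,
				   struct tmio_mmc_data *pdata)
{
	/* "toshiba,mmc-example-quirk" is an invented property name */
	if (of_get_property(np, "toshiba,mmc-example-quirk", NULL))
		pdata->flags |= TMIO_MMC_EXAMPLE_QUIRK;
}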
@@ -928,6 +975,11 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host, | |||
928 | int ret; | 975 | int ret; |
929 | u32 irq_mask = TMIO_MASK_CMD; | 976 | u32 irq_mask = TMIO_MASK_CMD; |
930 | 977 | ||
978 | tmio_mmc_of_parse(pdev, pdata); | ||
979 | |||
980 | if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT)) | ||
981 | pdata->write16_hook = NULL; | ||
982 | |||
931 | res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 983 | res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
932 | if (!res_ctl) | 984 | if (!res_ctl) |
933 | return -EINVAL; | 985 | return -EINVAL; |
@@ -936,6 +988,8 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host, | |||
936 | if (!mmc) | 988 | if (!mmc) |
937 | return -ENOMEM; | 989 | return -ENOMEM; |
938 | 990 | ||
991 | mmc_of_parse(mmc); | ||
992 | |||
939 | pdata->dev = &pdev->dev; | 993 | pdata->dev = &pdev->dev; |
940 | _host = mmc_priv(mmc); | 994 | _host = mmc_priv(mmc); |
941 | _host->pdata = pdata; | 995 | _host->pdata = pdata; |
@@ -956,7 +1010,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host, | |||
956 | } | 1010 | } |
957 | 1011 | ||
958 | mmc->ops = &tmio_mmc_ops; | 1012 | mmc->ops = &tmio_mmc_ops; |
959 | mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities; | 1013 | mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities; |
960 | mmc->caps2 = pdata->capabilities2; | 1014 | mmc->caps2 = pdata->capabilities2; |
961 | mmc->max_segs = 32; | 1015 | mmc->max_segs = 32; |
962 | mmc->max_blk_size = 512; | 1016 | mmc->max_blk_size = 512; |
@@ -968,7 +1022,8 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host, | |||
968 | 1022 | ||
969 | _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD || | 1023 | _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD || |
970 | mmc->caps & MMC_CAP_NEEDS_POLL || | 1024 | mmc->caps & MMC_CAP_NEEDS_POLL || |
971 | mmc->caps & MMC_CAP_NONREMOVABLE); | 1025 | mmc->caps & MMC_CAP_NONREMOVABLE || |
1026 | mmc->slot.cd_irq >= 0); | ||
972 | 1027 | ||
973 | _host->power = false; | 1028 | _host->power = false; |
974 | pm_runtime_enable(&pdev->dev); | 1029 | pm_runtime_enable(&pdev->dev); |
@@ -1060,16 +1115,8 @@ EXPORT_SYMBOL(tmio_mmc_host_probe); | |||
1060 | void tmio_mmc_host_remove(struct tmio_mmc_host *host) | 1115 | void tmio_mmc_host_remove(struct tmio_mmc_host *host) |
1061 | { | 1116 | { |
1062 | struct platform_device *pdev = host->pdev; | 1117 | struct platform_device *pdev = host->pdev; |
1063 | struct tmio_mmc_data *pdata = host->pdata; | ||
1064 | struct mmc_host *mmc = host->mmc; | 1118 | struct mmc_host *mmc = host->mmc; |
1065 | 1119 | ||
1066 | if (pdata->flags & TMIO_MMC_USE_GPIO_CD) | ||
1067 | /* | ||
1068 | * This means we can miss a card-eject, but this is anyway | ||
1069 | * possible, because of delayed processing of hotplug events. | ||
1070 | */ | ||
1071 | mmc_gpio_free_cd(mmc); | ||
1072 | |||
1073 | if (!host->native_hotplug) | 1120 | if (!host->native_hotplug) |
1074 | pm_runtime_get_sync(&pdev->dev); | 1121 | pm_runtime_get_sync(&pdev->dev); |
1075 | 1122 | ||
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c index 154f0e8e931c..c6d001509e5a 100644 --- a/drivers/mmc/host/wmt-sdmmc.c +++ b/drivers/mmc/host/wmt-sdmmc.c | |||
@@ -1012,7 +1012,7 @@ static const struct dev_pm_ops wmt_mci_pm = { | |||
1012 | 1012 | ||
1013 | static struct platform_driver wmt_mci_driver = { | 1013 | static struct platform_driver wmt_mci_driver = { |
1014 | .probe = wmt_mci_probe, | 1014 | .probe = wmt_mci_probe, |
1015 | .remove = __exit_p(wmt_mci_remove), | 1015 | .remove = wmt_mci_remove, |
1016 | .driver = { | 1016 | .driver = { |
1017 | .name = DRIVER_NAME, | 1017 | .name = DRIVER_NAME, |
1018 | .owner = THIS_MODULE, | 1018 | .owner = THIS_MODULE, |
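The wmt-sdmmc one-liner matters because __exit_p(fn) evaluates to NULL when the code is built into the kernel (the __exit section is discarded), so a built-in driver would silently lose its .remove callback even though the device can still be unbound at runtime. The shape after the fix, with illustrative names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	/* reference the function directly; __exit_p() would turn this into
	 * NULL for built-in code and unbinding could never clean up */
	.remove	= example_remove,
	.driver	= {
		.name	= "example-mmc",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(example_driver);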