Diffstat (limited to 'drivers/mmc/core/block.c')
-rw-r--r-- | drivers/mmc/core/block.c | 2336 |
1 file changed, 2336 insertions, 0 deletions
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
new file mode 100644
index 000000000000..646d1a1fa6ca
--- /dev/null
+++ b/drivers/mmc/core/block.c
@@ -0,0 +1,2336 @@
1 | /* | ||
2 | * Block driver for media (i.e., flash cards) | ||
3 | * | ||
4 | * Copyright 2002 Hewlett-Packard Company | ||
5 | * Copyright 2005-2008 Pierre Ossman | ||
6 | * | ||
7 | * Use consistent with the GNU GPL is permitted, | ||
8 | * provided that this copyright notice is | ||
9 | * preserved in its entirety in all copies and derived works. | ||
10 | * | ||
11 | * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, | ||
12 | * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS | ||
13 | * FITNESS FOR ANY PARTICULAR PURPOSE. | ||
14 | * | ||
15 | * Many thanks to Alessandro Rubini and Jonathan Corbet! | ||
16 | * | ||
17 | * Author: Andrew Christian | ||
18 | * 28 May 2002 | ||
19 | */ | ||
20 | #include <linux/moduleparam.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/init.h> | ||
23 | |||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/fs.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/errno.h> | ||
28 | #include <linux/hdreg.h> | ||
29 | #include <linux/kdev_t.h> | ||
30 | #include <linux/blkdev.h> | ||
31 | #include <linux/mutex.h> | ||
32 | #include <linux/scatterlist.h> | ||
33 | #include <linux/string_helpers.h> | ||
34 | #include <linux/delay.h> | ||
35 | #include <linux/capability.h> | ||
36 | #include <linux/compat.h> | ||
37 | #include <linux/pm_runtime.h> | ||
38 | #include <linux/idr.h> | ||
39 | |||
40 | #include <linux/mmc/ioctl.h> | ||
41 | #include <linux/mmc/card.h> | ||
42 | #include <linux/mmc/host.h> | ||
43 | #include <linux/mmc/mmc.h> | ||
44 | #include <linux/mmc/sd.h> | ||
45 | |||
46 | #include <asm/uaccess.h> | ||
47 | |||
48 | #include "queue.h" | ||
49 | #include "block.h" | ||
50 | |||
51 | MODULE_ALIAS("mmc:block"); | ||
52 | #ifdef MODULE_PARAM_PREFIX | ||
53 | #undef MODULE_PARAM_PREFIX | ||
54 | #endif | ||
55 | #define MODULE_PARAM_PREFIX "mmcblk." | ||
56 | |||
57 | #define INAND_CMD38_ARG_EXT_CSD 113 | ||
58 | #define INAND_CMD38_ARG_ERASE 0x00 | ||
59 | #define INAND_CMD38_ARG_TRIM 0x01 | ||
60 | #define INAND_CMD38_ARG_SECERASE 0x80 | ||
61 | #define INAND_CMD38_ARG_SECTRIM1 0x81 | ||
62 | #define INAND_CMD38_ARG_SECTRIM2 0x88 | ||
63 | #define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ | ||
64 | #define MMC_SANITIZE_REQ_TIMEOUT 240000 | ||
65 | #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) | ||
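/*
 * Worked example (editorial note, not in the original source): a
 * MMC_SWITCH argument encodes the access mode in bits [25:24], the
 * EXT_CSD byte index in bits [23:16] and the value in bits [15:8].
 * For an argument of 0x03A50100, MMC_EXTRACT_INDEX_FROM_ARG() yields
 * (0x03A50100 & 0x00FF0000) >> 16 = 0xA5. The ioctl path below uses
 * this to spot EXT_CSD_SANITIZE_START inside a raw MMC_SWITCH.
 */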
66 | |||
67 | #define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \ | ||
68 | (rq_data_dir(req) == WRITE)) | ||
69 | static DEFINE_MUTEX(block_mutex); | ||
70 | |||
71 | /* | ||
72 | * The defaults come from config options but can be overridden by module | ||
73 | * or bootarg options. | ||
74 | */ | ||
75 | static int perdev_minors = CONFIG_MMC_BLOCK_MINORS; | ||
76 | |||
77 | /* | ||
78 | * We've only got one major, so number of mmcblk devices is | ||
79 | * limited to (1 << 20) / number of minors per device. It is also | ||
80 | * limited by the MAX_DEVICES below. | ||
81 | */ | ||
82 | static int max_devices; | ||
83 | |||
84 | #define MAX_DEVICES 256 | ||
85 | |||
86 | static DEFINE_IDA(mmc_blk_ida); | ||
87 | static DEFINE_SPINLOCK(mmc_blk_lock); | ||
88 | |||
89 | /* | ||
90 | * There is one mmc_blk_data per slot. | ||
91 | */ | ||
92 | struct mmc_blk_data { | ||
93 | spinlock_t lock; | ||
94 | struct device *parent; | ||
95 | struct gendisk *disk; | ||
96 | struct mmc_queue queue; | ||
97 | struct list_head part; | ||
98 | |||
99 | unsigned int flags; | ||
100 | #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ | ||
101 | #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */ | ||
102 | |||
103 | unsigned int usage; | ||
104 | unsigned int read_only; | ||
105 | unsigned int part_type; | ||
106 | unsigned int reset_done; | ||
107 | #define MMC_BLK_READ BIT(0) | ||
108 | #define MMC_BLK_WRITE BIT(1) | ||
109 | #define MMC_BLK_DISCARD BIT(2) | ||
110 | #define MMC_BLK_SECDISCARD BIT(3) | ||
111 | |||
112 | /* | ||
113 | * Only set in the main mmc_blk_data, which is associated | ||
114 | * with the mmc_card via dev_set_drvdata, and keeps | ||
115 | * track of the currently selected device partition. | ||
116 | */ | ||
117 | unsigned int part_curr; | ||
118 | struct device_attribute force_ro; | ||
119 | struct device_attribute power_ro_lock; | ||
120 | int area_type; | ||
121 | }; | ||
122 | |||
123 | static DEFINE_MUTEX(open_lock); | ||
124 | |||
125 | module_param(perdev_minors, int, 0444); | ||
126 | MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device"); | ||
127 | |||
128 | static inline int mmc_blk_part_switch(struct mmc_card *card, | ||
129 | struct mmc_blk_data *md); | ||
130 | static int get_card_status(struct mmc_card *card, u32 *status, int retries); | ||
131 | |||
132 | static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) | ||
133 | { | ||
134 | struct mmc_blk_data *md; | ||
135 | |||
136 | mutex_lock(&open_lock); | ||
137 | md = disk->private_data; | ||
138 | if (md && md->usage == 0) | ||
139 | md = NULL; | ||
140 | if (md) | ||
141 | md->usage++; | ||
142 | mutex_unlock(&open_lock); | ||
143 | |||
144 | return md; | ||
145 | } | ||
146 | |||
147 | static inline int mmc_get_devidx(struct gendisk *disk) | ||
148 | { | ||
149 | int devidx = disk->first_minor / perdev_minors; | ||
150 | return devidx; | ||
151 | } | ||
152 | |||
153 | static void mmc_blk_put(struct mmc_blk_data *md) | ||
154 | { | ||
155 | mutex_lock(&open_lock); | ||
156 | md->usage--; | ||
157 | if (md->usage == 0) { | ||
158 | int devidx = mmc_get_devidx(md->disk); | ||
159 | blk_cleanup_queue(md->queue.queue); | ||
160 | |||
161 | spin_lock(&mmc_blk_lock); | ||
162 | ida_remove(&mmc_blk_ida, devidx); | ||
163 | spin_unlock(&mmc_blk_lock); | ||
164 | |||
165 | put_disk(md->disk); | ||
166 | kfree(md); | ||
167 | } | ||
168 | mutex_unlock(&open_lock); | ||
169 | } | ||
170 | |||
171 | static ssize_t power_ro_lock_show(struct device *dev, | ||
172 | struct device_attribute *attr, char *buf) | ||
173 | { | ||
174 | int ret; | ||
175 | struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); | ||
176 | struct mmc_card *card = md->queue.card; | ||
177 | int locked = 0; | ||
178 | |||
179 | if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN) | ||
180 | locked = 2; | ||
181 | else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN) | ||
182 | locked = 1; | ||
183 | |||
184 | ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); | ||
185 | |||
186 | mmc_blk_put(md); | ||
187 | |||
188 | return ret; | ||
189 | } | ||
190 | |||
191 | static ssize_t power_ro_lock_store(struct device *dev, | ||
192 | struct device_attribute *attr, const char *buf, size_t count) | ||
193 | { | ||
194 | int ret; | ||
195 | struct mmc_blk_data *md, *part_md; | ||
196 | struct mmc_card *card; | ||
197 | unsigned long set; | ||
198 | |||
199 | if (kstrtoul(buf, 0, &set)) | ||
200 | return -EINVAL; | ||
201 | |||
202 | if (set != 1) | ||
203 | return count; | ||
204 | |||
205 | md = mmc_blk_get(dev_to_disk(dev)); | ||
206 | card = md->queue.card; | ||
207 | |||
208 | mmc_get_card(card); | ||
209 | |||
210 | ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, | ||
211 | card->ext_csd.boot_ro_lock | | ||
212 | EXT_CSD_BOOT_WP_B_PWR_WP_EN, | ||
213 | card->ext_csd.part_time); | ||
214 | if (ret) | ||
215 | pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret); | ||
216 | else | ||
217 | card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN; | ||
218 | |||
219 | mmc_put_card(card); | ||
220 | |||
221 | if (!ret) { | ||
222 | pr_info("%s: Locking boot partition ro until next power on\n", | ||
223 | md->disk->disk_name); | ||
224 | set_disk_ro(md->disk, 1); | ||
225 | |||
226 | list_for_each_entry(part_md, &md->part, part) | ||
227 | if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) { | ||
228 | pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name); | ||
229 | set_disk_ro(part_md->disk, 1); | ||
230 | } | ||
231 | } | ||
232 | |||
233 | mmc_blk_put(md); | ||
234 | return count; | ||
235 | } | ||
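/*
 * Editorial note: writing "1" to this sysfs attribute sets
 * EXT_CSD_BOOT_WP_B_PWR_WP_EN, making the boot partitions read-only
 * until the next power cycle; any other value is silently accepted
 * and ignored. The show side above reports 0 (not locked), 1 (locked
 * until power-on) or 2 (permanently locked).
 */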
236 | |||
237 | static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr, | ||
238 | char *buf) | ||
239 | { | ||
240 | int ret; | ||
241 | struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); | ||
242 | |||
243 | ret = snprintf(buf, PAGE_SIZE, "%d\n", | ||
244 | get_disk_ro(dev_to_disk(dev)) ^ | ||
245 | md->read_only); | ||
246 | mmc_blk_put(md); | ||
247 | return ret; | ||
248 | } | ||
249 | |||
250 | static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr, | ||
251 | const char *buf, size_t count) | ||
252 | { | ||
253 | int ret; | ||
254 | char *end; | ||
255 | struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); | ||
256 | unsigned long set = simple_strtoul(buf, &end, 0); | ||
257 | if (end == buf) { | ||
258 | ret = -EINVAL; | ||
259 | goto out; | ||
260 | } | ||
261 | |||
262 | set_disk_ro(dev_to_disk(dev), set || md->read_only); | ||
263 | ret = count; | ||
264 | out: | ||
265 | mmc_blk_put(md); | ||
266 | return ret; | ||
267 | } | ||
268 | |||
269 | static int mmc_blk_open(struct block_device *bdev, fmode_t mode) | ||
270 | { | ||
271 | struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); | ||
272 | int ret = -ENXIO; | ||
273 | |||
274 | mutex_lock(&block_mutex); | ||
275 | if (md) { | ||
276 | if (md->usage == 2) | ||
277 | check_disk_change(bdev); | ||
278 | ret = 0; | ||
279 | |||
280 | if ((mode & FMODE_WRITE) && md->read_only) { | ||
281 | mmc_blk_put(md); | ||
282 | ret = -EROFS; | ||
283 | } | ||
284 | } | ||
285 | mutex_unlock(&block_mutex); | ||
286 | |||
287 | return ret; | ||
288 | } | ||
289 | |||
290 | static void mmc_blk_release(struct gendisk *disk, fmode_t mode) | ||
291 | { | ||
292 | struct mmc_blk_data *md = disk->private_data; | ||
293 | |||
294 | mutex_lock(&block_mutex); | ||
295 | mmc_blk_put(md); | ||
296 | mutex_unlock(&block_mutex); | ||
297 | } | ||
298 | |||
299 | static int | ||
300 | mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) | ||
301 | { | ||
302 | geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16); | ||
303 | geo->heads = 4; | ||
304 | geo->sectors = 16; | ||
305 | return 0; | ||
306 | } | ||
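/*
 * Illustrative numbers (editorial note): the geometry reported above
 * is synthetic. With heads = 4 and sectors = 16, a card exposing
 * 15523840 512-byte sectors (~7.4 GiB) reports 15523840 / 64 = 242560
 * cylinders, giving tools that still think in CHS terms consistent,
 * if fictitious, values.
 */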
307 | |||
308 | struct mmc_blk_ioc_data { | ||
309 | struct mmc_ioc_cmd ic; | ||
310 | unsigned char *buf; | ||
311 | u64 buf_bytes; | ||
312 | }; | ||
313 | |||
314 | static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( | ||
315 | struct mmc_ioc_cmd __user *user) | ||
316 | { | ||
317 | struct mmc_blk_ioc_data *idata; | ||
318 | int err; | ||
319 | |||
320 | idata = kmalloc(sizeof(*idata), GFP_KERNEL); | ||
321 | if (!idata) { | ||
322 | err = -ENOMEM; | ||
323 | goto out; | ||
324 | } | ||
325 | |||
326 | if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) { | ||
327 | err = -EFAULT; | ||
328 | goto idata_err; | ||
329 | } | ||
330 | |||
331 | idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks; | ||
332 | if (idata->buf_bytes > MMC_IOC_MAX_BYTES) { | ||
333 | err = -EOVERFLOW; | ||
334 | goto idata_err; | ||
335 | } | ||
336 | |||
337 | if (!idata->buf_bytes) { | ||
338 | idata->buf = NULL; | ||
339 | return idata; | ||
340 | } | ||
341 | |||
342 | idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL); | ||
343 | if (!idata->buf) { | ||
344 | err = -ENOMEM; | ||
345 | goto idata_err; | ||
346 | } | ||
347 | |||
348 | if (copy_from_user(idata->buf, (void __user *)(unsigned long) | ||
349 | idata->ic.data_ptr, idata->buf_bytes)) { | ||
350 | err = -EFAULT; | ||
351 | goto copy_err; | ||
352 | } | ||
353 | |||
354 | return idata; | ||
355 | |||
356 | copy_err: | ||
357 | kfree(idata->buf); | ||
358 | idata_err: | ||
359 | kfree(idata); | ||
360 | out: | ||
361 | return ERR_PTR(err); | ||
362 | } | ||
363 | |||
364 | static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr, | ||
365 | struct mmc_blk_ioc_data *idata) | ||
366 | { | ||
367 | struct mmc_ioc_cmd *ic = &idata->ic; | ||
368 | |||
369 | if (copy_to_user(&(ic_ptr->response), ic->response, | ||
370 | sizeof(ic->response))) | ||
371 | return -EFAULT; | ||
372 | |||
373 | if (!idata->ic.write_flag) { | ||
374 | if (copy_to_user((void __user *)(unsigned long)ic->data_ptr, | ||
375 | idata->buf, idata->buf_bytes)) | ||
376 | return -EFAULT; | ||
377 | } | ||
378 | |||
379 | return 0; | ||
380 | } | ||
381 | |||
382 | static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status, | ||
383 | u32 retries_max) | ||
384 | { | ||
385 | int err; | ||
386 | u32 retry_count = 0; | ||
387 | |||
388 | if (!status || !retries_max) | ||
389 | return -EINVAL; | ||
390 | |||
391 | do { | ||
392 | err = get_card_status(card, status, 5); | ||
393 | if (err) | ||
394 | break; | ||
395 | |||
396 | if (!R1_STATUS(*status) && | ||
397 | (R1_CURRENT_STATE(*status) != R1_STATE_PRG)) | ||
398 | break; /* RPMB programming operation complete */ | ||
399 | |||
400 | /* | ||
401 | * Reschedule to give the MMC device a chance to continue | ||
402 | * processing the previous command without being polled too | ||
403 | * frequently. | ||
404 | */ | ||
405 | usleep_range(1000, 5000); | ||
406 | } while (++retry_count < retries_max); | ||
407 | |||
408 | if (retry_count == retries_max) | ||
409 | err = -EPERM; | ||
410 | |||
411 | return err; | ||
412 | } | ||
413 | |||
414 | static int ioctl_do_sanitize(struct mmc_card *card) | ||
415 | { | ||
416 | int err; | ||
417 | |||
418 | if (!mmc_can_sanitize(card)) { | ||
419 | pr_warn("%s: %s - SANITIZE is not supported\n", | ||
420 | mmc_hostname(card->host), __func__); | ||
421 | err = -EOPNOTSUPP; | ||
422 | goto out; | ||
423 | } | ||
424 | |||
425 | pr_debug("%s: %s - SANITIZE IN PROGRESS...\n", | ||
426 | mmc_hostname(card->host), __func__); | ||
427 | |||
428 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
429 | EXT_CSD_SANITIZE_START, 1, | ||
430 | MMC_SANITIZE_REQ_TIMEOUT); | ||
431 | |||
432 | if (err) | ||
433 | pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n", | ||
434 | mmc_hostname(card->host), __func__, err); | ||
435 | |||
436 | pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host), | ||
437 | __func__); | ||
438 | out: | ||
439 | return err; | ||
440 | } | ||
441 | |||
442 | static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, | ||
443 | struct mmc_blk_ioc_data *idata) | ||
444 | { | ||
445 | struct mmc_command cmd = {0}; | ||
446 | struct mmc_data data = {0}; | ||
447 | struct mmc_request mrq = {NULL}; | ||
448 | struct scatterlist sg; | ||
449 | int err; | ||
450 | int is_rpmb = false; | ||
451 | u32 status = 0; | ||
452 | |||
453 | if (!card || !md || !idata) | ||
454 | return -EINVAL; | ||
455 | |||
456 | if (md->area_type & MMC_BLK_DATA_AREA_RPMB) | ||
457 | is_rpmb = true; | ||
458 | |||
459 | cmd.opcode = idata->ic.opcode; | ||
460 | cmd.arg = idata->ic.arg; | ||
461 | cmd.flags = idata->ic.flags; | ||
462 | |||
463 | if (idata->buf_bytes) { | ||
464 | data.sg = &sg; | ||
465 | data.sg_len = 1; | ||
466 | data.blksz = idata->ic.blksz; | ||
467 | data.blocks = idata->ic.blocks; | ||
468 | |||
469 | sg_init_one(data.sg, idata->buf, idata->buf_bytes); | ||
470 | |||
471 | if (idata->ic.write_flag) | ||
472 | data.flags = MMC_DATA_WRITE; | ||
473 | else | ||
474 | data.flags = MMC_DATA_READ; | ||
475 | |||
476 | /* data.flags must already be set before doing this. */ | ||
477 | mmc_set_data_timeout(&data, card); | ||
478 | |||
479 | /* Allow overriding the timeout_ns for empirical tuning. */ | ||
480 | if (idata->ic.data_timeout_ns) | ||
481 | data.timeout_ns = idata->ic.data_timeout_ns; | ||
482 | |||
483 | if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { | ||
484 | /* | ||
485 | * Pretend this is a data transfer and rely on the | ||
486 | * host driver to compute timeout. When all host | ||
487 | * drivers support cmd.cmd_timeout for R1B, this | ||
488 | * can be changed to: | ||
489 | * | ||
490 | * mrq.data = NULL; | ||
491 | * cmd.cmd_timeout = idata->ic.cmd_timeout_ms; | ||
492 | */ | ||
493 | data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; | ||
494 | } | ||
495 | |||
496 | mrq.data = &data; | ||
497 | } | ||
498 | |||
499 | mrq.cmd = &cmd; | ||
500 | |||
501 | err = mmc_blk_part_switch(card, md); | ||
502 | if (err) | ||
503 | return err; | ||
504 | |||
505 | if (idata->ic.is_acmd) { | ||
506 | err = mmc_app_cmd(card->host, card); | ||
507 | if (err) | ||
508 | return err; | ||
509 | } | ||
510 | |||
511 | if (is_rpmb) { | ||
512 | err = mmc_set_blockcount(card, data.blocks, | ||
513 | idata->ic.write_flag & (1 << 31)); | ||
514 | if (err) | ||
515 | return err; | ||
516 | } | ||
517 | |||
518 | if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) && | ||
519 | (cmd.opcode == MMC_SWITCH)) { | ||
520 | err = ioctl_do_sanitize(card); | ||
521 | |||
522 | if (err) | ||
523 | pr_err("%s: ioctl_do_sanitize() failed. err = %d", | ||
524 | __func__, err); | ||
525 | |||
526 | return err; | ||
527 | } | ||
528 | |||
529 | mmc_wait_for_req(card->host, &mrq); | ||
530 | |||
531 | if (cmd.error) { | ||
532 | dev_err(mmc_dev(card->host), "%s: cmd error %d\n", | ||
533 | __func__, cmd.error); | ||
534 | return cmd.error; | ||
535 | } | ||
536 | if (data.error) { | ||
537 | dev_err(mmc_dev(card->host), "%s: data error %d\n", | ||
538 | __func__, data.error); | ||
539 | return data.error; | ||
540 | } | ||
541 | |||
542 | /* | ||
543 | * According to the SD specs, some commands require a delay after | ||
544 | * issuing the command. | ||
545 | */ | ||
546 | if (idata->ic.postsleep_min_us) | ||
547 | usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us); | ||
548 | |||
549 | memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp)); | ||
550 | |||
551 | if (is_rpmb) { | ||
552 | /* | ||
553 | * Ensure RPMB command has completed by polling CMD13 | ||
554 | * "Send Status". | ||
555 | */ | ||
556 | err = ioctl_rpmb_card_status_poll(card, &status, 5); | ||
557 | if (err) | ||
558 | dev_err(mmc_dev(card->host), | ||
559 | "%s: Card Status=0x%08X, error %d\n", | ||
560 | __func__, status, err); | ||
561 | } | ||
562 | |||
563 | return err; | ||
564 | } | ||
565 | |||
566 | static int mmc_blk_ioctl_cmd(struct block_device *bdev, | ||
567 | struct mmc_ioc_cmd __user *ic_ptr) | ||
568 | { | ||
569 | struct mmc_blk_ioc_data *idata; | ||
570 | struct mmc_blk_data *md; | ||
571 | struct mmc_card *card; | ||
572 | int err = 0, ioc_err = 0; | ||
573 | |||
574 | /* | ||
575 | * The caller must have CAP_SYS_RAWIO, and must be calling this on the | ||
576 | * whole block device, not on a partition. This prevents overspray | ||
577 | * between sibling partitions. | ||
578 | */ | ||
579 | if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) | ||
580 | return -EPERM; | ||
581 | |||
582 | idata = mmc_blk_ioctl_copy_from_user(ic_ptr); | ||
583 | if (IS_ERR(idata)) | ||
584 | return PTR_ERR(idata); | ||
585 | |||
586 | md = mmc_blk_get(bdev->bd_disk); | ||
587 | if (!md) { | ||
588 | err = -EINVAL; | ||
589 | goto cmd_err; | ||
590 | } | ||
591 | |||
592 | card = md->queue.card; | ||
593 | if (IS_ERR(card)) { | ||
594 | err = PTR_ERR(card); | ||
595 | goto cmd_done; | ||
596 | } | ||
597 | |||
598 | mmc_get_card(card); | ||
599 | |||
600 | ioc_err = __mmc_blk_ioctl_cmd(card, md, idata); | ||
601 | |||
602 | /* Always switch back to main area after RPMB access */ | ||
603 | if (md->area_type & MMC_BLK_DATA_AREA_RPMB) | ||
604 | mmc_blk_part_switch(card, dev_get_drvdata(&card->dev)); | ||
605 | |||
606 | mmc_put_card(card); | ||
607 | |||
608 | err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata); | ||
609 | |||
610 | cmd_done: | ||
611 | mmc_blk_put(md); | ||
612 | cmd_err: | ||
613 | kfree(idata->buf); | ||
614 | kfree(idata); | ||
615 | return ioc_err ? ioc_err : err; | ||
616 | } | ||
617 | |||
618 | static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev, | ||
619 | struct mmc_ioc_multi_cmd __user *user) | ||
620 | { | ||
621 | struct mmc_blk_ioc_data **idata = NULL; | ||
622 | struct mmc_ioc_cmd __user *cmds = user->cmds; | ||
623 | struct mmc_card *card; | ||
624 | struct mmc_blk_data *md; | ||
625 | int i, err = 0, ioc_err = 0; | ||
626 | __u64 num_of_cmds; | ||
627 | |||
628 | /* | ||
629 | * The caller must have CAP_SYS_RAWIO, and must be calling this on the | ||
630 | * whole block device, not on a partition. This prevents overspray | ||
631 | * between sibling partitions. | ||
632 | */ | ||
633 | if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) | ||
634 | return -EPERM; | ||
635 | |||
636 | if (copy_from_user(&num_of_cmds, &user->num_of_cmds, | ||
637 | sizeof(num_of_cmds))) | ||
638 | return -EFAULT; | ||
639 | |||
640 | if (num_of_cmds > MMC_IOC_MAX_CMDS) | ||
641 | return -EINVAL; | ||
642 | |||
643 | idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL); | ||
644 | if (!idata) | ||
645 | return -ENOMEM; | ||
646 | |||
647 | for (i = 0; i < num_of_cmds; i++) { | ||
648 | idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]); | ||
649 | if (IS_ERR(idata[i])) { | ||
650 | err = PTR_ERR(idata[i]); | ||
651 | num_of_cmds = i; | ||
652 | goto cmd_err; | ||
653 | } | ||
654 | } | ||
655 | |||
656 | md = mmc_blk_get(bdev->bd_disk); | ||
657 | if (!md) { | ||
658 | err = -EINVAL; | ||
659 | goto cmd_err; | ||
660 | } | ||
661 | |||
662 | card = md->queue.card; | ||
663 | if (IS_ERR(card)) { | ||
664 | err = PTR_ERR(card); | ||
665 | goto cmd_done; | ||
666 | } | ||
667 | |||
668 | mmc_get_card(card); | ||
669 | |||
670 | for (i = 0; i < num_of_cmds && !ioc_err; i++) | ||
671 | ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]); | ||
672 | |||
673 | /* Always switch back to main area after RPMB access */ | ||
674 | if (md->area_type & MMC_BLK_DATA_AREA_RPMB) | ||
675 | mmc_blk_part_switch(card, dev_get_drvdata(&card->dev)); | ||
676 | |||
677 | mmc_put_card(card); | ||
678 | |||
679 | /* Copy the response, and for reads the data, back to user space */ | ||
680 | for (i = 0; i < num_of_cmds && !err; i++) | ||
681 | err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]); | ||
682 | |||
683 | cmd_done: | ||
684 | mmc_blk_put(md); | ||
685 | cmd_err: | ||
686 | for (i = 0; i < num_of_cmds; i++) { | ||
687 | kfree(idata[i]->buf); | ||
688 | kfree(idata[i]); | ||
689 | } | ||
690 | kfree(idata); | ||
691 | return ioc_err ? ioc_err : err; | ||
692 | } | ||
693 | |||
694 | static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, | ||
695 | unsigned int cmd, unsigned long arg) | ||
696 | { | ||
697 | switch (cmd) { | ||
698 | case MMC_IOC_CMD: | ||
699 | return mmc_blk_ioctl_cmd(bdev, | ||
700 | (struct mmc_ioc_cmd __user *)arg); | ||
701 | case MMC_IOC_MULTI_CMD: | ||
702 | return mmc_blk_ioctl_multi_cmd(bdev, | ||
703 | (struct mmc_ioc_multi_cmd __user *)arg); | ||
704 | default: | ||
705 | return -EINVAL; | ||
706 | } | ||
707 | } | ||
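/*
 * Illustrative userspace sketch (editorial addition, not part of this
 * driver): issuing a bare CMD13 (SEND_STATUS) through the MMC_IOC_CMD
 * path handled above. The device node and the RCA value of 1 are
 * assumptions; the flag values are copied from the kernel's
 * linux/mmc/core.h (they are not in the uapi header), as mmc-utils
 * does. The caller needs CAP_SYS_RAWIO and must open the whole block
 * device, per the checks in mmc_blk_ioctl_cmd().
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/mmc/ioctl.h>
 *
 *	#define MMC_RSP_R1	0x15	// PRESENT | CRC | OPCODE
 *	#define MMC_CMD_AC	0x00
 *
 *	int main(void)
 *	{
 *		struct mmc_ioc_cmd ic = { 0 };
 *		int fd = open("/dev/mmcblk0", O_RDWR);	// assumed node
 *
 *		if (fd < 0)
 *			return 1;
 *		ic.opcode = 13;		// MMC_SEND_STATUS
 *		ic.arg = 1 << 16;	// RCA (assumed 1) in bits [31:16]
 *		ic.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *		if (ioctl(fd, MMC_IOC_CMD, &ic) == 0)
 *			printf("card status 0x%08x\n", ic.response[0]);
 *		return 0;
 *	}
 */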
708 | |||
709 | #ifdef CONFIG_COMPAT | ||
710 | static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode, | ||
711 | unsigned int cmd, unsigned long arg) | ||
712 | { | ||
713 | return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg)); | ||
714 | } | ||
715 | #endif | ||
716 | |||
717 | static const struct block_device_operations mmc_bdops = { | ||
718 | .open = mmc_blk_open, | ||
719 | .release = mmc_blk_release, | ||
720 | .getgeo = mmc_blk_getgeo, | ||
721 | .owner = THIS_MODULE, | ||
722 | .ioctl = mmc_blk_ioctl, | ||
723 | #ifdef CONFIG_COMPAT | ||
724 | .compat_ioctl = mmc_blk_compat_ioctl, | ||
725 | #endif | ||
726 | }; | ||
727 | |||
728 | static inline int mmc_blk_part_switch(struct mmc_card *card, | ||
729 | struct mmc_blk_data *md) | ||
730 | { | ||
731 | int ret; | ||
732 | struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev); | ||
733 | |||
734 | if (main_md->part_curr == md->part_type) | ||
735 | return 0; | ||
736 | |||
737 | if (mmc_card_mmc(card)) { | ||
738 | u8 part_config = card->ext_csd.part_config; | ||
739 | |||
740 | if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) | ||
741 | mmc_retune_pause(card->host); | ||
742 | |||
743 | part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; | ||
744 | part_config |= md->part_type; | ||
745 | |||
746 | ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
747 | EXT_CSD_PART_CONFIG, part_config, | ||
748 | card->ext_csd.part_time); | ||
749 | if (ret) { | ||
750 | if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) | ||
751 | mmc_retune_unpause(card->host); | ||
752 | return ret; | ||
753 | } | ||
754 | |||
755 | card->ext_csd.part_config = part_config; | ||
756 | |||
757 | if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB) | ||
758 | mmc_retune_unpause(card->host); | ||
759 | } | ||
760 | |||
761 | main_md->part_curr = md->part_type; | ||
762 | return 0; | ||
763 | } | ||
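/*
 * Worked example (editorial note): with EXT_CSD_PART_CONFIG_ACC_MASK
 * being 0x7, switching to RPMB (part_type 0x3) on a card whose
 * part_config is, say, 0x48 (boot ack + boot partition 1 enabled)
 * computes (0x48 & ~0x7) | 0x3 = 0x4B: only the access bits [2:0]
 * change, the boot configuration in the upper bits is preserved.
 */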
764 | |||
765 | static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) | ||
766 | { | ||
767 | int err; | ||
768 | u32 result; | ||
769 | __be32 *blocks; | ||
770 | |||
771 | struct mmc_request mrq = {NULL}; | ||
772 | struct mmc_command cmd = {0}; | ||
773 | struct mmc_data data = {0}; | ||
774 | |||
775 | struct scatterlist sg; | ||
776 | |||
777 | cmd.opcode = MMC_APP_CMD; | ||
778 | cmd.arg = card->rca << 16; | ||
779 | cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; | ||
780 | |||
781 | err = mmc_wait_for_cmd(card->host, &cmd, 0); | ||
782 | if (err) | ||
783 | return (u32)-1; | ||
784 | if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD)) | ||
785 | return (u32)-1; | ||
786 | |||
787 | memset(&cmd, 0, sizeof(struct mmc_command)); | ||
788 | |||
789 | cmd.opcode = SD_APP_SEND_NUM_WR_BLKS; | ||
790 | cmd.arg = 0; | ||
791 | cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; | ||
792 | |||
793 | data.blksz = 4; | ||
794 | data.blocks = 1; | ||
795 | data.flags = MMC_DATA_READ; | ||
796 | data.sg = &sg; | ||
797 | data.sg_len = 1; | ||
798 | mmc_set_data_timeout(&data, card); | ||
799 | |||
800 | mrq.cmd = &cmd; | ||
801 | mrq.data = &data; | ||
802 | |||
803 | blocks = kmalloc(4, GFP_KERNEL); | ||
804 | if (!blocks) | ||
805 | return (u32)-1; | ||
806 | |||
807 | sg_init_one(&sg, blocks, 4); | ||
808 | |||
809 | mmc_wait_for_req(card->host, &mrq); | ||
810 | |||
811 | result = ntohl(*blocks); | ||
812 | kfree(blocks); | ||
813 | |||
814 | if (cmd.error || data.error) | ||
815 | result = (u32)-1; | ||
816 | |||
817 | return result; | ||
818 | } | ||
819 | |||
820 | static int get_card_status(struct mmc_card *card, u32 *status, int retries) | ||
821 | { | ||
822 | struct mmc_command cmd = {0}; | ||
823 | int err; | ||
824 | |||
825 | cmd.opcode = MMC_SEND_STATUS; | ||
826 | if (!mmc_host_is_spi(card->host)) | ||
827 | cmd.arg = card->rca << 16; | ||
828 | cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; | ||
829 | err = mmc_wait_for_cmd(card->host, &cmd, retries); | ||
830 | if (err == 0) | ||
831 | *status = cmd.resp[0]; | ||
832 | return err; | ||
833 | } | ||
834 | |||
835 | static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, | ||
836 | bool hw_busy_detect, struct request *req, bool *gen_err) | ||
837 | { | ||
838 | unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); | ||
839 | int err = 0; | ||
840 | u32 status; | ||
841 | |||
842 | do { | ||
843 | err = get_card_status(card, &status, 5); | ||
844 | if (err) { | ||
845 | pr_err("%s: error %d requesting status\n", | ||
846 | req->rq_disk->disk_name, err); | ||
847 | return err; | ||
848 | } | ||
849 | |||
850 | if (status & R1_ERROR) { | ||
851 | pr_err("%s: %s: error sending status cmd, status %#x\n", | ||
852 | req->rq_disk->disk_name, __func__, status); | ||
853 | *gen_err = true; | ||
854 | } | ||
855 | |||
856 | /* We may rely on the host hw to handle busy detection. */ | ||
857 | if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && | ||
858 | hw_busy_detect) | ||
859 | break; | ||
860 | |||
861 | /* | ||
862 | * Timeout if the device never becomes ready for data and never | ||
863 | * leaves the program state. | ||
864 | */ | ||
865 | if (time_after(jiffies, timeout)) { | ||
866 | pr_err("%s: Card stuck in programming state! %s %s\n", | ||
867 | mmc_hostname(card->host), | ||
868 | req->rq_disk->disk_name, __func__); | ||
869 | return -ETIMEDOUT; | ||
870 | } | ||
871 | |||
872 | /* | ||
873 | * Some cards mishandle the status bits, | ||
874 | * so make sure to check both the busy | ||
875 | * indication and the card state. | ||
876 | */ | ||
877 | } while (!(status & R1_READY_FOR_DATA) || | ||
878 | (R1_CURRENT_STATE(status) == R1_STATE_PRG)); | ||
879 | |||
880 | return err; | ||
881 | } | ||
882 | |||
883 | static int send_stop(struct mmc_card *card, unsigned int timeout_ms, | ||
884 | struct request *req, bool *gen_err, u32 *stop_status) | ||
885 | { | ||
886 | struct mmc_host *host = card->host; | ||
887 | struct mmc_command cmd = {0}; | ||
888 | int err; | ||
889 | bool use_r1b_resp = rq_data_dir(req) == WRITE; | ||
890 | |||
891 | /* | ||
892 | * Normally we use R1B responses for WRITE, but in cases where the host | ||
893 | * has specified a max_busy_timeout we need to validate it. If that | ||
894 | * check fails, we must prevent the host from doing hw busy detection, | ||
895 | * which is done by converting to an R1 response instead. | ||
896 | */ | ||
897 | if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) | ||
898 | use_r1b_resp = false; | ||
899 | |||
900 | cmd.opcode = MMC_STOP_TRANSMISSION; | ||
901 | if (use_r1b_resp) { | ||
902 | cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; | ||
903 | cmd.busy_timeout = timeout_ms; | ||
904 | } else { | ||
905 | cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; | ||
906 | } | ||
907 | |||
908 | err = mmc_wait_for_cmd(host, &cmd, 5); | ||
909 | if (err) | ||
910 | return err; | ||
911 | |||
912 | *stop_status = cmd.resp[0]; | ||
913 | |||
914 | /* No need to check card status in case of READ. */ | ||
915 | if (rq_data_dir(req) == READ) | ||
916 | return 0; | ||
917 | |||
918 | if (!mmc_host_is_spi(host) && | ||
919 | (*stop_status & R1_ERROR)) { | ||
920 | pr_err("%s: %s: general error sending stop command, resp %#x\n", | ||
921 | req->rq_disk->disk_name, __func__, *stop_status); | ||
922 | *gen_err = true; | ||
923 | } | ||
924 | |||
925 | return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err); | ||
926 | } | ||
927 | |||
928 | #define ERR_NOMEDIUM 3 | ||
929 | #define ERR_RETRY 2 | ||
930 | #define ERR_ABORT 1 | ||
931 | #define ERR_CONTINUE 0 | ||
932 | |||
933 | static int mmc_blk_cmd_error(struct request *req, const char *name, int error, | ||
934 | bool status_valid, u32 status) | ||
935 | { | ||
936 | switch (error) { | ||
937 | case -EILSEQ: | ||
938 | /* response crc error, retry the r/w cmd */ | ||
939 | pr_err("%s: %s sending %s command, card status %#x\n", | ||
940 | req->rq_disk->disk_name, "response CRC error", | ||
941 | name, status); | ||
942 | return ERR_RETRY; | ||
943 | |||
944 | case -ETIMEDOUT: | ||
945 | pr_err("%s: %s sending %s command, card status %#x\n", | ||
946 | req->rq_disk->disk_name, "timed out", name, status); | ||
947 | |||
948 | /* If the status cmd initially failed, retry the r/w cmd */ | ||
949 | if (!status_valid) { | ||
950 | pr_err("%s: status not valid, retrying timeout\n", | ||
951 | req->rq_disk->disk_name); | ||
952 | return ERR_RETRY; | ||
953 | } | ||
954 | |||
955 | /* | ||
956 | * If it was a r/w cmd crc error, or illegal command | ||
957 | * (eg, issued in wrong state) then retry - we should | ||
958 | * have corrected the state problem above. | ||
959 | */ | ||
960 | if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) { | ||
961 | pr_err("%s: command error, retrying timeout\n", | ||
962 | req->rq_disk->disk_name); | ||
963 | return ERR_RETRY; | ||
964 | } | ||
965 | |||
966 | /* Otherwise abort the command */ | ||
967 | return ERR_ABORT; | ||
968 | |||
969 | default: | ||
970 | /* We don't understand the error code the driver gave us */ | ||
971 | pr_err("%s: unknown error %d sending read/write command, card status %#x\n", | ||
972 | req->rq_disk->disk_name, error, status); | ||
973 | return ERR_ABORT; | ||
974 | } | ||
975 | } | ||
976 | |||
977 | /* | ||
978 | * Initial r/w and stop cmd error recovery. | ||
979 | * We don't know whether the card received the r/w cmd or not, so try to | ||
980 | * restore things back to a sane state. Essentially, we do this as follows: | ||
981 | * - Obtain card status. If the first attempt to obtain card status fails, | ||
982 | * the status word will reflect the failed status cmd, not the failed | ||
983 | * r/w cmd. If we fail to obtain card status, it suggests we can no | ||
984 | * longer communicate with the card. | ||
985 | * - Check the card state. If the card received the cmd but there was a | ||
986 | * transient problem with the response, it might still be in a data transfer | ||
987 | * mode. Try to send it a stop command. If this fails, we can't recover. | ||
988 | * - If the r/w cmd failed due to a response CRC error, it was probably | ||
989 | * transient, so retry the cmd. | ||
990 | * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry. | ||
991 | * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or | ||
992 | * illegal cmd, retry. | ||
993 | * Otherwise we don't understand what happened, so abort. | ||
994 | */ | ||
995 | static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, | ||
996 | struct mmc_blk_request *brq, bool *ecc_err, bool *gen_err) | ||
997 | { | ||
998 | bool prev_cmd_status_valid = true; | ||
999 | u32 status, stop_status = 0; | ||
1000 | int err, retry; | ||
1001 | |||
1002 | if (mmc_card_removed(card)) | ||
1003 | return ERR_NOMEDIUM; | ||
1004 | |||
1005 | /* | ||
1006 | * Try to get card status which indicates both the card state | ||
1007 | * and why there was no response. If the first attempt fails, | ||
1008 | * we can't be sure the returned status is for the r/w command. | ||
1009 | */ | ||
1010 | for (retry = 2; retry >= 0; retry--) { | ||
1011 | err = get_card_status(card, &status, 0); | ||
1012 | if (!err) | ||
1013 | break; | ||
1014 | |||
1015 | /* Re-tune if needed */ | ||
1016 | mmc_retune_recheck(card->host); | ||
1017 | |||
1018 | prev_cmd_status_valid = false; | ||
1019 | pr_err("%s: error %d sending status command, %sing\n", | ||
1020 | req->rq_disk->disk_name, err, retry ? "retry" : "abort"); | ||
1021 | } | ||
1022 | |||
1023 | /* We couldn't get a response from the card. Give up. */ | ||
1024 | if (err) { | ||
1025 | /* Check if the card is removed */ | ||
1026 | if (mmc_detect_card_removed(card->host)) | ||
1027 | return ERR_NOMEDIUM; | ||
1028 | return ERR_ABORT; | ||
1029 | } | ||
1030 | |||
1031 | /* Flag ECC errors */ | ||
1032 | if ((status & R1_CARD_ECC_FAILED) || | ||
1033 | (brq->stop.resp[0] & R1_CARD_ECC_FAILED) || | ||
1034 | (brq->cmd.resp[0] & R1_CARD_ECC_FAILED)) | ||
1035 | *ecc_err = true; | ||
1036 | |||
1037 | /* Flag General errors */ | ||
1038 | if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) | ||
1039 | if ((status & R1_ERROR) || | ||
1040 | (brq->stop.resp[0] & R1_ERROR)) { | ||
1041 | pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n", | ||
1042 | req->rq_disk->disk_name, __func__, | ||
1043 | brq->stop.resp[0], status); | ||
1044 | *gen_err = true; | ||
1045 | } | ||
1046 | |||
1047 | /* | ||
1048 | * Check the current card state. If it is in some data transfer | ||
1049 | * mode, tell it to stop (and hopefully transition back to TRAN). | ||
1050 | */ | ||
1051 | if (R1_CURRENT_STATE(status) == R1_STATE_DATA || | ||
1052 | R1_CURRENT_STATE(status) == R1_STATE_RCV) { | ||
1053 | err = send_stop(card, | ||
1054 | DIV_ROUND_UP(brq->data.timeout_ns, 1000000), | ||
1055 | req, gen_err, &stop_status); | ||
1056 | if (err) { | ||
1057 | pr_err("%s: error %d sending stop command\n", | ||
1058 | req->rq_disk->disk_name, err); | ||
1059 | /* | ||
1060 | * If the stop cmd also timed out, the card is probably | ||
1061 | * not present, so abort. Other errors are bad news too. | ||
1062 | */ | ||
1063 | return ERR_ABORT; | ||
1064 | } | ||
1065 | |||
1066 | if (stop_status & R1_CARD_ECC_FAILED) | ||
1067 | *ecc_err = true; | ||
1068 | } | ||
1069 | |||
1070 | /* Check for set block count errors */ | ||
1071 | if (brq->sbc.error) | ||
1072 | return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error, | ||
1073 | prev_cmd_status_valid, status); | ||
1074 | |||
1075 | /* Check for r/w command errors */ | ||
1076 | if (brq->cmd.error) | ||
1077 | return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error, | ||
1078 | prev_cmd_status_valid, status); | ||
1079 | |||
1080 | /* Data errors */ | ||
1081 | if (!brq->stop.error) | ||
1082 | return ERR_CONTINUE; | ||
1083 | |||
1084 | /* Now for stop errors. These aren't fatal to the transfer. */ | ||
1085 | pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n", | ||
1086 | req->rq_disk->disk_name, brq->stop.error, | ||
1087 | brq->cmd.resp[0], status); | ||
1088 | |||
1089 | /* | ||
1090 | * Substitute in our own stop status as this will give the error | ||
1091 | * state which happened during the execution of the r/w command. | ||
1092 | */ | ||
1093 | if (stop_status) { | ||
1094 | brq->stop.resp[0] = stop_status; | ||
1095 | brq->stop.error = 0; | ||
1096 | } | ||
1097 | return ERR_CONTINUE; | ||
1098 | } | ||
1099 | |||
1100 | static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, | ||
1101 | int type) | ||
1102 | { | ||
1103 | int err; | ||
1104 | |||
1105 | if (md->reset_done & type) | ||
1106 | return -EEXIST; | ||
1107 | |||
1108 | md->reset_done |= type; | ||
1109 | err = mmc_hw_reset(host); | ||
1110 | /* Ensure we switch back to the correct partition */ | ||
1111 | if (err != -EOPNOTSUPP) { | ||
1112 | struct mmc_blk_data *main_md = | ||
1113 | dev_get_drvdata(&host->card->dev); | ||
1114 | int part_err; | ||
1115 | |||
1116 | main_md->part_curr = main_md->part_type; | ||
1117 | part_err = mmc_blk_part_switch(host->card, md); | ||
1118 | if (part_err) { | ||
1119 | /* | ||
1120 | * We have failed to get back into the correct | ||
1121 | * partition, so we need to abort the whole request. | ||
1122 | */ | ||
1123 | return -ENODEV; | ||
1124 | } | ||
1125 | } | ||
1126 | return err; | ||
1127 | } | ||
1128 | |||
1129 | static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) | ||
1130 | { | ||
1131 | md->reset_done &= ~type; | ||
1132 | } | ||
1133 | |||
1134 | int mmc_access_rpmb(struct mmc_queue *mq) | ||
1135 | { | ||
1136 | struct mmc_blk_data *md = mq->blkdata; | ||
1137 | /* | ||
1138 | * If this is an RPMB partition access, return true | ||
1139 | */ | ||
1140 | if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) | ||
1141 | return true; | ||
1142 | |||
1143 | return false; | ||
1144 | } | ||
1145 | |||
1146 | static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) | ||
1147 | { | ||
1148 | struct mmc_blk_data *md = mq->blkdata; | ||
1149 | struct mmc_card *card = md->queue.card; | ||
1150 | unsigned int from, nr, arg; | ||
1151 | int err = 0, type = MMC_BLK_DISCARD; | ||
1152 | |||
1153 | if (!mmc_can_erase(card)) { | ||
1154 | err = -EOPNOTSUPP; | ||
1155 | goto out; | ||
1156 | } | ||
1157 | |||
1158 | from = blk_rq_pos(req); | ||
1159 | nr = blk_rq_sectors(req); | ||
1160 | |||
1161 | if (mmc_can_discard(card)) | ||
1162 | arg = MMC_DISCARD_ARG; | ||
1163 | else if (mmc_can_trim(card)) | ||
1164 | arg = MMC_TRIM_ARG; | ||
1165 | else | ||
1166 | arg = MMC_ERASE_ARG; | ||
1167 | retry: | ||
1168 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { | ||
1169 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
1170 | INAND_CMD38_ARG_EXT_CSD, | ||
1171 | arg == MMC_TRIM_ARG ? | ||
1172 | INAND_CMD38_ARG_TRIM : | ||
1173 | INAND_CMD38_ARG_ERASE, | ||
1174 | 0); | ||
1175 | if (err) | ||
1176 | goto out; | ||
1177 | } | ||
1178 | err = mmc_erase(card, from, nr, arg); | ||
1179 | out: | ||
1180 | if (err == -EIO && !mmc_blk_reset(md, card->host, type)) | ||
1181 | goto retry; | ||
1182 | if (!err) | ||
1183 | mmc_blk_reset_success(md, type); | ||
1184 | blk_end_request(req, err, blk_rq_bytes(req)); | ||
1185 | |||
1186 | return err ? 0 : 1; | ||
1187 | } | ||
1188 | |||
1189 | static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, | ||
1190 | struct request *req) | ||
1191 | { | ||
1192 | struct mmc_blk_data *md = mq->blkdata; | ||
1193 | struct mmc_card *card = md->queue.card; | ||
1194 | unsigned int from, nr, arg; | ||
1195 | int err = 0, type = MMC_BLK_SECDISCARD; | ||
1196 | |||
1197 | if (!(mmc_can_secure_erase_trim(card))) { | ||
1198 | err = -EOPNOTSUPP; | ||
1199 | goto out; | ||
1200 | } | ||
1201 | |||
1202 | from = blk_rq_pos(req); | ||
1203 | nr = blk_rq_sectors(req); | ||
1204 | |||
1205 | if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr)) | ||
1206 | arg = MMC_SECURE_TRIM1_ARG; | ||
1207 | else | ||
1208 | arg = MMC_SECURE_ERASE_ARG; | ||
1209 | |||
1210 | retry: | ||
1211 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { | ||
1212 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
1213 | INAND_CMD38_ARG_EXT_CSD, | ||
1214 | arg == MMC_SECURE_TRIM1_ARG ? | ||
1215 | INAND_CMD38_ARG_SECTRIM1 : | ||
1216 | INAND_CMD38_ARG_SECERASE, | ||
1217 | 0); | ||
1218 | if (err) | ||
1219 | goto out_retry; | ||
1220 | } | ||
1221 | |||
1222 | err = mmc_erase(card, from, nr, arg); | ||
1223 | if (err == -EIO) | ||
1224 | goto out_retry; | ||
1225 | if (err) | ||
1226 | goto out; | ||
1227 | |||
1228 | if (arg == MMC_SECURE_TRIM1_ARG) { | ||
1229 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { | ||
1230 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
1231 | INAND_CMD38_ARG_EXT_CSD, | ||
1232 | INAND_CMD38_ARG_SECTRIM2, | ||
1233 | 0); | ||
1234 | if (err) | ||
1235 | goto out_retry; | ||
1236 | } | ||
1237 | |||
1238 | err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); | ||
1239 | if (err == -EIO) | ||
1240 | goto out_retry; | ||
1241 | if (err) | ||
1242 | goto out; | ||
1243 | } | ||
1244 | |||
1245 | out_retry: | ||
1246 | if (err && !mmc_blk_reset(md, card->host, type)) | ||
1247 | goto retry; | ||
1248 | if (!err) | ||
1249 | mmc_blk_reset_success(md, type); | ||
1250 | out: | ||
1251 | blk_end_request(req, err, blk_rq_bytes(req)); | ||
1252 | |||
1253 | return err ? 0 : 1; | ||
1254 | } | ||
1255 | |||
1256 | static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) | ||
1257 | { | ||
1258 | struct mmc_blk_data *md = mq->blkdata; | ||
1259 | struct mmc_card *card = md->queue.card; | ||
1260 | int ret = 0; | ||
1261 | |||
1262 | ret = mmc_flush_cache(card); | ||
1263 | if (ret) | ||
1264 | ret = -EIO; | ||
1265 | |||
1266 | blk_end_request_all(req, ret); | ||
1267 | |||
1268 | return ret ? 0 : 1; | ||
1269 | } | ||
1270 | |||
1271 | /* | ||
1272 | * Reformat current write as a reliable write, supporting | ||
1273 | * both legacy and the enhanced reliable write MMC cards. | ||
1274 | * In each transfer we'll handle only as much as a single | ||
1275 | * reliable write can handle, thus finishing the request in | ||
1276 | * partial completions. | ||
1277 | */ | ||
1278 | static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq, | ||
1279 | struct mmc_card *card, | ||
1280 | struct request *req) | ||
1281 | { | ||
1282 | if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) { | ||
1283 | /* Legacy mode imposes restrictions on transfers. */ | ||
1284 | if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors)) | ||
1285 | brq->data.blocks = 1; | ||
1286 | |||
1287 | if (brq->data.blocks > card->ext_csd.rel_sectors) | ||
1288 | brq->data.blocks = card->ext_csd.rel_sectors; | ||
1289 | else if (brq->data.blocks < card->ext_csd.rel_sectors) | ||
1290 | brq->data.blocks = 1; | ||
1291 | } | ||
1292 | } | ||
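/*
 * Worked example (editorial note): with rel_sectors == 8 in legacy
 * mode, an aligned 32-block write is clamped to 8 blocks per transfer
 * (the rest completes through partial completions), while an unaligned
 * write, or an aligned one shorter than 8 blocks, collapses to a
 * single block.
 */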
1293 | |||
1294 | #define CMD_ERRORS \ | ||
1295 | (R1_OUT_OF_RANGE | /* Command argument out of range */ \ | ||
1296 | R1_ADDRESS_ERROR | /* Misaligned address */ \ | ||
1297 | R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\ | ||
1298 | R1_WP_VIOLATION | /* Tried to write to protected block */ \ | ||
1299 | R1_CC_ERROR | /* Card controller error */ \ | ||
1300 | R1_ERROR) /* General/unknown error */ | ||
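/*
 * Worked example (editorial note): R1_WP_VIOLATION is bit 26, so a
 * rejected write to a protected area returning status 0x04000000
 * matches CMD_ERRORS and makes mmc_blk_err_check() below abort the
 * request instead of retrying it.
 */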
1301 | |||
1302 | static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card, | ||
1303 | struct mmc_async_req *areq) | ||
1304 | { | ||
1305 | struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req, | ||
1306 | mmc_active); | ||
1307 | struct mmc_blk_request *brq = &mq_mrq->brq; | ||
1308 | struct request *req = mq_mrq->req; | ||
1309 | int need_retune = card->host->need_retune; | ||
1310 | bool ecc_err = false; | ||
1311 | bool gen_err = false; | ||
1312 | |||
1313 | /* | ||
1314 | * sbc.error indicates a problem with the set block count | ||
1315 | * command. No data will have been transferred. | ||
1316 | * | ||
1317 | * cmd.error indicates a problem with the r/w command. No | ||
1318 | * data will have been transferred. | ||
1319 | * | ||
1320 | * stop.error indicates a problem with the stop command. Data | ||
1321 | * may have been transferred, or may still be transferring. | ||
1322 | */ | ||
1323 | if (brq->sbc.error || brq->cmd.error || brq->stop.error || | ||
1324 | brq->data.error) { | ||
1325 | switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) { | ||
1326 | case ERR_RETRY: | ||
1327 | return MMC_BLK_RETRY; | ||
1328 | case ERR_ABORT: | ||
1329 | return MMC_BLK_ABORT; | ||
1330 | case ERR_NOMEDIUM: | ||
1331 | return MMC_BLK_NOMEDIUM; | ||
1332 | case ERR_CONTINUE: | ||
1333 | break; | ||
1334 | } | ||
1335 | } | ||
1336 | |||
1337 | /* | ||
1338 | * Check for errors relating to the execution of the | ||
1339 | * initial command - such as address errors. No data | ||
1340 | * has been transferred. | ||
1341 | */ | ||
1342 | if (brq->cmd.resp[0] & CMD_ERRORS) { | ||
1343 | pr_err("%s: r/w command failed, status = %#x\n", | ||
1344 | req->rq_disk->disk_name, brq->cmd.resp[0]); | ||
1345 | return MMC_BLK_ABORT; | ||
1346 | } | ||
1347 | |||
1348 | /* | ||
1349 | * Everything else is either success, or a data error of some | ||
1350 | * kind. If it was a write, we may have transitioned to | ||
1351 | * program mode, which we have to wait to complete. | ||
1352 | */ | ||
1353 | if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { | ||
1354 | int err; | ||
1355 | |||
1356 | /* Check stop command response */ | ||
1357 | if (brq->stop.resp[0] & R1_ERROR) { | ||
1358 | pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n", | ||
1359 | req->rq_disk->disk_name, __func__, | ||
1360 | brq->stop.resp[0]); | ||
1361 | gen_err = true; | ||
1362 | } | ||
1363 | |||
1364 | err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req, | ||
1365 | &gen_err); | ||
1366 | if (err) | ||
1367 | return MMC_BLK_CMD_ERR; | ||
1368 | } | ||
1369 | |||
1370 | /* If a general error occurs, retry the write operation. */ | ||
1371 | if (gen_err) { | ||
1372 | pr_warn("%s: retrying write for general error\n", | ||
1373 | req->rq_disk->disk_name); | ||
1374 | return MMC_BLK_RETRY; | ||
1375 | } | ||
1376 | |||
1377 | if (brq->data.error) { | ||
1378 | if (need_retune && !brq->retune_retry_done) { | ||
1379 | pr_debug("%s: retrying because a re-tune was needed\n", | ||
1380 | req->rq_disk->disk_name); | ||
1381 | brq->retune_retry_done = 1; | ||
1382 | return MMC_BLK_RETRY; | ||
1383 | } | ||
1384 | pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n", | ||
1385 | req->rq_disk->disk_name, brq->data.error, | ||
1386 | (unsigned)blk_rq_pos(req), | ||
1387 | (unsigned)blk_rq_sectors(req), | ||
1388 | brq->cmd.resp[0], brq->stop.resp[0]); | ||
1389 | |||
1390 | if (rq_data_dir(req) == READ) { | ||
1391 | if (ecc_err) | ||
1392 | return MMC_BLK_ECC_ERR; | ||
1393 | return MMC_BLK_DATA_ERR; | ||
1394 | } else { | ||
1395 | return MMC_BLK_CMD_ERR; | ||
1396 | } | ||
1397 | } | ||
1398 | |||
1399 | if (!brq->data.bytes_xfered) | ||
1400 | return MMC_BLK_RETRY; | ||
1401 | |||
1402 | if (blk_rq_bytes(req) != brq->data.bytes_xfered) | ||
1403 | return MMC_BLK_PARTIAL; | ||
1404 | |||
1405 | return MMC_BLK_SUCCESS; | ||
1406 | } | ||
1407 | |||
1408 | static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, | ||
1409 | struct mmc_card *card, | ||
1410 | int disable_multi, | ||
1411 | struct mmc_queue *mq) | ||
1412 | { | ||
1413 | u32 readcmd, writecmd; | ||
1414 | struct mmc_blk_request *brq = &mqrq->brq; | ||
1415 | struct request *req = mqrq->req; | ||
1416 | struct mmc_blk_data *md = mq->blkdata; | ||
1417 | bool do_data_tag; | ||
1418 | |||
1419 | /* | ||
1420 | * Reliable writes are used to implement Forced Unit Access and | ||
1421 | * are supported only on MMCs. | ||
1422 | */ | ||
1423 | bool do_rel_wr = (req->cmd_flags & REQ_FUA) && | ||
1424 | (rq_data_dir(req) == WRITE) && | ||
1425 | (md->flags & MMC_BLK_REL_WR); | ||
1426 | |||
1427 | memset(brq, 0, sizeof(struct mmc_blk_request)); | ||
1428 | brq->mrq.cmd = &brq->cmd; | ||
1429 | brq->mrq.data = &brq->data; | ||
1430 | |||
1431 | brq->cmd.arg = blk_rq_pos(req); | ||
1432 | if (!mmc_card_blockaddr(card)) | ||
1433 | brq->cmd.arg <<= 9; | ||
1434 | brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; | ||
1435 | brq->data.blksz = 512; | ||
1436 | brq->stop.opcode = MMC_STOP_TRANSMISSION; | ||
1437 | brq->stop.arg = 0; | ||
1438 | brq->data.blocks = blk_rq_sectors(req); | ||
1439 | |||
1440 | /* | ||
1441 | * The block layer doesn't support all sector count | ||
1442 | * restrictions, so we need to be prepared for requests | ||
1443 | * that are too big. | ||
1444 | */ | ||
1445 | if (brq->data.blocks > card->host->max_blk_count) | ||
1446 | brq->data.blocks = card->host->max_blk_count; | ||
1447 | |||
1448 | if (brq->data.blocks > 1) { | ||
1449 | /* | ||
1450 | * After a read error, we redo the request one sector | ||
1451 | * at a time in order to accurately determine which | ||
1452 | * sectors can be read successfully. | ||
1453 | */ | ||
1454 | if (disable_multi) | ||
1455 | brq->data.blocks = 1; | ||
1456 | |||
1457 | /* | ||
1458 | * Some controllers have HW issues while operating | ||
1459 | * in multi-block I/O mode | ||
1460 | */ | ||
1461 | if (card->host->ops->multi_io_quirk) | ||
1462 | brq->data.blocks = card->host->ops->multi_io_quirk(card, | ||
1463 | (rq_data_dir(req) == READ) ? | ||
1464 | MMC_DATA_READ : MMC_DATA_WRITE, | ||
1465 | brq->data.blocks); | ||
1466 | } | ||
1467 | |||
1468 | if (brq->data.blocks > 1 || do_rel_wr) { | ||
1469 | /* SPI multiblock writes terminate using a special | ||
1470 | * token, not a STOP_TRANSMISSION request. | ||
1471 | */ | ||
1472 | if (!mmc_host_is_spi(card->host) || | ||
1473 | rq_data_dir(req) == READ) | ||
1474 | brq->mrq.stop = &brq->stop; | ||
1475 | readcmd = MMC_READ_MULTIPLE_BLOCK; | ||
1476 | writecmd = MMC_WRITE_MULTIPLE_BLOCK; | ||
1477 | } else { | ||
1478 | brq->mrq.stop = NULL; | ||
1479 | readcmd = MMC_READ_SINGLE_BLOCK; | ||
1480 | writecmd = MMC_WRITE_BLOCK; | ||
1481 | } | ||
1482 | if (rq_data_dir(req) == READ) { | ||
1483 | brq->cmd.opcode = readcmd; | ||
1484 | brq->data.flags = MMC_DATA_READ; | ||
1485 | if (brq->mrq.stop) | ||
1486 | brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | | ||
1487 | MMC_CMD_AC; | ||
1488 | } else { | ||
1489 | brq->cmd.opcode = writecmd; | ||
1490 | brq->data.flags = MMC_DATA_WRITE; | ||
1491 | if (brq->mrq.stop) | ||
1492 | brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | | ||
1493 | MMC_CMD_AC; | ||
1494 | } | ||
1495 | |||
1496 | if (do_rel_wr) | ||
1497 | mmc_apply_rel_rw(brq, card, req); | ||
1498 | |||
1499 | /* | ||
1500 | * The data tag is used only when writing meta data, to speed | ||
1501 | * up the write and any subsequent read of this meta data | ||
1502 | */ | ||
1503 | do_data_tag = (card->ext_csd.data_tag_unit_size) && | ||
1504 | (req->cmd_flags & REQ_META) && | ||
1505 | (rq_data_dir(req) == WRITE) && | ||
1506 | ((brq->data.blocks * brq->data.blksz) >= | ||
1507 | card->ext_csd.data_tag_unit_size); | ||
1508 | |||
1509 | /* | ||
1510 | * Pre-defined multi-block transfers are preferable to | ||
1511 | * open-ended ones (and necessary for reliable writes). | ||
1512 | * However, it is not sufficient to just send CMD23, | ||
1513 | * and avoid the final CMD12, as on an error condition | ||
1514 | * CMD12 (stop) needs to be sent anyway. This, coupled | ||
1515 | * with Auto-CMD23 enhancements provided by some | ||
1516 | * hosts, means that the complexity of dealing | ||
1517 | * with this is best left to the host. If CMD23 is | ||
1518 | * supported by card and host, we'll fill sbc in and let | ||
1519 | * the host deal with handling it correctly. This means | ||
1520 | * that for hosts that don't expose MMC_CAP_CMD23, no | ||
1521 | * change of behavior will be observed. | ||
1522 | * | ||
1523 | * N.B.: Some MMC cards experience performance degradation. | ||
1524 | * We'll avoid using CMD23-bounded multiblock writes for | ||
1525 | * these, while retaining features like reliable writes. | ||
1526 | */ | ||
1527 | if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) && | ||
1528 | (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) || | ||
1529 | do_data_tag)) { | ||
1530 | brq->sbc.opcode = MMC_SET_BLOCK_COUNT; | ||
1531 | brq->sbc.arg = brq->data.blocks | | ||
1532 | (do_rel_wr ? (1 << 31) : 0) | | ||
1533 | (do_data_tag ? (1 << 29) : 0); | ||
1534 | brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; | ||
1535 | brq->mrq.sbc = &brq->sbc; | ||
1536 | } | ||
1537 | |||
1538 | mmc_set_data_timeout(&brq->data, card); | ||
1539 | |||
1540 | brq->data.sg = mqrq->sg; | ||
1541 | brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); | ||
1542 | |||
1543 | /* | ||
1544 | * Adjust the sg list so it is the same size as the | ||
1545 | * request. | ||
1546 | */ | ||
1547 | if (brq->data.blocks != blk_rq_sectors(req)) { | ||
1548 | int i, data_size = brq->data.blocks << 9; | ||
1549 | struct scatterlist *sg; | ||
1550 | |||
1551 | for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) { | ||
1552 | data_size -= sg->length; | ||
1553 | if (data_size <= 0) { | ||
1554 | sg->length += data_size; | ||
1555 | i++; | ||
1556 | break; | ||
1557 | } | ||
1558 | } | ||
1559 | brq->data.sg_len = i; | ||
1560 | } | ||
1561 | |||
1562 | mqrq->mmc_active.mrq = &brq->mrq; | ||
1563 | mqrq->mmc_active.err_check = mmc_blk_err_check; | ||
1564 | |||
1565 | mmc_queue_bounce_pre(mqrq); | ||
1566 | } | ||
1567 | |||
1568 | static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, | ||
1569 | struct mmc_blk_request *brq, struct request *req, | ||
1570 | int ret) | ||
1571 | { | ||
1572 | struct mmc_queue_req *mq_rq; | ||
1573 | mq_rq = container_of(brq, struct mmc_queue_req, brq); | ||
1574 | |||
1575 | /* | ||
1576 | * If this is an SD card and we're writing, we can first | ||
1577 | * mark the known good sectors as ok. | ||
1578 | * | ||
1579 | * If the card is not SD, we can still acknowledge written sectors | ||
1580 | * as reported by the controller (which might be less than | ||
1581 | * the real number of written sectors, but never more). | ||
1582 | */ | ||
1583 | if (mmc_card_sd(card)) { | ||
1584 | u32 blocks; | ||
1585 | |||
1586 | blocks = mmc_sd_num_wr_blocks(card); | ||
1587 | if (blocks != (u32)-1) { | ||
1588 | ret = blk_end_request(req, 0, blocks << 9); | ||
1589 | } | ||
1590 | } else { | ||
1591 | ret = blk_end_request(req, 0, brq->data.bytes_xfered); | ||
1592 | } | ||
1593 | return ret; | ||
1594 | } | ||
1595 | |||
1596 | static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | ||
1597 | { | ||
1598 | struct mmc_blk_data *md = mq->blkdata; | ||
1599 | struct mmc_card *card = md->queue.card; | ||
1600 | struct mmc_blk_request *brq; | ||
1601 | int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0; | ||
1602 | enum mmc_blk_status status; | ||
1603 | struct mmc_queue_req *mq_rq; | ||
1604 | struct request *req; | ||
1605 | struct mmc_async_req *areq; | ||
1606 | |||
1607 | if (!rqc && !mq->mqrq_prev->req) | ||
1608 | return 0; | ||
1609 | |||
1610 | do { | ||
1611 | if (rqc) { | ||
1612 | /* | ||
1613 | * When 4KB native sector size is enabled, only reads and | ||
1614 | * writes that are a multiple of 8 blocks are allowed | ||
1615 | */ | ||
1616 | if (mmc_large_sector(card) && | ||
1617 | !IS_ALIGNED(blk_rq_sectors(rqc), 8)) { | ||
1618 | pr_err("%s: Transfer size is not 4KB sector size aligned\n", | ||
1619 | rqc->rq_disk->disk_name); | ||
1620 | mq_rq = mq->mqrq_cur; | ||
1621 | req = rqc; | ||
1622 | rqc = NULL; | ||
1623 | goto cmd_abort; | ||
1624 | } | ||
1625 | |||
1626 | mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); | ||
1627 | areq = &mq->mqrq_cur->mmc_active; | ||
1628 | } else | ||
1629 | areq = NULL; | ||
1630 | areq = mmc_start_req(card->host, areq, &status); | ||
1631 | if (!areq) { | ||
1632 | if (status == MMC_BLK_NEW_REQUEST) | ||
1633 | mq->flags |= MMC_QUEUE_NEW_REQUEST; | ||
1634 | return 0; | ||
1635 | } | ||
1636 | |||
1637 | mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); | ||
1638 | brq = &mq_rq->brq; | ||
1639 | req = mq_rq->req; | ||
1640 | type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; | ||
1641 | mmc_queue_bounce_post(mq_rq); | ||
1642 | |||
1643 | switch (status) { | ||
1644 | case MMC_BLK_SUCCESS: | ||
1645 | case MMC_BLK_PARTIAL: | ||
1646 | /* | ||
1647 | * A block was successfully transferred. | ||
1648 | */ | ||
1649 | mmc_blk_reset_success(md, type); | ||
1650 | |||
1651 | ret = blk_end_request(req, 0, | ||
1652 | brq->data.bytes_xfered); | ||
1653 | |||
1654 | /* | ||
1655 | * If the blk_end_request function returns non-zero even | ||
1656 | * though all data has been transferred and no errors | ||
1657 | * were returned by the host controller, it's a bug. | ||
1658 | */ | ||
1659 | if (status == MMC_BLK_SUCCESS && ret) { | ||
1660 | pr_err("%s BUG rq_tot %d d_xfer %d\n", | ||
1661 | __func__, blk_rq_bytes(req), | ||
1662 | brq->data.bytes_xfered); | ||
1663 | rqc = NULL; | ||
1664 | goto cmd_abort; | ||
1665 | } | ||
1666 | break; | ||
1667 | case MMC_BLK_CMD_ERR: | ||
1668 | ret = mmc_blk_cmd_err(md, card, brq, req, ret); | ||
1669 | if (mmc_blk_reset(md, card->host, type)) | ||
1670 | goto cmd_abort; | ||
1671 | if (!ret) | ||
1672 | goto start_new_req; | ||
1673 | break; | ||
1674 | case MMC_BLK_RETRY: | ||
1675 | retune_retry_done = brq->retune_retry_done; | ||
1676 | if (retry++ < 5) | ||
1677 | break; | ||
1678 | /* Fall through */ | ||
1679 | case MMC_BLK_ABORT: | ||
1680 | if (!mmc_blk_reset(md, card->host, type)) | ||
1681 | break; | ||
1682 | goto cmd_abort; | ||
1683 | case MMC_BLK_DATA_ERR: { | ||
1684 | int err; | ||
1685 | |||
1686 | err = mmc_blk_reset(md, card->host, type); | ||
1687 | if (!err) | ||
1688 | break; | ||
1689 | if (err == -ENODEV) | ||
1690 | goto cmd_abort; | ||
1691 | /* Fall through */ | ||
1692 | } | ||
1693 | case MMC_BLK_ECC_ERR: | ||
1694 | if (brq->data.blocks > 1) { | ||
1695 | /* Redo read one sector at a time */ | ||
1696 | pr_warn("%s: retrying using single block read\n", | ||
1697 | req->rq_disk->disk_name); | ||
1698 | disable_multi = 1; | ||
1699 | break; | ||
1700 | } | ||
1701 | /* | ||
1702 | * After an error, we redo I/O one sector at a | ||
1703 | * time, so we only reach here after trying to | ||
1704 | * read a single sector. | ||
1705 | */ | ||
1706 | ret = blk_end_request(req, -EIO, | ||
1707 | brq->data.blksz); | ||
1708 | if (!ret) | ||
1709 | goto start_new_req; | ||
1710 | break; | ||
1711 | case MMC_BLK_NOMEDIUM: | ||
1712 | goto cmd_abort; | ||
1713 | default: | ||
1714 | pr_err("%s: Unhandled return value (%d)", | ||
1715 | req->rq_disk->disk_name, status); | ||
1716 | goto cmd_abort; | ||
1717 | } | ||
1718 | |||
1719 | if (ret) { | ||
1720 | /* | ||
1721 | * In case of an incomplete request, | ||
1722 | * prepare it again and resend it. | ||
1723 | */ | ||
1724 | mmc_blk_rw_rq_prep(mq_rq, card, | ||
1725 | disable_multi, mq); | ||
1726 | mmc_start_req(card->host, | ||
1727 | &mq_rq->mmc_active, NULL); | ||
1728 | mq_rq->brq.retune_retry_done = retune_retry_done; | ||
1729 | } | ||
1730 | } while (ret); | ||
1731 | |||
1732 | return 1; | ||
1733 | |||
1734 | cmd_abort: | ||
1735 | if (mmc_card_removed(card)) | ||
1736 | req->cmd_flags |= REQ_QUIET; | ||
1737 | while (ret) | ||
1738 | ret = blk_end_request(req, -EIO, | ||
1739 | blk_rq_cur_bytes(req)); | ||
1740 | |||
1741 | start_new_req: | ||
1742 | if (rqc) { | ||
1743 | if (mmc_card_removed(card)) { | ||
1744 | rqc->cmd_flags |= REQ_QUIET; | ||
1745 | blk_end_request_all(rqc, -EIO); | ||
1746 | } else { | ||
1747 | mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); | ||
1748 | mmc_start_req(card->host, | ||
1749 | &mq->mqrq_cur->mmc_active, NULL); | ||
1750 | } | ||
1751 | } | ||
1752 | |||
1753 | return 0; | ||
1754 | } | ||
1755 | |||
1756 | int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | ||
1757 | { | ||
1758 | int ret; | ||
1759 | struct mmc_blk_data *md = mq->blkdata; | ||
1760 | struct mmc_card *card = md->queue.card; | ||
1761 | bool req_is_special = mmc_req_is_special(req); | ||
1762 | |||
1763 | if (req && !mq->mqrq_prev->req) | ||
1764 | /* claim host only for the first request */ | ||
1765 | mmc_get_card(card); | ||
1766 | |||
1767 | ret = mmc_blk_part_switch(card, md); | ||
1768 | if (ret) { | ||
1769 | if (req) | ||
1770 | blk_end_request_all(req, -EIO); | ||
1772 | ret = 0; | ||
1773 | goto out; | ||
1774 | } | ||
1775 | |||
1776 | mq->flags &= ~MMC_QUEUE_NEW_REQUEST; | ||
1777 | if (req && req_op(req) == REQ_OP_DISCARD) { | ||
1778 | /* complete ongoing async transfer before issuing discard */ | ||
1779 | if (card->host->areq) | ||
1780 | mmc_blk_issue_rw_rq(mq, NULL); | ||
1781 | ret = mmc_blk_issue_discard_rq(mq, req); | ||
1782 | } else if (req && req_op(req) == REQ_OP_SECURE_ERASE) { | ||
1783 | /* complete ongoing async transfer before issuing secure erase*/ | ||
1784 | if (card->host->areq) | ||
1785 | mmc_blk_issue_rw_rq(mq, NULL); | ||
1786 | ret = mmc_blk_issue_secdiscard_rq(mq, req); | ||
1787 | } else if (req && req_op(req) == REQ_OP_FLUSH) { | ||
1788 | /* complete ongoing async transfer before issuing flush */ | ||
1789 | if (card->host->areq) | ||
1790 | mmc_blk_issue_rw_rq(mq, NULL); | ||
1791 | ret = mmc_blk_issue_flush(mq, req); | ||
1792 | } else { | ||
1793 | ret = mmc_blk_issue_rw_rq(mq, req); | ||
1794 | } | ||
1795 | |||
1796 | out: | ||
1797 | if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || req_is_special) | ||
1798 | /* | ||
1799 | * Release host when there are no more requests | ||
1800 | * and after a special request (discard, flush) is done. | ||
1801 | * For a special request there is no reentry into | ||
1802 | * 'mmc_blk_issue_rq' with 'mqrq_prev->req' set. | ||
1803 | */ | ||
1804 | mmc_put_card(card); | ||
1805 | return ret; | ||
1806 | } | ||
1807 | |||
1808 | static inline int mmc_blk_readonly(struct mmc_card *card) | ||
1809 | { | ||
1810 | return mmc_card_readonly(card) || | ||
1811 | !(card->csd.cmdclass & CCC_BLOCK_WRITE); | ||
1812 | } | ||
1813 | |||
1814 | static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, | ||
1815 | struct device *parent, | ||
1816 | sector_t size, | ||
1817 | bool default_ro, | ||
1818 | const char *subname, | ||
1819 | int area_type) | ||
1820 | { | ||
1821 | struct mmc_blk_data *md; | ||
1822 | int devidx, ret; | ||
1823 | |||
1824 | again: | ||
1825 | if (!ida_pre_get(&mmc_blk_ida, GFP_KERNEL)) | ||
1826 | return ERR_PTR(-ENOMEM); | ||
1827 | |||
1828 | spin_lock(&mmc_blk_lock); | ||
1829 | ret = ida_get_new(&mmc_blk_ida, &devidx); | ||
1830 | spin_unlock(&mmc_blk_lock); | ||
1831 | |||
1832 | if (ret == -EAGAIN) | ||
1833 | goto again; | ||
1834 | else if (ret) | ||
1835 | return ERR_PTR(ret); | ||
1836 | |||
1837 | if (devidx >= max_devices) { | ||
1838 | ret = -ENOSPC; | ||
1839 | goto out; | ||
1840 | } | ||
1841 | |||
1842 | md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL); | ||
1843 | if (!md) { | ||
1844 | ret = -ENOMEM; | ||
1845 | goto out; | ||
1846 | } | ||
1847 | |||
1848 | md->area_type = area_type; | ||
1849 | |||
1850 | /* | ||
1851 | * Set the read-only status based on the supported commands | ||
1852 | * and the write protect switch. | ||
1853 | */ | ||
1854 | md->read_only = mmc_blk_readonly(card); | ||
1855 | |||
1856 | md->disk = alloc_disk(perdev_minors); | ||
1857 | if (!md->disk) { | ||
1858 | ret = -ENOMEM; | ||
1859 | goto err_kfree; | ||
1860 | } | ||
1861 | |||
1862 | spin_lock_init(&md->lock); | ||
1863 | INIT_LIST_HEAD(&md->part); | ||
1864 | md->usage = 1; | ||
1865 | |||
1866 | ret = mmc_init_queue(&md->queue, card, &md->lock, subname); | ||
1867 | if (ret) | ||
1868 | goto err_putdisk; | ||
1869 | |||
1870 | md->queue.blkdata = md; | ||
1871 | |||
1872 | md->disk->major = MMC_BLOCK_MAJOR; | ||
1873 | md->disk->first_minor = devidx * perdev_minors; | ||
1874 | md->disk->fops = &mmc_bdops; | ||
1875 | md->disk->private_data = md; | ||
1876 | md->disk->queue = md->queue.queue; | ||
1877 | md->parent = parent; | ||
1878 | set_disk_ro(md->disk, md->read_only || default_ro); | ||
1879 | md->disk->flags = GENHD_FL_EXT_DEVT; | ||
1880 | if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT)) | ||
1881 | md->disk->flags |= GENHD_FL_NO_PART_SCAN; | ||
1882 | |||
1883 | /* | ||
1884 | * As discussed on lkml, GENHD_FL_REMOVABLE should: | ||
1885 | * | ||
1886 | * - be set for removable media with permanent block devices | ||
1887 | * - be unset for removable block devices with permanent media | ||
1888 | * | ||
1889 | * Since MMC block devices clearly fall under the second | ||
1890 | * case, we do not set GENHD_FL_REMOVABLE. Userspace | ||
1891 | * should use the block device creation/destruction hotplug | ||
1892 | * messages to tell when the card is present. | ||
1893 | */ | ||
1894 | |||
1895 | snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), | ||
1896 | "mmcblk%u%s", card->host->index, subname ? subname : ""); | ||
1897 | |||
1898 | if (mmc_card_mmc(card)) | ||
1899 | blk_queue_logical_block_size(md->queue.queue, | ||
1900 | card->ext_csd.data_sector_size); | ||
1901 | else | ||
1902 | blk_queue_logical_block_size(md->queue.queue, 512); | ||
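/*
 * e.g. an eMMC with 4KB native sectors reports
 * ext_csd.data_sector_size == 4096 here, while SD cards are always
 * exposed with 512-byte logical blocks.
 */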
1903 | |||
1904 | set_capacity(md->disk, size); | ||
1905 | |||
1906 | if (mmc_host_cmd23(card->host)) { | ||
1907 | if ((mmc_card_mmc(card) && | ||
1908 | card->csd.mmca_vsn >= CSD_SPEC_VER_3) || | ||
1909 | (mmc_card_sd(card) && | ||
1910 | card->scr.cmds & SD_SCR_CMD23_SUPPORT)) | ||
1911 | md->flags |= MMC_BLK_CMD23; | ||
1912 | } | ||
1913 | |||
1914 | if (mmc_card_mmc(card) && | ||
1915 | md->flags & MMC_BLK_CMD23 && | ||
1916 | ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || | ||
1917 | card->ext_csd.rel_sectors)) { | ||
1918 | md->flags |= MMC_BLK_REL_WR; | ||
1919 | blk_queue_write_cache(md->queue.queue, true, true); | ||
1920 | } | ||
1921 | |||
1922 | return md; | ||
1923 | |||
1924 | err_putdisk: | ||
1925 | put_disk(md->disk); | ||
1926 | err_kfree: | ||
1927 | kfree(md); | ||
1928 | out: | ||
1929 | spin_lock(&mmc_blk_lock); | ||
1930 | ida_remove(&mmc_blk_ida, devidx); | ||
1931 | spin_unlock(&mmc_blk_lock); | ||
1932 | return ERR_PTR(ret); | ||
1933 | } | ||
1934 | |||
1935 | static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) | ||
1936 | { | ||
1937 | sector_t size; | ||
1938 | |||
1939 | if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { | ||
1940 | /* | ||
1941 | * The EXT_CSD sector count is in units of 512-byte | ||
1942 | * sectors. | ||
1943 | */ | ||
1944 | size = card->ext_csd.sectors; | ||
1945 | } else { | ||
1946 | /* | ||
1947 | * The CSD capacity field is in units of read_blkbits. | ||
1948 | * set_capacity takes units of 512 bytes. | ||
1949 | */ | ||
1950 | size = (typeof(sector_t))card->csd.capacity | ||
1951 | << (card->csd.read_blkbits - 9); | ||
1952 | } | ||
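/*
 * Worked example of the CSD path: read_blkbits == 10 (1024-byte
 * read blocks) with csd.capacity == 1048576 gives
 * 1048576 << (10 - 9) == 2097152 sectors of 512 bytes, i.e. 1 GiB.
 */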
1953 | |||
1954 | return mmc_blk_alloc_req(card, &card->dev, size, false, NULL, | ||
1955 | MMC_BLK_DATA_AREA_MAIN); | ||
1956 | } | ||
1957 | |||
1958 | static int mmc_blk_alloc_part(struct mmc_card *card, | ||
1959 | struct mmc_blk_data *md, | ||
1960 | unsigned int part_type, | ||
1961 | sector_t size, | ||
1962 | bool default_ro, | ||
1963 | const char *subname, | ||
1964 | int area_type) | ||
1965 | { | ||
1966 | char cap_str[10]; | ||
1967 | struct mmc_blk_data *part_md; | ||
1968 | |||
1969 | part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, | ||
1970 | subname, area_type); | ||
1971 | if (IS_ERR(part_md)) | ||
1972 | return PTR_ERR(part_md); | ||
1973 | part_md->part_type = part_type; | ||
1974 | list_add(&part_md->part, &md->part); | ||
1975 | |||
1976 | string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2, | ||
1977 | cap_str, sizeof(cap_str)); | ||
1978 | pr_info("%s: %s %s partition %u %s\n", | ||
1979 | part_md->disk->disk_name, mmc_card_id(card), | ||
1980 | mmc_card_name(card), part_md->part_type, cap_str); | ||
1981 | return 0; | ||
1982 | } | ||
1983 | |||
1984 | /* | ||
1985 | * MMC physical partitions consist of two boot partitions and up | ||
1986 | * to four general purpose partitions.  For each partition enabled | ||
1987 | * in EXT_CSD a block device will be allocated to access it. | ||
1988 | */ | ||
1989 | |||
1990 | static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) | ||
1991 | { | ||
1992 | int idx, ret = 0; | ||
1993 | |||
1994 | if (!mmc_card_mmc(card)) | ||
1995 | return 0; | ||
1996 | |||
1997 | for (idx = 0; idx < card->nr_parts; idx++) { | ||
1998 | if (card->part[idx].size) { | ||
1999 | ret = mmc_blk_alloc_part(card, md, | ||
2000 | card->part[idx].part_cfg, | ||
2001 | card->part[idx].size >> 9, | ||
2002 | card->part[idx].force_ro, | ||
2003 | card->part[idx].name, | ||
2004 | card->part[idx].area_type); | ||
2005 | if (ret) | ||
2006 | return ret; | ||
2007 | } | ||
2008 | } | ||
2009 | |||
2010 | return ret; | ||
2011 | } | ||
2012 | |||
2013 | static void mmc_blk_remove_req(struct mmc_blk_data *md) | ||
2014 | { | ||
2015 | struct mmc_card *card; | ||
2016 | |||
2017 | if (md) { | ||
2018 | /* | ||
2019 | * Flush remaining requests and free queues. It | ||
2020 | * is freeing the queue that stops new requests | ||
2021 | * from being accepted. | ||
2022 | */ | ||
2023 | card = md->queue.card; | ||
2024 | mmc_cleanup_queue(&md->queue); | ||
2025 | if (md->disk->flags & GENHD_FL_UP) { | ||
2026 | device_remove_file(disk_to_dev(md->disk), &md->force_ro); | ||
2027 | if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && | ||
2028 | card->ext_csd.boot_ro_lockable) | ||
2029 | device_remove_file(disk_to_dev(md->disk), | ||
2030 | &md->power_ro_lock); | ||
2031 | |||
2032 | del_gendisk(md->disk); | ||
2033 | } | ||
2034 | mmc_blk_put(md); | ||
2035 | } | ||
2036 | } | ||
2037 | |||
2038 | static void mmc_blk_remove_parts(struct mmc_card *card, | ||
2039 | struct mmc_blk_data *md) | ||
2040 | { | ||
2041 | struct list_head *pos, *q; | ||
2042 | struct mmc_blk_data *part_md; | ||
2043 | |||
2044 | list_for_each_safe(pos, q, &md->part) { | ||
2045 | part_md = list_entry(pos, struct mmc_blk_data, part); | ||
2046 | list_del(pos); | ||
2047 | mmc_blk_remove_req(part_md); | ||
2048 | } | ||
2049 | } | ||
2050 | |||
2051 | static int mmc_add_disk(struct mmc_blk_data *md) | ||
2052 | { | ||
2053 | int ret; | ||
2054 | struct mmc_card *card = md->queue.card; | ||
2055 | |||
2056 | device_add_disk(md->parent, md->disk); | ||
2057 | md->force_ro.show = force_ro_show; | ||
2058 | md->force_ro.store = force_ro_store; | ||
2059 | sysfs_attr_init(&md->force_ro.attr); | ||
2060 | md->force_ro.attr.name = "force_ro"; | ||
2061 | md->force_ro.attr.mode = S_IRUGO | S_IWUSR; | ||
2062 | ret = device_create_file(disk_to_dev(md->disk), &md->force_ro); | ||
2063 | if (ret) | ||
2064 | goto force_ro_fail; | ||
2065 | |||
2066 | if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && | ||
2067 | card->ext_csd.boot_ro_lockable) { | ||
2068 | umode_t mode; | ||
2069 | |||
2070 | if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS) | ||
2071 | mode = S_IRUGO; | ||
2072 | else | ||
2073 | mode = S_IRUGO | S_IWUSR; | ||
2074 | |||
2075 | md->power_ro_lock.show = power_ro_lock_show; | ||
2076 | md->power_ro_lock.store = power_ro_lock_store; | ||
2077 | sysfs_attr_init(&md->power_ro_lock.attr); | ||
2078 | md->power_ro_lock.attr.mode = mode; | ||
2079 | md->power_ro_lock.attr.name = | ||
2080 | "ro_lock_until_next_power_on"; | ||
2081 | ret = device_create_file(disk_to_dev(md->disk), | ||
2082 | &md->power_ro_lock); | ||
2083 | if (ret) | ||
2084 | goto power_ro_lock_fail; | ||
2085 | } | ||
2086 | return ret; | ||
2087 | |||
2088 | power_ro_lock_fail: | ||
2089 | device_remove_file(disk_to_dev(md->disk), &md->force_ro); | ||
2090 | force_ro_fail: | ||
2091 | del_gendisk(md->disk); | ||
2092 | |||
2093 | return ret; | ||
2094 | } | ||
2095 | |||
2096 | static const struct mmc_fixup blk_fixups[] = | ||
2097 | { | ||
2098 | MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk, | ||
2099 | MMC_QUIRK_INAND_CMD38), | ||
2100 | MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk, | ||
2101 | MMC_QUIRK_INAND_CMD38), | ||
2102 | MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk, | ||
2103 | MMC_QUIRK_INAND_CMD38), | ||
2104 | MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk, | ||
2105 | MMC_QUIRK_INAND_CMD38), | ||
2106 | MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk, | ||
2107 | MMC_QUIRK_INAND_CMD38), | ||
2108 | |||
2109 | /* | ||
2110 | * Some MMC cards experience performance degradation when CMD23 | ||
2111 | * is used instead of CMD12-bounded multiblock transfers. For now | ||
2112 | * we'll blacklist what's known to be bad... | ||
2113 | * - Certain Toshiba cards. | ||
2114 | * | ||
2115 | * N.B. This doesn't affect SD cards. | ||
2116 | */ | ||
2117 | MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc, | ||
2118 | MMC_QUIRK_BLK_NO_CMD23), | ||
2119 | MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc, | ||
2120 | MMC_QUIRK_BLK_NO_CMD23), | ||
2121 | MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, | ||
2122 | MMC_QUIRK_BLK_NO_CMD23), | ||
2123 | MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, | ||
2124 | MMC_QUIRK_BLK_NO_CMD23), | ||
2125 | MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, | ||
2126 | MMC_QUIRK_BLK_NO_CMD23), | ||
2127 | |||
2128 | /* | ||
2129 | * Some MMC cards need longer data read timeout than indicated in CSD. | ||
2130 | */ | ||
2131 | MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, | ||
2132 | MMC_QUIRK_LONG_READ_TIME), | ||
2133 | MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, | ||
2134 | MMC_QUIRK_LONG_READ_TIME), | ||
2135 | |||
2136 | /* | ||
2137 | * On these Samsung MoviNAND parts, performing secure erase or | ||
2138 | * secure trim can result in unrecoverable corruption due to a | ||
2139 | * firmware bug. | ||
2140 | */ | ||
2141 | MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
2142 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
2143 | MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
2144 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
2145 | MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
2146 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
2147 | MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
2148 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
2149 | MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
2150 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
2151 | MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
2152 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
2153 | MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
2154 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
2155 | MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
2156 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
2157 | |||
2158 | /* | ||
2159 | * On some Kingston eMMCs, performing trim can occasionally | ||
2160 | * result in unrecoverable data corruption due to a firmware bug. | ||
2161 | */ | ||
2162 | MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc, | ||
2163 | MMC_QUIRK_TRIM_BROKEN), | ||
2164 | MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc, | ||
2165 | MMC_QUIRK_TRIM_BROKEN), | ||
2166 | |||
2167 | END_FIXUP | ||
2168 | }; | ||
2169 | |||
2170 | static int mmc_blk_probe(struct mmc_card *card) | ||
2171 | { | ||
2172 | struct mmc_blk_data *md, *part_md; | ||
2173 | char cap_str[10]; | ||
2174 | |||
2175 | /* | ||
2176 | * Check that the card supports the command class(es) we need. | ||
2177 | */ | ||
2178 | if (!(card->csd.cmdclass & CCC_BLOCK_READ)) | ||
2179 | return -ENODEV; | ||
2180 | |||
2181 | mmc_fixup_device(card, blk_fixups); | ||
2182 | |||
2183 | md = mmc_blk_alloc(card); | ||
2184 | if (IS_ERR(md)) | ||
2185 | return PTR_ERR(md); | ||
2186 | |||
2187 | string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2, | ||
2188 | cap_str, sizeof(cap_str)); | ||
2189 | pr_info("%s: %s %s %s %s\n", | ||
2190 | md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), | ||
2191 | cap_str, md->read_only ? "(ro)" : ""); | ||
2192 | |||
2193 | if (mmc_blk_alloc_parts(card, md)) | ||
2194 | goto out; | ||
2195 | |||
2196 | dev_set_drvdata(&card->dev, md); | ||
2197 | |||
2198 | if (mmc_add_disk(md)) | ||
2199 | goto out; | ||
2200 | |||
2201 | list_for_each_entry(part_md, &md->part, part) { | ||
2202 | if (mmc_add_disk(part_md)) | ||
2203 | goto out; | ||
2204 | } | ||
2205 | |||
2206 | pm_runtime_set_autosuspend_delay(&card->dev, 3000); | ||
2207 | pm_runtime_use_autosuspend(&card->dev); | ||
2208 | |||
2209 | /* | ||
2210 | * Don't enable runtime PM for SD-combo cards here. Leave that | ||
2211 | * decision to be taken during the SDIO init sequence instead. | ||
2212 | */ | ||
2213 | if (card->type != MMC_TYPE_SD_COMBO) { | ||
2214 | pm_runtime_set_active(&card->dev); | ||
2215 | pm_runtime_enable(&card->dev); | ||
2216 | } | ||
2217 | |||
2218 | return 0; | ||
2219 | |||
2220 | out: | ||
2221 | mmc_blk_remove_parts(card, md); | ||
2222 | mmc_blk_remove_req(md); | ||
2223 | return 0; | ||
2224 | } | ||
2225 | |||
2226 | static void mmc_blk_remove(struct mmc_card *card) | ||
2227 | { | ||
2228 | struct mmc_blk_data *md = dev_get_drvdata(&card->dev); | ||
2229 | |||
2230 | mmc_blk_remove_parts(card, md); | ||
2231 | pm_runtime_get_sync(&card->dev); | ||
2232 | mmc_claim_host(card->host); | ||
2233 | mmc_blk_part_switch(card, md); | ||
2234 | mmc_release_host(card->host); | ||
2235 | if (card->type != MMC_TYPE_SD_COMBO) | ||
2236 | pm_runtime_disable(&card->dev); | ||
2237 | pm_runtime_put_noidle(&card->dev); | ||
2238 | mmc_blk_remove_req(md); | ||
2239 | dev_set_drvdata(&card->dev, NULL); | ||
2240 | } | ||
2241 | |||
2242 | static int _mmc_blk_suspend(struct mmc_card *card) | ||
2243 | { | ||
2244 | struct mmc_blk_data *part_md; | ||
2245 | struct mmc_blk_data *md = dev_get_drvdata(&card->dev); | ||
2246 | |||
2247 | if (md) { | ||
2248 | mmc_queue_suspend(&md->queue); | ||
2249 | list_for_each_entry(part_md, &md->part, part) { | ||
2250 | mmc_queue_suspend(&part_md->queue); | ||
2251 | } | ||
2252 | } | ||
2253 | return 0; | ||
2254 | } | ||
2255 | |||
2256 | static void mmc_blk_shutdown(struct mmc_card *card) | ||
2257 | { | ||
2258 | _mmc_blk_suspend(card); | ||
2259 | } | ||
2260 | |||
2261 | #ifdef CONFIG_PM_SLEEP | ||
2262 | static int mmc_blk_suspend(struct device *dev) | ||
2263 | { | ||
2264 | struct mmc_card *card = mmc_dev_to_card(dev); | ||
2265 | |||
2266 | return _mmc_blk_suspend(card); | ||
2267 | } | ||
2268 | |||
2269 | static int mmc_blk_resume(struct device *dev) | ||
2270 | { | ||
2271 | struct mmc_blk_data *part_md; | ||
2272 | struct mmc_blk_data *md = dev_get_drvdata(dev); | ||
2273 | |||
2274 | if (md) { | ||
2275 | /* | ||
2276 | * Resume involves the card going into idle state, | ||
2277 | * so the current partition is always the main one. | ||
2278 | */ | ||
2279 | md->part_curr = md->part_type; | ||
2280 | mmc_queue_resume(&md->queue); | ||
2281 | list_for_each_entry(part_md, &md->part, part) { | ||
2282 | mmc_queue_resume(&part_md->queue); | ||
2283 | } | ||
2284 | } | ||
2285 | return 0; | ||
2286 | } | ||
2287 | #endif | ||
2288 | |||
2289 | static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume); | ||
2290 | |||
2291 | static struct mmc_driver mmc_driver = { | ||
2292 | .drv = { | ||
2293 | .name = "mmcblk", | ||
2294 | .pm = &mmc_blk_pm_ops, | ||
2295 | }, | ||
2296 | .probe = mmc_blk_probe, | ||
2297 | .remove = mmc_blk_remove, | ||
2298 | .shutdown = mmc_blk_shutdown, | ||
2299 | }; | ||
2300 | |||
2301 | static int __init mmc_blk_init(void) | ||
2302 | { | ||
2303 | int res; | ||
2304 | |||
2305 | if (perdev_minors != CONFIG_MMC_BLOCK_MINORS) | ||
2306 | pr_info("mmcblk: using %d minors per device\n", perdev_minors); | ||
2307 | |||
2308 | max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors); | ||
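/*
 * e.g. with MINORBITS == 20 and the default of 8 minors per device,
 * (1 << 20) / 8 == 131072, so MAX_DEVICES (256) is the effective cap.
 */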
2309 | |||
2310 | res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); | ||
2311 | if (res) | ||
2312 | goto out; | ||
2313 | |||
2314 | res = mmc_register_driver(&mmc_driver); | ||
2315 | if (res) | ||
2316 | goto out2; | ||
2317 | |||
2318 | return 0; | ||
2319 | out2: | ||
2320 | unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); | ||
2321 | out: | ||
2322 | return res; | ||
2323 | } | ||
2324 | |||
2325 | static void __exit mmc_blk_exit(void) | ||
2326 | { | ||
2327 | mmc_unregister_driver(&mmc_driver); | ||
2328 | unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); | ||
2329 | } | ||
2330 | |||
2331 | module_init(mmc_blk_init); | ||
2332 | module_exit(mmc_blk_exit); | ||
2333 | |||
2334 | MODULE_LICENSE("GPL"); | ||
2335 | MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver"); | ||
2336 | |||