diff options
author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2012-08-06 12:48:31 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2012-08-06 12:48:31 -0400 |
commit | c87985a3ce723995fc7b25e598238d67154108a1 (patch) | |
tree | e60def1b77c25c1d74180f62e8a5603f9826f209 /drivers/mmc/card | |
parent | d155255a344c417acad74156654295a2964e6b81 (diff) | |
parent | 0d7614f09c1ebdbaa1599a5aba7593f147bf96ee (diff) |
Merge tty-next into 3.6-rc1
This handles the merge issue in:
arch/um/drivers/line.c
arch/um/drivers/line.h
And resolves the duplicate patches that were in both trees due to the
tty-next branch not getting merged into 3.6-rc1.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/mmc/card')
-rw-r--r-- | drivers/mmc/card/block.c | 36 |
1 files changed, 9 insertions, 27 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 276d21ce6bc1..f1c84decb192 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c | |||
@@ -850,9 +850,7 @@ out: | |||
850 | goto retry; | 850 | goto retry; |
851 | if (!err) | 851 | if (!err) |
852 | mmc_blk_reset_success(md, type); | 852 | mmc_blk_reset_success(md, type); |
853 | spin_lock_irq(&md->lock); | 853 | blk_end_request(req, err, blk_rq_bytes(req)); |
854 | __blk_end_request(req, err, blk_rq_bytes(req)); | ||
855 | spin_unlock_irq(&md->lock); | ||
856 | 854 | ||
857 | return err ? 0 : 1; | 855 | return err ? 0 : 1; |
858 | } | 856 | } |
@@ -934,9 +932,7 @@ out_retry: | |||
934 | if (!err) | 932 | if (!err) |
935 | mmc_blk_reset_success(md, type); | 933 | mmc_blk_reset_success(md, type); |
936 | out: | 934 | out: |
937 | spin_lock_irq(&md->lock); | 935 | blk_end_request(req, err, blk_rq_bytes(req)); |
938 | __blk_end_request(req, err, blk_rq_bytes(req)); | ||
939 | spin_unlock_irq(&md->lock); | ||
940 | 936 | ||
941 | return err ? 0 : 1; | 937 | return err ? 0 : 1; |
942 | } | 938 | } |
@@ -951,9 +947,7 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) | |||
951 | if (ret) | 947 | if (ret) |
952 | ret = -EIO; | 948 | ret = -EIO; |
953 | 949 | ||
954 | spin_lock_irq(&md->lock); | 950 | blk_end_request_all(req, ret); |
955 | __blk_end_request_all(req, ret); | ||
956 | spin_unlock_irq(&md->lock); | ||
957 | 951 | ||
958 | return ret ? 0 : 1; | 952 | return ret ? 0 : 1; |
959 | } | 953 | } |
@@ -1252,14 +1246,10 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, | |||
1252 | 1246 | ||
1253 | blocks = mmc_sd_num_wr_blocks(card); | 1247 | blocks = mmc_sd_num_wr_blocks(card); |
1254 | if (blocks != (u32)-1) { | 1248 | if (blocks != (u32)-1) { |
1255 | spin_lock_irq(&md->lock); | 1249 | ret = blk_end_request(req, 0, blocks << 9); |
1256 | ret = __blk_end_request(req, 0, blocks << 9); | ||
1257 | spin_unlock_irq(&md->lock); | ||
1258 | } | 1250 | } |
1259 | } else { | 1251 | } else { |
1260 | spin_lock_irq(&md->lock); | 1252 | ret = blk_end_request(req, 0, brq->data.bytes_xfered); |
1261 | ret = __blk_end_request(req, 0, brq->data.bytes_xfered); | ||
1262 | spin_unlock_irq(&md->lock); | ||
1263 | } | 1253 | } |
1264 | return ret; | 1254 | return ret; |
1265 | } | 1255 | } |
@@ -1311,10 +1301,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | |||
1311 | * A block was successfully transferred. | 1301 | * A block was successfully transferred. |
1312 | */ | 1302 | */ |
1313 | mmc_blk_reset_success(md, type); | 1303 | mmc_blk_reset_success(md, type); |
1314 | spin_lock_irq(&md->lock); | 1304 | ret = blk_end_request(req, 0, |
1315 | ret = __blk_end_request(req, 0, | ||
1316 | brq->data.bytes_xfered); | 1305 | brq->data.bytes_xfered); |
1317 | spin_unlock_irq(&md->lock); | ||
1318 | /* | 1306 | /* |
1319 | * If the blk_end_request function returns non-zero even | 1307 | * If the blk_end_request function returns non-zero even |
1320 | * though all data has been transferred and no errors | 1308 | * though all data has been transferred and no errors |
@@ -1364,10 +1352,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | |||
1364 | * time, so we only reach here after trying to | 1352 | * time, so we only reach here after trying to |
1365 | * read a single sector. | 1353 | * read a single sector. |
1366 | */ | 1354 | */ |
1367 | spin_lock_irq(&md->lock); | 1355 | ret = blk_end_request(req, -EIO, |
1368 | ret = __blk_end_request(req, -EIO, | ||
1369 | brq->data.blksz); | 1356 | brq->data.blksz); |
1370 | spin_unlock_irq(&md->lock); | ||
1371 | if (!ret) | 1357 | if (!ret) |
1372 | goto start_new_req; | 1358 | goto start_new_req; |
1373 | break; | 1359 | break; |
@@ -1388,12 +1374,10 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | |||
1388 | return 1; | 1374 | return 1; |
1389 | 1375 | ||
1390 | cmd_abort: | 1376 | cmd_abort: |
1391 | spin_lock_irq(&md->lock); | ||
1392 | if (mmc_card_removed(card)) | 1377 | if (mmc_card_removed(card)) |
1393 | req->cmd_flags |= REQ_QUIET; | 1378 | req->cmd_flags |= REQ_QUIET; |
1394 | while (ret) | 1379 | while (ret) |
1395 | ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); | 1380 | ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); |
1396 | spin_unlock_irq(&md->lock); | ||
1397 | 1381 | ||
1398 | start_new_req: | 1382 | start_new_req: |
1399 | if (rqc) { | 1383 | if (rqc) { |
@@ -1417,9 +1401,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
1417 | ret = mmc_blk_part_switch(card, md); | 1401 | ret = mmc_blk_part_switch(card, md); |
1418 | if (ret) { | 1402 | if (ret) { |
1419 | if (req) { | 1403 | if (req) { |
1420 | spin_lock_irq(&md->lock); | 1404 | blk_end_request_all(req, -EIO); |
1421 | __blk_end_request_all(req, -EIO); | ||
1422 | spin_unlock_irq(&md->lock); | ||
1423 | } | 1405 | } |
1424 | ret = 0; | 1406 | ret = 0; |
1425 | goto out; | 1407 | goto out; |