author     Kevin Hilman <khilman@linaro.org>    2014-09-05 11:01:52 -0400
committer  Kevin Hilman <khilman@linaro.org>    2014-09-05 11:05:56 -0400
commit     95f6e8142d82789eca977ccdd6153a48b343fde9 (patch)
tree       ef39263a5bbc5c497d8603b77d2db1316a2845fc /block
parent     28c2260f13c8ea3be6fcba1609502874f868284b (diff)
parent     c7cc9ba11f8c09a4d12af0fc4aa9f9b026cdd354 (diff)
Merge tag 'omap-fixes-against-v3.17-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap into fixes
Merge "omap fixes against v3.17-rc3" from Tony Lindgren:
A few fixes for omaps, mostly to get various devices such as I2C,
NAND, DDR3 and USB working properly on the new am437x and dra7
hardware. There's also a clock fix for omap3.

Also included are two minor cosmetic fixes for the recently added
hardware support that are not strictly fixes: one downgrades a GPMC
warning into a debug statement, and the other fixes the confusing
comments for the dra7-evm spi1 mux.
Note that these are all .dts changes except for a GPMC change.
* tag 'omap-fixes-against-v3.17-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap: (255 commits)
ARM: dts: dra7-evm: Add vtt regulator support
ARM: dts: dra7-evm: Fix spi1 mux documentation
ARM: dts: am43x-epos-evm: Disable QSPI to prevent conflict with GPMC-NAND
ARM: OMAP2+: gpmc: Don't complain if wait pin is used without r/w monitoring
ARM: dts: am43xx-epos-evm: Don't use read/write wait monitoring
ARM: dts: am437x-gp-evm: Don't use read/write wait monitoring
ARM: dts: am437x-gp-evm: Use BCH16 ECC scheme instead of BCH8
ARM: dts: am43x-epos-evm: Use BCH16 ECC scheme instead of BCH8
ARM: dts: am4372: fix USB regs size
ARM: dts: am437x-gp: switch i2c0 to 100KHz
ARM: dts: dra7-evm: Fix 8th NAND partition's name
ARM: dts: dra7-evm: Fix i2c3 pinmux and frequency
Linux 3.17-rc3
...
Signed-off-by: Kevin Hilman <khilman@linaro.org>
Diffstat (limited to 'block')

 -rw-r--r--  block/bio-integrity.c |  2
 -rw-r--r--  block/blk-core.c      |  1
 -rw-r--r--  block/blk-mq.c        | 36
 -rw-r--r--  block/cfq-iosched.c   | 19
 -rw-r--r--  block/scsi_ioctl.c    | 40

 5 files changed, 66 insertions(+), 32 deletions(-)
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index bc423f7b02da..f14b4abbebd8 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -520,7 +520,7 @@ void bio_integrity_endio(struct bio *bio, int error)
 	 */
 	if (error) {
 		bio->bi_end_io = bip->bip_end_io;
-		bio_endio(bio, error);
+		bio_endio_nodec(bio, error);
 
 		return;
 	}
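The bio-integrity hunk swaps bio_endio() for bio_endio_nodec() on the error path. A minimal sketch of the hand-off pattern, assuming the 3.17-era bio-chaining rule that bi_remaining is decremented once in order to invoke an end_io callback (my_verify_endio and the comments are illustrative, not from the patch):

/* Illustrative only: an end_io callback that restores the caller's
 * completion handler and re-ends the bio. Because bio_endio() already
 * dropped bi_remaining once to reach this callback, re-ending must use
 * the _nodec variant so the count is not decremented a second time. */
static void my_verify_endio(struct bio *bio, int error)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (error) {
		bio->bi_end_io = bip->bip_end_io;  /* restore original handler */
		bio_endio_nodec(bio, error);       /* skip bi_remaining decrement */
		return;
	}

	/* ... otherwise defer integrity verification to a workqueue ... */
}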
diff --git a/block/blk-core.c b/block/blk-core.c
index c359d72e9d76..bf930f481d43 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1252,7 +1252,6 @@ void blk_rq_set_block_pc(struct request *rq)
 	rq->__sector = (sector_t) -1;
 	rq->bio = rq->biotail = NULL;
 	memset(rq->__cmd, 0, sizeof(rq->__cmd));
-	rq->cmd = rq->__cmd;
 }
 EXPORT_SYMBOL(blk_rq_set_block_pc);
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5189cb1e478a..4aac82615a46 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -112,18 +112,22 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
  */
 void blk_mq_freeze_queue(struct request_queue *q)
 {
+	bool freeze;
+
 	spin_lock_irq(q->queue_lock);
-	q->mq_freeze_depth++;
+	freeze = !q->mq_freeze_depth++;
 	spin_unlock_irq(q->queue_lock);
 
-	percpu_ref_kill(&q->mq_usage_counter);
-	blk_mq_run_queues(q, false);
+	if (freeze) {
+		percpu_ref_kill(&q->mq_usage_counter);
+		blk_mq_run_queues(q, false);
+	}
 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
 {
-	bool wake = false;
+	bool wake;
 
 	spin_lock_irq(q->queue_lock);
 	wake = !--q->mq_freeze_depth;
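This change makes blk_mq_freeze_queue() safe to nest: only the transition from depth 0 to 1 kills the percpu usage counter, mirroring how blk_mq_unfreeze_queue() only wakes waiters on the transition back to 0. A standalone sketch of the gating pattern, with hypothetical names (freeze_depth, do_freeze and do_unfreeze are not kernel symbols, and the lock that protects the counter in the real code is omitted):

/* Hypothetical illustration of first-freezer/last-unfreezer gating.
 * Only the 0 -> 1 transition performs the expensive freeze work, and
 * only the 1 -> 0 transition undoes it; nested calls just count. */
#include <stdbool.h>

static int freeze_depth;	/* protected by a spinlock in real code */

static void do_freeze(void)   { /* kill ref, quiesce queue, ... */ }
static void do_unfreeze(void) { /* reinit ref, wake waiters, ... */ }

void freeze(void)
{
	bool first = !freeze_depth++;	/* true only on 0 -> 1 */

	if (first)
		do_freeze();
}

void unfreeze(void)
{
	bool last = !--freeze_depth;	/* true only on 1 -> 0 */

	if (last)
		do_unfreeze();
}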
@@ -172,6 +176,8 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	/* tag was already set */
 	rq->errors = 0;
 
+	rq->cmd = rq->__cmd;
+
 	rq->extra_len = 0;
 	rq->sense_len = 0;
 	rq->resid_len = 0;
@@ -1068,13 +1074,17 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
 	blk_account_io_start(rq, 1);
 }
 
+static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
+{
+	return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
+		!blk_queue_nomerges(hctx->queue);
+}
+
 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
 					 struct blk_mq_ctx *ctx,
 					 struct request *rq, struct bio *bio)
 {
-	struct request_queue *q = hctx->queue;
-
-	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
+	if (!hctx_allow_merges(hctx)) {
 		blk_mq_bio_to_request(rq, bio);
 		spin_lock(&ctx->lock);
 insert_rq:
@@ -1082,6 +1092,8 @@ insert_rq:
 		spin_unlock(&ctx->lock);
 		return false;
 	} else {
+		struct request_queue *q = hctx->queue;
+
 		spin_lock(&ctx->lock);
 		if (!blk_mq_attempt_merge(q, ctx, bio)) {
 			blk_mq_bio_to_request(rq, bio);
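The new hctx_allow_merges() helper folds the queue's nomerges setting into the merge decision, so a queue with merging disabled administratively (e.g. via /sys/block/<dev>/queue/nomerges) skips the per-ctx merge attempt even when the hardware context advertises BLK_MQ_F_SHOULD_MERGE. A hedged sketch of how a 3.17-era blk-mq driver opts in to merging when declaring its tag set; the flag is the only field this diff bears on, the rest is generic boilerplate and my_mq_ops is hypothetical:

/* Sketch: a driver requests bio merging via its tag set flags. After
 * this change, merges still only happen if the queue's nomerges
 * setting permits them, as checked by hctx_allow_merges(). */
static struct blk_mq_tag_set my_tag_set = {
	.ops		= &my_mq_ops,		/* hypothetical ops table */
	.nr_hw_queues	= 1,
	.queue_depth	= 64,
	.numa_node	= NUMA_NO_NODE,
	.flags		= BLK_MQ_F_SHOULD_MERGE,	/* allow request merging */
};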
@@ -1574,7 +1586,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
 		hctx->tags = set->tags[i];
 
 		/*
-		 * Allocate space for all possible cpus to avoid allocation in
+		 * Allocate space for all possible cpus to avoid allocation at
 		 * runtime
 		 */
 		hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
@@ -1662,8 +1674,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		/*
-		 * If not software queues are mapped to this hardware queue,
-		 * disable it and free the request entries
+		 * If no software queues are mapped to this hardware queue,
+		 * disable it and free the request entries.
 		 */
 		if (!hctx->nr_ctx) {
 			struct blk_mq_tag_set *set = q->tag_set;
@@ -1713,14 +1725,10 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
 
-	blk_mq_freeze_queue(q);
-
 	mutex_lock(&set->tag_list_lock);
 	list_del_init(&q->tag_set_list);
 	blk_mq_update_tag_set_depth(set);
 	mutex_unlock(&set->tag_list_lock);
-
-	blk_mq_unfreeze_queue(q);
 }
 
 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index cadc37841744..3f31cf9508e6 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1272,15 +1272,22 @@ __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 	rb_insert_color(&cfqg->rb_node, &st->rb);
 }
 
+/*
+ * This has to be called only on activation of cfqg
+ */
 static void
 cfq_update_group_weight(struct cfq_group *cfqg)
 {
-	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
-
 	if (cfqg->new_weight) {
 		cfqg->weight = cfqg->new_weight;
 		cfqg->new_weight = 0;
 	}
+}
+
+static void
+cfq_update_group_leaf_weight(struct cfq_group *cfqg)
+{
+	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
 
 	if (cfqg->new_leaf_weight) {
 		cfqg->leaf_weight = cfqg->new_leaf_weight;
@@ -1299,7 +1306,12 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 	/* add to the service tree */
 	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
 
-	cfq_update_group_weight(cfqg);
+	/*
+	 * Update leaf_weight. We cannot update weight at this point
+	 * because cfqg might already have been activated and is
+	 * contributing its current weight to the parent's child_weight.
+	 */
+	cfq_update_group_leaf_weight(cfqg);
 	__cfq_group_service_tree_add(st, cfqg);
 
 	/*
@@ -1323,6 +1335,7 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 	 */
 	while ((parent = cfqg_parent(pos))) {
 		if (propagate) {
+			cfq_update_group_weight(pos);
 			propagate = !parent->nr_active++;
 			parent->children_weight += pos->weight;
 		}
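Taken together, the three cfq hunks split the old cfq_update_group_weight() in two: leaf_weight is applied when the group itself is activated, while weight is applied later, during the walk up the ancestors, right before the group's weight is folded into parent->children_weight. A self-contained, heavily simplified model of the resulting ordering; field names mirror the diff, but the rb-tree bookkeeping and the 'propagate' gating that stops the walk at already-active ancestors are omitted:

/* Simplified two-phase weight update: pending leaf_weight is safe to
 * apply at activation (nobody is summing it yet), but pending weight
 * may only change at the moment it is (re)added to the parent's sum. */
struct group {
	struct group *parent;
	unsigned int weight, new_weight;		/* applied while climbing */
	unsigned int leaf_weight, new_leaf_weight;	/* applied at activation */
	unsigned int children_weight;
};

static void apply_pending_weight(struct group *g)
{
	if (g->new_weight) {
		g->weight = g->new_weight;
		g->new_weight = 0;
	}
}

void activate(struct group *g)
{
	struct group *pos, *parent;

	if (g->new_leaf_weight) {		/* safe: g not yet on a tree */
		g->leaf_weight = g->new_leaf_weight;
		g->new_leaf_weight = 0;
	}

	for (pos = g; (parent = pos->parent); pos = parent) {
		/* an already-active group is contributing its current
		 * weight to parent->children_weight, so updating any
		 * earlier would corrupt the parent's sum */
		apply_pending_weight(pos);
		parent->children_weight += pos->weight;
	}
}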
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 51bf5155ee75..9b8eaeca6a79 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -279,7 +279,6 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
 	r = blk_rq_unmap_user(bio);
 	if (!ret)
 		ret = r;
-	blk_put_request(rq);
 
 	return ret;
 }
@@ -297,8 +296,6 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 
 	if (hdr->interface_id != 'S')
 		return -EINVAL;
-	if (hdr->cmd_len > BLK_MAX_CDB)
-		return -EINVAL;
 
 	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
 		return -EIO;
@@ -317,16 +314,23 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 	if (hdr->flags & SG_FLAG_Q_AT_HEAD)
 		at_head = 1;
 
+	ret = -ENOMEM;
 	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
 	if (!rq)
-		return -ENOMEM;
+		goto out;
 	blk_rq_set_block_pc(rq);
 
-	if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
-		blk_put_request(rq);
-		return -EFAULT;
+	if (hdr->cmd_len > BLK_MAX_CDB) {
+		rq->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
+		if (!rq->cmd)
+			goto out_put_request;
 	}
 
+	ret = -EFAULT;
+	if (blk_fill_sghdr_rq(q, rq, hdr, mode))
+		goto out_free_cdb;
+
+	ret = 0;
 	if (hdr->iovec_count) {
 		size_t iov_data_len;
 		struct iovec *iov = NULL;
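This is the functional core of the scsi_ioctl.c changes: SG_IO no longer rejects CDBs longer than BLK_MAX_CDB (16 bytes) outright, but allocates a separate buffer for the oversized CDB instead. That only works because blk_rq_set_block_pc() (per the blk-core.c hunk above) no longer resets rq->cmd back to the inline rq->__cmd array. A hedged userspace sketch of what this enables; the device path, opcode, and function name are illustrative only:

/* Issue a 32-byte CDB through the block layer's SG_IO path, e.g. on
 * /dev/sdX. Before this series, any cmd_len > 16 failed with EINVAL;
 * now the kernel allocates a buffer for the oversized CDB. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

int send_long_cdb(const char *dev)
{
	unsigned char cdb[32] = { 0x7f };	/* variable-length CDB opcode */
	struct sg_io_hdr hdr;
	int fd, ret;

	fd = open(dev, O_RDWR);
	if (fd < 0)
		return -1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.cmd_len = sizeof(cdb);		/* 32 > 16: newly accepted */
	hdr.cmdp = cdb;
	hdr.dxfer_direction = SG_DXFER_NONE;

	ret = ioctl(fd, SG_IO, &hdr);		/* 0 on transport success */
	close(fd);
	return ret;
}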
@@ -335,7 +339,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 					    0, NULL, &iov);
 		if (ret < 0) {
 			kfree(iov);
-			goto out;
+			goto out_free_cdb;
 		}
 
 		iov_data_len = ret;
@@ -358,7 +362,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 				      GFP_KERNEL);
 
 	if (ret)
-		goto out;
+		goto out_free_cdb;
 
 	bio = rq->bio;
 	memset(sense, 0, sizeof(sense));
@@ -376,9 +380,14 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 
 	hdr->duration = jiffies_to_msecs(jiffies - start_time);
 
-	return blk_complete_sghdr_rq(rq, hdr, bio);
-out:
+	ret = blk_complete_sghdr_rq(rq, hdr, bio);
+
+out_free_cdb:
+	if (rq->cmd != rq->__cmd)
+		kfree(rq->cmd);
+out_put_request:
 	blk_put_request(rq);
+out:
 	return ret;
 }
 
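The reworked tail of sg_io() is a textbook tiered-goto unwind: each later failure point jumps to a label that releases only what has been acquired so far, and the success path now falls through the same labels, so the possibly-kzalloc'd CDB is always freed. A generic, self-contained sketch of the pattern with hypothetical names (acquire_a, acquire_b, fill, use are not kernel functions):

/* Generic tiered-unwind sketch: each acquisition gets a matching
 * label, and failures jump to the label that frees only what already
 * exists. Success falls through the full unwind too. */
int do_io(void)
{
	int ret = -1;			/* stands in for -ENOMEM */
	struct a *a;
	struct b *b = NULL;

	a = acquire_a();
	if (!a)
		goto out;

	if (needs_big_buffer()) {
		b = acquire_b();	/* optional oversized buffer */
		if (!b)
			goto out_put_a;
	}

	ret = -2;			/* stands in for -EFAULT */
	if (fill(a, b))
		goto out_free_b;

	ret = use(a, b);		/* success falls through the unwind */

out_free_b:
	free_b(b);			/* no-op when b was never allocated */
out_put_a:
	release_a(a);
out:
	return ret;
}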
@@ -448,6 +457,11 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 	}
 
 	rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
+	if (!rq) {
+		err = -ENOMEM;
+		goto error;
+	}
+	blk_rq_set_block_pc(rq);
 
 	cmdlen = COMMAND_SIZE(opcode);
 
@@ -501,7 +515,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 	memset(sense, 0, sizeof(sense));
 	rq->sense = sense;
 	rq->sense_len = 0;
-	blk_rq_set_block_pc(rq);
 
 	blk_execute_rq(q, disk, rq, 0);
 
@@ -521,7 +534,8 @@ out:
 
 error:
 	kfree(buffer);
-	blk_put_request(rq);
+	if (rq)
+		blk_put_request(rq);
 	return err;
 }
 EXPORT_SYMBOL_GPL(sg_scsi_ioctl);