author     Anton Ivanov <anton.ivanov@cambridgegreys.com>  2018-11-14 13:41:06 -0500
committer  Richard Weinberger <richard@nod.at>  2018-12-27 16:48:20 -0500
commit     a43c83161a5ec1631a54338dd9b734b3cdce8d9a (patch)
tree       931f54fb70e6afeab568ffb4a5cbe60dd1967ec6 /arch/um/drivers
parent     550ed0e2036663b35cec12374b835444f9c60454 (diff)
um: Switch to block-mq constants in the UML UBD driver
Switch to block-mq constants for commands, error codes, and various computations.

Signed-off-by: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
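The new UBD_MAX_REQUEST bound follows from the width of the per-request sector_mask: one bit per 512-byte sector, and 8 * sizeof(long) bits in a long, hence 64 sectors or 32K on a 64-bit host. Below is a minimal userspace sketch of that arithmetic; SECTOR_SHIFT and SECTOR_SIZE are redefined locally here for illustration, whereas in the kernel they come from the block layer headers:

#include <stdio.h>

/* Local stand-ins for the block-layer constants the patch switches to. */
#define SECTOR_SHIFT 9
#define SECTOR_SIZE  (1 << SECTOR_SHIFT)  /* 512 bytes */

/* One sector_mask bit per sector, as in the patch. */
#define UBD_MAX_REQUEST (8 * sizeof(long))

int main(void)
{
        /* On a 64-bit host: 64 sectors * 512 bytes = 32768 bytes (32K). */
        printf("max request: %zu sectors = %zu bytes\n",
               UBD_MAX_REQUEST, UBD_MAX_REQUEST * SECTOR_SIZE);
        return 0;
}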
Diffstat (limited to 'arch/um/drivers')
-rw-r--r--  arch/um/drivers/ubd_kern.c  66
1 file changed, 38 insertions, 28 deletions
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 28c40624bcb6..3a8a3d403ef8 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -43,11 +43,11 @@
 #include <os.h>
 #include "cow.h"
 
-enum ubd_req { UBD_READ, UBD_WRITE, UBD_FLUSH };
+/* Max request size is determined by sector mask - 32K */
+#define UBD_MAX_REQUEST (8 * sizeof(long))
 
 struct io_thread_req {
 	struct request *req;
-	enum ubd_req op;
 	int fds[2];
 	unsigned long offsets[2];
 	unsigned long long offset;
@@ -511,15 +511,13 @@ static void ubd_handler(void)
 		}
 		for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
 			struct io_thread_req *io_req = (*irq_req_buffer)[count];
-			int err = io_req->error ? BLK_STS_IOERR : BLK_STS_OK;
 
-			if (!blk_update_request(io_req->req, err, io_req->length))
-				__blk_mq_end_request(io_req->req, err);
+			if (!blk_update_request(io_req->req, io_req->error, io_req->length))
+				__blk_mq_end_request(io_req->req, io_req->error);
 
 			kfree(io_req);
 		}
 	}
-
 	reactivate_fd(thread_fd, UBD_IRQ);
 }
 
@@ -789,7 +787,7 @@ static int ubd_open_dev(struct ubd *ubd_dev)
 
 	if((fd == -ENOENT) && create_cow){
 		fd = create_cow_file(ubd_dev->file, ubd_dev->cow.file,
-				     ubd_dev->openflags, 1 << 9, PAGE_SIZE,
+				     ubd_dev->openflags, SECTOR_SIZE, PAGE_SIZE,
 				     &ubd_dev->cow.bitmap_offset,
 				     &ubd_dev->cow.bitmap_len,
 				     &ubd_dev->cow.data_offset);
@@ -830,6 +828,7 @@ static int ubd_open_dev(struct ubd *ubd_dev)
 		if(err < 0) goto error;
 		ubd_dev->cow.fd = err;
 	}
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, ubd_dev->queue);
 	return 0;
  error:
 	os_close_file(ubd_dev->fd);
@@ -882,7 +881,7 @@ static int ubd_disk_register(int major, u64 size, int unit,
 	return 0;
 }
 
-#define ROUND_BLOCK(n) ((n + ((1 << 9) - 1)) & (-1 << 9))
+#define ROUND_BLOCK(n) ((n + (SECTOR_SIZE - 1)) & (-SECTOR_SIZE))
 
 static const struct blk_mq_ops ubd_mq_ops = {
 	.queue_rq = ubd_queue_rq,
@@ -1234,10 +1233,10 @@ static void cowify_bitmap(__u64 io_offset, int length, unsigned long *cow_mask,
 			  __u64 bitmap_offset, unsigned long *bitmap_words,
 			  __u64 bitmap_len)
 {
-	__u64 sector = io_offset >> 9;
+	__u64 sector = io_offset >> SECTOR_SHIFT;
 	int i, update_bitmap = 0;
 
-	for(i = 0; i < length >> 9; i++){
+	for (i = 0; i < length >> SECTOR_SHIFT; i++) {
 		if(cow_mask != NULL)
 			ubd_set_bit(i, (unsigned char *) cow_mask);
 		if(ubd_test_bit(sector + i, (unsigned char *) bitmap))
@@ -1271,14 +1270,14 @@ static void cowify_bitmap(__u64 io_offset, int length, unsigned long *cow_mask,
 static void cowify_req(struct io_thread_req *req, unsigned long *bitmap,
 		       __u64 bitmap_offset, __u64 bitmap_len)
 {
-	__u64 sector = req->offset >> 9;
+	__u64 sector = req->offset >> SECTOR_SHIFT;
 	int i;
 
-	if(req->length > (sizeof(req->sector_mask) * 8) << 9)
+	if (req->length > (sizeof(req->sector_mask) * 8) << SECTOR_SHIFT)
 		panic("Operation too long");
 
-	if(req->op == UBD_READ) {
-		for(i = 0; i < req->length >> 9; i++){
+	if (req_op(req->req) == REQ_OP_READ) {
+		for (i = 0; i < req->length >> SECTOR_SHIFT; i++) {
 			if(ubd_test_bit(sector + i, (unsigned char *) bitmap))
 				ubd_set_bit(i, (unsigned char *)
 					    &req->sector_mask);
@@ -1307,19 +1306,16 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
 	io_req->fds[0] = dev->fd;
 	io_req->error = 0;
 
-	if (req_op(req) == REQ_OP_FLUSH) {
-		io_req->op = UBD_FLUSH;
-	} else {
+	if (req_op(req) != REQ_OP_FLUSH) {
 		io_req->fds[1] = dev->fd;
 		io_req->cow_offset = -1;
 		io_req->offset = off;
 		io_req->length = bvec->bv_len;
 		io_req->sector_mask = 0;
-		io_req->op = rq_data_dir(req) == READ ? UBD_READ : UBD_WRITE;
 		io_req->offsets[0] = 0;
 		io_req->offsets[1] = dev->cow.data_offset;
 		io_req->buffer = page_address(bvec->bv_page) + bvec->bv_offset;
-		io_req->sectorsize = 1 << 9;
+		io_req->sectorsize = SECTOR_SIZE;
 
 		if (dev->cow.file) {
 			cowify_req(io_req, dev->cow.bitmap,
@@ -1353,7 +1349,7 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
 	} else {
 		struct req_iterator iter;
 		struct bio_vec bvec;
-		u64 off = (u64)blk_rq_pos(req) << 9;
+		u64 off = (u64)blk_rq_pos(req) << SECTOR_SHIFT;
 
 		rq_for_each_segment(bvec, req, iter) {
 			ret = ubd_queue_one_vec(hctx, req, off, &bvec);
@@ -1413,22 +1409,36 @@ static int ubd_ioctl(struct block_device *bdev, fmode_t mode,
 	return -EINVAL;
 }
 
+static int map_error(int error_code)
+{
+	switch (error_code) {
+	case 0:
+		return BLK_STS_OK;
+	case ENOSYS:
+	case EOPNOTSUPP:
+		return BLK_STS_NOTSUPP;
+	case ENOSPC:
+		return BLK_STS_NOSPC;
+	}
+	return BLK_STS_IOERR;
+}
+
 static int update_bitmap(struct io_thread_req *req)
 {
 	int n;
 
 	if(req->cow_offset == -1)
-		return 0;
+		return map_error(0);
 
 	n = os_pwrite_file(req->fds[1], &req->bitmap_words,
 			   sizeof(req->bitmap_words), req->cow_offset);
 	if(n != sizeof(req->bitmap_words)){
 		printk("do_io - bitmap update failed, err = %d fd = %d\n", -n,
 		       req->fds[1]);
-		return 1;
+		return map_error(-n);
 	}
 
-	return 0;
+	return map_error(0);
 }
 
 static void do_io(struct io_thread_req *req)
@@ -1438,13 +1448,13 @@ static void do_io(struct io_thread_req *req)
 	int n, nsectors, start, end, bit;
 	__u64 off;
 
-	if (req->op == UBD_FLUSH) {
+	if (req_op(req->req) == REQ_OP_FLUSH) {
 		/* fds[0] is always either the rw image or our cow file */
 		n = os_sync_file(req->fds[0]);
 		if (n != 0) {
 			printk("do_io - sync failed err = %d "
 			       "fd = %d\n", -n, req->fds[0]);
-			req->error = 1;
+			req->error = map_error(-n);
 		}
 		return;
 	}
@@ -1464,7 +1474,7 @@ static void do_io(struct io_thread_req *req)
 		len = (end - start) * req->sectorsize;
 		buf = &req->buffer[start * req->sectorsize];
 
-		if(req->op == UBD_READ){
+		if (req_op(req->req) == REQ_OP_READ) {
 			n = 0;
 			do {
 				buf = &buf[n];
@@ -1473,7 +1483,7 @@ static void do_io(struct io_thread_req *req)
 				if (n < 0) {
 					printk("do_io - read failed, err = %d "
 					       "fd = %d\n", -n, req->fds[bit]);
-					req->error = 1;
+					req->error = map_error(-n);
 					return;
 				}
 			} while((n < len) && (n != 0));
@@ -1483,7 +1493,7 @@ static void do_io(struct do_io(struct io_thread_req *req)
 			if(n != len){
 				printk("do_io - write failed err = %d "
 				       "fd = %d\n", -n, req->fds[bit]);
-				req->error = 1;
+				req->error = map_error(-n);
 				return;
 			}
 		}