aboutsummaryrefslogtreecommitdiffstats
path: root/arch/um/drivers
diff options
context:
space:
mode:
authorRichard Weinberger <richard@nod.at>2013-08-18 07:30:06 -0400
committerRichard Weinberger <richard@nod.at>2013-09-07 04:56:49 -0400
commit805f11a0d515658106bfbfadceff0eb30bd90ad2 (patch)
tree2bcfbc9028d5a92875272302bd9e7daf82b4e4be /arch/um/drivers
parentf75b1b1bedfb498cc43a992ce4d7ed8df3b1e770 (diff)
um: ubd: Add REQ_FLUSH support
UML's block device driver does not support write barriers. To support them, this patch adds REQ_FLUSH support: every time the block layer sends a REQ_FLUSH, we now fsync() our backing file to guarantee data consistency. Reported-and-tested-by: Richard W.M. Jones <rjones@redhat.com> Signed-off-by: Richard Weinberger <richard@nod.at>
Diffstat (limited to 'arch/um/drivers')
-rw-r--r--arch/um/drivers/ubd_kern.c41
1 file changed, 40 insertions, 1 deletion
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 879990cb66c6..d27c703be1a1 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -41,7 +41,7 @@
41#include <os.h> 41#include <os.h>
42#include "cow.h" 42#include "cow.h"
43 43
44enum ubd_req { UBD_READ, UBD_WRITE }; 44enum ubd_req { UBD_READ, UBD_WRITE, UBD_FLUSH };
45 45
46struct io_thread_req { 46struct io_thread_req {
47 struct request *req; 47 struct request *req;
@@ -866,6 +866,7 @@ static int ubd_add(int n, char **error_out)
866 goto out; 866 goto out;
867 } 867 }
868 ubd_dev->queue->queuedata = ubd_dev; 868 ubd_dev->queue->queuedata = ubd_dev;
869 blk_queue_flush(ubd_dev->queue, REQ_FLUSH);
869 870
870 blk_queue_max_segments(ubd_dev->queue, MAX_SG); 871 blk_queue_max_segments(ubd_dev->queue, MAX_SG);
871 err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]); 872 err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
@@ -1239,6 +1240,19 @@ static void prepare_request(struct request *req, struct io_thread_req *io_req,
1239} 1240}
1240 1241
1241/* Called with dev->lock held */ 1242/* Called with dev->lock held */
1243static void prepare_flush_request(struct request *req,
1244 struct io_thread_req *io_req)
1245{
1246 struct gendisk *disk = req->rq_disk;
1247 struct ubd *ubd_dev = disk->private_data;
1248
1249 io_req->req = req;
1250 io_req->fds[0] = (ubd_dev->cow.file != NULL) ? ubd_dev->cow.fd :
1251 ubd_dev->fd;
1252 io_req->op = UBD_FLUSH;
1253}
1254
1255/* Called with dev->lock held */
1242static void do_ubd_request(struct request_queue *q) 1256static void do_ubd_request(struct request_queue *q)
1243{ 1257{
1244 struct io_thread_req *io_req; 1258 struct io_thread_req *io_req;
@@ -1259,6 +1273,20 @@ static void do_ubd_request(struct request_queue *q)
1259 } 1273 }
1260 1274
1261 req = dev->request; 1275 req = dev->request;
1276
1277 if (req->cmd_flags & REQ_FLUSH) {
1278 io_req = kmalloc(sizeof(struct io_thread_req),
1279 GFP_ATOMIC);
1280 if (io_req == NULL) {
1281 if (list_empty(&dev->restart))
1282 list_add(&dev->restart, &restart);
1283 return;
1284 }
1285 prepare_flush_request(req, io_req);
1286 os_write_file(thread_fd, &io_req,
1287 sizeof(struct io_thread_req *));
1288 }
1289
1262 while(dev->start_sg < dev->end_sg){ 1290 while(dev->start_sg < dev->end_sg){
1263 struct scatterlist *sg = &dev->sg[dev->start_sg]; 1291 struct scatterlist *sg = &dev->sg[dev->start_sg];
1264 1292
@@ -1367,6 +1395,17 @@ static void do_io(struct io_thread_req *req)
1367 int err; 1395 int err;
1368 __u64 off; 1396 __u64 off;
1369 1397
1398 if (req->op == UBD_FLUSH) {
1399 /* fds[0] is always either the rw image or our cow file */
1400 n = os_sync_file(req->fds[0]);
1401 if (n != 0) {
1402 printk("do_io - sync failed err = %d "
1403 "fd = %d\n", -n, req->fds[0]);
1404 req->error = 1;
1405 }
1406 return;
1407 }
1408
1370 nsectors = req->length / req->sectorsize; 1409 nsectors = req->length / req->sectorsize;
1371 start = 0; 1410 start = 0;
1372 do { 1411 do {