summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/os/linux/swap.h
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/os/linux/swap.h')
-rw-r--r--drivers/gpu/nvgpu/os/linux/swap.h41
1 file changed, 23 insertions, 18 deletions
diff --git a/drivers/gpu/nvgpu/os/linux/swap.h b/drivers/gpu/nvgpu/os/linux/swap.h
index f762ba81..1e986095 100644
--- a/drivers/gpu/nvgpu/os/linux/swap.h
+++ b/drivers/gpu/nvgpu/os/linux/swap.h
@@ -2,17 +2,23 @@
2#include <linux/bio.h> 2#include <linux/bio.h>
3//#include <nvgpu/bug.h> 3//#include <nvgpu/bug.h>
4 4
5// Callback for completion of the I/O chain
6static void complete_swap_io(struct bio *bio) {
7 struct nvgpu_mapped_buf *m = bio->bi_private;
8 bio_put(bio);
9 complete(&m->os_priv.swap_io_done);
10}
11
5// Queue a command to copy out an SGT to disk 12// Queue a command to copy out an SGT to disk
6// TODO: Cache bdev 13// TODO: Cache bdev
7// TODO: Asynchronous I/O
8// TODO: Don't hardcode sector 0 14// TODO: Don't hardcode sector 0
9int copy(struct sg_table *sgt, int op) { 15// TODO: Figure out if submit_bio() can fail, and what to do then
16int copy(struct sg_table *sgt, int op, struct nvgpu_mapped_buf *m) {
10 unsigned int i; 17 unsigned int i;
11 struct scatterlist *sg; 18 struct scatterlist *sg;
12 struct bio *bio; 19 struct bio *bio;
13 int err = 0; 20 int err = 0;
14 int sg_cnt = sgt->nents; 21 int sg_cnt = sgt->nents;
15 struct bio *bio_orig;
16 sector_t sector = 0; // XXX: For testing 22 sector_t sector = 0; // XXX: For testing
17 // Find and open the block device 23 // Find and open the block device
18 struct block_device *bdev = blkdev_get_by_path("/dev/nvme0n1", FMODE_READ | FMODE_WRITE, copy); 24 struct block_device *bdev = blkdev_get_by_path("/dev/nvme0n1", FMODE_READ | FMODE_WRITE, copy);
@@ -20,12 +26,15 @@ int copy(struct sg_table *sgt, int op) {
20 printk(KERN_WARNING "Unabled to find `nvme0`, err %ld!\n", PTR_ERR(bdev)); 26 printk(KERN_WARNING "Unabled to find `nvme0`, err %ld!\n", PTR_ERR(bdev));
21 return -ENODEV; 27 return -ENODEV;
22 } 28 }
23 // Will never fail when allocating <= BIO_MAX_PAGES 29 // Reset the .done variable in the completion
30 reinit_completion(&m->os_priv.swap_io_done);
31 // bio_alloc() will never fail when allocating <= BIO_MAX_PAGES
24 bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES)); 32 bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
25 bio_orig = bio;
26 bio->bi_bdev = bdev; // Switch to bio_set_dev(bdev) in newer kernels 33 bio->bi_bdev = bdev; // Switch to bio_set_dev(bdev) in newer kernels
27 bio->bi_iter.bi_sector = sector; 34 bio->bi_iter.bi_sector = sector;
28 bio_set_op_attrs(bio, op, op == REQ_OP_WRITE ? WRITE_ODIRECT : 0);//REQ_SYNC); // XXX: Is REQ_SYNC necessary? 35 bio_set_op_attrs(bio, op, REQ_SYNC); // REQ_SYNC is identical to WRITE_ODIRECT
36 bio->bi_private = m;
37 bio->bi_end_io = complete_swap_io;
29 // Copy the scatter-gather table (sgt) into a block I/O vector (bio vec) 38 // Copy the scatter-gather table (sgt) into a block I/O vector (bio vec)
30 // bio_chain() approach borrowed from drivers/nvme/target/io-cmd.c:nvmet_execute_rw() 39 // bio_chain() approach borrowed from drivers/nvme/target/io-cmd.c:nvmet_execute_rw()
31 for_each_sg(sgt->sgl, sg, sgt->nents, i) { 40 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
@@ -47,16 +56,12 @@ int copy(struct sg_table *sgt, int op) {
47 sector += sg_dma_len(sg) >> 9; 56 sector += sg_dma_len(sg) >> 9;
48 sg_cnt--; 57 sg_cnt--;
49 } 58 }
50 // Use blocking submit for now
51 // TODO: Switch to async via submit_bio(bio)
52 err = submit_bio_wait(bio);
53 59
54 if (bio->bi_error && bio->bi_error != err) 60 // Async submit. Caller should wait_for_completion_io(&m->os_priv.swap_io_done);
55 printk(KERN_WARNING "nvgpu: bio->bi_error %d != return val from submit_bio_wait() %d\n", bio->bi_error, err); 61 submit_bio(bio);
56 62
57//out: 63 // Release our block device handle
58 bio_put(bio_orig); // TODO: Move to completion handler 64 blkdev_put(bdev, FMODE_WRITE | FMODE_READ); // Is this safe?
59 blkdev_put(bdev, FMODE_WRITE|FMODE_READ);
60 return err; 65 return err;
61} 66}
62 67
@@ -107,11 +112,11 @@ int copy_all(struct vm_gk20a *vm) {
107 return 0; 112 return 0;
108} 113}
109 114
110int copy_out(struct sg_table *sgt) { 115int copy_out(struct sg_table *sgt, struct nvgpu_mapped_buf *m) {
111 return copy(sgt, REQ_OP_WRITE); 116 return copy(sgt, REQ_OP_WRITE, m);
112} 117}
113 118
114int copy_in(struct sg_table *sgt) { 119int copy_in(struct sg_table *sgt, struct nvgpu_mapped_buf *m) {
115 return copy(sgt, REQ_OP_READ); 120 return copy(sgt, REQ_OP_READ, m);
116} 121}
117 122