Diffstat (limited to 'drivers/gpu/nvgpu/os/linux/swap.h')
-rw-r--r--  drivers/gpu/nvgpu/os/linux/swap.h  117
1 file changed, 117 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/os/linux/swap.h b/drivers/gpu/nvgpu/os/linux/swap.h
new file mode 100644
index 00000000..f762ba81
--- /dev/null
+++ b/drivers/gpu/nvgpu/os/linux/swap.h
@@ -0,0 +1,117 @@
#include <linux/scatterlist.h>
#include <linux/bio.h>
#include <linux/fs.h>   // blkdev_get_by_path()/blkdev_put() live here on 4.9-era kernels
//#include <nvgpu/bug.h>

// Copy an SGT to or from disk (op selects REQ_OP_WRITE or REQ_OP_READ)
// TODO: Cache bdev
// TODO: Asynchronous I/O
// TODO: Don't hardcode sector 0
int copy(struct sg_table *sgt, int op) {
        unsigned int i;
        struct scatterlist *sg;
        struct bio *bio;
        int err = 0;
        int sg_cnt = sgt->nents;
        struct bio *bio_orig;
        sector_t sector = 0; // XXX: For testing
        // Find and open the block device
        struct block_device *bdev = blkdev_get_by_path("/dev/nvme0n1", FMODE_READ | FMODE_WRITE, copy);
        if (unlikely(IS_ERR(bdev))) {
                printk(KERN_WARNING "nvgpu: Unable to open /dev/nvme0n1, err %ld!\n", PTR_ERR(bdev));
                return -ENODEV;
        }
        // Will never fail when allocating <= BIO_MAX_PAGES
        bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
        bio_orig = bio;
        bio->bi_bdev = bdev; // Switch to bio_set_dev(bdev) in newer kernels
        bio->bi_iter.bi_sector = sector;
        bio_set_op_attrs(bio, op, op == REQ_OP_WRITE ? WRITE_ODIRECT : 0); // XXX: Would REQ_SYNC alone be sufficient for writes?
        // Copy the scatter-gather table (sgt) into a block I/O vector (bio vec)
        // bio_chain() approach borrowed from drivers/nvme/target/io-cmd.c:nvmet_execute_rw()
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                // On most iterations, this inner loop shouldn't happen at all. This loop
                // conditional only triggers if we fill up the bio and are unable to map
                // the full length of an SGL entry.
                while (bio_add_page(bio, sg_page(sg), sg_dma_len(sg), sg->offset) != sg_dma_len(sg)) {
                        // Uh oh! We ran out of space in the bio. Allocate a new one and chain it...
                        struct bio *prev = bio;
                        bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
                        bio->bi_bdev = bdev; // Switch to bio_set_dev(bdev) in newer kernels
                        bio->bi_iter.bi_sector = sector;
                        bio_set_op_attrs(bio, op, op == REQ_OP_WRITE ? WRITE_ODIRECT : 0);
                        bio_chain(bio, prev);
                        // Get the I/O started
                        submit_bio(prev);
                        // No need to call bio_put() as that's automatically managed for chained bios
                }
                sector += sg_dma_len(sg) >> 9;
                sg_cnt--;
        }
        // Use blocking submit for now
        // TODO: Switch to async via submit_bio(bio)
        err = submit_bio_wait(bio);

        if (bio->bi_error && bio->bi_error != err)
                printk(KERN_WARNING "nvgpu: bio->bi_error %d != return val from submit_bio_wait() %d\n", bio->bi_error, err);

        bio_put(bio_orig); // TODO: Move to completion handler
        blkdev_put(bdev, FMODE_WRITE | FMODE_READ);
        return err;
}
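
// Usage sketch (illustrative, not part of the original flow): assuming the
// caller already holds a DMA-mapped struct sg_table for the buffer, so that
// sg_dma_len() is valid for every entry, a synchronous write-out followed by
// a read-back would look roughly like:
//
//      err = copy(sgt, REQ_OP_WRITE);   // push the pages to sector 0 of /dev/nvme0n1
//      if (!err)
//              err = copy(sgt, REQ_OP_READ);   // pull them back into the same pages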

// Patterned off how __nvgpu_vm_find_mapped_buf_reverse() works in vm.c.
// Requires struct nvgpu_rbtree_node *node, struct nvgpu_rbtree_node *root,
// and struct nvgpu_mapped_buf *m to be declared by the caller.
// Iterates until the end of the rbtree or until m is NULL.
#define for_each_buffer(node, root, m) \
        for (nvgpu_rbtree_enum_start(0, &node, root); \
             node && (uintptr_t)(m = mapped_buffer_from_rbtree_node(node)); \
             nvgpu_rbtree_enum_next(&node, node))

// Fast replacement for walking the rbtree with the above macro to find the
// mapped_buf backing a particular dmabuf
struct nvgpu_mapped_buf* dmabuf_to_mapped_buf(struct dma_buf *dmabuf) {
        struct list_head *nvmap_priv = nvmap_get_priv_list(dmabuf);
        struct nvgpu_mapped_buf *mapped_buffer;
        struct nvgpu_mapped_buf_priv *priv;

        if (IS_ERR(nvmap_priv))
                return ERR_PTR(-EOPNOTSUPP);

        priv = list_first_entry_or_null(nvmap_priv, struct nvgpu_mapped_buf_priv, nvmap_priv_entry);
        if (unlikely(!priv)) {
                printk(KERN_ERR "nvgpu: State tracking error for fast reverse lookups; dmabuf is not attached!\n");
                return ERR_PTR(-ENOTRECOVERABLE);
        }

        mapped_buffer = container_of(priv, struct nvgpu_mapped_buf, os_priv);
        if (unlikely(mapped_buffer->os_priv.dmabuf != dmabuf)) {
                printk(KERN_ERR "nvgpu: dmabuf_to_mapped_buf mapping inconsistent! BUG!\n");
                return ERR_PTR(-ENOTRECOVERABLE);
        }
        if (!list_is_singular(&priv->nvmap_priv_entry)) {
                printk(KERN_WARNING "nvgpu: Requesting paging on memory with multiple mappings! Aborting...\n");
                return ERR_PTR(-EOPNOTSUPP);
        }
        return mapped_buffer;
}
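
// Illustrative example (not from the original patch): a swap-out path could
// resolve the dmabuf first and then reuse the attachment's scatter-gather
// table. The os_priv.sgt field is assumed here, matching stock nvgpu's
// struct nvgpu_mapped_buf_priv; error pointers must be checked with IS_ERR(),
// since unsupported and multiply-mapped buffers are reported that way.
//
//      struct nvgpu_mapped_buf *m = dmabuf_to_mapped_buf(dmabuf);
//      if (IS_ERR(m))
//              return PTR_ERR(m);
//      err = copy_out(m->os_priv.sgt);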

int copy_all(struct vm_gk20a *vm) {
        struct nvgpu_rbtree_node *node;
        struct nvgpu_mapped_buf *m;

        for_each_buffer(node, vm->mapped_buffers, m) {
                // TODO
                continue;
        }
        return 0;
}
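
// A sketch of what the TODO body above might eventually do, assuming each
// mapped buffer keeps its dma-buf attachment's sg_table in os_priv.sgt (as in
// stock nvgpu) and that blocking I/O per buffer is acceptable for now:
//
//      err = copy_out(m->os_priv.sgt);
//      if (err)
//              return err;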

int copy_out(struct sg_table *sgt) {
        return copy(sgt, REQ_OP_WRITE);
}

int copy_in(struct sg_table *sgt) {
        return copy(sgt, REQ_OP_READ);
}