Diffstat (limited to 'drivers/gpu/nvgpu/os/linux/swap.h')
-rw-r--r--  drivers/gpu/nvgpu/os/linux/swap.h  31
1 file changed, 27 insertions, 4 deletions
diff --git a/drivers/gpu/nvgpu/os/linux/swap.h b/drivers/gpu/nvgpu/os/linux/swap.h
index 1e986095..3a648b26 100644
--- a/drivers/gpu/nvgpu/os/linux/swap.h
+++ b/drivers/gpu/nvgpu/os/linux/swap.h
@@ -1,8 +1,12 @@
 #include <linux/scatterlist.h>
 #include <linux/bio.h>
-//#include <nvgpu/bug.h>
+#include <linux/blkdev.h> // For SECTOR_SHIFT
+
+// Next sector to assign a mapped_buf to. Skip first disk block
+atomic64_t nvgpu_swap_next_sector = {4};
 
 // Callback for completion of the I/O chain
+// TODO: Error checking and handling
 static void complete_swap_io(struct bio *bio) {
 	struct nvgpu_mapped_buf *m = bio->bi_private;
 	bio_put(bio);
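The first hunk cuts off before the end of complete_swap_io(). For orientation, a minimal sketch of how such a completion callback typically finishes, assuming os_priv.swap_io_done is the struct completion that the later hunks reinitialize and the caller waits on; the error-handling TODO above is left open:

// Sketch only (not part of the patch): likely tail of the callback.
static void complete_swap_io_sketch(struct bio *bio)
{
	struct nvgpu_mapped_buf *m = bio->bi_private;

	bio_put(bio);                        // Drop the reference taken at allocation
	complete(&m->os_priv.swap_io_done);  // Wake anyone blocked in wait_for_completion_io()
}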
@@ -11,21 +15,38 @@ static void complete_swap_io(struct bio *bio) {
 
 // Queue a command to copy out an SGT to disk
 // TODO: Cache bdev
-// TODO: Don't hardcode sector 0
-// TODO: Figure out if submit_bio() can fail, and what to do then
+// TODO: Track, allocate, and recycle individual swap buffers on disk instead
+// of only supporting a global reset
 int copy(struct sg_table *sgt, int op, struct nvgpu_mapped_buf *m) {
 	unsigned int i;
 	struct scatterlist *sg;
 	struct bio *bio;
 	int err = 0;
 	int sg_cnt = sgt->nents;
-	sector_t sector = 0; // XXX: For testing
+	sector_t sector = m->os_priv.swap_sector;
 	// Find and open the block device
 	struct block_device *bdev = blkdev_get_by_path("/dev/nvme0n1", FMODE_READ | FMODE_WRITE, copy);
 	if (unlikely(IS_ERR(bdev))) {
 		printk(KERN_WARNING "Unabled to find `nvme0`, err %ld!\n", PTR_ERR(bdev));
 		return -ENODEV;
 	}
+	// Assign a sector on-disk (0 indicates unassigned, we start at 4)
+	if (sector == 0) {
+		// Read block device size in sectors, and fail if we'd use more than 1/3rd
+		// of the disk (to stay in SLC-emulation-mode).
+		// TODO: Issue NVMe DSM commands to try to manage this better? Read-only
+		// regions should be able to be moved to TLC safely, whereas other
+		// data should be kept in the SLC cache to reduce wear.
+		if (atomic64_read(&nvgpu_swap_next_sector) >= i_size_read(bdev->bd_inode)/3) {
+			err = -ENOMEM;
+			goto out_put;
+		}
+		// Hand out sectors sequentially, and statically
+		// TODO: Intelligent sector allocation
+		sector = atomic64_add_return(m->size >> SECTOR_SHIFT, &nvgpu_swap_next_sector);
+		sector -= (m->size >> SECTOR_SHIFT);
+		m->os_priv.swap_sector = sector;
+	}
 	// Reset the .done variable in the completion
 	reinit_completion(&m->os_priv.swap_io_done);
 	// bio_alloc() will never fail when allocating <= BIO_MAX_PAGES
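The sector assignment added above is a simple atomic bump allocator: atomic64_add_return() reserves m->size >> SECTOR_SHIFT sectors in one step and returns the post-add value, so subtracting the size back out yields the start of the reserved range. A minimal sketch of that pattern in isolation, plus the "global reset" the TODO alludes to; the helper names are illustrative, not from the patch:

// Sketch only (not part of the patch): the bump-allocation pattern used above.
static sector_t swap_alloc_sectors_sketch(u64 nr_sectors)
{
	// Reserve [start, start + nr_sectors) in a single atomic step.
	sector_t end = atomic64_add_return(nr_sectors, &nvgpu_swap_next_sector);
	return end - nr_sectors;
}

// Hypothetical "global reset": forget all handed-out ranges and start over
// after the first disk block, matching the static initializer above.
static void swap_reset_sectors_sketch(void)
{
	atomic64_set(&nvgpu_swap_next_sector, 4);
}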
@@ -58,8 +79,10 @@ int copy(struct sg_table *sgt, int op, struct nvgpu_mapped_buf *m) {
 	}
 
 	// Async submit. Caller should wait_for_completion_io(&m->os_priv.swap_io_done);
+	// Does not fail. Error reported via completion handler.
 	submit_bio(bio);
 
+out_put:
 	// Release our block device handle
 	blkdev_put(bdev, FMODE_WRITE | FMODE_READ); // Is this safe?
 	return err;
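Per the comment before submit_bio(), copy() only queues the I/O; the caller is expected to block on the completion itself. A minimal caller sketch under the assumptions that op takes a REQ_OP_* value and that the buffer's SGT is reachable from the mapped_buf (the os_priv.sgt field here is illustrative, not confirmed by the diff):

// Sketch only (not part of the patch): driving an asynchronous write-out.
static int swap_out_sketch(struct nvgpu_mapped_buf *m)
{
	int err = copy(m->os_priv.sgt, REQ_OP_WRITE, m);   // Queue the write (os_priv.sgt is assumed)
	if (err)
		return err;                                // e.g. -ENODEV or -ENOMEM from copy()
	wait_for_completion_io(&m->os_priv.swap_io_done);  // Block until complete_swap_io() runs
	return 0;
}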