Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dax/dax-private.h                              |  2
-rw-r--r--  drivers/dax/kmem.c                                     | 46
-rw-r--r--  drivers/fpga/dfl-afu-dma-region.c                      | 53
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c |  2
-rw-r--r--  drivers/pps/pps.c                                      |  8
-rw-r--r--  drivers/rapidio/devices/rio_mport_cdev.c               |  2
-rw-r--r--  drivers/vfio/vfio_iommu_spapr_tce.c                    | 54
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c                        | 17
8 files changed, 65 insertions, 119 deletions
diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
index c915889d1769..6ccca3b890d6 100644
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -43,6 +43,7 @@ struct dax_region {
  * @target_node: effective numa node if dev_dax memory range is onlined
  * @dev - device core
  * @pgmap - pgmap for memmap setup / lifetime (driver owned)
+ * @dax_kmem_res: physical address range of hotadded DAX memory
  */
 struct dev_dax {
 	struct dax_region *region;
@@ -50,6 +51,7 @@ struct dev_dax {
 	int target_node;
 	struct device dev;
 	struct dev_pagemap pgmap;
+	struct resource *dax_kmem_res;
 };
 
 static inline struct dev_dax *to_dev_dax(struct device *dev)
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index a02318c6d28a..3d0a7e702c94 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -66,23 +66,59 @@ int dev_dax_kmem_probe(struct device *dev)
 	new_res->name = dev_name(dev);
 
 	rc = add_memory(numa_node, new_res->start, resource_size(new_res));
-	if (rc)
+	if (rc) {
+		release_resource(new_res);
+		kfree(new_res);
 		return rc;
+	}
+	dev_dax->dax_kmem_res = new_res;
 
 	return 0;
 }
 
+#ifdef CONFIG_MEMORY_HOTREMOVE
+static int dev_dax_kmem_remove(struct device *dev)
+{
+	struct dev_dax *dev_dax = to_dev_dax(dev);
+	struct resource *res = dev_dax->dax_kmem_res;
+	resource_size_t kmem_start = res->start;
+	resource_size_t kmem_size = resource_size(res);
+	int rc;
+
+	/*
+	 * We have one shot for removing memory, if some memory blocks were not
+	 * offline prior to calling this function remove_memory() will fail, and
+	 * there is no way to hotremove this memory until reboot because device
+	 * unbind will succeed even if we return failure.
+	 */
+	rc = remove_memory(dev_dax->target_node, kmem_start, kmem_size);
+	if (rc) {
+		dev_err(dev,
+			"DAX region %pR cannot be hotremoved until the next reboot\n",
+			res);
+		return rc;
+	}
+
+	/* Release and free dax resources */
+	release_resource(res);
+	kfree(res);
+	dev_dax->dax_kmem_res = NULL;
+
+	return 0;
+}
+#else
 static int dev_dax_kmem_remove(struct device *dev)
 {
 	/*
-	 * Purposely leak the request_mem_region() for the device-dax
-	 * range and return '0' to ->remove() attempts. The removal of
-	 * the device from the driver always succeeds, but the region
-	 * is permanently pinned as reserved by the unreleased
+	 * Without hotremove purposely leak the request_mem_region() for the
+	 * device-dax range and return '0' to ->remove() attempts. The removal
+	 * of the device from the driver always succeeds, but the region is
+	 * permanently pinned as reserved by the unreleased
 	 * request_mem_region().
 	 */
 	return 0;
 }
+#endif /* CONFIG_MEMORY_HOTREMOVE */
 
 static struct dax_device_driver device_dax_kmem_driver = {
 	.drv = {
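
For context, the error path added above cleans up the resource that is reserved earlier in dev_dax_kmem_probe() and, on success, parked in dev_dax->dax_kmem_res so the new hotremove path can find it. A minimal, hypothetical sketch of that pairing (kmem_probe_sketch() is an illustrative name, and setup details not visible in the hunk are omitted):

#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>

static int kmem_probe_sketch(struct dev_dax *dev_dax, int nid,
			     resource_size_t start, resource_size_t size)
{
	struct resource *new_res;
	int rc;

	/* Reserve the physical range before hot-adding it as system RAM. */
	new_res = request_mem_region(start, size, dev_name(&dev_dax->dev));
	if (!new_res)
		return -EBUSY;

	rc = add_memory(nid, new_res->start, resource_size(new_res));
	if (rc) {
		/* Mirror the cleanup added in the hunk above. */
		release_resource(new_res);
		kfree(new_res);
		return rc;
	}

	/* Remembered so dev_dax_kmem_remove() can release it later. */
	dev_dax->dax_kmem_res = new_res;
	return 0;
}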
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index dcd80b088c7b..62f924489db5 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -12,6 +12,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/sched/signal.h>
 #include <linux/uaccess.h>
+#include <linux/mm.h>
 
 #include "dfl-afu.h"
 
@@ -32,52 +33,6 @@ void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
 }
 
 /**
- * afu_dma_adjust_locked_vm - adjust locked memory
- * @dev: port device
- * @npages: number of pages
- * @incr: increase or decrease locked memory
- *
- * Increase or decrease the locked memory size with npages input.
- *
- * Return 0 on success.
- * Return -ENOMEM if locked memory size is over the limit and no CAP_IPC_LOCK.
- */
-static int afu_dma_adjust_locked_vm(struct device *dev, long npages, bool incr)
-{
-	unsigned long locked, lock_limit;
-	int ret = 0;
-
-	/* the task is exiting. */
-	if (!current->mm)
-		return 0;
-
-	down_write(&current->mm->mmap_sem);
-
-	if (incr) {
-		locked = current->mm->locked_vm + npages;
-		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
-		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
-			ret = -ENOMEM;
-		else
-			current->mm->locked_vm += npages;
-	} else {
-		if (WARN_ON_ONCE(npages > current->mm->locked_vm))
-			npages = current->mm->locked_vm;
-		current->mm->locked_vm -= npages;
-	}
-
-	dev_dbg(dev, "[%d] RLIMIT_MEMLOCK %c%ld %ld/%ld%s\n", current->pid,
-		incr ? '+' : '-', npages << PAGE_SHIFT,
-		current->mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK),
-		ret ? "- exceeded" : "");
-
-	up_write(&current->mm->mmap_sem);
-
-	return ret;
-}
-
-/**
  * afu_dma_pin_pages - pin pages of given dma memory region
  * @pdata: feature device platform data
  * @region: dma memory region to be pinned
@@ -92,7 +47,7 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
 	struct device *dev = &pdata->dev->dev;
 	int ret, pinned;
 
-	ret = afu_dma_adjust_locked_vm(dev, npages, true);
+	ret = account_locked_vm(current->mm, npages, true);
 	if (ret)
 		return ret;
 
@@ -121,7 +76,7 @@ put_pages:
 free_pages:
 	kfree(region->pages);
 unlock_vm:
-	afu_dma_adjust_locked_vm(dev, npages, false);
+	account_locked_vm(current->mm, npages, false);
 	return ret;
 }
 
@@ -141,7 +96,7 @@ static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
 
 	put_all_pages(region->pages, npages);
 	kfree(region->pages);
-	afu_dma_adjust_locked_vm(dev, npages, false);
+	account_locked_vm(current->mm, npages, false);
 
 	dev_dbg(dev, "%ld pages unpinned\n", npages);
 }
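
The conversion above (and the matching vfio changes further down) relies on a shared account_locked_vm() helper in core mm code. A minimal sketch of the semantics the converted callers assume, mirroring the open-coded logic being deleted (the _sketch suffix marks this as illustrative, not the real mm/util.c implementation):

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>

/*
 * Adjust mm->locked_vm by @pages under mmap_sem, refusing an increase past
 * RLIMIT_MEMLOCK unless the caller has CAP_IPC_LOCK.
 */
static int account_locked_vm_sketch(struct mm_struct *mm, unsigned long pages,
				    bool inc)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	if (pages == 0 || !mm)
		return 0;

	down_write(&mm->mmap_sem);
	locked_vm = mm->locked_vm;
	if (inc) {
		limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked_vm + pages > limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			mm->locked_vm = locked_vm + pages;
	} else {
		if (WARN_ON_ONCE(pages > locked_vm))
			pages = locked_vm;
		mm->locked_vm = locked_vm - pages;
	}
	up_write(&mm->mmap_sem);

	return ret;
}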
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 5e4f3a8c5784..e4332d5a5757 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -53,7 +53,7 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
 	pad = round_up(skb->len, 4) + 4 - skb->len;
 
 	/* First packet of a A-MSDU burst keeps track of the whole burst
-	 * length, need to update lenght of it and the last packet.
+	 * length, need to update length of it and the last packet.
 	 */
 	skb_walk_frags(skb, iter) {
 		last = iter;
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 3a546ec10d90..22a65ad4e46e 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -152,6 +152,14 @@ static long pps_cdev_ioctl(struct file *file,
 		pps->params.mode |= PPS_CANWAIT;
 		pps->params.api_version = PPS_API_VERS;
 
+		/*
+		 * Clear unused fields of pps_kparams to avoid leaking
+		 * uninitialized data of the PPS_SETPARAMS caller via
+		 * PPS_GETPARAMS
+		 */
+		pps->params.assert_off_tu.flags = 0;
+		pps->params.clear_off_tu.flags = 0;
+
 		spin_unlock_irq(&pps->lock);
 
 		break;
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index ce7a90e68042..8155f59ece38 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -1686,6 +1686,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
 
 	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
 		return -EFAULT;
+	dev_info.name[sizeof(dev_info.name) - 1] = '\0';
 
 	rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
 		   dev_info.comptag, dev_info.destid, dev_info.hopcount);
@@ -1817,6 +1818,7 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
 
 	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
 		return -EFAULT;
+	dev_info.name[sizeof(dev_info.name) - 1] = '\0';
 
 	mport = priv->md->mport;
 
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 7048c9198c21..8ce9ad21129f 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -19,6 +19,7 @@
 #include <linux/vmalloc.h>
 #include <linux/sched/mm.h>
 #include <linux/sched/signal.h>
+#include <linux/mm.h>
 
 #include <asm/iommu.h>
 #include <asm/tce.h>
@@ -31,51 +32,6 @@
 static void tce_iommu_detach_group(void *iommu_data,
 		struct iommu_group *iommu_group);
 
-static long try_increment_locked_vm(struct mm_struct *mm, long npages)
-{
-	long ret = 0, locked, lock_limit;
-
-	if (WARN_ON_ONCE(!mm))
-		return -EPERM;
-
-	if (!npages)
-		return 0;
-
-	down_write(&mm->mmap_sem);
-	locked = mm->locked_vm + npages;
-	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
-		ret = -ENOMEM;
-	else
-		mm->locked_vm += npages;
-
-	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
-			npages << PAGE_SHIFT,
-			mm->locked_vm << PAGE_SHIFT,
-			rlimit(RLIMIT_MEMLOCK),
-			ret ? " - exceeded" : "");
-
-	up_write(&mm->mmap_sem);
-
-	return ret;
-}
-
-static void decrement_locked_vm(struct mm_struct *mm, long npages)
-{
-	if (!mm || !npages)
-		return;
-
-	down_write(&mm->mmap_sem);
-	if (WARN_ON_ONCE(npages > mm->locked_vm))
-		npages = mm->locked_vm;
-	mm->locked_vm -= npages;
-	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
-			npages << PAGE_SHIFT,
-			mm->locked_vm << PAGE_SHIFT,
-			rlimit(RLIMIT_MEMLOCK));
-	up_write(&mm->mmap_sem);
-}
-
 /*
  * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
  *
@@ -333,7 +289,7 @@ static int tce_iommu_enable(struct tce_container *container)
 		return ret;
 
 	locked = table_group->tce32_size >> PAGE_SHIFT;
-	ret = try_increment_locked_vm(container->mm, locked);
+	ret = account_locked_vm(container->mm, locked, true);
 	if (ret)
 		return ret;
 
@@ -352,7 +308,7 @@ static void tce_iommu_disable(struct tce_container *container)
 	container->enabled = false;
 
 	BUG_ON(!container->mm);
-	decrement_locked_vm(container->mm, container->locked_pages);
+	account_locked_vm(container->mm, container->locked_pages, false);
 }
 
 static void *tce_iommu_open(unsigned long arg)
@@ -656,7 +612,7 @@ static long tce_iommu_create_table(struct tce_container *container,
 	if (!table_size)
 		return -EINVAL;
 
-	ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
+	ret = account_locked_vm(container->mm, table_size >> PAGE_SHIFT, true);
 	if (ret)
 		return ret;
 
@@ -675,7 +631,7 @@ static void tce_iommu_free_table(struct tce_container *container,
 	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
 
 	iommu_tce_table_put(tbl);
-	decrement_locked_vm(container->mm, pages);
+	account_locked_vm(container->mm, pages, false);
 }
 
 static long tce_iommu_create_window(struct tce_container *container,
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index add34adfadc7..054391f30fa8 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -272,21 +272,8 @@ static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
 
 	ret = down_write_killable(&mm->mmap_sem);
 	if (!ret) {
-		if (npage > 0) {
-			if (!dma->lock_cap) {
-				unsigned long limit;
-
-				limit = task_rlimit(dma->task,
-						RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
-				if (mm->locked_vm + npage > limit)
-					ret = -ENOMEM;
-			}
-		}
-
-		if (!ret)
-			mm->locked_vm += npage;
-
+		ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task,
+					  dma->lock_cap);
 		up_write(&mm->mmap_sem);
 	}
 
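
vfio_lock_acct() cannot use the plain helper because it already takes mmap_sem itself, accounts against dma->task's limit rather than current's, and carries a precomputed lock_cap, which is exactly what the double-underscore variant exposes. A minimal usage sketch under those assumptions (charge_pages_locked() is a hypothetical wrapper, not kernel code):

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Charge or uncharge @npage pages against @task's RLIMIT_MEMLOCK, taking
 * mmap_sem for write around the call, the way vfio_lock_acct() does above.
 * @lock_cap bypasses the rlimit check (the CAP_IPC_LOCK equivalent).
 */
static int charge_pages_locked(struct mm_struct *mm, struct task_struct *task,
			       long npage, bool lock_cap)
{
	int ret;

	ret = down_write_killable(&mm->mmap_sem);
	if (ret)
		return ret;

	/* inc when npage > 0, dec when negative; magnitude is abs(npage) */
	ret = __account_locked_vm(mm, abs(npage), npage > 0, task, lock_cap);
	up_write(&mm->mmap_sem);

	return ret;
}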