-rw-r--r--  MAINTAINERS                                       |   2
-rw-r--r--  arch/arm/kvm/mmu.c                                |   2
-rw-r--r--  arch/x86/kvm/mmu.c                                |   4
-rw-r--r--  drivers/rapidio/devices/rio_mport_cdev.c          | 115
-rw-r--r--  fs/proc/base.c                                    |   3
-rw-r--r--  include/linux/page-flags.h                        |  22
-rw-r--r--  include/linux/swap.h                              |   4
-rw-r--r--  include/uapi/linux/rio_mport_cdev.h (renamed from include/linux/rio_mport_cdev.h) | 144
-rw-r--r--  include/uapi/linux/swab.h                         |  24
-rw-r--r--  lib/stackdepot.c                                  |   6
-rw-r--r--  mm/compaction.c                                   |  14
-rw-r--r--  mm/huge_memory.c                                  |   4
-rw-r--r--  mm/memory.c                                       |  11
-rw-r--r--  mm/page_alloc.c                                   |   2
-rw-r--r--  mm/zswap.c                                        |   8
-rw-r--r--  scripts/mod/file2alias.c                          |  69
16 files changed, 252 insertions, 182 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 73141b4f46a9..23aeb66f3007 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7978,7 +7978,7 @@ F: arch/arm/*omap*/*pm*
 F:	drivers/cpufreq/omap-cpufreq.c
 
 OMAP POWERDOMAIN SOC ADAPTATION LAYER SUPPORT
-M:	Rajendra Nayak <rnayak@ti.com>
+M:	Rajendra Nayak <rnayak@codeaurora.org>
 M:	Paul Walmsley <paul@pwsan.com>
 L:	linux-omap@vger.kernel.org
 S:	Maintained
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 58dbd5c439df..d6d4191e68f2 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1004,7 +1004,7 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
 	kvm_pfn_t pfn = *pfnp;
 	gfn_t gfn = *ipap >> PAGE_SHIFT;
 
-	if (PageTransCompound(pfn_to_page(pfn))) {
+	if (PageTransCompoundMap(pfn_to_page(pfn))) {
 		unsigned long mask;
 		/*
 		 * The address we faulted on is backed by a transparent huge
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1ff4dbb73fb7..b6f50e8b0a39 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2823,7 +2823,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 	 */
 	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
 	    level == PT_PAGE_TABLE_LEVEL &&
-	    PageTransCompound(pfn_to_page(pfn)) &&
+	    PageTransCompoundMap(pfn_to_page(pfn)) &&
 	    !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
 		unsigned long mask;
 		/*
@@ -4785,7 +4785,7 @@ restart:
 		 */
 		if (sp->role.direct &&
 			!kvm_is_reserved_pfn(pfn) &&
-			PageTransCompound(pfn_to_page(pfn))) {
+			PageTransCompoundMap(pfn_to_page(pfn))) {
 			drop_spte(kvm, sptep);
 			need_tlb_flush = 1;
 			goto restart;
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 96168b819044..e165b7ce29d7 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -126,7 +126,7 @@ struct rio_mport_mapping {
 	struct list_head node;
 	struct mport_dev *md;
 	enum rio_mport_map_dir dir;
-	u32 rioid;
+	u16 rioid;
 	u64 rio_addr;
 	dma_addr_t phys_addr; /* for mmap */
 	void *virt_addr; /* kernel address, for dma_free_coherent */
@@ -137,7 +137,7 @@ struct rio_mport_mapping {
 
 struct rio_mport_dma_map {
 	int valid;
-	uint64_t length;
+	u64 length;
 	void *vaddr;
 	dma_addr_t paddr;
 };
@@ -208,7 +208,7 @@ struct mport_cdev_priv {
 	struct kfifo event_fifo;
 	wait_queue_head_t event_rx_wait;
 	spinlock_t fifo_lock;
-	unsigned int event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
+	u32 event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
 	struct dma_chan *dmach;
 	struct list_head async_list;
@@ -276,7 +276,8 @@ static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
 		return -EFAULT;
 
 	if ((maint_io.offset % 4) ||
-	    (maint_io.length == 0) || (maint_io.length % 4))
+	    (maint_io.length == 0) || (maint_io.length % 4) ||
+	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
 		return -EINVAL;
 
 	buffer = vmalloc(maint_io.length);
@@ -298,7 +299,8 @@ static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
 		offset += 4;
 	}
 
-	if (unlikely(copy_to_user(maint_io.buffer, buffer, maint_io.length)))
+	if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
+				   buffer, maint_io.length)))
 		ret = -EFAULT;
 out:
 	vfree(buffer);
@@ -319,7 +321,8 @@ static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
 		return -EFAULT;
 
 	if ((maint_io.offset % 4) ||
-	    (maint_io.length == 0) || (maint_io.length % 4))
+	    (maint_io.length == 0) || (maint_io.length % 4) ||
+	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
 		return -EINVAL;
 
 	buffer = vmalloc(maint_io.length);
@@ -327,7 +330,8 @@ static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
 		return -ENOMEM;
 	length = maint_io.length;
 
-	if (unlikely(copy_from_user(buffer, maint_io.buffer, length))) {
+	if (unlikely(copy_from_user(buffer,
+			(void __user *)(uintptr_t)maint_io.buffer, length))) {
 		ret = -EFAULT;
 		goto out;
 	}
@@ -360,7 +364,7 @@ out:
  */
 static int
 rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
-				  u32 rioid, u64 raddr, u32 size,
+				  u16 rioid, u64 raddr, u32 size,
 				  dma_addr_t *paddr)
 {
 	struct rio_mport *mport = md->mport;
@@ -369,7 +373,7 @@ rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
 
 	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);
 
-	map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
 	if (map == NULL)
 		return -ENOMEM;
 
@@ -394,7 +398,7 @@ err_map_outb:
 
 static int
 rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
-			       u32 rioid, u64 raddr, u32 size,
+			       u16 rioid, u64 raddr, u32 size,
 			       dma_addr_t *paddr)
 {
 	struct rio_mport_mapping *map;
@@ -433,7 +437,7 @@ static int rio_mport_obw_map(struct file *filp, void __user *arg)
 	dma_addr_t paddr;
 	int ret;
 
-	if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap))))
+	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
 		return -EFAULT;
 
 	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
@@ -448,7 +452,7 @@ static int rio_mport_obw_map(struct file *filp, void __user *arg)
 
 	map.handle = paddr;
 
-	if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap))))
+	if (unlikely(copy_to_user(arg, &map, sizeof(map))))
 		return -EFAULT;
 	return 0;
 }
@@ -469,7 +473,7 @@ static int rio_mport_obw_free(struct file *filp, void __user *arg)
 	if (!md->mport->ops->unmap_outb)
 		return -EPROTONOSUPPORT;
 
-	if (copy_from_user(&handle, arg, sizeof(u64)))
+	if (copy_from_user(&handle, arg, sizeof(handle)))
 		return -EFAULT;
 
 	rmcd_debug(OBW, "h=0x%llx", handle);
@@ -498,9 +502,9 @@ static int rio_mport_obw_free(struct file *filp, void __user *arg)
 static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
 {
 	struct mport_dev *md = priv->md;
-	uint16_t hdid;
+	u16 hdid;
 
-	if (copy_from_user(&hdid, arg, sizeof(uint16_t)))
+	if (copy_from_user(&hdid, arg, sizeof(hdid)))
 		return -EFAULT;
 
 	md->mport->host_deviceid = hdid;
@@ -520,9 +524,9 @@ static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
 static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
 {
 	struct mport_dev *md = priv->md;
-	uint32_t comptag;
+	u32 comptag;
 
-	if (copy_from_user(&comptag, arg, sizeof(uint32_t)))
+	if (copy_from_user(&comptag, arg, sizeof(comptag)))
 		return -EFAULT;
 
 	rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);
@@ -837,7 +841,7 @@ err_out:
  * @xfer: data transfer descriptor structure
  */
 static int
-rio_dma_transfer(struct file *filp, uint32_t transfer_mode,
+rio_dma_transfer(struct file *filp, u32 transfer_mode,
 		 enum rio_transfer_sync sync, enum dma_data_direction dir,
 		 struct rio_transfer_io *xfer)
 {
@@ -875,7 +879,7 @@ rio_dma_transfer(struct file *filp, uint32_t transfer_mode,
 		unsigned long offset;
 		long pinned;
 
-		offset = (unsigned long)xfer->loc_addr & ~PAGE_MASK;
+		offset = (unsigned long)(uintptr_t)xfer->loc_addr & ~PAGE_MASK;
 		nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;
 
 		page_list = kmalloc_array(nr_pages,
@@ -1015,19 +1019,20 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
 	if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
 		return -EFAULT;
 
-	if (transaction.count != 1)
+	if (transaction.count != 1) /* only single transfer for now */
 		return -EINVAL;
 
 	if ((transaction.transfer_mode &
 	     priv->md->properties.transfer_mode) == 0)
 		return -ENODEV;
 
-	transfer = vmalloc(transaction.count * sizeof(struct rio_transfer_io));
+	transfer = vmalloc(transaction.count * sizeof(*transfer));
 	if (!transfer)
 		return -ENOMEM;
 
-	if (unlikely(copy_from_user(transfer, transaction.block,
-				transaction.count * sizeof(struct rio_transfer_io)))) {
+	if (unlikely(copy_from_user(transfer,
+				    (void __user *)(uintptr_t)transaction.block,
+				    transaction.count * sizeof(*transfer)))) {
 		ret = -EFAULT;
 		goto out_free;
 	}
@@ -1038,8 +1043,9 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
 		ret = rio_dma_transfer(filp, transaction.transfer_mode,
 			transaction.sync, dir, &transfer[i]);
 
-	if (unlikely(copy_to_user(transaction.block, transfer,
-				transaction.count * sizeof(struct rio_transfer_io))))
+	if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
+				  transfer,
+				  transaction.count * sizeof(*transfer))))
 		ret = -EFAULT;
 
 out_free:
@@ -1129,11 +1135,11 @@ err_tmo:
 }
 
 static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
-			uint64_t size, struct rio_mport_mapping **mapping)
+			u64 size, struct rio_mport_mapping **mapping)
 {
 	struct rio_mport_mapping *map;
 
-	map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
 	if (map == NULL)
 		return -ENOMEM;
 
@@ -1165,7 +1171,7 @@ static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
 	struct rio_mport_mapping *mapping = NULL;
 	int ret;
 
-	if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_dma_mem))))
+	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
 		return -EFAULT;
 
 	ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
@@ -1174,7 +1180,7 @@ static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
 
 	map.dma_handle = mapping->phys_addr;
 
-	if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_dma_mem)))) {
+	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
 		mutex_lock(&md->buf_mutex);
 		kref_put(&mapping->ref, mport_release_mapping);
 		mutex_unlock(&md->buf_mutex);
@@ -1192,7 +1198,7 @@ static int rio_mport_free_dma(struct file *filp, void __user *arg)
 	int ret = -EFAULT;
 	struct rio_mport_mapping *map, *_map;
 
-	if (copy_from_user(&handle, arg, sizeof(u64)))
+	if (copy_from_user(&handle, arg, sizeof(handle)))
 		return -EFAULT;
 	rmcd_debug(EXIT, "filp=%p", filp);
 
@@ -1242,14 +1248,18 @@ static int rio_mport_free_dma(struct file *filp, void __user *arg)
 
 static int
 rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
-				u64 raddr, u32 size,
+				u64 raddr, u64 size,
 				struct rio_mport_mapping **mapping)
 {
 	struct rio_mport *mport = md->mport;
 	struct rio_mport_mapping *map;
 	int ret;
 
-	map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
+	/* rio_map_inb_region() accepts u32 size */
+	if (size > 0xffffffff)
+		return -EINVAL;
+
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
 	if (map == NULL)
 		return -ENOMEM;
 
@@ -1262,7 +1272,7 @@ rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
 
 	if (raddr == RIO_MAP_ANY_ADDR)
 		raddr = map->phys_addr;
-	ret = rio_map_inb_region(mport, map->phys_addr, raddr, size, 0);
+	ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
 	if (ret < 0)
 		goto err_map_inb;
 
@@ -1288,7 +1298,7 @@ err_dma_alloc:
 
 static int
 rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
-			      u64 raddr, u32 size,
+			      u64 raddr, u64 size,
 			      struct rio_mport_mapping **mapping)
 {
 	struct rio_mport_mapping *map;
@@ -1331,7 +1341,7 @@ static int rio_mport_map_inbound(struct file *filp, void __user *arg)
 
 	if (!md->mport->ops->map_inb)
 		return -EPROTONOSUPPORT;
-	if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap))))
+	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
 		return -EFAULT;
 
 	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);
@@ -1344,7 +1354,7 @@ static int rio_mport_map_inbound(struct file *filp, void __user *arg)
 	map.handle = mapping->phys_addr;
 	map.rio_addr = mapping->rio_addr;
 
-	if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) {
+	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
 		/* Delete mapping if it was created by this request */
 		if (ret == 0 && mapping->filp == filp) {
 			mutex_lock(&md->buf_mutex);
@@ -1375,7 +1385,7 @@ static int rio_mport_inbound_free(struct file *filp, void __user *arg)
 	if (!md->mport->ops->unmap_inb)
 		return -EPROTONOSUPPORT;
 
-	if (copy_from_user(&handle, arg, sizeof(u64)))
+	if (copy_from_user(&handle, arg, sizeof(handle)))
 		return -EFAULT;
 
 	mutex_lock(&md->buf_mutex);
@@ -1401,7 +1411,7 @@ static int rio_mport_inbound_free(struct file *filp, void __user *arg)
 static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
 {
 	struct mport_dev *md = priv->md;
-	uint32_t port_idx = md->mport->index;
+	u32 port_idx = md->mport->index;
 
 	rmcd_debug(MPORT, "port_index=%d", port_idx);
 
@@ -1451,7 +1461,7 @@ static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
 	handled = 0;
 	spin_lock(&data->db_lock);
 	list_for_each_entry(db_filter, &data->doorbells, data_node) {
-		if (((db_filter->filter.rioid == 0xffffffff ||
+		if (((db_filter->filter.rioid == RIO_INVALID_DESTID ||
 		      db_filter->filter.rioid == src)) &&
 		      info >= db_filter->filter.low &&
 		      info <= db_filter->filter.high) {
@@ -1525,6 +1535,9 @@ static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
 	if (copy_from_user(&filter, arg, sizeof(filter)))
 		return -EFAULT;
 
+	if (filter.low > filter.high)
+		return -EINVAL;
+
 	spin_lock_irqsave(&priv->md->db_lock, flags);
 	list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
 		if (db_filter->filter.rioid == filter.rioid &&
@@ -1737,10 +1750,10 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
 		return -EEXIST;
 	}
 
-	size = sizeof(struct rio_dev);
+	size = sizeof(*rdev);
 	mport = md->mport;
-	destid = (u16)dev_info.destid;
-	hopcount = (u8)dev_info.hopcount;
+	destid = dev_info.destid;
+	hopcount = dev_info.hopcount;
 
 	if (rio_mport_read_config_32(mport, destid, hopcount,
 				     RIO_PEF_CAR, &rval))
@@ -1872,8 +1885,8 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
 		do {
 			rdev = rio_get_comptag(dev_info.comptag, rdev);
 			if (rdev && rdev->dev.parent == &mport->net->dev &&
-			    rdev->destid == (u16)dev_info.destid &&
-			    rdev->hopcount == (u8)dev_info.hopcount)
+			    rdev->destid == dev_info.destid &&
+			    rdev->hopcount == dev_info.hopcount)
 				break;
 		} while (rdev);
 	}
@@ -2146,8 +2159,8 @@ static long mport_cdev_ioctl(struct file *filp,
 		return maint_port_idx_get(data, (void __user *)arg);
 	case RIO_MPORT_GET_PROPERTIES:
 		md->properties.hdid = md->mport->host_deviceid;
-		if (copy_to_user((void __user *)arg, &(data->md->properties),
-				 sizeof(data->md->properties)))
+		if (copy_to_user((void __user *)arg, &(md->properties),
+				 sizeof(md->properties)))
 			return -EFAULT;
 		return 0;
 	case RIO_ENABLE_DOORBELL_RANGE:
@@ -2159,11 +2172,11 @@ static long mport_cdev_ioctl(struct file *filp,
 	case RIO_DISABLE_PORTWRITE_RANGE:
 		return rio_mport_remove_pw_filter(data, (void __user *)arg);
 	case RIO_SET_EVENT_MASK:
-		data->event_mask = arg;
+		data->event_mask = (u32)arg;
 		return 0;
 	case RIO_GET_EVENT_MASK:
 		if (copy_to_user((void __user *)arg, &data->event_mask,
-				 sizeof(data->event_mask)))
+				 sizeof(u32)))
 			return -EFAULT;
 		return 0;
 	case RIO_MAP_OUTBOUND:
@@ -2374,7 +2387,7 @@ static ssize_t mport_write(struct file *filp, const char __user *buf,
 		return -EINVAL;
 
 	ret = rio_mport_send_doorbell(mport,
-				      (u16)event.u.doorbell.rioid,
+				      event.u.doorbell.rioid,
 				      event.u.doorbell.payload);
 	if (ret < 0)
 		return ret;
@@ -2421,7 +2434,7 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
 	struct mport_dev *md;
 	struct rio_mport_attr attr;
 
-	md = kzalloc(sizeof(struct mport_dev), GFP_KERNEL);
+	md = kzalloc(sizeof(*md), GFP_KERNEL);
 	if (!md) {
 		rmcd_error("Unable allocate a device object");
 		return NULL;
@@ -2470,7 +2483,7 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
 	/* The transfer_mode property will be returned through mport query
 	 * interface
 	 */
-#ifdef CONFIG_PPC /* for now: only on Freescale's SoCs */
+#ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */
 	md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
 #else
 	md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index b1755b23893e..92e37e224cd2 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -955,7 +955,8 @@ static ssize_t environ_read(struct file *file, char __user *buf,
 	struct mm_struct *mm = file->private_data;
 	unsigned long env_start, env_end;
 
-	if (!mm)
+	/* Ensure the process spawned far enough to have an environment. */
+	if (!mm || !mm->env_end)
 		return 0;
 
 	page = (char *)__get_free_page(GFP_TEMPORARY);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f4ed4f1b0c77..6b052aa7b5b7 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -517,6 +517,27 @@ static inline int PageTransCompound(struct page *page)
 }
 
 /*
+ * PageTransCompoundMap is the same as PageTransCompound, but it also
+ * guarantees the primary MMU has the entire compound page mapped
+ * through pmd_trans_huge, which in turn guarantees the secondary MMUs
+ * can also map the entire compound page. This allows the secondary
+ * MMUs to call get_user_pages() only once for each compound page and
+ * to immediately map the entire compound page with a single secondary
+ * MMU fault. If there will be a pmd split later, the secondary MMUs
+ * will get an update through the MMU notifier invalidation through
+ * split_huge_pmd().
+ *
+ * Unlike PageTransCompound, this is safe to be called only while
+ * split_huge_pmd() cannot run from under us, like if protected by the
+ * MMU notifier, otherwise it may result in page->_mapcount < 0 false
+ * positives.
+ */
+static inline int PageTransCompoundMap(struct page *page)
+{
+	return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
+}
+
+/*
  * PageTransTail returns true for both transparent huge pages
  * and hugetlbfs pages, so it should only be called when it's known
  * that hugetlbfs pages aren't involved.
@@ -559,6 +580,7 @@ static inline int TestClearPageDoubleMap(struct page *page)
 #else
 TESTPAGEFLAG_FALSE(TransHuge)
 TESTPAGEFLAG_FALSE(TransCompound)
+TESTPAGEFLAG_FALSE(TransCompoundMap)
 TESTPAGEFLAG_FALSE(TransTail)
 TESTPAGEFLAG_FALSE(DoubleMap)
 	TESTSETFLAG_FALSE(DoubleMap)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 2b83359c19ca..0a4cd4703f40 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -533,6 +533,10 @@ static inline swp_entry_t get_swap_page(void)
 #ifdef CONFIG_MEMCG
 static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
 {
+	/* Cgroup2 doesn't have per-cgroup swappiness */
+	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
+		return vm_swappiness;
+
 	/* root ? */
 	if (mem_cgroup_disabled() || !memcg->css.parent)
 		return vm_swappiness;
diff --git a/include/linux/rio_mport_cdev.h b/include/uapi/linux/rio_mport_cdev.h
index b65d19df76d2..5796bf1d06ad 100644
--- a/include/linux/rio_mport_cdev.h
+++ b/include/uapi/linux/rio_mport_cdev.h
@@ -39,16 +39,16 @@
 #ifndef _RIO_MPORT_CDEV_H_
 #define _RIO_MPORT_CDEV_H_
 
-#ifndef __user
-#define __user
-#endif
+#include <linux/ioctl.h>
+#include <linux/types.h>
 
 struct rio_mport_maint_io {
-	uint32_t rioid;		/* destID of remote device */
-	uint32_t hopcount;	/* hopcount to remote device */
-	uint32_t offset;	/* offset in register space */
-	size_t length;		/* length in bytes */
-	void __user *buffer;	/* data buffer */
+	__u16 rioid;		/* destID of remote device */
+	__u8  hopcount;		/* hopcount to remote device */
+	__u8  pad0[5];
+	__u32 offset;		/* offset in register space */
+	__u32 length;		/* length in bytes */
+	__u64 buffer;		/* pointer to data buffer */
 };
 
 /*
@@ -66,22 +66,23 @@ struct rio_mport_maint_io {
 #define RIO_CAP_MAP_INB		(1 << 7)
 
 struct rio_mport_properties {
-	uint16_t hdid;
-	uint8_t id;			/* Physical port ID */
-	uint8_t index;
-	uint32_t flags;
-	uint32_t sys_size;		/* Default addressing size */
-	uint8_t port_ok;
-	uint8_t link_speed;
-	uint8_t link_width;
-	uint32_t dma_max_sge;
-	uint32_t dma_max_size;
-	uint32_t dma_align;
-	uint32_t transfer_mode;		/* Default transfer mode */
-	uint32_t cap_sys_size;		/* Capable system sizes */
-	uint32_t cap_addr_size;		/* Capable addressing sizes */
-	uint32_t cap_transfer_mode;	/* Capable transfer modes */
-	uint32_t cap_mport;		/* Mport capabilities */
+	__u16 hdid;
+	__u8  id;			/* Physical port ID */
+	__u8  index;
+	__u32 flags;
+	__u32 sys_size;			/* Default addressing size */
+	__u8  port_ok;
+	__u8  link_speed;
+	__u8  link_width;
+	__u8  pad0;
+	__u32 dma_max_sge;
+	__u32 dma_max_size;
+	__u32 dma_align;
+	__u32 transfer_mode;		/* Default transfer mode */
+	__u32 cap_sys_size;		/* Capable system sizes */
+	__u32 cap_addr_size;		/* Capable addressing sizes */
+	__u32 cap_transfer_mode;	/* Capable transfer modes */
+	__u32 cap_mport;		/* Mport capabilities */
 };
 
 /*
@@ -93,54 +94,57 @@ struct rio_mport_properties {
 #define RIO_PORTWRITE (1 << 1)
 
 struct rio_doorbell {
-	uint32_t rioid;
-	uint16_t payload;
+	__u16 rioid;
+	__u16 payload;
 };
 
 struct rio_doorbell_filter {
-	uint32_t rioid;		/* 0xffffffff to match all ids */
-	uint16_t low;
-	uint16_t high;
+	__u16 rioid;	/* Use RIO_INVALID_DESTID to match all ids */
+	__u16 low;
+	__u16 high;
+	__u16 pad0;
 };
 
 
 struct rio_portwrite {
-	uint32_t payload[16];
+	__u32 payload[16];
 };
 
 struct rio_pw_filter {
-	uint32_t mask;
-	uint32_t low;
-	uint32_t high;
+	__u32 mask;
+	__u32 low;
+	__u32 high;
+	__u32 pad0;
 };
 
 /* RapidIO base address for inbound requests set to value defined below
  * indicates that no specific RIO-to-local address translation is requested
  * and driver should use direct (one-to-one) address mapping.
 */
-#define RIO_MAP_ANY_ADDR	(uint64_t)(~((uint64_t) 0))
+#define RIO_MAP_ANY_ADDR	(__u64)(~((__u64) 0))
 
 struct rio_mmap {
-	uint32_t rioid;
-	uint64_t rio_addr;
-	uint64_t length;
-	uint64_t handle;
-	void *address;
+	__u16 rioid;
+	__u16 pad0[3];
+	__u64 rio_addr;
+	__u64 length;
+	__u64 handle;
+	__u64 address;
 };
 
 struct rio_dma_mem {
-	uint64_t length;	/* length of DMA memory */
-	uint64_t dma_handle;	/* handle associated with this memory */
-	void *buffer;		/* pointer to this memory */
+	__u64 length;		/* length of DMA memory */
+	__u64 dma_handle;	/* handle associated with this memory */
+	__u64 address;
 };
 
-
 struct rio_event {
-	unsigned int header;	/* event type RIO_DOORBELL or RIO_PORTWRITE */
+	__u32 header;	/* event type RIO_DOORBELL or RIO_PORTWRITE */
 	union {
 		struct rio_doorbell doorbell;	/* header for RIO_DOORBELL */
 		struct rio_portwrite portwrite; /* header for RIO_PORTWRITE */
 	} u;
+	__u32 pad0;
 };
 
 enum rio_transfer_sync {
@@ -184,35 +188,37 @@ enum rio_exchange {
 };
 
 struct rio_transfer_io {
-	uint32_t rioid;			/* Target destID */
-	uint64_t rio_addr;		/* Address in target's RIO mem space */
-	enum rio_exchange method;	/* Data exchange method */
-	void __user *loc_addr;
-	uint64_t handle;
-	uint64_t offset;		/* Offset in buffer */
-	uint64_t length;		/* Length in bytes */
-	uint32_t completion_code;	/* Completion code for this transfer */
+	__u64 rio_addr;	/* Address in target's RIO mem space */
+	__u64 loc_addr;
+	__u64 handle;
+	__u64 offset;	/* Offset in buffer */
+	__u64 length;	/* Length in bytes */
+	__u16 rioid;	/* Target destID */
+	__u16 method;	/* Data exchange method, one of rio_exchange enum */
+	__u32 completion_code;	/* Completion code for this transfer */
 };
 
 struct rio_transaction {
-	uint32_t transfer_mode;		/* Data transfer mode */
-	enum rio_transfer_sync sync;	/* Synchronization method */
-	enum rio_transfer_dir dir;	/* Transfer direction */
-	size_t count;			/* Number of transfers */
-	struct rio_transfer_io __user *block;	/* Array of <count> transfers */
+	__u64 block;	/* Pointer to array of <count> transfers */
+	__u32 count;	/* Number of transfers */
+	__u32 transfer_mode;	/* Data transfer mode */
+	__u16 sync;	/* Synch method, one of rio_transfer_sync enum */
+	__u16 dir;	/* Transfer direction, one of rio_transfer_dir enum */
+	__u32 pad0;
 };
 
 struct rio_async_tx_wait {
-	uint32_t token;		/* DMA transaction ID token */
-	uint32_t timeout;	/* Wait timeout in msec, if 0 use default TO */
+	__u32 token;	/* DMA transaction ID token */
+	__u32 timeout;	/* Wait timeout in msec, if 0 use default TO */
 };
 
 #define RIO_MAX_DEVNAME_SZ	20
 
 struct rio_rdev_info {
-	uint32_t destid;
-	uint8_t hopcount;
-	uint32_t comptag;
+	__u16 destid;
+	__u8 hopcount;
+	__u8 pad0;
+	__u32 comptag;
 	char name[RIO_MAX_DEVNAME_SZ + 1];
 };
 
@@ -220,11 +226,11 @@ struct rio_rdev_info {
 #define RIO_MPORT_DRV_MAGIC	'm'
 
 #define RIO_MPORT_MAINT_HDID_SET	\
-	_IOW(RIO_MPORT_DRV_MAGIC, 1, uint16_t)
+	_IOW(RIO_MPORT_DRV_MAGIC, 1, __u16)
 #define RIO_MPORT_MAINT_COMPTAG_SET	\
-	_IOW(RIO_MPORT_DRV_MAGIC, 2, uint32_t)
+	_IOW(RIO_MPORT_DRV_MAGIC, 2, __u32)
 #define RIO_MPORT_MAINT_PORT_IDX_GET	\
-	_IOR(RIO_MPORT_DRV_MAGIC, 3, uint32_t)
+	_IOR(RIO_MPORT_DRV_MAGIC, 3, __u32)
 #define RIO_MPORT_GET_PROPERTIES \
 	_IOR(RIO_MPORT_DRV_MAGIC, 4, struct rio_mport_properties)
 #define RIO_MPORT_MAINT_READ_LOCAL \
@@ -244,9 +250,9 @@ struct rio_rdev_info {
 #define RIO_DISABLE_PORTWRITE_RANGE \
 	_IOW(RIO_MPORT_DRV_MAGIC, 12, struct rio_pw_filter)
 #define RIO_SET_EVENT_MASK \
-	_IOW(RIO_MPORT_DRV_MAGIC, 13, unsigned int)
+	_IOW(RIO_MPORT_DRV_MAGIC, 13, __u32)
 #define RIO_GET_EVENT_MASK \
-	_IOR(RIO_MPORT_DRV_MAGIC, 14, unsigned int)
+	_IOR(RIO_MPORT_DRV_MAGIC, 14, __u32)
 #define RIO_MAP_OUTBOUND \
 	_IOWR(RIO_MPORT_DRV_MAGIC, 15, struct rio_mmap)
 #define RIO_UNMAP_OUTBOUND \
@@ -254,11 +260,11 @@ struct rio_rdev_info {
 #define RIO_MAP_INBOUND \
 	_IOWR(RIO_MPORT_DRV_MAGIC, 17, struct rio_mmap)
 #define RIO_UNMAP_INBOUND \
-	_IOW(RIO_MPORT_DRV_MAGIC, 18, uint64_t)
+	_IOW(RIO_MPORT_DRV_MAGIC, 18, __u64)
 #define RIO_ALLOC_DMA \
 	_IOWR(RIO_MPORT_DRV_MAGIC, 19, struct rio_dma_mem)
 #define RIO_FREE_DMA \
-	_IOW(RIO_MPORT_DRV_MAGIC, 20, uint64_t)
+	_IOW(RIO_MPORT_DRV_MAGIC, 20, __u64)
 #define RIO_TRANSFER \
 	_IOWR(RIO_MPORT_DRV_MAGIC, 21, struct rio_transaction)
 #define RIO_WAIT_FOR_ASYNC \
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
index 3f10e5317b46..8f3a8f606fd9 100644
--- a/include/uapi/linux/swab.h
+++ b/include/uapi/linux/swab.h
@@ -45,9 +45,7 @@
 
 static inline __attribute_const__ __u16 __fswab16(__u16 val)
 {
-#ifdef __HAVE_BUILTIN_BSWAP16__
-	return __builtin_bswap16(val);
-#elif defined (__arch_swab16)
+#if defined (__arch_swab16)
 	return __arch_swab16(val);
 #else
 	return ___constant_swab16(val);
@@ -56,9 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
 
 static inline __attribute_const__ __u32 __fswab32(__u32 val)
 {
-#ifdef __HAVE_BUILTIN_BSWAP32__
-	return __builtin_bswap32(val);
-#elif defined(__arch_swab32)
+#if defined(__arch_swab32)
 	return __arch_swab32(val);
 #else
 	return ___constant_swab32(val);
@@ -67,9 +63,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
 
 static inline __attribute_const__ __u64 __fswab64(__u64 val)
 {
-#ifdef __HAVE_BUILTIN_BSWAP64__
-	return __builtin_bswap64(val);
-#elif defined (__arch_swab64)
+#if defined (__arch_swab64)
 	return __arch_swab64(val);
 #elif defined(__SWAB_64_THRU_32__)
 	__u32 h = val >> 32;
@@ -102,28 +96,40 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
  * __swab16 - return a byteswapped 16-bit value
  * @x: value to byteswap
  */
+#ifdef __HAVE_BUILTIN_BSWAP16__
+#define __swab16(x) (__u16)__builtin_bswap16((__u16)(x))
+#else
 #define __swab16(x)				\
 	(__builtin_constant_p((__u16)(x)) ?	\
 	___constant_swab16(x) :			\
 	__fswab16(x))
+#endif
 
 /**
  * __swab32 - return a byteswapped 32-bit value
  * @x: value to byteswap
  */
+#ifdef __HAVE_BUILTIN_BSWAP32__
+#define __swab32(x) (__u32)__builtin_bswap32((__u32)(x))
+#else
 #define __swab32(x)				\
 	(__builtin_constant_p((__u32)(x)) ?	\
 	___constant_swab32(x) :			\
 	__fswab32(x))
+#endif
 
 /**
  * __swab64 - return a byteswapped 64-bit value
  * @x: value to byteswap
  */
+#ifdef __HAVE_BUILTIN_BSWAP64__
+#define __swab64(x) (__u64)__builtin_bswap64((__u64)(x))
+#else
 #define __swab64(x)				\
 	(__builtin_constant_p((__u64)(x)) ?	\
 	___constant_swab64(x) :			\
 	__fswab64(x))
+#endif
 
 /**
  * __swahw32 - return a word-swapped 32-bit value
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 9e0b0315a724..53ad6c0831ae 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -42,12 +42,14 @@
 
 #define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)
 
+#define STACK_ALLOC_NULL_PROTECTION_BITS 1
 #define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
 #define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
 #define STACK_ALLOC_ALIGN 4
 #define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
 					STACK_ALLOC_ALIGN)
-#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - STACK_ALLOC_OFFSET_BITS)
+#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
+		STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
 #define STACK_ALLOC_SLABS_CAP 1024
 #define STACK_ALLOC_MAX_SLABS \
 	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
@@ -59,6 +61,7 @@ union handle_parts {
 	struct {
 		u32 slabindex : STACK_ALLOC_INDEX_BITS;
 		u32 offset : STACK_ALLOC_OFFSET_BITS;
+		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
 	};
 };
 
@@ -136,6 +139,7 @@ static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
 	stack->size = size;
 	stack->handle.slabindex = depot_index;
 	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
+	stack->handle.valid = 1;
 	memcpy(stack->entries, entries, size * sizeof(unsigned long));
 	depot_offset += required_size;
 
diff --git a/mm/compaction.c b/mm/compaction.c
index ccf97b02b85f..8fa254043801 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -852,16 +852,8 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
 		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
 							ISOLATE_UNEVICTABLE);
 
-		/*
-		 * In case of fatal failure, release everything that might
-		 * have been isolated in the previous iteration, and signal
-		 * the failure back to caller.
-		 */
-		if (!pfn) {
-			putback_movable_pages(&cc->migratepages);
-			cc->nr_migratepages = 0;
+		if (!pfn)
 			break;
-		}
 
 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
 			break;
@@ -1741,7 +1733,7 @@ void compaction_unregister_node(struct node *node)
 
 static inline bool kcompactd_work_requested(pg_data_t *pgdat)
 {
-	return pgdat->kcompactd_max_order > 0;
+	return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
 }
 
 static bool kcompactd_node_suitable(pg_data_t *pgdat)
@@ -1805,6 +1797,8 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 		INIT_LIST_HEAD(&cc.freepages);
 		INIT_LIST_HEAD(&cc.migratepages);
 
+		if (kthread_should_stop())
+			return;
 		status = compact_zone(zone, &cc);
 
 		if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index df67b53ae3c5..f7daa7de8f48 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3452,7 +3452,7 @@ next:
 		}
 	}
 
-	pr_info("%lu of %lu THP split", split, total);
+	pr_info("%lu of %lu THP split\n", split, total);
 
 	return 0;
 }
@@ -3463,7 +3463,7 @@ static int __init split_huge_pages_debugfs(void)
 {
 	void *ret;
 
-	ret = debugfs_create_file("split_huge_pages", 0644, NULL, NULL,
+	ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
 			&split_huge_pages_fops);
 	if (!ret)
 		pr_warn("Failed to create split_huge_pages in debugfs");
diff --git a/mm/memory.c b/mm/memory.c
index 305537fc8640..52c218e2b724 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1222,15 +1222,8 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 		next = pmd_addr_end(addr, end);
 		if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE) {
-#ifdef CONFIG_DEBUG_VM
-				if (!rwsem_is_locked(&tlb->mm->mmap_sem)) {
-					pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n",
-						__func__, addr, end,
-						vma->vm_start,
-						vma->vm_end);
-					BUG();
-				}
-#endif
+				VM_BUG_ON_VMA(vma_is_anonymous(vma) &&
+				    !rwsem_is_locked(&tlb->mm->mmap_sem), vma);
 				split_huge_pmd(vma, pmd, addr);
 			} else if (zap_huge_pmd(tlb, vma, pmd, addr))
 				goto next;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 59de90d5d3a3..c1069efcc4d7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6485,7 +6485,7 @@ int __meminit init_per_zone_wmark_min(void)
 	setup_per_zone_inactive_ratio();
 	return 0;
 }
-module_init(init_per_zone_wmark_min)
+core_initcall(init_per_zone_wmark_min)
 
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
diff --git a/mm/zswap.c b/mm/zswap.c
index 91dad80d068b..de0f119b1780 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -170,6 +170,8 @@ static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
 static LIST_HEAD(zswap_pools);
 /* protects zswap_pools list modification */
 static DEFINE_SPINLOCK(zswap_pools_lock);
+/* pool counter to provide unique names to zpool */
+static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 
 /* used by param callback function */
 static bool zswap_init_started;
@@ -565,6 +567,7 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 {
 	struct zswap_pool *pool;
+	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
 	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
 
 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
@@ -573,7 +576,10 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 		return NULL;
 	}
 
-	pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops);
+	/* unique name for each pool specifically required by zsmalloc */
+	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
+
+	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
 	if (!pool->zpool) {
 		pr_err("%s zpool not available\n", type);
 		goto error;
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 161dd0d67da8..a9155077feef 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -371,6 +371,49 @@ static void do_usb_table(void *symval, unsigned long size,
 		do_usb_entry_multi(symval + i, mod);
 }
 
+static void do_of_entry_multi(void *symval, struct module *mod)
+{
+	char alias[500];
+	int len;
+	char *tmp;
+
+	DEF_FIELD_ADDR(symval, of_device_id, name);
+	DEF_FIELD_ADDR(symval, of_device_id, type);
+	DEF_FIELD_ADDR(symval, of_device_id, compatible);
+
+	len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
+		      (*type)[0] ? *type : "*");
+
+	if (compatible[0])
+		sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
+			*compatible);
+
+	/* Replace all whitespace with underscores */
+	for (tmp = alias; tmp && *tmp; tmp++)
+		if (isspace(*tmp))
+			*tmp = '_';
+
+	buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias);
+	strcat(alias, "C");
+	add_wildcard(alias);
+	buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias);
+}
+
+static void do_of_table(void *symval, unsigned long size,
+			struct module *mod)
+{
+	unsigned int i;
+	const unsigned long id_size = SIZE_of_device_id;
+
+	device_id_check(mod->name, "of", size, id_size, symval);
+
+	/* Leave last one: it's the terminator. */
+	size -= id_size;
+
+	for (i = 0; i < size; i += id_size)
+		do_of_entry_multi(symval + i, mod);
+}
+
 /* Looks like: hid:bNvNpN */
 static int do_hid_entry(const char *filename,
 			void *symval, char *alias)
@@ -684,30 +727,6 @@ static int do_pcmcia_entry(const char *filename,
 }
 ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry);
 
-static int do_of_entry (const char *filename, void *symval, char *alias)
-{
-	int len;
-	char *tmp;
-	DEF_FIELD_ADDR(symval, of_device_id, name);
-	DEF_FIELD_ADDR(symval, of_device_id, type);
-	DEF_FIELD_ADDR(symval, of_device_id, compatible);
-
-	len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
-		      (*type)[0] ? *type : "*");
-
-	if (compatible[0])
-		sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
-			*compatible);
-
-	/* Replace all whitespace with underscores */
-	for (tmp = alias; tmp && *tmp; tmp++)
-		if (isspace (*tmp))
-			*tmp = '_';
-
-	return 1;
-}
-ADD_TO_DEVTABLE("of", of_device_id, do_of_entry);
-
 static int do_vio_entry(const char *filename, void *symval,
 			char *alias)
 {
@@ -1348,6 +1367,8 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
 	/* First handle the "special" cases */
 	if (sym_is(name, namelen, "usb"))
 		do_usb_table(symval, sym->st_size, mod);
+	if (sym_is(name, namelen, "of"))
+		do_of_table(symval, sym->st_size, mod);
 	else if (sym_is(name, namelen, "pnp"))
 		do_pnp_device_entry(symval, sym->st_size, mod);
 	else if (sym_is(name, namelen, "pnp_card"))