author	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-22 00:32:38 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-22 00:32:38 -0500
commit	a5d6e63323fe7799eb0e6fd0a41fbfad10fca258 (patch)
tree	ca52285fc4ac7fa25a8e1621782a5a51130ac08b
parent	78dc53c422172a317adb0776dfb687057ffa28b7 (diff)
parent	7aa555bf26763b86332c7a3689701c999834b87a (diff)
Merge branch 'akpm' (fixes from Andrew)
Merge patches from Andrew Morton:
"13 fixes"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
mm: place page->pmd_huge_pte to right union
MAINTAINERS: add keyboard driver to Hyper-V file list
x86, mm: do not leak page->ptl for pmd page tables
ipc,shm: correct error return value in shmctl (SHM_UNLOCK)
mm, mempolicy: silence gcc warning
block/partitions/efi.c: fix bound check
ARM: drivers/rtc/rtc-at91rm9200.c: disable interrupts at shutdown
mm: hugetlbfs: fix hugetlbfs optimization
kernel: remove CONFIG_USE_GENERIC_SMP_HELPERS cleanly
ipc,shm: fix shm_file deletion races
mm: thp: give transparent hugepage code a separate copy_page
checkpatch: fix "Use of uninitialized value" warnings
configfs: fix race between dentry put and lookup
-rw-r--r--	Documentation/vm/split_page_table_lock	6
-rw-r--r--	MAINTAINERS	1
-rw-r--r--	arch/x86/mm/pgtable.c	4
-rw-r--r--	block/partitions/efi.c	5
-rw-r--r--	drivers/block/null_blk.c	8
-rw-r--r--	drivers/rtc/rtc-at91rm9200.c	9
-rw-r--r--	fs/configfs/dir.c	16
-rw-r--r--	include/linux/hugetlb.h	10
-rw-r--r--	include/linux/mm_types.h	6
-rw-r--r--	ipc/shm.c	37
-rw-r--r--	mm/hugetlb.c	51
-rw-r--r--	mm/mempolicy.c	2
-rw-r--r--	mm/migrate.c	48
-rw-r--r--	mm/swap.c	143
-rw-r--r--	net/Kconfig	4
-rwxr-xr-x	scripts/checkpatch.pl	1
16 files changed, 227 insertions(+), 124 deletions(-)
diff --git a/Documentation/vm/split_page_table_lock b/Documentation/vm/split_page_table_lock
index 7521d367f21d..6dea4fd5c961 100644
--- a/Documentation/vm/split_page_table_lock
+++ b/Documentation/vm/split_page_table_lock
@@ -63,9 +63,9 @@ levels.
 PMD split lock enabling requires pgtable_pmd_page_ctor() call on PMD table
 allocation and pgtable_pmd_page_dtor() on freeing.
 
-Allocation usually happens in pmd_alloc_one(), freeing in pmd_free(), but
-make sure you cover all PMD table allocation / freeing paths: i.e X86_PAE
-preallocate few PMDs on pgd_alloc().
+Allocation usually happens in pmd_alloc_one(), freeing in pmd_free() and
+pmd_free_tlb(), but make sure you cover all PMD table allocation / freeing
+paths: i.e X86_PAE preallocate few PMDs on pgd_alloc().
 
 With everything in place you can set CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK.
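For readers outside arch code, a minimal sketch of the pairing this document mandates. The example_* helpers are hypothetical, not the actual x86 implementation; the point is that pgtable_pmd_page_ctor() may allocate the split lock, so every path that frees a PMD table must call the dtor first or the lock storage leaks (exactly the bug fixed below in arch/x86/mm/pgtable.c, where the tlb free path skipped the dtor):

/* Sketch of the required ctor/dtor pairing; not verbatim kernel source. */
static pmd_t *example_pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);

	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {	/* may allocate page->ptl */
		__free_pages(page, 0);
		return NULL;
	}
	return (pmd_t *)page_address(page);
}

static void example_pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	struct page *page = virt_to_page(pmd);

	pgtable_pmd_page_dtor(page);	/* release page->ptl if allocated */
	__free_pages(page, 0);
}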
diff --git a/MAINTAINERS b/MAINTAINERS
index 8d98e54f2bf2..d9c97dce6635 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4065,6 +4065,7 @@ F:	arch/x86/include/uapi/asm/hyperv.h
 F:	arch/x86/kernel/cpu/mshyperv.c
 F:	drivers/hid/hid-hyperv.c
 F:	drivers/hv/
+F:	drivers/input/serio/hyperv-keyboard.c
 F:	drivers/net/hyperv/
 F:	drivers/scsi/storvsc_drv.c
 F:	drivers/video/hyperv_fb.c
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 36aa999b2631..c96314abd144 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -61,6 +61,7 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 #if PAGETABLE_LEVELS > 2
 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
+	struct page *page = virt_to_page(pmd);
 	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
 	/*
 	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
@@ -69,7 +70,8 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 #ifdef CONFIG_X86_PAE
 	tlb->need_flush_all = 1;
 #endif
-	tlb_remove_page(tlb, virt_to_page(pmd));
+	pgtable_pmd_page_dtor(page);
+	tlb_remove_page(tlb, page);
 }
 
 #if PAGETABLE_LEVELS > 3
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index a8287b49d062..dc51f467a560 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -96,6 +96,7 @@
  * - Code works, detects all the partitions.
  *
  ************************************************************/
+#include <linux/kernel.h>
 #include <linux/crc32.h>
 #include <linux/ctype.h>
 #include <linux/math64.h>
@@ -715,8 +716,8 @@ int efi_partition(struct parsed_partitions *state)
 		efi_guid_unparse(&ptes[i].unique_partition_guid, info->uuid);
 
 		/* Naively convert UTF16-LE to 7 bits. */
-		label_max = min(sizeof(info->volname) - 1,
-				sizeof(ptes[i].partition_name));
+		label_max = min(ARRAY_SIZE(info->volname) - 1,
+				ARRAY_SIZE(ptes[i].partition_name));
 		info->volname[label_max] = 0;
 		while (label_count < label_max) {
 			u8 c = ptes[i].partition_name[label_count] & 0xff;
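The bound bug here is worth spelling out: partition_name is an array of 16-bit UTF-16 code units, so sizeof() yields a byte count, twice the element count, and the conversion loop could index past the end of the name field. ARRAY_SIZE() counts elements, and it lives in linux/kernel.h, hence the added include. A standalone illustration in plain C (the 36-element size mirrors the GPT entry layout):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	unsigned short partition_name[36];	/* UTF-16 label: 36 elements */

	/* sizeof counts bytes: 72 here, twice the element count. */
	printf("sizeof     = %zu\n", sizeof(partition_name));
	/* ARRAY_SIZE counts elements: 36, the correct loop bound. */
	printf("ARRAY_SIZE = %zu\n", ARRAY_SIZE(partition_name));
	return 0;
}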
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index b5d842370cc9..ea192ec029c4 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -223,7 +223,7 @@ static void null_softirq_done_fn(struct request *rq)
 	blk_end_request_all(rq, 0);
 }
 
-#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+#ifdef CONFIG_SMP
 
 static void null_ipi_cmd_end_io(void *data)
 {
@@ -260,7 +260,7 @@ static void null_cmd_end_ipi(struct nullb_cmd *cmd)
 	put_cpu();
 }
 
-#endif /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
+#endif /* CONFIG_SMP */
 
 static inline void null_handle_cmd(struct nullb_cmd *cmd)
 {
@@ -270,7 +270,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd)
 		end_cmd(cmd);
 		break;
 	case NULL_IRQ_SOFTIRQ:
-#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+#ifdef CONFIG_SMP
 		null_cmd_end_ipi(cmd);
 #else
 		end_cmd(cmd);
@@ -571,7 +571,7 @@ static int __init null_init(void)
 {
 	unsigned int i;
 
-#if !defined(CONFIG_SMP) || !defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+#if !defined(CONFIG_SMP)
 	if (irqmode == NULL_IRQ_SOFTIRQ) {
 		pr_warn("null_blk: softirq completions not available.\n");
 		pr_warn("null_blk: using direct completions.\n");
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 8b2cd8a5a2ff..c0da95e95702 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -428,6 +428,14 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static void at91_rtc_shutdown(struct platform_device *pdev)
+{
+	/* Disable all interrupts */
+	at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
+					AT91_RTC_SECEV | AT91_RTC_TIMEV |
+					AT91_RTC_CALEV);
+}
+
 #ifdef CONFIG_PM_SLEEP
 
 /* AT91RM9200 RTC Power management control */
@@ -466,6 +474,7 @@ static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume);
 
 static struct platform_driver at91_rtc_driver = {
 	.remove		= __exit_p(at91_rtc_remove),
+	.shutdown	= at91_rtc_shutdown,
 	.driver		= {
 		.name	= "at91_rtc",
 		.owner	= THIS_MODULE,
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 4522e0755773..e081acbac2e7 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -56,10 +56,19 @@ static void configfs_d_iput(struct dentry * dentry,
 	struct configfs_dirent *sd = dentry->d_fsdata;
 
 	if (sd) {
-		BUG_ON(sd->s_dentry != dentry);
 		/* Coordinate with configfs_readdir */
 		spin_lock(&configfs_dirent_lock);
-		sd->s_dentry = NULL;
+		/* Coordinate with configfs_attach_attr where will increase
+		 * sd->s_count and update sd->s_dentry to new allocated one.
+		 * Only set sd->dentry to null when this dentry is the only
+		 * sd owner.
+		 * If not do so, configfs_d_iput may run just after
+		 * configfs_attach_attr and set sd->s_dentry to null
+		 * even it's still in use.
+		 */
+		if (atomic_read(&sd->s_count) <= 2)
+			sd->s_dentry = NULL;
+
 		spin_unlock(&configfs_dirent_lock);
 		configfs_put(sd);
 	}
@@ -416,8 +425,11 @@ static int configfs_attach_attr(struct configfs_dirent * sd, struct dentry * den
 	struct configfs_attribute * attr = sd->s_element;
 	int error;
 
+	spin_lock(&configfs_dirent_lock);
 	dentry->d_fsdata = configfs_get(sd);
 	sd->s_dentry = dentry;
+	spin_unlock(&configfs_dirent_lock);
+
 	error = configfs_create(dentry, (attr->ca_mode & S_IALLUGO) | S_IFREG,
 				configfs_init_file);
 	if (error) {
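The comment added above compresses a subtle ordering; spelled out as a hypothetical interleaving (illustrative timeline in comment form, not kernel source):

/*
 * CPU0: configfs_attach_attr(sd, new_dentry)
 *           configfs_get(sd);            // sd->s_count now > 2
 *           sd->s_dentry = new_dentry;   // both under configfs_dirent_lock
 *
 * CPU1: configfs_d_iput(old_dentry)      // old dentry finally dropped
 *           // old code: sd->s_dentry = NULL, clobbering new_dentry
 *           // new code: sees s_count > 2 (CPU0 still owns a reference)
 *           //           and leaves sd->s_dentry alone
 */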
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index acd2010328f3..9649ff0c63f8 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -31,6 +31,7 @@ struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
 void hugepage_put_subpool(struct hugepage_subpool *spool);
 
 int PageHuge(struct page *page);
+int PageHeadHuge(struct page *page_head);
 
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
 int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
@@ -69,7 +70,6 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
 bool isolate_huge_page(struct page *page, struct list_head *list);
 void putback_active_hugepage(struct page *page);
 bool is_hugepage_active(struct page *page);
-void copy_huge_page(struct page *dst, struct page *src);
 
 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
@@ -104,6 +104,11 @@ static inline int PageHuge(struct page *page)
 	return 0;
 }
 
+static inline int PageHeadHuge(struct page *page_head)
+{
+	return 0;
+}
+
 static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 {
 }
@@ -140,9 +145,6 @@ static inline int dequeue_hwpoisoned_huge_page(struct page *page)
 #define isolate_huge_page(p, l) false
 #define putback_active_hugepage(p)	do {} while (0)
 #define is_hugepage_active(x)	false
-static inline void copy_huge_page(struct page *dst, struct page *src)
-{
-}
 
 static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 			unsigned long address, unsigned long end, pgprot_t newprot)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 10f5a7272b80..011eb85d7b0f 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -65,9 +65,6 @@ struct page {
 						 * this page is only used to
 						 * free other pages.
 						 */
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
-		pgtable_t pmd_huge_pte; /* protected by page->ptl */
-#endif
 	};
 
 	union {
@@ -135,6 +132,9 @@ struct page {
 
 		struct list_head list;	/* slobs list of pages */
 		struct slab *slab_page; /* slab fields */
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
+		pgtable_t pmd_huge_pte; /* protected by page->ptl */
+#endif
 	};
 
 	/* Remainder is not double word aligned */
diff --git a/ipc/shm.c b/ipc/shm.c
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -208,15 +208,18 @@ static void shm_open(struct vm_area_struct *vma)
  */
 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 {
+	struct file *shm_file;
+
+	shm_file = shp->shm_file;
+	shp->shm_file = NULL;
 	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	shm_rmid(ns, shp);
 	shm_unlock(shp);
-	if (!is_file_hugepages(shp->shm_file))
-		shmem_lock(shp->shm_file, 0, shp->mlock_user);
+	if (!is_file_hugepages(shm_file))
+		shmem_lock(shm_file, 0, shp->mlock_user);
 	else if (shp->mlock_user)
-		user_shm_unlock(file_inode(shp->shm_file)->i_size,
-				shp->mlock_user);
-	fput (shp->shm_file);
+		user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user);
+	fput(shm_file);
 	ipc_rcu_putref(shp, shm_rcu_free);
 }
 
@@ -974,15 +977,25 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
 		ipc_lock_object(&shp->shm_perm);
 		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
 			kuid_t euid = current_euid();
-			err = -EPERM;
 			if (!uid_eq(euid, shp->shm_perm.uid) &&
-			    !uid_eq(euid, shp->shm_perm.cuid))
+			    !uid_eq(euid, shp->shm_perm.cuid)) {
+				err = -EPERM;
 				goto out_unlock0;
-			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
+			}
+			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
+				err = -EPERM;
 				goto out_unlock0;
+			}
 		}
 
 		shm_file = shp->shm_file;
+
+		/* check if shm_destroy() is tearing down shp */
+		if (shm_file == NULL) {
+			err = -EIDRM;
+			goto out_unlock0;
+		}
+
 		if (is_file_hugepages(shm_file))
 			goto out_unlock0;
 
@@ -1101,6 +1114,14 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
 		goto out_unlock;
 
 	ipc_lock_object(&shp->shm_perm);
+
+	/* check if shm_destroy() is tearing down shp */
+	if (shp->shm_file == NULL) {
+		ipc_unlock_object(&shp->shm_perm);
+		err = -EIDRM;
+		goto out_unlock;
+	}
+
 	path = shp->shm_file->f_path;
 	path_get(&path);
 	shp->shm_nattch++;
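All three hunks above apply the same teardown idiom: the destroyer detaches the resource pointer while holding the object lock, and every other path re-checks that pointer after taking the lock, failing with -EIDRM if it observes the teardown mark. A minimal userspace sketch of the idiom (hypothetical names, a pthread mutex standing in for ipc_lock_object()):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct resource { int dummy; };

struct seg {
	pthread_mutex_t lock;
	struct resource *res;	/* NULL once teardown has started */
};

static void seg_destroy(struct seg *s)
{
	struct resource *res;

	pthread_mutex_lock(&s->lock);
	res = s->res;
	s->res = NULL;		/* mark: destruction in progress */
	pthread_mutex_unlock(&s->lock);
	free(res);		/* release the resource outside the lock */
}

static int seg_use(struct seg *s)
{
	pthread_mutex_lock(&s->lock);
	if (s->res == NULL) {	/* raced with seg_destroy() */
		pthread_mutex_unlock(&s->lock);
		return -EIDRM;
	}
	/* ... s->res is safe to use while the lock is held ... */
	pthread_mutex_unlock(&s->lock);
	return 0;
}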
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7d57af21f49e..dee6cf4e6d34 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -476,40 +476,6 @@ static int vma_has_reserves(struct vm_area_struct *vma, long chg)
 	return 0;
 }
 
-static void copy_gigantic_page(struct page *dst, struct page *src)
-{
-	int i;
-	struct hstate *h = page_hstate(src);
-	struct page *dst_base = dst;
-	struct page *src_base = src;
-
-	for (i = 0; i < pages_per_huge_page(h); ) {
-		cond_resched();
-		copy_highpage(dst, src);
-
-		i++;
-		dst = mem_map_next(dst, dst_base, i);
-		src = mem_map_next(src, src_base, i);
-	}
-}
-
-void copy_huge_page(struct page *dst, struct page *src)
-{
-	int i;
-	struct hstate *h = page_hstate(src);
-
-	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
-		copy_gigantic_page(dst, src);
-		return;
-	}
-
-	might_sleep();
-	for (i = 0; i < pages_per_huge_page(h); i++) {
-		cond_resched();
-		copy_highpage(dst + i, src + i);
-	}
-}
-
 static void enqueue_huge_page(struct hstate *h, struct page *page)
 {
 	int nid = page_to_nid(page);
@@ -736,6 +702,23 @@ int PageHuge(struct page *page)
 }
 EXPORT_SYMBOL_GPL(PageHuge);
 
+/*
+ * PageHeadHuge() only returns true for hugetlbfs head page, but not for
+ * normal or transparent huge pages.
+ */
+int PageHeadHuge(struct page *page_head)
+{
+	compound_page_dtor *dtor;
+
+	if (!PageHead(page_head))
+		return 0;
+
+	dtor = get_compound_page_dtor(page_head);
+
+	return dtor == free_huge_page;
+}
+EXPORT_SYMBOL_GPL(PageHeadHuge);
+
 pgoff_t __basepage_index(struct page *page)
 {
 	struct page *page_head = compound_head(page);
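The detection trick here deserves a note: every compound page records a destructor, and hugetlbfs pages are the only ones destroyed by free_huge_page, so comparing destructors identifies a hugetlbfs head without touching the hstate. A conceptual sketch of the accessors, simplified from include/linux/mm.h of this era (the example_ names are mine, not the kernel's):

/* Sketch: a compound page stashes its destructor in the first tail
 * page, so classifying the head is a single pointer comparison. */
static inline void example_set_compound_page_dtor(struct page *page,
						  compound_page_dtor *dtor)
{
	page[1].lru.next = (void *)dtor;
}

static inline compound_page_dtor *example_get_compound_page_dtor(struct page *page)
{
	return (compound_page_dtor *)page[1].lru.next;
}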
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index c4403cdf3433..eca4a3129129 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2950,7 +2950,7 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
 		return;
 	}
 
-	p += snprintf(p, maxlen, policy_modes[mode]);
+	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
 
 	if (flags & MPOL_MODE_FLAGS) {
 		p += snprintf(p, buffer + maxlen - p, "=");
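The change silences gcc's format warning, and it is not merely cosmetic: if a mode name ever contained a '%', the old call would interpret it as a conversion specifier. A standalone reproduction (illustrative, not kernel code):

#include <stdio.h>

int main(void)
{
	char buf[32];
	const char *name = "prefer%n";	/* imagine a '%' sneaking in */

	/* Unsafe: the variable string is the format itself; gcc warns
	 * with -Wformat-security (left commented out deliberately). */
	/* snprintf(buf, sizeof(buf), name); */

	/* Safe: constant format, the string is passed as an argument. */
	snprintf(buf, sizeof(buf), "%s", name);
	printf("%s\n", buf);
	return 0;
}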
diff --git a/mm/migrate.c b/mm/migrate.c
index 316e720a2023..bb940045fe85 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -442,6 +442,54 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 }
 
 /*
+ * Gigantic pages are so large that we do not guarantee that page++ pointer
+ * arithmetic will work across the entire page.  We need something more
+ * specialized.
+ */
+static void __copy_gigantic_page(struct page *dst, struct page *src,
+				int nr_pages)
+{
+	int i;
+	struct page *dst_base = dst;
+	struct page *src_base = src;
+
+	for (i = 0; i < nr_pages; ) {
+		cond_resched();
+		copy_highpage(dst, src);
+
+		i++;
+		dst = mem_map_next(dst, dst_base, i);
+		src = mem_map_next(src, src_base, i);
+	}
+}
+
+static void copy_huge_page(struct page *dst, struct page *src)
+{
+	int i;
+	int nr_pages;
+
+	if (PageHuge(src)) {
+		/* hugetlbfs page */
+		struct hstate *h = page_hstate(src);
+		nr_pages = pages_per_huge_page(h);
+
+		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
+			__copy_gigantic_page(dst, src, nr_pages);
+			return;
+		}
+	} else {
+		/* thp page */
+		BUG_ON(!PageTransHuge(src));
+		nr_pages = hpage_nr_pages(src);
+	}
+
+	for (i = 0; i < nr_pages; i++) {
+		cond_resched();
+		copy_highpage(dst + i, src + i);
+	}
+}
+
+/*
  * Copy the page to its new location
  */
 void migrate_page_copy(struct page *newpage, struct page *page)
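The comment at the top of the new hunk is the crux: with sparse memory models the struct page array is only guaranteed contiguous within a MAX_ORDER-aligned block, so "dst + i" is invalid across a gigantic page, and iteration must hop through the pfn at block boundaries. A sketch of what mem_map_next() does conceptually (simplified from mm/internal.h; the example_ name is hypothetical):

static struct page *example_mem_map_next(struct page *iter,
					 struct page *base, int offset)
{
	/* At a MAX_ORDER block boundary, recompute through the pfn ... */
	if ((offset & (MAX_ORDER_NR_PAGES - 1)) == 0) {
		unsigned long pfn = page_to_pfn(base) + offset;

		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	/* ... otherwise plain pointer arithmetic is still safe. */
	return iter + 1;
}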
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -82,19 +82,6 @@ static void __put_compound_page(struct page *page)
 
 static void put_compound_page(struct page *page)
 {
-	/*
-	 * hugetlbfs pages cannot be split from under us. If this is a
-	 * hugetlbfs page, check refcount on head page and release the page if
-	 * the refcount becomes zero.
-	 */
-	if (PageHuge(page)) {
-		page = compound_head(page);
-		if (put_page_testzero(page))
-			__put_compound_page(page);
-
-		return;
-	}
-
 	if (unlikely(PageTail(page))) {
 		/* __split_huge_page_refcount can run under us */
 		struct page *page_head = compound_trans_head(page);
@@ -111,14 +98,31 @@ static void put_compound_page(struct page *page)
 			 * still hot on arches that do not support
 			 * this_cpu_cmpxchg_double().
 			 */
-			if (PageSlab(page_head)) {
-				if (PageTail(page)) {
+			if (PageSlab(page_head) || PageHeadHuge(page_head)) {
+				if (likely(PageTail(page))) {
+					/*
+					 * __split_huge_page_refcount
+					 * cannot race here.
+					 */
+					VM_BUG_ON(!PageHead(page_head));
+					atomic_dec(&page->_mapcount);
 					if (put_page_testzero(page_head))
 						VM_BUG_ON(1);
-
-					atomic_dec(&page->_mapcount);
-					goto skip_lock_tail;
+					if (put_page_testzero(page_head))
+						__put_compound_page(page_head);
+					return;
 				} else
+					/*
+					 * __split_huge_page_refcount
+					 * run before us, "page" was a
+					 * THP tail. The split
+					 * page_head has been freed
+					 * and reallocated as slab or
+					 * hugetlbfs page of smaller
+					 * order (only possible if
+					 * reallocated as slab on
+					 * x86).
+					 */
 					goto skip_lock;
 			}
 			/*
@@ -132,8 +136,27 @@ static void put_compound_page(struct page *page)
 			/* __split_huge_page_refcount run before us */
 			compound_unlock_irqrestore(page_head, flags);
 skip_lock:
-			if (put_page_testzero(page_head))
-				__put_single_page(page_head);
+			if (put_page_testzero(page_head)) {
+				/*
+				 * The head page may have been
+				 * freed and reallocated as a
+				 * compound page of smaller
+				 * order and then freed again.
+				 * All we know is that it
+				 * cannot have become: a THP
+				 * page, a compound page of
+				 * higher order, a tail page.
+				 * That is because we still
+				 * hold the refcount of the
+				 * split THP tail and
+				 * page_head was the THP head
+				 * before the split.
+				 */
+				if (PageHead(page_head))
+					__put_compound_page(page_head);
+				else
+					__put_single_page(page_head);
+			}
 out_put_single:
 			if (put_page_testzero(page))
 				__put_single_page(page);
@@ -155,7 +178,6 @@ out_put_single:
 		VM_BUG_ON(atomic_read(&page->_count) != 0);
 		compound_unlock_irqrestore(page_head, flags);
 
-skip_lock_tail:
 		if (put_page_testzero(page_head)) {
 			if (PageHead(page_head))
 				__put_compound_page(page_head);
@@ -198,51 +220,52 @@ bool __get_page_tail(struct page *page)
 	 * proper PT lock that already serializes against
 	 * split_huge_page().
 	 */
-	bool got = false;
-	struct page *page_head;
-
-	/*
-	 * If this is a hugetlbfs page it cannot be split under us. Simply
-	 * increment refcount for the head page.
-	 */
-	if (PageHuge(page)) {
-		page_head = compound_head(page);
-		atomic_inc(&page_head->_count);
-		got = true;
-	} else {
-		unsigned long flags;
-
-		page_head = compound_trans_head(page);
-		if (likely(page != page_head &&
-			   get_page_unless_zero(page_head))) {
-
-			/* Ref to put_compound_page() comment. */
-			if (PageSlab(page_head)) {
-				if (likely(PageTail(page))) {
-					__get_page_tail_foll(page, false);
-					return true;
-				} else {
-					put_page(page_head);
-					return false;
-				}
-			}
-
-			/*
-			 * page_head wasn't a dangling pointer but it
-			 * may not be a head page anymore by the time
-			 * we obtain the lock. That is ok as long as it
-			 * can't be freed from under us.
-			 */
-			flags = compound_lock_irqsave(page_head);
-			/* here __split_huge_page_refcount won't run anymore */
-			if (likely(PageTail(page))) {
-				__get_page_tail_foll(page, false);
-				got = true;
-			}
-			compound_unlock_irqrestore(page_head, flags);
-			if (unlikely(!got))
-				put_page(page_head);
-		}
-	}
+	unsigned long flags;
+	bool got = false;
+	struct page *page_head = compound_trans_head(page);
+
+	if (likely(page != page_head && get_page_unless_zero(page_head))) {
+		/* Ref to put_compound_page() comment. */
+		if (PageSlab(page_head) || PageHeadHuge(page_head)) {
+			if (likely(PageTail(page))) {
+				/*
+				 * This is a hugetlbfs page or a slab
+				 * page. __split_huge_page_refcount
+				 * cannot race here.
+				 */
+				VM_BUG_ON(!PageHead(page_head));
+				__get_page_tail_foll(page, false);
+				return true;
+			} else {
+				/*
+				 * __split_huge_page_refcount run
+				 * before us, "page" was a THP
+				 * tail. The split page_head has been
+				 * freed and reallocated as slab or
+				 * hugetlbfs page of smaller order
+				 * (only possible if reallocated as
+				 * slab on x86).
+				 */
+				put_page(page_head);
+				return false;
+			}
+		}
+
+		/*
+		 * page_head wasn't a dangling pointer but it
+		 * may not be a head page anymore by the time
+		 * we obtain the lock. That is ok as long as it
+		 * can't be freed from under us.
+		 */
+		flags = compound_lock_irqsave(page_head);
+		/* here __split_huge_page_refcount won't run anymore */
+		if (likely(PageTail(page))) {
+			__get_page_tail_foll(page, false);
+			got = true;
+		}
+		compound_unlock_irqrestore(page_head, flags);
+		if (unlikely(!got))
+			put_page(page_head);
+	}
 	return got;
 }
diff --git a/net/Kconfig b/net/Kconfig
index 0715db64a5c3..d334678c0bd8 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -224,7 +224,7 @@ source "net/hsr/Kconfig"
 
 config RPS
 	boolean
-	depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
+	depends on SMP && SYSFS
 	default y
 
 config RFS_ACCEL
@@ -235,7 +235,7 @@ config RFS_ACCEL
 
 config XPS
 	boolean
-	depends on SMP && USE_GENERIC_SMP_HELPERS
+	depends on SMP
 	default y
 
 config NETPRIO_CGROUP
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 61090e0ff613..9c9810030377 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3289,6 +3289,7 @@ sub process {
 			}
 		}
 		if (!defined $suppress_whiletrailers{$linenr} &&
+		    defined($stat) && defined($cond) &&
 		    $line =~ /\b(?:if|while|for)\s*\(/ && $line !~ /^.\s*#/) {
 			my ($s, $c) = ($stat, $cond);
 
