Diffstat (limited to 'arch')
-rw-r--r--	arch/i386/kernel/process.c	7
-rw-r--r--	arch/i386/pci/common.c	4
-rw-r--r--	arch/i386/pci/direct.c	2
-rw-r--r--	arch/i386/pci/i386.c	7
-rw-r--r--	arch/ia64/kernel/process.c	7
-rw-r--r--	arch/powerpc/kernel/process.c	1
-rw-r--r--	arch/powerpc/mm/4xx_mmu.c	4
-rw-r--r--	arch/powerpc/mm/hugetlbpage.c	10
-rw-r--r--	arch/powerpc/mm/mem.c	2
-rw-r--r--	arch/powerpc/mm/tlb_32.c	6
-rw-r--r--	arch/powerpc/mm/tlb_64.c	4
-rw-r--r--	arch/x86_64/kernel/process.c	7
12 files changed, 21 insertions, 40 deletions
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 1cb261f225d5..df6c2bcde067 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -393,13 +393,6 @@ void flush_thread(void)
 {
 	struct task_struct *tsk = current;
 
-	/*
-	 * Remove function-return probe instances associated with this task
-	 * and put them back on the free list. Do not insert an exit probe for
-	 * this function, it will be disabled by kprobe_flush_task if you do.
-	 */
-	kprobe_flush_task(tsk);
-
 	memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
 	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
 	/*
diff --git a/arch/i386/pci/common.c b/arch/i386/pci/common.c
index c96bea14b98f..f6bc48da4d2a 100644
--- a/arch/i386/pci/common.c
+++ b/arch/i386/pci/common.c
@@ -132,7 +132,7 @@ struct pci_bus * __devinit pcibios_scan_root(int busnum)
 		}
 	}
 
-	printk("PCI: Probing PCI hardware (bus %02x)\n", busnum);
+	printk(KERN_DEBUG "PCI: Probing PCI hardware (bus %02x)\n", busnum);
 
 	return pci_scan_bus_parented(NULL, busnum, &pci_root_ops, NULL);
 }
@@ -144,7 +144,7 @@ static int __init pcibios_init(void)
 	struct cpuinfo_x86 *c = &boot_cpu_data;
 
 	if (!raw_pci_ops) {
-		printk("PCI: System does not support PCI\n");
+		printk(KERN_WARNING "PCI: System does not support PCI\n");
 		return 0;
 	}
 
diff --git a/arch/i386/pci/direct.c b/arch/i386/pci/direct.c
index 30b7e9b4f6a2..94331d6be7a3 100644
--- a/arch/i386/pci/direct.c
+++ b/arch/i386/pci/direct.c
@@ -201,7 +201,7 @@ static int __init pci_sanity_check(struct pci_raw_ops *o)
 			return 1;
 	}
 
-	DBG("PCI: Sanity check failed\n");
+	DBG(KERN_WARNING "PCI: Sanity check failed\n");
 	return 0;
 }
 
diff --git a/arch/i386/pci/i386.c b/arch/i386/pci/i386.c
index 6d6338500c3c..ed2c8c899bd3 100644
--- a/arch/i386/pci/i386.c
+++ b/arch/i386/pci/i386.c
@@ -221,6 +221,11 @@ int pcibios_enable_resources(struct pci_dev *dev, int mask)
 			continue;
 
 		r = &dev->resource[idx];
+		if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
+			continue;
+		if ((idx == PCI_ROM_RESOURCE) &&
+				(!(r->flags & IORESOURCE_ROM_ENABLE)))
+			continue;
 		if (!r->start && r->end) {
 			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
 			return -EINVAL;
@@ -230,8 +235,6 @@ int pcibios_enable_resources(struct pci_dev *dev, int mask)
 		if (r->flags & IORESOURCE_MEM)
 			cmd |= PCI_COMMAND_MEMORY;
 	}
-	if (dev->resource[PCI_ROM_RESOURCE].start)
-		cmd |= PCI_COMMAND_MEMORY;
 	if (cmd != old_cmd) {
 		printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
 		pci_write_config_word(dev, PCI_COMMAND, cmd);
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 4305d2ba76f6..2e33665d9c18 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -718,13 +718,6 @@ kernel_thread_helper (int (*fn)(void *), void *arg)
 void
 flush_thread (void)
 {
-	/*
-	 * Remove function-return probe instances associated with this task
-	 * and put them back on the free list. Do not insert an exit probe for
-	 * this function, it will be disabled by kprobe_flush_task if you do.
-	 */
-	kprobe_flush_task(current);
-
 	/* drop floating-point and debug-register state if it exists: */
 	current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
 	ia64_drop_fpu(current);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index de69fb37c731..a5a7542a8ff3 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -457,7 +457,6 @@ void flush_thread(void)
 	if (t->flags & _TIF_ABI_PENDING)
 		t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
 #endif
-	kprobe_flush_task(current);
 
 #ifndef CONFIG_SMP
 	if (last_task_used_math == current)
diff --git a/arch/powerpc/mm/4xx_mmu.c b/arch/powerpc/mm/4xx_mmu.c
index b7bcbc232f39..4d006aa1a0d1 100644
--- a/arch/powerpc/mm/4xx_mmu.c
+++ b/arch/powerpc/mm/4xx_mmu.c
@@ -110,13 +110,11 @@ unsigned long __init mmu_mapin_ram(void)
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
-		spin_lock(&init_mm.page_table_lock);
 		pmdp = pmd_offset(pgd_offset_k(v), v);
 		pmd_val(*pmdp++) = val;
 		pmd_val(*pmdp++) = val;
 		pmd_val(*pmdp++) = val;
 		pmd_val(*pmdp++) = val;
-		spin_unlock(&init_mm.page_table_lock);
 
 		v += LARGE_PAGE_SIZE_16M;
 		p += LARGE_PAGE_SIZE_16M;
@@ -127,10 +125,8 @@ unsigned long __init mmu_mapin_ram(void)
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
-		spin_lock(&init_mm.page_table_lock);
 		pmdp = pmd_offset(pgd_offset_k(v), v);
 		pmd_val(*pmdp) = val;
-		spin_unlock(&init_mm.page_table_lock);
 
 		v += LARGE_PAGE_SIZE_4M;
 		p += LARGE_PAGE_SIZE_4M;
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 426c269e552e..f867bba893ca 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -287,15 +287,15 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
 
 int prepare_hugepage_range(unsigned long addr, unsigned long len)
 {
-	int err;
+	int err = 0;
 
 	if ( (addr+len) < addr )
 		return -EINVAL;
 
-	if ((addr + len) < 0x100000000UL)
+	if (addr < 0x100000000UL)
 		err = open_low_hpage_areas(current->mm,
 					   LOW_ESID_MASK(addr, len));
-	else
+	if ((addr + len) >= 0x100000000UL)
 		err = open_high_hpage_areas(current->mm,
 					    HTLB_AREA_MASK(addr, len));
 	if (err) {
@@ -754,9 +754,7 @@ repeat:
 	}
 
 	/*
-	 * No need to use ldarx/stdcx here because all who
-	 * might be updating the pte will hold the
-	 * page_table_lock
+	 * No need to use ldarx/stdcx here
 	 */
 	*ptep = __pte(new_pte & ~_PAGE_BUSY);
 
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 4bd7b0a70996..ed6ed2e30dac 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -495,7 +495,7 @@ EXPORT_SYMBOL(flush_icache_user_range);
  * We use it to preload an HPTE into the hash table corresponding to
  * the updated linux PTE.
  *
- * This must always be called with the mm->page_table_lock held
+ * This must always be called with the pte lock held.
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		      pte_t pte)
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
index 6c3dc3c44c86..ad580f3742e5 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_32.c
@@ -149,6 +149,12 @@ void flush_tlb_mm(struct mm_struct *mm)
 		return;
 	}
 
+	/*
+	 * It is safe to go down the mm's list of vmas when called
+	 * from dup_mmap, holding mmap_sem. It would also be safe from
+	 * unmap_region or exit_mmap, but not from vmtruncate on SMP -
+	 * but it seems dup_mmap is the only SMP case which gets here.
+	 */
 	for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
 		flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
 	FINISH_FLUSH;
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index 53e31b834ace..859d29a0cac5 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -95,7 +95,7 @@ static void pte_free_submit(struct pte_freelist_batch *batch)
 
 void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
 {
-	/* This is safe as we are holding page_table_lock */
+	/* This is safe since tlb_gather_mmu has disabled preemption */
 	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
 	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
 
@@ -206,7 +206,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 
 void pte_free_finish(void)
 {
-	/* This is safe as we are holding page_table_lock */
+	/* This is safe since tlb_gather_mmu has disabled preemption */
 	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
 
 	if (*batchp == NULL)
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 5afd63e8cef7..7519fc520eb3 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -351,13 +351,6 @@ void flush_thread(void)
 	struct task_struct *tsk = current;
 	struct thread_info *t = current_thread_info();
 
-	/*
-	 * Remove function-return probe instances associated with this task
-	 * and put them back on the free list. Do not insert an exit probe for
-	 * this function, it will be disabled by kprobe_flush_task if you do.
-	 */
-	kprobe_flush_task(tsk);
-
 	if (t->flags & _TIF_ABI_PENDING)
 		t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
 