-rw-r--r--  arch/x86/kernel/cpu/common.c           |  5
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c  |  6
-rw-r--r--  arch/x86/kernel/ds.c                   |  2
-rw-r--r--  arch/x86/kernel/scx200_32.c            |  2
-rw-r--r--  arch/x86/mm/ioremap.c                  |  2
-rw-r--r--  arch/x86/mm/pageattr.c                 |  3
-rw-r--r--  arch/x86/mm/pgtable_32.c               | 23
-rw-r--r--  include/asm-generic/tlb.h              |  1
-rw-r--r--  include/asm-x86/pgalloc_32.h           | 20
-rw-r--r--  include/linux/swap.h                   |  1
10 files changed, 38 insertions, 27 deletions
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index db28aa9e2f69..d608c9ebbfe2 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -274,8 +274,10 @@ void __init cpu_detect(struct cpuinfo_x86 *c)
 		if (c->x86 >= 0x6)
 			c->x86_model += ((tfms >> 16) & 0xF) << 4;
 		c->x86_mask = tfms & 15;
-		if (cap0 & (1<<19))
+		if (cap0 & (1<<19)) {
 			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
+			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
+		}
 	}
 }
 static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
@@ -317,6 +319,7 @@ static void __init early_cpu_detect(void)
 	struct cpuinfo_x86 *c = &boot_cpu_data;
 
 	c->x86_cache_alignment = 32;
+	c->x86_clflush_size = 32;
 
 	if (!have_cpuid_p())
 		return;
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 8b4507b8469b..1b889860eb73 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -352,8 +352,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	 */
 	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
 		/* supports eax=2 call */
-		int i, j, n;
-		int regs[4];
+		int j, n;
+		unsigned int regs[4];
 		unsigned char *dp = (unsigned char *)regs;
 		int only_trace = 0;
 
@@ -368,7 +368,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 
 		/* If bit 31 is set, this is an unknown format */
 		for ( j = 0 ; j < 3 ; j++ ) {
-			if ( regs[j] < 0 ) regs[j] = 0;
+			if (regs[j] & (1 << 31)) regs[j] = 0;
 		}
 
 		/* Byte 0 is level count, not a descriptor */
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index 1c5ca4d18787..dcd918c1580d 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -223,7 +223,7 @@ int ds_free(void **dsp)
 	if (*dsp)
 		kfree((void *)get_bts_buffer_base(*dsp));
 	kfree(*dsp);
-	*dsp = 0;
+	*dsp = NULL;
 
 	return 0;
 }
diff --git a/arch/x86/kernel/scx200_32.c b/arch/x86/kernel/scx200_32.c
index 87bc159d29df..7e004acbe526 100644
--- a/arch/x86/kernel/scx200_32.c
+++ b/arch/x86/kernel/scx200_32.c
@@ -65,7 +65,7 @@ static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_
 	base = pci_resource_start(pdev, 0);
 	printk(KERN_INFO NAME ": GPIO base 0x%x\n", base);
 
-	if (request_region(base, SCx200_GPIO_SIZE, "NatSemi SCx200 GPIO") == 0) {
+	if (!request_region(base, SCx200_GPIO_SIZE, "NatSemi SCx200 GPIO")) {
 		printk(KERN_ERR NAME ": can't allocate I/O for GPIOs\n");
 		return -EBUSY;
 	}
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index ed795721ca8e..a177d76e1c53 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -340,7 +340,7 @@ void __init early_ioremap_reset(void)
 	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
 		addr = fix_to_virt(idx);
 		pte = early_ioremap_pte(addr);
-		if (!*pte & _PAGE_PRESENT) {
+		if (*pte & _PAGE_PRESENT) {
 			phys = *pte & PAGE_MASK;
 			set_fixmap(idx, phys);
 		}
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 1cc6607eacb0..e297bd65e513 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -399,8 +399,7 @@ static inline int change_page_attr_set(unsigned long addr, int numpages,
 static inline int change_page_attr_clear(unsigned long addr, int numpages,
 					  pgprot_t mask)
 {
-	return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
-
+	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
 }
 
 int set_memory_uc(unsigned long addr, int numpages)
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 2ae5999a795a..cb3aa470249b 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -376,3 +376,26 @@ void check_pgt_cache(void)
 {
 	quicklist_trim(0, pgd_dtor, 25, 16);
 }
+
+void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
+{
+	paravirt_release_pt(page_to_pfn(pte));
+	tlb_remove_page(tlb, pte);
+}
+
+#ifdef CONFIG_X86_PAE
+
+void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
+{
+	/* This is called just after the pmd has been detached from
+	   the pgd, which requires a full tlb flush to be recognized
+	   by the CPU. Rather than incurring multiple tlb flushes
+	   while the address space is being pulled down, make the tlb
+	   gathering machinery do a full flush when we're done. */
+	tlb->fullmm = 1;
+
+	paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
+	tlb_remove_page(tlb, virt_to_page(pmd));
+}
+
+#endif
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 6ce9f3ab928d..75f2bfab614f 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -15,6 +15,7 @@
 
 #include <linux/swap.h>
 #include <linux/quicklist.h>
+#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 
 /*
diff --git a/include/asm-x86/pgalloc_32.h b/include/asm-x86/pgalloc_32.h
index 10c2b452e64c..7641e7b5d931 100644
--- a/include/asm-x86/pgalloc_32.h
+++ b/include/asm-x86/pgalloc_32.h
@@ -3,6 +3,7 @@
 
 #include <linux/threads.h>
 #include <linux/mm.h>		/* for struct page */
+#include <linux/pagemap.h>
 #include <asm/tlb.h>
 #include <asm-generic/tlb.h>
 
@@ -51,11 +52,7 @@ static inline void pte_free(struct page *pte)
 }
 
 
-static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
-{
-	paravirt_release_pt(page_to_pfn(pte));
-	tlb_remove_page(tlb, pte);
-}
+extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
 
 #ifdef CONFIG_X86_PAE
 /*
@@ -72,18 +69,7 @@ static inline void pmd_free(pmd_t *pmd)
 	free_page((unsigned long)pmd);
 }
 
-static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
-{
-	/* This is called just after the pmd has been detached from
-	   the pgd, which requires a full tlb flush to be recognized
-	   by the CPU. Rather than incurring multiple tlb flushes
-	   while the address space is being pulled down, make the tlb
-	   gathering machinery do a full flush when we're done. */
-	tlb->fullmm = 1;
-
-	paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
-	tlb_remove_page(tlb, virt_to_page(pmd));
-}
+extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
 {
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 2c3ce4c69b25..4f3838adbb30 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -6,7 +6,6 @@
 #include <linux/mmzone.h>
 #include <linux/list.h>
 #include <linux/sched.h>
-#include <linux/pagemap.h>
 
 #include <asm/atomic.h>
 #include <asm/page.h>