Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/kernel/cpuid.c        |  2
-rw-r--r--  arch/i386/kernel/msr.c          |  2
-rw-r--r--  arch/i386/kernel/vm86.c         | 17
-rw-r--r--  arch/i386/mm/discontig.c        |  4
-rw-r--r--  arch/i386/mm/init.c             | 62
-rw-r--r--  arch/i386/mm/ioremap.c          |  4
-rw-r--r--  arch/i386/mm/pgtable.c          | 11
-rw-r--r--  arch/i386/oprofile/backtrace.c  | 38
-rw-r--r--  arch/i386/pci/fixup.c           | 59
9 files changed, 148 insertions(+), 51 deletions(-)
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
index 4647db4ad6de..13bae799e626 100644
--- a/arch/i386/kernel/cpuid.c
+++ b/arch/i386/kernel/cpuid.c
@@ -163,7 +163,7 @@ static int cpuid_class_device_create(int i)
         int err = 0;
         struct class_device *class_err;
 
-        class_err = class_device_create(cpuid_class, MKDEV(CPUID_MAJOR, i), NULL, "cpu%d",i);
+        class_err = class_device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, i), NULL, "cpu%d",i);
         if (IS_ERR(class_err))
                 err = PTR_ERR(class_err);
         return err;
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
index 03100d6fc5d6..44470fea4309 100644
--- a/arch/i386/kernel/msr.c
+++ b/arch/i386/kernel/msr.c
@@ -246,7 +246,7 @@ static int msr_class_device_create(int i)
         int err = 0;
         struct class_device *class_err;
 
-        class_err = class_device_create(msr_class, MKDEV(MSR_MAJOR, i), NULL, "msr%d",i);
+        class_err = class_device_create(msr_class, NULL, MKDEV(MSR_MAJOR, i), NULL, "msr%d",i);
         if (IS_ERR(class_err))
                 err = PTR_ERR(class_err);
         return err;
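
Both class-device hunks above are mechanical fallout from class_device_create() gaining a parent argument; the inserted NULL is that parent. A hedged sketch of the prototype the new call sites imply — reconstructed from the calls, not quoted from this commit:

        /* assumed prototype; 'parent' is the newly inserted argument */
        struct class_device *class_device_create(struct class *cls,
                                                 struct class_device *parent,
                                                 dev_t devt,
                                                 struct device *device,
                                                 const char *fmt, ...);
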
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index 16b485009622..fc1993564f98 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -134,17 +134,16 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
         return ret;
 }
 
-static void mark_screen_rdonly(struct task_struct * tsk)
+static void mark_screen_rdonly(struct mm_struct *mm)
 {
         pgd_t *pgd;
         pud_t *pud;
         pmd_t *pmd;
-        pte_t *pte, *mapped;
+        pte_t *pte;
+        spinlock_t *ptl;
         int i;
 
-        preempt_disable();
-        spin_lock(&tsk->mm->page_table_lock);
-        pgd = pgd_offset(tsk->mm, 0xA0000);
+        pgd = pgd_offset(mm, 0xA0000);
         if (pgd_none_or_clear_bad(pgd))
                 goto out;
         pud = pud_offset(pgd, 0xA0000);
@@ -153,16 +152,14 @@ static void mark_screen_rdonly(struct task_struct * tsk)
         pmd = pmd_offset(pud, 0xA0000);
         if (pmd_none_or_clear_bad(pmd))
                 goto out;
-        pte = mapped = pte_offset_map(pmd, 0xA0000);
+        pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
         for (i = 0; i < 32; i++) {
                 if (pte_present(*pte))
                         set_pte(pte, pte_wrprotect(*pte));
                 pte++;
         }
-        pte_unmap(mapped);
+        pte_unmap_unlock(pte, ptl);
 out:
-        spin_unlock(&tsk->mm->page_table_lock);
-        preempt_enable();
         flush_tlb();
 }
 
@@ -306,7 +303,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 
         tsk->thread.screen_bitmap = info->screen_bitmap;
         if (info->flags & VM86_SCREEN_BITMAP)
-                mark_screen_rdonly(tsk);
+                mark_screen_rdonly(tsk->mm);
         __asm__ __volatile__(
                 "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs\n\t"
                 "movl %0,%%esp\n\t"
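
The vm86.c rework is part of dropping the mm-wide page_table_lock in favor of a per-page-table lock: pte_offset_map_lock() maps the PTE page and takes its lock in one step, and pte_unmap_unlock() undoes both, which is why the explicit preempt_disable()/spin_lock() bracketing disappears. A minimal sketch of the idiom, assuming the same split-ptlock API the hunk uses (the helper name is hypothetical):

        /* write-protect 'nr' consecutive PTEs starting at addr; the PTE
         * page is mapped and its lock held only for the duration of the
         * walk */
        static void wrprotect_ptes(struct mm_struct *mm, pmd_t *pmd,
                                   unsigned long addr, int nr)
        {
                spinlock_t *ptl;
                pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
                int i;

                for (i = 0; i < nr; i++, pte++)
                        if (pte_present(*pte))
                                set_pte(pte, pte_wrprotect(*pte));
                pte_unmap_unlock(pte - 1, ptl);  /* any PTE inside the page */
        }

Taking an mm_struct instead of a task_struct also explains the third hunk: callers now hand in tsk->mm directly.
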
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index 244d8ec66be2..c4af9638dbfa 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -98,7 +98,7 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
 
 extern unsigned long find_max_low_pfn(void);
 extern void find_max_pfn(void);
-extern void one_highpage_init(struct page *, int, int);
+extern void add_one_highpage_init(struct page *, int, int);
 
 extern struct e820map e820;
 extern unsigned long init_pg_tables_end;
@@ -427,7 +427,7 @@ void __init set_highmem_pages_init(int bad_ppro)
                         if (!pfn_valid(node_pfn))
                                 continue;
                         page = pfn_to_page(node_pfn);
-                        one_highpage_init(page, node_pfn, bad_ppro);
+                        add_one_highpage_init(page, node_pfn, bad_ppro);
                 }
         }
         totalram_pages += totalhigh_pages;
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 2ebaf75f732e..542d9298da5e 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/efi.h>
+#include <linux/memory_hotplug.h>
 
 #include <asm/processor.h>
 #include <asm/system.h>
@@ -266,17 +267,46 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
         pkmap_page_table = pte;
 }
 
-void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
+void __devinit free_new_highpage(struct page *page)
+{
+        set_page_count(page, 1);
+        __free_page(page);
+        totalhigh_pages++;
+}
+
+void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
 {
         if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
                 ClearPageReserved(page);
-                set_page_count(page, 1);
-                __free_page(page);
-                totalhigh_pages++;
+                free_new_highpage(page);
         } else
                 SetPageReserved(page);
 }
 
+static int add_one_highpage_hotplug(struct page *page, unsigned long pfn)
+{
+        free_new_highpage(page);
+        totalram_pages++;
+#ifdef CONFIG_FLATMEM
+        max_mapnr = max(pfn, max_mapnr);
+#endif
+        num_physpages++;
+        return 0;
+}
+
+/*
+ * Not currently handling the NUMA case.
+ * Assuming single node and all memory that
+ * has been added dynamically that would be
+ * onlined here is in HIGHMEM
+ */
+void online_page(struct page *page)
+{
+        ClearPageReserved(page);
+        add_one_highpage_hotplug(page, page_to_pfn(page));
+}
+
+
 #ifdef CONFIG_NUMA
 extern void set_highmem_pages_init(int);
 #else
@@ -284,7 +314,7 @@ static void __init set_highmem_pages_init(int bad_ppro)
 {
         int pfn;
         for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
-                one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
+                add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
         totalram_pages += totalhigh_pages;
 }
 #endif /* CONFIG_FLATMEM */
@@ -615,6 +645,28 @@ void __init mem_init(void)
 #endif
 }
 
+/*
+ * this is for the non-NUMA, single node SMP system case.
+ * Specifically, in the case of x86, we will always add
+ * memory to the highmem for now.
+ */
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+int add_memory(u64 start, u64 size)
+{
+        struct pglist_data *pgdata = &contig_page_data;
+        struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
+        unsigned long start_pfn = start >> PAGE_SHIFT;
+        unsigned long nr_pages = size >> PAGE_SHIFT;
+
+        return __add_pages(zone, start_pfn, nr_pages);
+}
+
+int remove_memory(u64 start, u64 size)
+{
+        return -EINVAL;
+}
+#endif
+
 kmem_cache_t *pgd_cache;
 kmem_cache_t *pmd_cache;
 
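
The non-NUMA add_memory() above funnels the new range into the highest (highmem) zone via the generic __add_pages(), and online_page() later releases each page to the allocator through free_new_highpage(). A hedged usage sketch — the address and size are invented for illustration:

        /* hypothetical hot-add of 128 MB at the 1 GB physical mark; when
         * the range is onlined, the hotplug core calls online_page() for
         * each page in it */
        u64 start = 0x40000000ULL;      /* assumed example address */
        u64 size = 128ULL << 20;
        int err = add_memory(start, size);
        if (err)
                printk(KERN_ERR "add_memory: %d\n", err);

remove_memory() is a deliberate -EINVAL stub, so hot-remove fails loudly instead of pretending to work.
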
diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c
index f379b8d67558..5d09de8d1c6b 100644
--- a/arch/i386/mm/ioremap.c
+++ b/arch/i386/mm/ioremap.c
@@ -28,7 +28,7 @@ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
         unsigned long pfn;
 
         pfn = phys_addr >> PAGE_SHIFT;
-        pte = pte_alloc_kernel(&init_mm, pmd, addr);
+        pte = pte_alloc_kernel(pmd, addr);
         if (!pte)
                 return -ENOMEM;
         do {
@@ -87,14 +87,12 @@ static int ioremap_page_range(unsigned long addr,
         flush_cache_all();
         phys_addr -= addr;
         pgd = pgd_offset_k(addr);
-        spin_lock(&init_mm.page_table_lock);
         do {
                 next = pgd_addr_end(addr, end);
                 err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, flags);
                 if (err)
                         break;
         } while (pgd++, addr = next, addr != end);
-        spin_unlock(&init_mm.page_table_lock);
         flush_tlb_all();
         return err;
 }
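
Both ioremap hunks stem from the same locking rework as the vm86.c change: kernel-space page-table setup no longer relies on init_mm.page_table_lock, and pte_alloc_kernel() loses its mm argument. The new call site implies the prototype below (an assumption read off the hunk, not quoted from the headers):

        /* assumed prototype; any locking it needs now happens internally */
        pte_t *pte_alloc_kernel(pmd_t *pmd, unsigned long addr);
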
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index dcdce2c6c532..9db3242103be 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -31,11 +31,13 @@ void show_mem(void)
         pg_data_t *pgdat;
         unsigned long i;
         struct page_state ps;
+        unsigned long flags;
 
         printk(KERN_INFO "Mem-info:\n");
         show_free_areas();
         printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
         for_each_pgdat(pgdat) {
+                pgdat_resize_lock(pgdat, &flags);
                 for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                         page = pgdat_page_nr(pgdat, i);
                         total++;
@@ -48,6 +50,7 @@ void show_mem(void)
                         else if (page_count(page))
                                 shared += page_count(page) - 1;
                 }
+                pgdat_resize_unlock(pgdat, &flags);
         }
         printk(KERN_INFO "%d pages of RAM\n", total);
         printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
@@ -188,19 +191,19 @@ static inline void pgd_list_add(pgd_t *pgd)
         struct page *page = virt_to_page(pgd);
         page->index = (unsigned long)pgd_list;
         if (pgd_list)
-                pgd_list->private = (unsigned long)&page->index;
+                set_page_private(pgd_list, (unsigned long)&page->index);
         pgd_list = page;
-        page->private = (unsigned long)&pgd_list;
+        set_page_private(page, (unsigned long)&pgd_list);
 }
 
 static inline void pgd_list_del(pgd_t *pgd)
 {
         struct page *next, **pprev, *page = virt_to_page(pgd);
         next = (struct page *)page->index;
-        pprev = (struct page **)page->private;
+        pprev = (struct page **)page_private(page);
         *pprev = next;
         if (next)
-                next->private = (unsigned long)pprev;
+                set_page_private(next, (unsigned long)pprev);
 }
 
 void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
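
The pgd_list hunks stop poking page->private directly and go through accessors instead. A sketch of what those accessors presumably expand to at this point in the series (assumed, not quoted):

        /* assumed definitions: thin wrappers over the ->private field */
        #define page_private(page)              ((page)->private)
        #define set_page_private(page, v)       ((page)->private = (v))

Routing every user through the wrappers is what lets ->private be repurposed later (for instance by the split page-table lock elsewhere in this series) without touching the call sites again.
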
diff --git a/arch/i386/oprofile/backtrace.c b/arch/i386/oprofile/backtrace.c
index 65dfd2edb671..21654be3f73f 100644
--- a/arch/i386/oprofile/backtrace.c
+++ b/arch/i386/oprofile/backtrace.c
@@ -12,6 +12,7 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <asm/ptrace.h>
+#include <asm/uaccess.h>
 
 struct frame_head {
         struct frame_head * ebp;
@@ -21,26 +22,22 @@ struct frame_head {
 static struct frame_head *
 dump_backtrace(struct frame_head * head)
 {
-        oprofile_add_trace(head->ret);
+        struct frame_head bufhead[2];
 
-        /* frame pointers should strictly progress back up the stack
-         * (towards higher addresses) */
-        if (head >= head->ebp)
+        /* Also check accessibility of one struct frame_head beyond */
+        if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
+                return NULL;
+        if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
                 return NULL;
 
-        return head->ebp;
-}
-
-/* check that the page(s) containing the frame head are present */
-static int pages_present(struct frame_head * head)
-{
-        struct mm_struct * mm = current->mm;
+        oprofile_add_trace(bufhead[0].ret);
 
-        /* FIXME: only necessary once per page */
-        if (!check_user_page_readable(mm, (unsigned long)head))
-                return 0;
+        /* frame pointers should strictly progress back up the stack
+         * (towards higher addresses) */
+        if (head >= bufhead[0].ebp)
+                return NULL;
 
-        return check_user_page_readable(mm, (unsigned long)(head + 1));
+        return bufhead[0].ebp;
 }
 
 /*
@@ -97,15 +94,6 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
                 return;
         }
 
-#ifdef CONFIG_SMP
-        if (!spin_trylock(&current->mm->page_table_lock))
-                return;
-#endif
-
-        while (depth-- && head && pages_present(head))
+        while (depth-- && head)
                 head = dump_backtrace(head);
-
-#ifdef CONFIG_SMP
-        spin_unlock(&current->mm->page_table_lock);
-#endif
 }
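
The rewritten dump_backtrace() drops the page_table_lock trylock and the pages_present() probing in favor of a read that is simply allowed to fail: access_ok() sanity-checks the range, and __copy_from_user_inatomic() neither sleeps nor retries, so a missing page just terminates the backtrace. Copying sizeof(bufhead) — two frame_heads — also proves one frame beyond the current head is accessible, as the hunk's comment notes. The same pattern in isolation, as a hedged sketch (the helper is hypothetical):

        /* fetch a user-space frame from atomic context; 0 on success */
        static int read_user_frame(const struct frame_head __user *head,
                                   struct frame_head *buf, unsigned long len)
        {
                if (!access_ok(VERIFY_READ, head, len))
                        return -EFAULT; /* range not plausibly readable */
                if (__copy_from_user_inatomic(buf, head, len))
                        return -EFAULT; /* faulted: stop the walk here */
                return 0;
        }
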
diff --git a/arch/i386/pci/fixup.c b/arch/i386/pci/fixup.c
index 8e8e895e1b5a..330fd2b68075 100644
--- a/arch/i386/pci/fixup.c
+++ b/arch/i386/pci/fixup.c
@@ -2,6 +2,8 @@
  * Exceptions for specific devices. Usually work-arounds for fatal design flaws.
  */
 
+#include <linux/delay.h>
+#include <linux/dmi.h>
 #include <linux/pci.h>
 #include <linux/init.h>
 #include "pci.h"
@@ -384,3 +386,60 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev)
         }
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video);
+
+/*
+ * Some Toshiba laptops need extra code to enable their TI TSB43AB22/A.
+ *
+ * We pretend to bring them out of full D3 state, and restore the proper
+ * IRQ, PCI cache line size, and BARs, otherwise the device won't function
+ * properly. In some cases, the device will generate an interrupt on
+ * the wrong IRQ line, causing any devices sharing the line it's
+ * *supposed* to use to be disabled by the kernel's IRQ debug code.
+ */
+static u16 toshiba_line_size;
+
+static struct dmi_system_id __devinit toshiba_ohci1394_dmi_table[] = {
+        {
+                .ident = "Toshiba PS5 based laptop",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+                        DMI_MATCH(DMI_PRODUCT_VERSION, "PS5"),
+                },
+        },
+        {
+                .ident = "Toshiba PSM4 based laptop",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+                        DMI_MATCH(DMI_PRODUCT_VERSION, "PSM4"),
+                },
+        },
+        { }
+};
+
+static void __devinit pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev)
+{
+        if (!dmi_check_system(toshiba_ohci1394_dmi_table))
+                return; /* only applies to certain Toshibas (so far) */
+
+        dev->current_state = PCI_D3cold;
+        pci_read_config_word(dev, PCI_CACHE_LINE_SIZE, &toshiba_line_size);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0x8032,
+                         pci_pre_fixup_toshiba_ohci1394);
+
+static void __devinit pci_post_fixup_toshiba_ohci1394(struct pci_dev *dev)
+{
+        if (!dmi_check_system(toshiba_ohci1394_dmi_table))
+                return; /* only applies to certain Toshibas (so far) */
+
+        /* Restore config space on Toshiba laptops */
+        mdelay(10);
+        pci_write_config_word(dev, PCI_CACHE_LINE_SIZE, toshiba_line_size);
+        pci_write_config_word(dev, PCI_INTERRUPT_LINE, dev->irq);
+        pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
+                               pci_resource_start(dev, 0));
+        pci_write_config_dword(dev, PCI_BASE_ADDRESS_1,
+                               pci_resource_start(dev, 1));
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_TI, 0x8032,
+                         pci_post_fixup_toshiba_ohci1394);
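
The quirk is deliberately split across two fixup phases: the HEADER hook runs while the bus scan first reads the device's config header, early enough to capture the BIOS-programmed cache line size and flag the device as coming from D3cold, while the ENABLE hook runs as pci_enable_device() brings the device up, late enough that the restored IRQ line, cache line size, and BARs stick. A hedged sketch of the assumed lifecycle, in comment form since both hook bodies already appear above:

        /*
         * 1. bus scan reads the header  -> pci_pre_fixup_toshiba_ohci1394()
         * 2. ohci1394 driver binds and calls pci_enable_device()
         * 3. device is powered back up  -> pci_post_fixup_toshiba_ohci1394()
         */
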