Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/Kconfig.debug    | 10
-rw-r--r--  arch/x86_64/ia32/ia32entry.S |  2
-rw-r--r--  arch/x86_64/kernel/process.c |  5
-rw-r--r--  arch/x86_64/kernel/syscall.c |  2
-rw-r--r--  arch/x86_64/mm/init.c        | 23
-rw-r--r--  arch/x86_64/mm/pageattr.c    |  9
6 files changed, 45 insertions(+), 6 deletions(-)
diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug
index e2c6e64a85ec..fcb06a50fdd2 100644
--- a/arch/x86_64/Kconfig.debug
+++ b/arch/x86_64/Kconfig.debug
@@ -9,6 +9,16 @@ config INIT_DEBUG
 	  Fill __init and __initdata at the end of boot. This helps debugging
 	  illegal uses of __init and __initdata after initialization.
 
+config DEBUG_RODATA
+	bool "Write protect kernel read-only data structures"
+	depends on DEBUG_KERNEL
+	help
+	  Mark the kernel read-only data as write-protected in the pagetables,
+	  in order to catch accidental (and incorrect) writes to such const data.
+	  This option may have a slight performance impact because a portion
+	  of the kernel code won't be covered by a 2MB TLB anymore.
+	  If in doubt, say "N".
+
 config IOMMU_DEBUG
 	depends on GART_IOMMU && DEBUG_KERNEL
 	bool "Enable IOMMU debugging"
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index e0eb0c712fe9..df0773c9bdbe 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -341,7 +341,7 @@ ENTRY(ia32_ptregs_common)
 	jmp ia32_sysret	/* misbalances the return cache */
 	CFI_ENDPROC
 
-	.data
+	.section .rodata,"a"
 	.align 8
 	.globl ia32_sys_call_table
 ia32_sys_call_table:
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 7519fc520eb3..3060ed97b755 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -157,7 +157,7 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
 DECLARE_PER_CPU(int, cpu_state);
 
 #include <asm/nmi.h>
-/* We don't actually take CPU down, just spin without interrupts. */
+/* We halt the CPU with physical CPU hotplug */
 static inline void play_dead(void)
 {
 	idle_task_exit();
@@ -166,8 +166,9 @@ static inline void play_dead(void)
 	/* Ack it */
 	__get_cpu_var(cpu_state) = CPU_DEAD;
 
+	local_irq_disable();
 	while (1)
-		safe_halt();
+		halt();
 }
 #else
 static inline void play_dead(void)
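
The reason for replacing safe_halt() with local_irq_disable() followed by halt(): a CPU that has been offlined for physical hotplug should not keep waking up on stray interrupts. On x86-64 the two primitives are thin wrappers around the corresponding instructions; their definitions are approximately as below (quoted from memory of <asm/system.h> of this era, so treat as an assumption):

	#define safe_halt()	__asm__ __volatile__("sti; hlt" : : : "memory")	/* re-enables interrupts, then halts */
	#define halt()		__asm__ __volatile__("hlt" : : : "memory")	/* halts with interrupts left as they are */

With interrupts disabled up front, the plain hlt parks the dead CPU instead of letting it bounce in and out of the halt state.
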
diff --git a/arch/x86_64/kernel/syscall.c b/arch/x86_64/kernel/syscall.c
index e263685f864c..7c176b3edde0 100644
--- a/arch/x86_64/kernel/syscall.c
+++ b/arch/x86_64/kernel/syscall.c
@@ -19,7 +19,7 @@ typedef void (*sys_call_ptr_t)(void);
 
 extern void sys_ni_syscall(void);
 
-sys_call_ptr_t sys_call_table[__NR_syscall_max+1] __cacheline_aligned = {
+const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
 	/* Smells like a like a compiler bug -- it doesn't work when the & below is removed. */
 	[0 ... __NR_syscall_max] = &sys_ni_syscall,
 #include <asm-x86_64/unistd.h>
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index c016dfe84784..1faae5fc1c01 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -498,6 +498,29 @@ void free_initmem(void)
 	printk ("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
 }
 
+#ifdef CONFIG_DEBUG_RODATA
+
+extern char __start_rodata, __end_rodata;
+void mark_rodata_ro(void)
+{
+	unsigned long addr = (unsigned long)&__start_rodata;
+
+	for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
+		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
+
+	printk ("Write protecting the kernel read-only data: %luk\n",
+			(&__end_rodata - &__start_rodata) >> 10);
+
+	/*
+	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
+	 * We do this after the printk so that if something went wrong in the
+	 * change, the printk gets out at least to give a better debug hint
+	 * of who is the culprit.
+	 */
+	global_flush_tlb();
+}
+#endif
+
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
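
The comment in mark_rodata_ro() spells out the usage contract of the pageattr interface: a batch of change_page_attr_addr() calls must be followed by one global_flush_tlb(). Purely as an illustration of that same pairing (none of this is in the diff, and the helper name is made up), undoing the protection would follow the identical pattern:

	/* hypothetical helper, for illustration only */
	static void mark_rodata_rw(void)
	{
		unsigned long addr = (unsigned long)&__start_rodata;

		for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
			change_page_attr_addr(addr, 1, PAGE_KERNEL);

		global_flush_tlb();	/* flush the stale read-only TLB entries on all CPUs */
	}
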
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index b90e8fe9eeb0..35f1f1aab063 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -128,6 +128,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 	pte_t *kpte;
 	struct page *kpte_page;
 	unsigned kpte_flags;
+	pgprot_t ref_prot2;
 	kpte = lookup_address(address);
 	if (!kpte) return 0;
 	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
@@ -140,10 +141,14 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 		 * split_large_page will take the reference for this change_page_attr
 		 * on the split page.
 		 */
-		struct page *split = split_large_page(address, prot, ref_prot);
+
+		struct page *split;
+		ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));
+
+		split = split_large_page(address, prot, ref_prot2);
 		if (!split)
 			return -ENOMEM;
-		set_pte(kpte,mk_pte(split, ref_prot));
+		set_pte(kpte,mk_pte(split, ref_prot2));
 		kpte_page = split;
 	}
 	get_page(kpte_page);
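
The ref_prot2 change covers the case where a 2MB kernel mapping has to be split: the 4KB entries created by split_large_page() for the pages that were not the target of the call should inherit whatever protections the large page actually carried, rather than the caller-supplied ref_prot. Conceptually the new reference protection is computed as below; the temporary variable is only for readability and is not in the patch:

	/* current protections of the 2MB pte, with only the PSE (large page) bit cleared */
	pgprot_t large_prot = pte_pgprot(*lookup_address(address));
	ref_prot2 = __pgprot(pgprot_val(large_prot) & ~(1 << _PAGE_BIT_PSE));

Attribute bits that happened to be set on the original large mapping are thus preserved on the untouched pages, while the page being changed still gets the requested prot.
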