72 files changed, 589 insertions(+), 389 deletions(-)
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index bc859a311eaf..45bf04eb7d70 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c | |||
@@ -2034,7 +2034,8 @@ sba_init(void) | |||
2034 | if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb")) | 2034 | if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb")) |
2035 | return 0; | 2035 | return 0; |
2036 | 2036 | ||
2037 | #if defined(CONFIG_IA64_GENERIC) && defined(CONFIG_CRASH_DUMP) | 2037 | #if defined(CONFIG_IA64_GENERIC) && defined(CONFIG_CRASH_DUMP) && \ |
2038 | defined(CONFIG_PROC_FS) | ||
2038 | /* If we are booting a kdump kernel, the sba_iommu will | 2039 | /* If we are booting a kdump kernel, the sba_iommu will |
2039 | * cause devices that were not shutdown properly to MCA | 2040 | * cause devices that were not shutdown properly to MCA |
2040 | * as soon as they are turned back on. Our only option for | 2041 | * as soon as they are turned back on. Our only option for |
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c index f6ae3ec93810..3e35987af458 100644 --- a/arch/ia64/ia32/binfmt_elf32.c +++ b/arch/ia64/ia32/binfmt_elf32.c | |||
@@ -52,33 +52,29 @@ extern struct page *ia32_shared_page[]; | |||
52 | extern unsigned long *ia32_gdt; | 52 | extern unsigned long *ia32_gdt; |
53 | extern struct page *ia32_gate_page; | 53 | extern struct page *ia32_gate_page; |
54 | 54 | ||
55 | struct page * | 55 | int |
56 | ia32_install_shared_page (struct vm_area_struct *vma, unsigned long address, int *type) | 56 | ia32_install_shared_page (struct vm_area_struct *vma, struct vm_fault *vmf) |
57 | { | 57 | { |
58 | struct page *pg = ia32_shared_page[smp_processor_id()]; | 58 | vmf->page = ia32_shared_page[smp_processor_id()]; |
59 | get_page(pg); | 59 | get_page(vmf->page); |
60 | if (type) | 60 | return 0; |
61 | *type = VM_FAULT_MINOR; | ||
62 | return pg; | ||
63 | } | 61 | } |
64 | 62 | ||
65 | struct page * | 63 | int |
66 | ia32_install_gate_page (struct vm_area_struct *vma, unsigned long address, int *type) | 64 | ia32_install_gate_page (struct vm_area_struct *vma, struct vm_fault *vmf) |
67 | { | 65 | { |
68 | struct page *pg = ia32_gate_page; | 66 | vmf->page = ia32_gate_page; |
69 | get_page(pg); | 67 | get_page(vmf->page); |
70 | if (type) | 68 | return 0; |
71 | *type = VM_FAULT_MINOR; | ||
72 | return pg; | ||
73 | } | 69 | } |
74 | 70 | ||
75 | 71 | ||
76 | static struct vm_operations_struct ia32_shared_page_vm_ops = { | 72 | static struct vm_operations_struct ia32_shared_page_vm_ops = { |
77 | .nopage = ia32_install_shared_page | 73 | .fault = ia32_install_shared_page |
78 | }; | 74 | }; |
79 | 75 | ||
80 | static struct vm_operations_struct ia32_gate_page_vm_ops = { | 76 | static struct vm_operations_struct ia32_gate_page_vm_ops = { |
81 | .nopage = ia32_install_gate_page | 77 | .fault = ia32_install_gate_page |
82 | }; | 78 | }; |
83 | 79 | ||
84 | void | 80 | void |
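Note: the two hunks above are the standard conversion from the old ->nopage handler to the ->fault handler. A minimal sketch of that pattern, assuming the 2.6.23+ fault API; the names my_page, my_fault and my_vm_ops are hypothetical stand-ins for the ia32-specific ones:

#include <linux/mm.h>

static struct page *my_page;	/* placeholder for the page being mapped */

static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	vmf->page = my_page;	/* report the page through struct vm_fault */
	get_page(vmf->page);	/* the fault path consumes one reference */
	return 0;		/* success; no VM_FAULT_MINOR bookkeeping needed */
}

static struct vm_operations_struct my_vm_ops = {
	.fault = my_fault,	/* replaces the removed .nopage hook */
};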
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index 4e5e27540e27..d3a41d5f8d12 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S | |||
@@ -1176,6 +1176,7 @@ tlb_purge_done: | |||
1176 | RESTORE_REG(cr.dcr, r25, r17);; | 1176 | RESTORE_REG(cr.dcr, r25, r17);; |
1177 | RESTORE_REG(cr.iva, r25, r17);; | 1177 | RESTORE_REG(cr.iva, r25, r17);; |
1178 | RESTORE_REG(cr.pta, r25, r17);; | 1178 | RESTORE_REG(cr.pta, r25, r17);; |
1179 | srlz.d;; // required not to violate RAW dependency | ||
1179 | RESTORE_REG(cr.itv, r25, r17);; | 1180 | RESTORE_REG(cr.itv, r25, r17);; |
1180 | RESTORE_REG(cr.pmv, r25, r17);; | 1181 | RESTORE_REG(cr.pmv, r25, r17);; |
1181 | RESTORE_REG(cr.cmcv, r25, r17);; | 1182 | RESTORE_REG(cr.cmcv, r25, r17);; |
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 00a4599e5f47..0b52f19ed046 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c | |||
@@ -200,7 +200,7 @@ assign_irq_vector (int irq) | |||
200 | { | 200 | { |
201 | unsigned long flags; | 201 | unsigned long flags; |
202 | int vector, cpu; | 202 | int vector, cpu; |
203 | cpumask_t domain; | 203 | cpumask_t domain = CPU_MASK_NONE; |
204 | 204 | ||
205 | vector = -ENOSPC; | 205 | vector = -ENOSPC; |
206 | 206 | ||
@@ -340,7 +340,7 @@ int create_irq(void) | |||
340 | { | 340 | { |
341 | unsigned long flags; | 341 | unsigned long flags; |
342 | int irq, vector, cpu; | 342 | int irq, vector, cpu; |
343 | cpumask_t domain; | 343 | cpumask_t domain = CPU_MASK_NONE; |
344 | 344 | ||
345 | irq = vector = -ENOSPC; | 345 | irq = vector = -ENOSPC; |
346 | spin_lock_irqsave(&vector_lock, flags); | 346 | spin_lock_irqsave(&vector_lock, flags); |
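Note: both hunks in this file give domain an explicit empty-mask initializer, presumably so that the failure path (no free vector found) never hands an uninitialized cpumask to anything downstream. A minimal sketch of the idea; pick_vector() is a hypothetical stand-in for the real allocators:

#include <linux/cpumask.h>
#include <linux/errno.h>

static int pick_vector(cpumask_t *out)
{
	cpumask_t domain = CPU_MASK_NONE;	/* stays empty unless a vector is found */
	int vector = -ENOSPC;

	/* the real code searches the per-CPU vector tables here and, on
	 * success, sets both vector and domain */

	*out = domain;		/* well defined even on the -ENOSPC path */
	return vector;
}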
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 10b48cd15a87..6dbf5919d2d0 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c | |||
@@ -75,6 +75,7 @@ | |||
75 | #include <linux/workqueue.h> | 75 | #include <linux/workqueue.h> |
76 | #include <linux/cpumask.h> | 76 | #include <linux/cpumask.h> |
77 | #include <linux/kdebug.h> | 77 | #include <linux/kdebug.h> |
78 | #include <linux/cpu.h> | ||
78 | 79 | ||
79 | #include <asm/delay.h> | 80 | #include <asm/delay.h> |
80 | #include <asm/machvec.h> | 81 | #include <asm/machvec.h> |
@@ -1813,6 +1814,36 @@ ia64_mca_cpu_init(void *cpu_data) | |||
1813 | PAGE_KERNEL)); | 1814 | PAGE_KERNEL)); |
1814 | } | 1815 | } |
1815 | 1816 | ||
1817 | static void __cpuinit ia64_mca_cmc_vector_adjust(void *dummy) | ||
1818 | { | ||
1819 | unsigned long flags; | ||
1820 | |||
1821 | local_irq_save(flags); | ||
1822 | if (!cmc_polling_enabled) | ||
1823 | ia64_mca_cmc_vector_enable(NULL); | ||
1824 | local_irq_restore(flags); | ||
1825 | } | ||
1826 | |||
1827 | static int __cpuinit mca_cpu_callback(struct notifier_block *nfb, | ||
1828 | unsigned long action, | ||
1829 | void *hcpu) | ||
1830 | { | ||
1831 | int hotcpu = (unsigned long) hcpu; | ||
1832 | |||
1833 | switch (action) { | ||
1834 | case CPU_ONLINE: | ||
1835 | case CPU_ONLINE_FROZEN: | ||
1836 | smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust, | ||
1837 | NULL, 1, 0); | ||
1838 | break; | ||
1839 | } | ||
1840 | return NOTIFY_OK; | ||
1841 | } | ||
1842 | |||
1843 | static struct notifier_block mca_cpu_notifier __cpuinitdata = { | ||
1844 | .notifier_call = mca_cpu_callback | ||
1845 | }; | ||
1846 | |||
1816 | /* | 1847 | /* |
1817 | * ia64_mca_init | 1848 | * ia64_mca_init |
1818 | * | 1849 | * |
@@ -1996,6 +2027,8 @@ ia64_mca_late_init(void) | |||
1996 | if (!mca_init) | 2027 | if (!mca_init) |
1997 | return 0; | 2028 | return 0; |
1998 | 2029 | ||
2030 | register_hotcpu_notifier(&mca_cpu_notifier); | ||
2031 | |||
1999 | /* Setup the CMCI/P vector and handler */ | 2032 | /* Setup the CMCI/P vector and handler */ |
2000 | init_timer(&cmc_poll_timer); | 2033 | init_timer(&cmc_poll_timer); |
2001 | cmc_poll_timer.function = ia64_mca_cmc_poll; | 2034 | cmc_poll_timer.function = ia64_mca_cmc_poll; |
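Note: the new hunks re-arm the CMC interrupt vector on CPUs as they come online. A minimal sketch of the hotcpu-notifier pattern being used; names prefixed example_ are hypothetical and the per-CPU work itself is elided:

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/kernel.h>

static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		/* run the per-CPU fixup on "cpu", e.g. via an IPI */
		printk(KERN_DEBUG "cpu %d is online, re-arming vectors\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier __cpuinitdata = {
	.notifier_call = example_cpu_callback,
};

static int __init example_late_init(void)
{
	register_hotcpu_notifier(&example_cpu_notifier);
	return 0;
}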
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 2418289ee5ca..7377d323131d 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/interrupt.h> | 27 | #include <linux/interrupt.h> |
28 | #include <linux/delay.h> | 28 | #include <linux/delay.h> |
29 | #include <linux/kdebug.h> | 29 | #include <linux/kdebug.h> |
30 | #include <linux/utsname.h> | ||
30 | 31 | ||
31 | #include <asm/cpu.h> | 32 | #include <asm/cpu.h> |
32 | #include <asm/delay.h> | 33 | #include <asm/delay.h> |
@@ -107,8 +108,9 @@ show_regs (struct pt_regs *regs) | |||
107 | print_modules(); | 108 | print_modules(); |
108 | printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current), | 109 | printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current), |
109 | smp_processor_id(), current->comm); | 110 | smp_processor_id(), current->comm); |
110 | printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s\n", | 111 | printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s (%s)\n", |
111 | regs->cr_ipsr, regs->cr_ifs, ip, print_tainted()); | 112 | regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(), |
113 | init_utsname()->release); | ||
112 | print_symbol("ip is at %s\n", ip); | 114 | print_symbol("ip is at %s\n", ip); |
113 | printk("unat: %016lx pfs : %016lx rsc : %016lx\n", | 115 | printk("unat: %016lx pfs : %016lx rsc : %016lx\n", |
114 | regs->ar_unat, regs->ar_pfs, regs->ar_rsc); | 116 | regs->ar_unat, regs->ar_pfs, regs->ar_rsc); |
@@ -737,6 +739,7 @@ flush_thread (void) | |||
737 | ia32_drop_ia64_partial_page_list(current); | 739 | ia32_drop_ia64_partial_page_list(current); |
738 | current->thread.task_size = IA32_PAGE_OFFSET; | 740 | current->thread.task_size = IA32_PAGE_OFFSET; |
739 | set_fs(USER_DS); | 741 | set_fs(USER_DS); |
742 | memset(current->thread.tls_array, 0, sizeof(current->thread.tls_array)); | ||
740 | } | 743 | } |
741 | #endif | 744 | #endif |
742 | } | 745 | } |
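Note: show_regs() now tags its first line with the running kernel's release string, which should make reports from a mismatched vmlinux easier to spot. A minimal sketch of the helper being used (the printk format is illustrative, not the file's):

#include <linux/utsname.h>
#include <linux/kernel.h>

static void print_kernel_release(void)
{
	/* init_utsname() returns the init uts namespace; ->release is the
	 * "uname -r" string of the running kernel */
	printk(KERN_INFO "running kernel: %s\n", init_utsname()->release);
}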
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 4c730099d58f..309da3567bc8 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c | |||
@@ -280,15 +280,7 @@ setup_sigcontext (struct sigcontext __user *sc, sigset_t *mask, struct sigscratc | |||
280 | err |= __copy_to_user(&sc->sc_gr[15], &scr->pt.r15, 8); /* r15 */ | 280 | err |= __copy_to_user(&sc->sc_gr[15], &scr->pt.r15, 8); /* r15 */ |
281 | err |= __put_user(scr->pt.cr_iip + ia64_psr(&scr->pt)->ri, &sc->sc_ip); | 281 | err |= __put_user(scr->pt.cr_iip + ia64_psr(&scr->pt)->ri, &sc->sc_ip); |
282 | 282 | ||
283 | if (flags & IA64_SC_FLAG_IN_SYSCALL) { | 283 | if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) { |
284 | /* Clear scratch registers if the signal interrupted a system call. */ | ||
285 | err |= __put_user(0, &sc->sc_ar_ccv); /* ar.ccv */ | ||
286 | err |= __put_user(0, &sc->sc_br[7]); /* b7 */ | ||
287 | err |= __put_user(0, &sc->sc_gr[14]); /* r14 */ | ||
288 | err |= __clear_user(&sc->sc_ar25, 2*8); /* ar.csd & ar.ssd */ | ||
289 | err |= __clear_user(&sc->sc_gr[2], 2*8); /* r2-r3 */ | ||
290 | err |= __clear_user(&sc->sc_gr[16], 16*8); /* r16-r31 */ | ||
291 | } else { | ||
292 | /* Copy scratch regs to sigcontext if the signal didn't interrupt a syscall. */ | 284 | /* Copy scratch regs to sigcontext if the signal didn't interrupt a syscall. */ |
293 | err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv); /* ar.ccv */ | 285 | err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv); /* ar.ccv */ |
294 | err |= __put_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */ | 286 | err |= __put_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */ |
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index a7be4f203420..2a90c32024f4 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c | |||
@@ -118,7 +118,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid) | |||
118 | for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++) | 118 | for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++) |
119 | SetPageUncached(&page[i]); | 119 | SetPageUncached(&page[i]); |
120 | 120 | ||
121 | flush_tlb_kernel_range(uc_addr, uc_adddr + IA64_GRANULE_SIZE); | 121 | flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE); |
122 | 122 | ||
123 | status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL); | 123 | status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL); |
124 | if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) { | 124 | if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) { |
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index cef164729db7..655da240d13c 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c | |||
@@ -10,6 +10,7 @@ | |||
10 | * IPI based ptc implementation and A-step IPI implementation. | 10 | * IPI based ptc implementation and A-step IPI implementation. |
11 | * Rohit Seth <rohit.seth@intel.com> | 11 | * Rohit Seth <rohit.seth@intel.com> |
12 | * Ken Chen <kenneth.w.chen@intel.com> | 12 | * Ken Chen <kenneth.w.chen@intel.com> |
13 | * Christophe de Dinechin <ddd@hp.com>: Avoid ptc.e on memory allocation | ||
13 | */ | 14 | */ |
14 | #include <linux/module.h> | 15 | #include <linux/module.h> |
15 | #include <linux/init.h> | 16 | #include <linux/init.h> |
@@ -89,9 +90,16 @@ ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start, | |||
89 | { | 90 | { |
90 | static DEFINE_SPINLOCK(ptcg_lock); | 91 | static DEFINE_SPINLOCK(ptcg_lock); |
91 | 92 | ||
92 | if (mm != current->active_mm || !current->mm) { | 93 | struct mm_struct *active_mm = current->active_mm; |
93 | flush_tlb_all(); | 94 | |
94 | return; | 95 | if (mm != active_mm) { |
96 | /* Restore region IDs for mm */ | ||
97 | if (mm && active_mm) { | ||
98 | activate_context(mm); | ||
99 | } else { | ||
100 | flush_tlb_all(); | ||
101 | return; | ||
102 | } | ||
95 | } | 103 | } |
96 | 104 | ||
97 | /* HW requires global serialization of ptc.ga. */ | 105 | /* HW requires global serialization of ptc.ga. */ |
@@ -107,6 +115,10 @@ ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start, | |||
107 | } while (start < end); | 115 | } while (start < end); |
108 | } | 116 | } |
109 | spin_unlock(&ptcg_lock); | 117 | spin_unlock(&ptcg_lock); |
118 | |||
119 | if (mm != active_mm) { | ||
120 | activate_context(active_mm); | ||
121 | } | ||
110 | } | 122 | } |
111 | 123 | ||
112 | void | 124 | void |
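Note: the change above (credited to Christophe de Dinechin in the new header comment) avoids a global ptc.e purge when the mm being flushed is not the active one: it temporarily installs that mm's region IDs with activate_context(), performs the targeted ptc.ga purges, then restores the caller's context. A condensed sketch of that flow, with the serialized purge loop elided; purge_with_temporary_rids() is a hypothetical wrapper, not the file's function:

#include <linux/sched.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

static void purge_with_temporary_rids(struct mm_struct *mm)
{
	struct mm_struct *active_mm = current->active_mm;

	if (mm != active_mm) {
		if (!mm || !active_mm) {
			flush_tlb_all();	/* no context to borrow, fall back */
			return;
		}
		activate_context(mm);		/* install mm's region IDs */
	}

	/* ... serialized ptc.ga loop over [start, end) goes here ... */

	if (mm != active_mm)
		activate_context(active_mm);	/* restore the caller's RIDs */
}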
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c index b362d6d6a8c8..9456d4034024 100644 --- a/arch/ia64/sn/kernel/bte.c +++ b/arch/ia64/sn/kernel/bte.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved. | 6 | * Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
@@ -227,7 +227,7 @@ retry_bteop: | |||
227 | BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na)); | 227 | BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na)); |
228 | 228 | ||
229 | if (transfer_stat & IBLS_ERROR) { | 229 | if (transfer_stat & IBLS_ERROR) { |
230 | bte_status = transfer_stat & ~IBLS_ERROR; | 230 | bte_status = BTE_GET_ERROR_STATUS(transfer_stat); |
231 | } else { | 231 | } else { |
232 | bte_status = BTE_SUCCESS; | 232 | bte_status = BTE_SUCCESS; |
233 | } | 233 | } |
diff --git a/arch/ia64/sn/kernel/bte_error.c b/arch/ia64/sn/kernel/bte_error.c index 27c5936ccfe9..4cb09f3f1efc 100644 --- a/arch/ia64/sn/kernel/bte_error.c +++ b/arch/ia64/sn/kernel/bte_error.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved. | 6 | * Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
@@ -148,7 +148,11 @@ int shub2_bte_error_handler(unsigned long _nodepda) | |||
148 | for (i = 0; i < BTES_PER_NODE; i++) { | 148 | for (i = 0; i < BTES_PER_NODE; i++) { |
149 | bte = &err_nodepda->bte_if[i]; | 149 | bte = &err_nodepda->bte_if[i]; |
150 | status = BTE_LNSTAT_LOAD(bte); | 150 | status = BTE_LNSTAT_LOAD(bte); |
151 | if ((status & IBLS_ERROR) || !(status & IBLS_BUSY)) | 151 | if (status & IBLS_ERROR) { |
152 | bte->bh_error = BTE_SHUB2_ERROR(status); | ||
153 | continue; | ||
154 | } | ||
155 | if (!(status & IBLS_BUSY)) | ||
152 | continue; | 156 | continue; |
153 | mod_timer(recovery_timer, jiffies + (HZ * 5)); | 157 | mod_timer(recovery_timer, jiffies + (HZ * 5)); |
154 | BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda, | 158 | BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda, |
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c index cee9379d44e0..e1a3e19d3d9c 100644 --- a/arch/ia64/sn/pci/tioce_provider.c +++ b/arch/ia64/sn/pci/tioce_provider.c | |||
@@ -41,7 +41,7 @@ | |||
41 | * } else | 41 | * } else |
42 | * do desired mmr access | 42 | * do desired mmr access |
43 | * | 43 | * |
44 | * According to hw, we can use reads instead of writes to the above addres | 44 | * According to hw, we can use reads instead of writes to the above address |
45 | * | 45 | * |
46 | * Note this WAR can only to be used for accessing internal MMR's in the | 46 | * Note this WAR can only to be used for accessing internal MMR's in the |
47 | * TIOCE Coretalk Address Range 0x0 - 0x07ff_ffff. This includes the | 47 | * TIOCE Coretalk Address Range 0x0 - 0x07ff_ffff. This includes the |
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index 96986b46bc85..edb5108e5d0e 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c | |||
@@ -849,7 +849,7 @@ void __init init_bsp_APIC(void) | |||
849 | /** | 849 | /** |
850 | * setup_local_APIC - setup the local APIC | 850 | * setup_local_APIC - setup the local APIC |
851 | */ | 851 | */ |
852 | void __devinit setup_local_APIC(void) | 852 | void __cpuinit setup_local_APIC(void) |
853 | { | 853 | { |
854 | unsigned long oldvalue, value, maxlvt, integrated; | 854 | unsigned long oldvalue, value, maxlvt, integrated; |
855 | int i, j; | 855 | int i, j; |
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index 6cf27319a91c..c3a565bba106 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c | |||
@@ -1882,13 +1882,16 @@ __setup("no_timer_check", notimercheck); | |||
1882 | static int __init timer_irq_works(void) | 1882 | static int __init timer_irq_works(void) |
1883 | { | 1883 | { |
1884 | unsigned long t1 = jiffies; | 1884 | unsigned long t1 = jiffies; |
1885 | unsigned long flags; | ||
1885 | 1886 | ||
1886 | if (no_timer_check) | 1887 | if (no_timer_check) |
1887 | return 1; | 1888 | return 1; |
1888 | 1889 | ||
1890 | local_save_flags(flags); | ||
1889 | local_irq_enable(); | 1891 | local_irq_enable(); |
1890 | /* Let ten ticks pass... */ | 1892 | /* Let ten ticks pass... */ |
1891 | mdelay((10 * 1000) / HZ); | 1893 | mdelay((10 * 1000) / HZ); |
1894 | local_irq_restore(flags); | ||
1892 | 1895 | ||
1893 | /* | 1896 | /* |
1894 | * Expect a few ticks at least, to be sure some possible | 1897 | * Expect a few ticks at least, to be sure some possible |
@@ -2167,6 +2170,9 @@ static inline void __init check_timer(void) | |||
2167 | int apic1, pin1, apic2, pin2; | 2170 | int apic1, pin1, apic2, pin2; |
2168 | int vector; | 2171 | int vector; |
2169 | unsigned int ver; | 2172 | unsigned int ver; |
2173 | unsigned long flags; | ||
2174 | |||
2175 | local_irq_save(flags); | ||
2170 | 2176 | ||
2171 | ver = apic_read(APIC_LVR); | 2177 | ver = apic_read(APIC_LVR); |
2172 | ver = GET_APIC_VERSION(ver); | 2178 | ver = GET_APIC_VERSION(ver); |
@@ -2219,7 +2225,7 @@ static inline void __init check_timer(void) | |||
2219 | } | 2225 | } |
2220 | if (disable_timer_pin_1 > 0) | 2226 | if (disable_timer_pin_1 > 0) |
2221 | clear_IO_APIC_pin(0, pin1); | 2227 | clear_IO_APIC_pin(0, pin1); |
2222 | return; | 2228 | goto out; |
2223 | } | 2229 | } |
2224 | clear_IO_APIC_pin(apic1, pin1); | 2230 | clear_IO_APIC_pin(apic1, pin1); |
2225 | printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to " | 2231 | printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to " |
@@ -2242,7 +2248,7 @@ static inline void __init check_timer(void) | |||
2242 | if (nmi_watchdog == NMI_IO_APIC) { | 2248 | if (nmi_watchdog == NMI_IO_APIC) { |
2243 | setup_nmi(); | 2249 | setup_nmi(); |
2244 | } | 2250 | } |
2245 | return; | 2251 | goto out; |
2246 | } | 2252 | } |
2247 | /* | 2253 | /* |
2248 | * Cleanup, just in case ... | 2254 | * Cleanup, just in case ... |
@@ -2266,7 +2272,7 @@ static inline void __init check_timer(void) | |||
2266 | 2272 | ||
2267 | if (timer_irq_works()) { | 2273 | if (timer_irq_works()) { |
2268 | printk(" works.\n"); | 2274 | printk(" works.\n"); |
2269 | return; | 2275 | goto out; |
2270 | } | 2276 | } |
2271 | apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector); | 2277 | apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector); |
2272 | printk(" failed.\n"); | 2278 | printk(" failed.\n"); |
@@ -2282,11 +2288,13 @@ static inline void __init check_timer(void) | |||
2282 | 2288 | ||
2283 | if (timer_irq_works()) { | 2289 | if (timer_irq_works()) { |
2284 | printk(" works.\n"); | 2290 | printk(" works.\n"); |
2285 | return; | 2291 | goto out; |
2286 | } | 2292 | } |
2287 | printk(" failed :(.\n"); | 2293 | printk(" failed :(.\n"); |
2288 | panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " | 2294 | panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " |
2289 | "report. Then try booting with the 'noapic' option"); | 2295 | "report. Then try booting with the 'noapic' option"); |
2296 | out: | ||
2297 | local_irq_restore(flags); | ||
2290 | } | 2298 | } |
2291 | 2299 | ||
2292 | /* | 2300 | /* |
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index 435a8c9b55f8..cbac1670c7c3 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c | |||
@@ -1281,10 +1281,13 @@ void disable_IO_APIC(void) | |||
1281 | static int __init timer_irq_works(void) | 1281 | static int __init timer_irq_works(void) |
1282 | { | 1282 | { |
1283 | unsigned long t1 = jiffies; | 1283 | unsigned long t1 = jiffies; |
1284 | unsigned long flags; | ||
1284 | 1285 | ||
1286 | local_save_flags(flags); | ||
1285 | local_irq_enable(); | 1287 | local_irq_enable(); |
1286 | /* Let ten ticks pass... */ | 1288 | /* Let ten ticks pass... */ |
1287 | mdelay((10 * 1000) / HZ); | 1289 | mdelay((10 * 1000) / HZ); |
1290 | local_irq_restore(flags); | ||
1288 | 1291 | ||
1289 | /* | 1292 | /* |
1290 | * Expect a few ticks at least, to be sure some possible | 1293 | * Expect a few ticks at least, to be sure some possible |
@@ -1655,6 +1658,9 @@ static inline void check_timer(void) | |||
1655 | { | 1658 | { |
1656 | struct irq_cfg *cfg = irq_cfg + 0; | 1659 | struct irq_cfg *cfg = irq_cfg + 0; |
1657 | int apic1, pin1, apic2, pin2; | 1660 | int apic1, pin1, apic2, pin2; |
1661 | unsigned long flags; | ||
1662 | |||
1663 | local_irq_save(flags); | ||
1658 | 1664 | ||
1659 | /* | 1665 | /* |
1660 | * get/set the timer IRQ vector: | 1666 | * get/set the timer IRQ vector: |
@@ -1696,7 +1702,7 @@ static inline void check_timer(void) | |||
1696 | } | 1702 | } |
1697 | if (disable_timer_pin_1 > 0) | 1703 | if (disable_timer_pin_1 > 0) |
1698 | clear_IO_APIC_pin(0, pin1); | 1704 | clear_IO_APIC_pin(0, pin1); |
1699 | return; | 1705 | goto out; |
1700 | } | 1706 | } |
1701 | clear_IO_APIC_pin(apic1, pin1); | 1707 | clear_IO_APIC_pin(apic1, pin1); |
1702 | apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not " | 1708 | apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not " |
@@ -1718,7 +1724,7 @@ static inline void check_timer(void) | |||
1718 | if (nmi_watchdog == NMI_IO_APIC) { | 1724 | if (nmi_watchdog == NMI_IO_APIC) { |
1719 | setup_nmi(); | 1725 | setup_nmi(); |
1720 | } | 1726 | } |
1721 | return; | 1727 | goto out; |
1722 | } | 1728 | } |
1723 | /* | 1729 | /* |
1724 | * Cleanup, just in case ... | 1730 | * Cleanup, just in case ... |
@@ -1741,7 +1747,7 @@ static inline void check_timer(void) | |||
1741 | 1747 | ||
1742 | if (timer_irq_works()) { | 1748 | if (timer_irq_works()) { |
1743 | apic_printk(APIC_VERBOSE," works.\n"); | 1749 | apic_printk(APIC_VERBOSE," works.\n"); |
1744 | return; | 1750 | goto out; |
1745 | } | 1751 | } |
1746 | apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); | 1752 | apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); |
1747 | apic_printk(APIC_VERBOSE," failed.\n"); | 1753 | apic_printk(APIC_VERBOSE," failed.\n"); |
@@ -1756,10 +1762,12 @@ static inline void check_timer(void) | |||
1756 | 1762 | ||
1757 | if (timer_irq_works()) { | 1763 | if (timer_irq_works()) { |
1758 | apic_printk(APIC_VERBOSE," works.\n"); | 1764 | apic_printk(APIC_VERBOSE," works.\n"); |
1759 | return; | 1765 | goto out; |
1760 | } | 1766 | } |
1761 | apic_printk(APIC_VERBOSE," failed :(.\n"); | 1767 | apic_printk(APIC_VERBOSE," failed :(.\n"); |
1762 | panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n"); | 1768 | panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n"); |
1769 | out: | ||
1770 | local_irq_restore(flags); | ||
1763 | } | 1771 | } |
1764 | 1772 | ||
1765 | static int __init notimercheck(char *s) | 1773 | static int __init notimercheck(char *s) |
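Note: the 32-bit and 64-bit timer_irq_works()/check_timer() changes follow the same pattern: remember the caller's interrupt state, enable interrupts only for the duration of the probe, and restore the saved state on every exit path (hence the return -> goto out conversions). A minimal sketch of that pattern:

#include <linux/irqflags.h>
#include <linux/delay.h>

static void probe_with_irqs_briefly_enabled(void)
{
	unsigned long flags;

	local_save_flags(flags);	/* remember the caller's IRQ state */
	local_irq_enable();		/* let timer ticks come in */
	mdelay(10);			/* illustrative delay only */
	local_irq_restore(flags);	/* never leave IRQs unconditionally on */
}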
diff --git a/arch/x86/kernel/kprobes_32.c b/arch/x86/kernel/kprobes_32.c index d87a523070d1..3a020f79f82b 100644 --- a/arch/x86/kernel/kprobes_32.c +++ b/arch/x86/kernel/kprobes_32.c | |||
@@ -727,9 +727,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | |||
727 | 727 | ||
728 | if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) { | 728 | if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) { |
729 | if (®s->esp != kcb->jprobe_saved_esp) { | 729 | if (®s->esp != kcb->jprobe_saved_esp) { |
730 | struct pt_regs *saved_regs = | 730 | struct pt_regs *saved_regs = &kcb->jprobe_saved_regs; |
731 | container_of(kcb->jprobe_saved_esp, | ||
732 | struct pt_regs, esp); | ||
733 | printk("current esp %p does not match saved esp %p\n", | 731 | printk("current esp %p does not match saved esp %p\n", |
734 | ®s->esp, kcb->jprobe_saved_esp); | 732 | ®s->esp, kcb->jprobe_saved_esp); |
735 | printk("Saved registers for jprobe %p\n", jp); | 733 | printk("Saved registers for jprobe %p\n", jp); |
diff --git a/arch/x86/kernel/kprobes_64.c b/arch/x86/kernel/kprobes_64.c index 0c467644589c..5df19a9f9239 100644 --- a/arch/x86/kernel/kprobes_64.c +++ b/arch/x86/kernel/kprobes_64.c | |||
@@ -485,7 +485,6 @@ static void __kprobes resume_execution(struct kprobe *p, | |||
485 | struct pt_regs *regs, struct kprobe_ctlblk *kcb) | 485 | struct pt_regs *regs, struct kprobe_ctlblk *kcb) |
486 | { | 486 | { |
487 | unsigned long *tos = (unsigned long *)regs->rsp; | 487 | unsigned long *tos = (unsigned long *)regs->rsp; |
488 | unsigned long next_rip = 0; | ||
489 | unsigned long copy_rip = (unsigned long)p->ainsn.insn; | 488 | unsigned long copy_rip = (unsigned long)p->ainsn.insn; |
490 | unsigned long orig_rip = (unsigned long)p->addr; | 489 | unsigned long orig_rip = (unsigned long)p->addr; |
491 | kprobe_opcode_t *insn = p->ainsn.insn; | 490 | kprobe_opcode_t *insn = p->ainsn.insn; |
@@ -494,46 +493,42 @@ static void __kprobes resume_execution(struct kprobe *p, | |||
494 | if (*insn >= 0x40 && *insn <= 0x4f) | 493 | if (*insn >= 0x40 && *insn <= 0x4f) |
495 | insn++; | 494 | insn++; |
496 | 495 | ||
496 | regs->eflags &= ~TF_MASK; | ||
497 | switch (*insn) { | 497 | switch (*insn) { |
498 | case 0x9c: /* pushfl */ | 498 | case 0x9c: /* pushfl */ |
499 | *tos &= ~(TF_MASK | IF_MASK); | 499 | *tos &= ~(TF_MASK | IF_MASK); |
500 | *tos |= kcb->kprobe_old_rflags; | 500 | *tos |= kcb->kprobe_old_rflags; |
501 | break; | 501 | break; |
502 | case 0xc3: /* ret/lret */ | 502 | case 0xc2: /* iret/ret/lret */ |
503 | case 0xcb: | 503 | case 0xc3: |
504 | case 0xc2: | ||
505 | case 0xca: | 504 | case 0xca: |
506 | regs->eflags &= ~TF_MASK; | 505 | case 0xcb: |
507 | /* rip is already adjusted, no more changes required*/ | 506 | case 0xcf: |
508 | return; | 507 | case 0xea: /* jmp absolute -- ip is correct */ |
509 | case 0xe8: /* call relative - Fix return addr */ | 508 | /* ip is already adjusted, no more changes required */ |
509 | goto no_change; | ||
510 | case 0xe8: /* call relative - Fix return addr */ | ||
510 | *tos = orig_rip + (*tos - copy_rip); | 511 | *tos = orig_rip + (*tos - copy_rip); |
511 | break; | 512 | break; |
512 | case 0xff: | 513 | case 0xff: |
513 | if ((insn[1] & 0x30) == 0x10) { | 514 | if ((insn[1] & 0x30) == 0x10) { |
514 | /* call absolute, indirect */ | 515 | /* call absolute, indirect */ |
515 | /* Fix return addr; rip is correct. */ | 516 | /* Fix return addr; ip is correct. */ |
516 | next_rip = regs->rip; | ||
517 | *tos = orig_rip + (*tos - copy_rip); | 517 | *tos = orig_rip + (*tos - copy_rip); |
518 | goto no_change; | ||
518 | } else if (((insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */ | 519 | } else if (((insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */ |
519 | ((insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */ | 520 | ((insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */ |
520 | /* rip is correct. */ | 521 | /* ip is correct. */ |
521 | next_rip = regs->rip; | 522 | goto no_change; |
522 | } | 523 | } |
523 | break; | ||
524 | case 0xea: /* jmp absolute -- rip is correct */ | ||
525 | next_rip = regs->rip; | ||
526 | break; | ||
527 | default: | 524 | default: |
528 | break; | 525 | break; |
529 | } | 526 | } |
530 | 527 | ||
531 | regs->eflags &= ~TF_MASK; | 528 | regs->rip = orig_rip + (regs->rip - copy_rip); |
532 | if (next_rip) { | 529 | no_change: |
533 | regs->rip = next_rip; | 530 | |
534 | } else { | 531 | return; |
535 | regs->rip = orig_rip + (regs->rip - copy_rip); | ||
536 | } | ||
537 | } | 532 | } |
538 | 533 | ||
539 | int __kprobes post_kprobe_handler(struct pt_regs *regs) | 534 | int __kprobes post_kprobe_handler(struct pt_regs *regs) |
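Note: resume_execution() above no longer tracks a separate next_rip. It clears the trap flag up front, jumps straight to no_change for instructions whose rip is already correct (ret/iret, absolute jmp, and indirect jmp/call after the return-address fixup), and otherwise rebases rip from the instruction copy back to the original address. A condensed sketch of that control flow with the opcode decoding elided; the function name and the rip_already_correct flag are hypothetical:

#include <asm/ptrace.h>

#ifndef TF_MASK
#define TF_MASK 0x00000100	/* EFLAGS trap flag, as used in this file */
#endif

static void fixup_after_single_step(struct pt_regs *regs,
				    unsigned long orig_rip,
				    unsigned long copy_rip,
				    int rip_already_correct)
{
	regs->eflags &= ~TF_MASK;	/* always drop the single-step flag */

	if (rip_already_correct)	/* the "goto no_change" cases */
		return;

	/* the displacement inside the copied slot equals the displacement
	 * from the original instruction, so rebase it */
	regs->rip = orig_rip + (regs->rip - copy_rip);
}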
@@ -716,10 +711,8 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | |||
716 | struct jprobe *jp = container_of(p, struct jprobe, kp); | 711 | struct jprobe *jp = container_of(p, struct jprobe, kp); |
717 | 712 | ||
718 | if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) { | 713 | if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) { |
719 | if ((long *)regs->rsp != kcb->jprobe_saved_rsp) { | 714 | if ((unsigned long *)regs->rsp != kcb->jprobe_saved_rsp) { |
720 | struct pt_regs *saved_regs = | 715 | struct pt_regs *saved_regs = &kcb->jprobe_saved_regs; |
721 | container_of(kcb->jprobe_saved_rsp, | ||
722 | struct pt_regs, rsp); | ||
723 | printk("current rsp %p does not match saved rsp %p\n", | 716 | printk("current rsp %p does not match saved rsp %p\n", |
724 | (long *)regs->rsp, kcb->jprobe_saved_rsp); | 717 | (long *)regs->rsp, kcb->jprobe_saved_rsp); |
725 | printk("Saved registers for jprobe %p\n", jp); | 718 | printk("Saved registers for jprobe %p\n", jp); |
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 7b899584d290..9663c2a74830 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -261,7 +261,7 @@ static void mwait_idle(void) | |||
261 | mwait_idle_with_hints(0, 0); | 261 | mwait_idle_with_hints(0, 0); |
262 | } | 262 | } |
263 | 263 | ||
264 | void __devinit select_idle_routine(const struct cpuinfo_x86 *c) | 264 | void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) |
265 | { | 265 | { |
266 | if (cpu_has(c, X86_FEATURE_MWAIT)) { | 266 | if (cpu_has(c, X86_FEATURE_MWAIT)) { |
267 | printk("monitor/mwait feature present.\n"); | 267 | printk("monitor/mwait feature present.\n"); |
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c index e1e18c34c821..9c24b45b513c 100644 --- a/arch/x86/kernel/setup_32.c +++ b/arch/x86/kernel/setup_32.c | |||
@@ -67,7 +67,7 @@ | |||
67 | address, and must not be in the .bss segment! */ | 67 | address, and must not be in the .bss segment! */ |
68 | unsigned long init_pg_tables_end __initdata = ~0UL; | 68 | unsigned long init_pg_tables_end __initdata = ~0UL; |
69 | 69 | ||
70 | int disable_pse __devinitdata = 0; | 70 | int disable_pse __cpuinitdata = 0; |
71 | 71 | ||
72 | /* | 72 | /* |
73 | * Machine setup.. | 73 | * Machine setup.. |
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index ef0f34ede1ab..4ea80cbe52e5 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c | |||
@@ -60,7 +60,7 @@ | |||
60 | #include <asm/mtrr.h> | 60 | #include <asm/mtrr.h> |
61 | 61 | ||
62 | /* Set if we find a B stepping CPU */ | 62 | /* Set if we find a B stepping CPU */ |
63 | static int __devinitdata smp_b_stepping; | 63 | static int __cpuinitdata smp_b_stepping; |
64 | 64 | ||
65 | /* Number of siblings per CPU package */ | 65 | /* Number of siblings per CPU package */ |
66 | int smp_num_siblings = 1; | 66 | int smp_num_siblings = 1; |
@@ -745,8 +745,8 @@ static inline int alloc_cpu_id(void) | |||
745 | } | 745 | } |
746 | 746 | ||
747 | #ifdef CONFIG_HOTPLUG_CPU | 747 | #ifdef CONFIG_HOTPLUG_CPU |
748 | static struct task_struct * __devinitdata cpu_idle_tasks[NR_CPUS]; | 748 | static struct task_struct * __cpuinitdata cpu_idle_tasks[NR_CPUS]; |
749 | static inline struct task_struct * alloc_idle_task(int cpu) | 749 | static inline struct task_struct * __cpuinit alloc_idle_task(int cpu) |
750 | { | 750 | { |
751 | struct task_struct *idle; | 751 | struct task_struct *idle; |
752 | 752 | ||
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 500670c93d81..594889521da1 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c | |||
@@ -141,8 +141,8 @@ static void __cpuinit smp_store_cpu_info(int id) | |||
141 | struct cpuinfo_x86 *c = &cpu_data(id); | 141 | struct cpuinfo_x86 *c = &cpu_data(id); |
142 | 142 | ||
143 | *c = boot_cpu_data; | 143 | *c = boot_cpu_data; |
144 | c->cpu_index = id; | ||
145 | identify_cpu(c); | 144 | identify_cpu(c); |
145 | c->cpu_index = id; | ||
146 | print_cpu_info(c); | 146 | print_cpu_info(c); |
147 | } | 147 | } |
148 | 148 | ||
diff --git a/arch/x86/oprofile/op_model_athlon.c b/arch/x86/oprofile/op_model_athlon.c index 3057a19e4641..c3ee43333f26 100644 --- a/arch/x86/oprofile/op_model_athlon.c +++ b/arch/x86/oprofile/op_model_athlon.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /** | 1 | /** |
2 | * @file op_model_athlon.h | 2 | * @file op_model_athlon.h |
3 | * athlon / K7 model-specific MSR operations | 3 | * athlon / K7 / K8 / Family 10h model-specific MSR operations |
4 | * | 4 | * |
5 | * @remark Copyright 2002 OProfile authors | 5 | * @remark Copyright 2002 OProfile authors |
6 | * @remark Read the file COPYING | 6 | * @remark Read the file COPYING |
@@ -31,12 +31,16 @@ | |||
31 | #define CTRL_WRITE(l,h,msrs,c) do {wrmsr(msrs->controls[(c)].addr, (l), (h));} while (0) | 31 | #define CTRL_WRITE(l,h,msrs,c) do {wrmsr(msrs->controls[(c)].addr, (l), (h));} while (0) |
32 | #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) | 32 | #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) |
33 | #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) | 33 | #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) |
34 | #define CTRL_CLEAR(x) (x &= (1<<21)) | 34 | #define CTRL_CLEAR_LO(x) (x &= (1<<21)) |
35 | #define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0) | ||
35 | #define CTRL_SET_ENABLE(val) (val |= 1<<20) | 36 | #define CTRL_SET_ENABLE(val) (val |= 1<<20) |
36 | #define CTRL_SET_USR(val,u) (val |= ((u & 1) << 16)) | 37 | #define CTRL_SET_USR(val,u) (val |= ((u & 1) << 16)) |
37 | #define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17)) | 38 | #define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17)) |
38 | #define CTRL_SET_UM(val, m) (val |= (m << 8)) | 39 | #define CTRL_SET_UM(val, m) (val |= (m << 8)) |
39 | #define CTRL_SET_EVENT(val, e) (val |= e) | 40 | #define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff)) |
41 | #define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf)) | ||
42 | #define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9)) | ||
43 | #define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8)) | ||
40 | 44 | ||
41 | static unsigned long reset_value[NUM_COUNTERS]; | 45 | static unsigned long reset_value[NUM_COUNTERS]; |
42 | 46 | ||
@@ -70,7 +74,8 @@ static void athlon_setup_ctrs(struct op_msrs const * const msrs) | |||
70 | if (unlikely(!CTRL_IS_RESERVED(msrs,i))) | 74 | if (unlikely(!CTRL_IS_RESERVED(msrs,i))) |
71 | continue; | 75 | continue; |
72 | CTRL_READ(low, high, msrs, i); | 76 | CTRL_READ(low, high, msrs, i); |
73 | CTRL_CLEAR(low); | 77 | CTRL_CLEAR_LO(low); |
78 | CTRL_CLEAR_HI(high); | ||
74 | CTRL_WRITE(low, high, msrs, i); | 79 | CTRL_WRITE(low, high, msrs, i); |
75 | } | 80 | } |
76 | 81 | ||
@@ -89,12 +94,17 @@ static void athlon_setup_ctrs(struct op_msrs const * const msrs) | |||
89 | CTR_WRITE(counter_config[i].count, msrs, i); | 94 | CTR_WRITE(counter_config[i].count, msrs, i); |
90 | 95 | ||
91 | CTRL_READ(low, high, msrs, i); | 96 | CTRL_READ(low, high, msrs, i); |
92 | CTRL_CLEAR(low); | 97 | CTRL_CLEAR_LO(low); |
98 | CTRL_CLEAR_HI(high); | ||
93 | CTRL_SET_ENABLE(low); | 99 | CTRL_SET_ENABLE(low); |
94 | CTRL_SET_USR(low, counter_config[i].user); | 100 | CTRL_SET_USR(low, counter_config[i].user); |
95 | CTRL_SET_KERN(low, counter_config[i].kernel); | 101 | CTRL_SET_KERN(low, counter_config[i].kernel); |
96 | CTRL_SET_UM(low, counter_config[i].unit_mask); | 102 | CTRL_SET_UM(low, counter_config[i].unit_mask); |
97 | CTRL_SET_EVENT(low, counter_config[i].event); | 103 | CTRL_SET_EVENT_LOW(low, counter_config[i].event); |
104 | CTRL_SET_EVENT_HIGH(high, counter_config[i].event); | ||
105 | CTRL_SET_HOST_ONLY(high, 0); | ||
106 | CTRL_SET_GUEST_ONLY(high, 0); | ||
107 | |||
98 | CTRL_WRITE(low, high, msrs, i); | 108 | CTRL_WRITE(low, high, msrs, i); |
99 | } else { | 109 | } else { |
100 | reset_value[i] = 0; | 110 | reset_value[i] = 0; |
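Note: Family 10h event selects are wider than 8 bits, so the counter setup now splits the event number across the low and high halves of the control MSR (CTRL_SET_EVENT_LOW/HIGH) and clears the host/guest-only bits. An illustrative sketch of the split, written without the file's macros:

/* event[7:0] stays in the low 32-bit word, event[11:8] goes into the
 * low nibble of the high word, matching CTRL_SET_EVENT_LOW/HIGH above. */
static void split_event_select(unsigned int event,
			       unsigned int *low, unsigned int *high)
{
	*low  |= event & 0xff;
	*high |= (event >> 8) & 0xf;
}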
diff --git a/block/as-iosched.c b/block/as-iosched.c index dc715a562e14..cb5e53b05c7c 100644 --- a/block/as-iosched.c +++ b/block/as-iosched.c | |||
@@ -880,7 +880,7 @@ static void as_remove_queued_request(struct request_queue *q, | |||
880 | } | 880 | } |
881 | 881 | ||
882 | /* | 882 | /* |
883 | * as_fifo_expired returns 0 if there are no expired reads on the fifo, | 883 | * as_fifo_expired returns 0 if there are no expired requests on the fifo, |
884 | * 1 otherwise. It is ratelimited so that we only perform the check once per | 884 | * 1 otherwise. It is ratelimited so that we only perform the check once per |
885 | * `fifo_expire' interval. Otherwise a large number of expired requests | 885 | * `fifo_expire' interval. Otherwise a large number of expired requests |
886 | * would create a hopeless seekstorm. | 886 | * would create a hopeless seekstorm. |
@@ -1097,7 +1097,8 @@ dispatch_writes: | |||
1097 | ad->batch_data_dir = REQ_ASYNC; | 1097 | ad->batch_data_dir = REQ_ASYNC; |
1098 | ad->current_write_count = ad->write_batch_count; | 1098 | ad->current_write_count = ad->write_batch_count; |
1099 | ad->write_batch_idled = 0; | 1099 | ad->write_batch_idled = 0; |
1100 | rq = ad->next_rq[ad->batch_data_dir]; | 1100 | rq = rq_entry_fifo(ad->fifo_list[REQ_ASYNC].next); |
1101 | ad->last_check_fifo[REQ_ASYNC] = jiffies; | ||
1101 | goto dispatch_request; | 1102 | goto dispatch_request; |
1102 | } | 1103 | } |
1103 | 1104 | ||
@@ -1159,7 +1160,7 @@ static void as_add_request(struct request_queue *q, struct request *rq) | |||
1159 | as_add_rq_rb(ad, rq); | 1160 | as_add_rq_rb(ad, rq); |
1160 | 1161 | ||
1161 | /* | 1162 | /* |
1162 | * set expire time (only used for reads) and add to fifo list | 1163 | * set expire time and add to fifo list |
1163 | */ | 1164 | */ |
1164 | rq_set_fifo_time(rq, jiffies + ad->fifo_expire[data_dir]); | 1165 | rq_set_fifo_time(rq, jiffies + ad->fifo_expire[data_dir]); |
1165 | list_add_tail(&rq->queuelist, &ad->fifo_list[data_dir]); | 1166 | list_add_tail(&rq->queuelist, &ad->fifo_list[data_dir]); |
@@ -1463,7 +1464,9 @@ static struct elevator_type iosched_as = { | |||
1463 | 1464 | ||
1464 | static int __init as_init(void) | 1465 | static int __init as_init(void) |
1465 | { | 1466 | { |
1466 | return elv_register(&iosched_as); | 1467 | elv_register(&iosched_as); |
1468 | |||
1469 | return 0; | ||
1467 | } | 1470 | } |
1468 | 1471 | ||
1469 | static void __exit as_exit(void) | 1472 | static void __exit as_exit(void) |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 0b4a47905575..13553e015d72 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -2279,8 +2279,6 @@ static struct elevator_type iosched_cfq = { | |||
2279 | 2279 | ||
2280 | static int __init cfq_init(void) | 2280 | static int __init cfq_init(void) |
2281 | { | 2281 | { |
2282 | int ret; | ||
2283 | |||
2284 | /* | 2282 | /* |
2285 | * could be 0 on HZ < 1000 setups | 2283 | * could be 0 on HZ < 1000 setups |
2286 | */ | 2284 | */ |
@@ -2292,11 +2290,9 @@ static int __init cfq_init(void) | |||
2292 | if (cfq_slab_setup()) | 2290 | if (cfq_slab_setup()) |
2293 | return -ENOMEM; | 2291 | return -ENOMEM; |
2294 | 2292 | ||
2295 | ret = elv_register(&iosched_cfq); | 2293 | elv_register(&iosched_cfq); |
2296 | if (ret) | ||
2297 | cfq_slab_kill(); | ||
2298 | 2294 | ||
2299 | return ret; | 2295 | return 0; |
2300 | } | 2296 | } |
2301 | 2297 | ||
2302 | static void __exit cfq_exit(void) | 2298 | static void __exit cfq_exit(void) |
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c index a054eef8dff6..342448c3d2dd 100644 --- a/block/deadline-iosched.c +++ b/block/deadline-iosched.c | |||
@@ -467,7 +467,9 @@ static struct elevator_type iosched_deadline = { | |||
467 | 467 | ||
468 | static int __init deadline_init(void) | 468 | static int __init deadline_init(void) |
469 | { | 469 | { |
470 | return elv_register(&iosched_deadline); | 470 | elv_register(&iosched_deadline); |
471 | |||
472 | return 0; | ||
471 | } | 473 | } |
472 | 474 | ||
473 | static void __exit deadline_exit(void) | 475 | static void __exit deadline_exit(void) |
diff --git a/block/elevator.c b/block/elevator.c index 446aea2a3cfb..e452deb80395 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
@@ -960,7 +960,7 @@ void elv_unregister_queue(struct request_queue *q) | |||
960 | __elv_unregister_queue(q->elevator); | 960 | __elv_unregister_queue(q->elevator); |
961 | } | 961 | } |
962 | 962 | ||
963 | int elv_register(struct elevator_type *e) | 963 | void elv_register(struct elevator_type *e) |
964 | { | 964 | { |
965 | char *def = ""; | 965 | char *def = ""; |
966 | 966 | ||
@@ -975,7 +975,6 @@ int elv_register(struct elevator_type *e) | |||
975 | def = " (default)"; | 975 | def = " (default)"; |
976 | 976 | ||
977 | printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, def); | 977 | printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, def); |
978 | return 0; | ||
979 | } | 978 | } |
980 | EXPORT_SYMBOL_GPL(elv_register); | 979 | EXPORT_SYMBOL_GPL(elv_register); |
981 | 980 | ||
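Note: elv_register() can no longer fail, so it now returns void and each scheduler's init routine (as, cfq and deadline above, noop below) simply registers and returns 0 itself. A minimal sketch of the new registration idiom; iosched_example is hypothetical:

#include <linux/elevator.h>
#include <linux/init.h>

static struct elevator_type iosched_example;	/* hypothetical elevator */

static int __init example_iosched_init(void)
{
	elv_register(&iosched_example);	/* void: nothing to propagate */
	return 0;
}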
diff --git a/block/noop-iosched.c b/block/noop-iosched.c index 7563d8aa3944..c23e02969650 100644 --- a/block/noop-iosched.c +++ b/block/noop-iosched.c | |||
@@ -101,7 +101,9 @@ static struct elevator_type elevator_noop = { | |||
101 | 101 | ||
102 | static int __init noop_init(void) | 102 | static int __init noop_init(void) |
103 | { | 103 | { |
104 | return elv_register(&elevator_noop); | 104 | elv_register(&elevator_noop); |
105 | |||
106 | return 0; | ||
105 | } | 107 | } |
106 | 108 | ||
107 | static void __exit noop_exit(void) | 109 | static void __exit noop_exit(void) |
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 91c73224f4c6..9675b34638d4 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c | |||
@@ -230,7 +230,7 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq, | |||
230 | rq->cmd_len = hdr->cmd_len; | 230 | rq->cmd_len = hdr->cmd_len; |
231 | rq->cmd_type = REQ_TYPE_BLOCK_PC; | 231 | rq->cmd_type = REQ_TYPE_BLOCK_PC; |
232 | 232 | ||
233 | rq->timeout = (hdr->timeout * HZ) / 1000; | 233 | rq->timeout = msecs_to_jiffies(hdr->timeout); |
234 | if (!rq->timeout) | 234 | if (!rq->timeout) |
235 | rq->timeout = q->sg_timeout; | 235 | rq->timeout = q->sg_timeout; |
236 | if (!rq->timeout) | 236 | if (!rq->timeout) |
@@ -366,7 +366,7 @@ static int sg_io(struct file *file, struct request_queue *q, | |||
366 | */ | 366 | */ |
367 | blk_execute_rq(q, bd_disk, rq, 0); | 367 | blk_execute_rq(q, bd_disk, rq, 0); |
368 | 368 | ||
369 | hdr->duration = ((jiffies - start_time) * 1000) / HZ; | 369 | hdr->duration = jiffies_to_msecs(jiffies - start_time); |
370 | 370 | ||
371 | return blk_complete_sghdr_rq(rq, hdr, bio); | 371 | return blk_complete_sghdr_rq(rq, hdr, bio); |
372 | out: | 372 | out: |
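Note: the SG_IO path now uses the dedicated jiffies/milliseconds helpers instead of open-coded HZ arithmetic, which avoids rounding surprises on HZ values that do not divide 1000 evenly. A minimal sketch of the two conversions; the wrapper names are hypothetical:

#include <linux/jiffies.h>

/* user-supplied timeout in milliseconds -> jiffies for the request */
static unsigned long sgio_timeout_jiffies(unsigned int timeout_ms)
{
	return msecs_to_jiffies(timeout_ms);		/* was (ms * HZ) / 1000 */
}

/* elapsed jiffies -> milliseconds reported back in hdr->duration */
static unsigned int sgio_duration_ms(unsigned long start)
{
	return jiffies_to_msecs(jiffies - start);	/* was (delta * 1000) / HZ */
}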
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index 46dc70e0dee7..c79f066c2bc9 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c | |||
@@ -375,7 +375,7 @@ static int hpt374_pre_reset(struct ata_link *link, unsigned long deadline) | |||
375 | pci_write_config_word(pdev, mcrbase + 2, mcr3 | 0x8000); | 375 | pci_write_config_word(pdev, mcrbase + 2, mcr3 | 0x8000); |
376 | pci_read_config_byte(pdev, 0x5A, &ata66); | 376 | pci_read_config_byte(pdev, 0x5A, &ata66); |
377 | /* Reset TCBLID/FCBLID to output */ | 377 | /* Reset TCBLID/FCBLID to output */ |
378 | pci_write_config_word(pdev, 0x52, mcr3); | 378 | pci_write_config_word(pdev, mcrbase + 2, mcr3); |
379 | 379 | ||
380 | if (ata66 & (2 >> ap->port_no)) | 380 | if (ata66 & (2 >> ap->port_no)) |
381 | ap->cbl = ATA_CBL_PATA40; | 381 | ap->cbl = ATA_CBL_PATA40; |
diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 5f5095afb06b..c24e1bdbad43 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c | |||
@@ -34,7 +34,7 @@ | |||
34 | * - set initialised bit then. | 34 | * - set initialised bit then. |
35 | */ | 35 | */ |
36 | 36 | ||
37 | //#define DEBUG /* uncomment if you want debugging info (pr_debug) */ | 37 | #undef DEBUG /* #define DEBUG if you want debugging info (pr_debug) */ |
38 | #include <linux/fs.h> | 38 | #include <linux/fs.h> |
39 | #include <linux/bio.h> | 39 | #include <linux/bio.h> |
40 | #include <linux/kernel.h> | 40 | #include <linux/kernel.h> |
@@ -143,17 +143,12 @@ static struct cardinfo cards[MM_MAXCARDS]; | |||
143 | static struct block_device_operations mm_fops; | 143 | static struct block_device_operations mm_fops; |
144 | static struct timer_list battery_timer; | 144 | static struct timer_list battery_timer; |
145 | 145 | ||
146 | static int num_cards = 0; | 146 | static int num_cards; |
147 | 147 | ||
148 | static struct gendisk *mm_gendisk[MM_MAXCARDS]; | 148 | static struct gendisk *mm_gendisk[MM_MAXCARDS]; |
149 | 149 | ||
150 | static void check_batteries(struct cardinfo *card); | 150 | static void check_batteries(struct cardinfo *card); |
151 | 151 | ||
152 | /* | ||
153 | ----------------------------------------------------------------------------------- | ||
154 | -- get_userbit | ||
155 | ----------------------------------------------------------------------------------- | ||
156 | */ | ||
157 | static int get_userbit(struct cardinfo *card, int bit) | 152 | static int get_userbit(struct cardinfo *card, int bit) |
158 | { | 153 | { |
159 | unsigned char led; | 154 | unsigned char led; |
@@ -161,11 +156,7 @@ static int get_userbit(struct cardinfo *card, int bit) | |||
161 | led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL); | 156 | led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL); |
162 | return led & bit; | 157 | return led & bit; |
163 | } | 158 | } |
164 | /* | 159 | |
165 | ----------------------------------------------------------------------------------- | ||
166 | -- set_userbit | ||
167 | ----------------------------------------------------------------------------------- | ||
168 | */ | ||
169 | static int set_userbit(struct cardinfo *card, int bit, unsigned char state) | 160 | static int set_userbit(struct cardinfo *card, int bit, unsigned char state) |
170 | { | 161 | { |
171 | unsigned char led; | 162 | unsigned char led; |
@@ -179,11 +170,7 @@ static int set_userbit(struct cardinfo *card, int bit, unsigned char state) | |||
179 | 170 | ||
180 | return 0; | 171 | return 0; |
181 | } | 172 | } |
182 | /* | 173 | |
183 | ----------------------------------------------------------------------------------- | ||
184 | -- set_led | ||
185 | ----------------------------------------------------------------------------------- | ||
186 | */ | ||
187 | /* | 174 | /* |
188 | * NOTE: For the power LED, use the LED_POWER_* macros since they differ | 175 | * NOTE: For the power LED, use the LED_POWER_* macros since they differ |
189 | */ | 176 | */ |
@@ -203,11 +190,6 @@ static void set_led(struct cardinfo *card, int shift, unsigned char state) | |||
203 | } | 190 | } |
204 | 191 | ||
205 | #ifdef MM_DIAG | 192 | #ifdef MM_DIAG |
206 | /* | ||
207 | ----------------------------------------------------------------------------------- | ||
208 | -- dump_regs | ||
209 | ----------------------------------------------------------------------------------- | ||
210 | */ | ||
211 | static void dump_regs(struct cardinfo *card) | 193 | static void dump_regs(struct cardinfo *card) |
212 | { | 194 | { |
213 | unsigned char *p; | 195 | unsigned char *p; |
@@ -224,32 +206,28 @@ static void dump_regs(struct cardinfo *card) | |||
224 | } | 206 | } |
225 | } | 207 | } |
226 | #endif | 208 | #endif |
227 | /* | 209 | |
228 | ----------------------------------------------------------------------------------- | ||
229 | -- dump_dmastat | ||
230 | ----------------------------------------------------------------------------------- | ||
231 | */ | ||
232 | static void dump_dmastat(struct cardinfo *card, unsigned int dmastat) | 210 | static void dump_dmastat(struct cardinfo *card, unsigned int dmastat) |
233 | { | 211 | { |
234 | dev_printk(KERN_DEBUG, &card->dev->dev, "DMAstat - "); | 212 | dev_printk(KERN_DEBUG, &card->dev->dev, "DMAstat - "); |
235 | if (dmastat & DMASCR_ANY_ERR) | 213 | if (dmastat & DMASCR_ANY_ERR) |
236 | printk("ANY_ERR "); | 214 | printk(KERN_CONT "ANY_ERR "); |
237 | if (dmastat & DMASCR_MBE_ERR) | 215 | if (dmastat & DMASCR_MBE_ERR) |
238 | printk("MBE_ERR "); | 216 | printk(KERN_CONT "MBE_ERR "); |
239 | if (dmastat & DMASCR_PARITY_ERR_REP) | 217 | if (dmastat & DMASCR_PARITY_ERR_REP) |
240 | printk("PARITY_ERR_REP "); | 218 | printk(KERN_CONT "PARITY_ERR_REP "); |
241 | if (dmastat & DMASCR_PARITY_ERR_DET) | 219 | if (dmastat & DMASCR_PARITY_ERR_DET) |
242 | printk("PARITY_ERR_DET "); | 220 | printk(KERN_CONT "PARITY_ERR_DET "); |
243 | if (dmastat & DMASCR_SYSTEM_ERR_SIG) | 221 | if (dmastat & DMASCR_SYSTEM_ERR_SIG) |
244 | printk("SYSTEM_ERR_SIG "); | 222 | printk(KERN_CONT "SYSTEM_ERR_SIG "); |
245 | if (dmastat & DMASCR_TARGET_ABT) | 223 | if (dmastat & DMASCR_TARGET_ABT) |
246 | printk("TARGET_ABT "); | 224 | printk(KERN_CONT "TARGET_ABT "); |
247 | if (dmastat & DMASCR_MASTER_ABT) | 225 | if (dmastat & DMASCR_MASTER_ABT) |
248 | printk("MASTER_ABT "); | 226 | printk(KERN_CONT "MASTER_ABT "); |
249 | if (dmastat & DMASCR_CHAIN_COMPLETE) | 227 | if (dmastat & DMASCR_CHAIN_COMPLETE) |
250 | printk("CHAIN_COMPLETE "); | 228 | printk(KERN_CONT "CHAIN_COMPLETE "); |
251 | if (dmastat & DMASCR_DMA_COMPLETE) | 229 | if (dmastat & DMASCR_DMA_COMPLETE) |
252 | printk("DMA_COMPLETE "); | 230 | printk(KERN_CONT "DMA_COMPLETE "); |
253 | printk("\n"); | 231 | printk("\n"); |
254 | } | 232 | } |
255 | 233 | ||
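Note: dump_dmastat() builds one logical line out of several printk() calls, so every continuation fragment now carries KERN_CONT rather than an implicit, level-less continuation. A minimal sketch of the idiom with made-up flag bits:

#include <linux/kernel.h>

static void print_status_flags(unsigned int status)
{
	printk(KERN_DEBUG "DMAstat -");
	if (status & 0x1)
		printk(KERN_CONT " ANY_ERR");
	if (status & 0x2)
		printk(KERN_CONT " MBE_ERR");
	printk(KERN_CONT "\n");
}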
@@ -286,7 +264,8 @@ static void mm_start_io(struct cardinfo *card) | |||
286 | 264 | ||
287 | /* make the last descriptor end the chain */ | 265 | /* make the last descriptor end the chain */ |
288 | page = &card->mm_pages[card->Active]; | 266 | page = &card->mm_pages[card->Active]; |
289 | pr_debug("start_io: %d %d->%d\n", card->Active, page->headcnt, page->cnt-1); | 267 | pr_debug("start_io: %d %d->%d\n", |
268 | card->Active, page->headcnt, page->cnt - 1); | ||
290 | desc = &page->desc[page->cnt-1]; | 269 | desc = &page->desc[page->cnt-1]; |
291 | 270 | ||
292 | desc->control_bits |= cpu_to_le32(DMASCR_CHAIN_COMP_EN); | 271 | desc->control_bits |= cpu_to_le32(DMASCR_CHAIN_COMP_EN); |
@@ -310,8 +289,8 @@ static void mm_start_io(struct cardinfo *card) | |||
310 | writel(0, card->csr_remap + DMA_SEMAPHORE_ADDR); | 289 | writel(0, card->csr_remap + DMA_SEMAPHORE_ADDR); |
311 | writel(0, card->csr_remap + DMA_SEMAPHORE_ADDR + 4); | 290 | writel(0, card->csr_remap + DMA_SEMAPHORE_ADDR + 4); |
312 | 291 | ||
313 | offset = ((char*)desc) - ((char*)page->desc); | 292 | offset = ((char *)desc) - ((char *)page->desc); |
314 | writel(cpu_to_le32((page->page_dma+offset)&0xffffffff), | 293 | writel(cpu_to_le32((page->page_dma+offset) & 0xffffffff), |
315 | card->csr_remap + DMA_DESCRIPTOR_ADDR); | 294 | card->csr_remap + DMA_DESCRIPTOR_ADDR); |
316 | /* Force the value to u64 before shifting otherwise >> 32 is undefined C | 295 | /* Force the value to u64 before shifting otherwise >> 32 is undefined C |
317 | * and on some ports will do nothing ! */ | 296 | * and on some ports will do nothing ! */ |
@@ -352,7 +331,7 @@ static inline void reset_page(struct mm_page *page) | |||
352 | page->cnt = 0; | 331 | page->cnt = 0; |
353 | page->headcnt = 0; | 332 | page->headcnt = 0; |
354 | page->bio = NULL; | 333 | page->bio = NULL; |
355 | page->biotail = & page->bio; | 334 | page->biotail = &page->bio; |
356 | } | 335 | } |
357 | 336 | ||
358 | static void mm_unplug_device(struct request_queue *q) | 337 | static void mm_unplug_device(struct request_queue *q) |
@@ -408,7 +387,7 @@ static int add_bio(struct cardinfo *card) | |||
408 | vec->bv_page, | 387 | vec->bv_page, |
409 | vec->bv_offset, | 388 | vec->bv_offset, |
410 | len, | 389 | len, |
411 | (rw==READ) ? | 390 | (rw == READ) ? |
412 | PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); | 391 | PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); |
413 | 392 | ||
414 | p = &card->mm_pages[card->Ready]; | 393 | p = &card->mm_pages[card->Ready]; |
@@ -427,10 +406,10 @@ static int add_bio(struct cardinfo *card) | |||
427 | desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle); | 406 | desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle); |
428 | desc->local_addr = cpu_to_le64(card->current_sector << 9); | 407 | desc->local_addr = cpu_to_le64(card->current_sector << 9); |
429 | desc->transfer_size = cpu_to_le32(len); | 408 | desc->transfer_size = cpu_to_le32(len); |
430 | offset = ( ((char*)&desc->sem_control_bits) - ((char*)p->desc)); | 409 | offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc)); |
431 | desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset)); | 410 | desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset)); |
432 | desc->zero1 = desc->zero2 = 0; | 411 | desc->zero1 = desc->zero2 = 0; |
433 | offset = ( ((char*)(desc+1)) - ((char*)p->desc)); | 412 | offset = (((char *)(desc+1)) - ((char *)p->desc)); |
434 | desc->next_desc_addr = cpu_to_le64(p->page_dma+offset); | 413 | desc->next_desc_addr = cpu_to_le64(p->page_dma+offset); |
435 | desc->control_bits = cpu_to_le32(DMASCR_GO|DMASCR_ERR_INT_EN| | 414 | desc->control_bits = cpu_to_le32(DMASCR_GO|DMASCR_ERR_INT_EN| |
436 | DMASCR_PARITY_INT_EN| | 415 | DMASCR_PARITY_INT_EN| |
@@ -455,11 +434,11 @@ static void process_page(unsigned long data) | |||
455 | /* check if any of the requests in the page are DMA_COMPLETE, | 434 | /* check if any of the requests in the page are DMA_COMPLETE, |
456 | * and deal with them appropriately. | 435 | * and deal with them appropriately. |
457 | * If we find a descriptor without DMA_COMPLETE in the semaphore, then | 436 | * If we find a descriptor without DMA_COMPLETE in the semaphore, then |
458 | * dma must have hit an error on that descriptor, so use dma_status instead | 437 | * dma must have hit an error on that descriptor, so use dma_status |
459 | * and assume that all following descriptors must be re-tried. | 438 | * instead and assume that all following descriptors must be re-tried. |
460 | */ | 439 | */ |
461 | struct mm_page *page; | 440 | struct mm_page *page; |
462 | struct bio *return_bio=NULL; | 441 | struct bio *return_bio = NULL; |
463 | struct cardinfo *card = (struct cardinfo *)data; | 442 | struct cardinfo *card = (struct cardinfo *)data; |
464 | unsigned int dma_status = card->dma_status; | 443 | unsigned int dma_status = card->dma_status; |
465 | 444 | ||
@@ -472,12 +451,12 @@ static void process_page(unsigned long data) | |||
472 | struct bio *bio = page->bio; | 451 | struct bio *bio = page->bio; |
473 | struct mm_dma_desc *desc = &page->desc[page->headcnt]; | 452 | struct mm_dma_desc *desc = &page->desc[page->headcnt]; |
474 | int control = le32_to_cpu(desc->sem_control_bits); | 453 | int control = le32_to_cpu(desc->sem_control_bits); |
475 | int last=0; | 454 | int last = 0; |
476 | int idx; | 455 | int idx; |
477 | 456 | ||
478 | if (!(control & DMASCR_DMA_COMPLETE)) { | 457 | if (!(control & DMASCR_DMA_COMPLETE)) { |
479 | control = dma_status; | 458 | control = dma_status; |
480 | last=1; | 459 | last = 1; |
481 | } | 460 | } |
482 | page->headcnt++; | 461 | page->headcnt++; |
483 | idx = page->idx; | 462 | idx = page->idx; |
@@ -489,8 +468,8 @@ static void process_page(unsigned long data) | |||
489 | } | 468 | } |
490 | 469 | ||
491 | pci_unmap_page(card->dev, desc->data_dma_handle, | 470 | pci_unmap_page(card->dev, desc->data_dma_handle, |
492 | bio_iovec_idx(bio,idx)->bv_len, | 471 | bio_iovec_idx(bio, idx)->bv_len, |
493 | (control& DMASCR_TRANSFER_READ) ? | 472 | (control & DMASCR_TRANSFER_READ) ? |
494 | PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); | 473 | PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); |
495 | if (control & DMASCR_HARD_ERROR) { | 474 | if (control & DMASCR_HARD_ERROR) { |
496 | /* error */ | 475 | /* error */ |
@@ -501,9 +480,10 @@ static void process_page(unsigned long data) | |||
501 | le32_to_cpu(desc->transfer_size)); | 480 | le32_to_cpu(desc->transfer_size)); |
502 | dump_dmastat(card, control); | 481 | dump_dmastat(card, control); |
503 | } else if (test_bit(BIO_RW, &bio->bi_rw) && | 482 | } else if (test_bit(BIO_RW, &bio->bi_rw) && |
504 | le32_to_cpu(desc->local_addr)>>9 == card->init_size) { | 483 | le32_to_cpu(desc->local_addr) >> 9 == |
505 | card->init_size += le32_to_cpu(desc->transfer_size)>>9; | 484 | card->init_size) { |
506 | if (card->init_size>>1 >= card->mm_size) { | 485 | card->init_size += le32_to_cpu(desc->transfer_size) >> 9; |
486 | if (card->init_size >> 1 >= card->mm_size) { | ||
507 | dev_printk(KERN_INFO, &card->dev->dev, | 487 | dev_printk(KERN_INFO, &card->dev->dev, |
508 | "memory now initialised\n"); | 488 | "memory now initialised\n"); |
509 | set_userbit(card, MEMORY_INITIALIZED, 1); | 489 | set_userbit(card, MEMORY_INITIALIZED, 1); |
@@ -514,7 +494,8 @@ static void process_page(unsigned long data) | |||
514 | return_bio = bio; | 494 | return_bio = bio; |
515 | } | 495 | } |
516 | 496 | ||
517 | if (last) break; | 497 | if (last) |
498 | break; | ||
518 | } | 499 | } |
519 | 500 | ||
520 | if (debug & DEBUG_LED_ON_TRANSFER) | 501 | if (debug & DEBUG_LED_ON_TRANSFER) |
@@ -536,7 +517,7 @@ static void process_page(unsigned long data) | |||
536 | out_unlock: | 517 | out_unlock: |
537 | spin_unlock_bh(&card->lock); | 518 | spin_unlock_bh(&card->lock); |
538 | 519 | ||
539 | while(return_bio) { | 520 | while (return_bio) { |
540 | struct bio *bio = return_bio; | 521 | struct bio *bio = return_bio; |
541 | 522 | ||
542 | return_bio = bio->bi_next; | 523 | return_bio = bio->bi_next; |
@@ -545,11 +526,6 @@ static void process_page(unsigned long data) | |||
545 | } | 526 | } |
546 | } | 527 | } |
547 | 528 | ||
548 | /* | ||
549 | ----------------------------------------------------------------------------------- | ||
550 | -- mm_make_request | ||
551 | ----------------------------------------------------------------------------------- | ||
552 | */ | ||
553 | static int mm_make_request(struct request_queue *q, struct bio *bio) | 529 | static int mm_make_request(struct request_queue *q, struct bio *bio) |
554 | { | 530 | { |
555 | struct cardinfo *card = q->queuedata; | 531 | struct cardinfo *card = q->queuedata; |
@@ -566,11 +542,6 @@ static int mm_make_request(struct request_queue *q, struct bio *bio) | |||
566 | return 0; | 542 | return 0; |
567 | } | 543 | } |
568 | 544 | ||
569 | /* | ||
570 | ----------------------------------------------------------------------------------- | ||
571 | -- mm_interrupt | ||
572 | ----------------------------------------------------------------------------------- | ||
573 | */ | ||
574 | static irqreturn_t mm_interrupt(int irq, void *__card) | 545 | static irqreturn_t mm_interrupt(int irq, void *__card) |
575 | { | 546 | { |
576 | struct cardinfo *card = (struct cardinfo *) __card; | 547 | struct cardinfo *card = (struct cardinfo *) __card; |
@@ -584,15 +555,15 @@ HW_TRACE(0x30); | |||
584 | if (!(dma_status & (DMASCR_ERROR_MASK | DMASCR_CHAIN_COMPLETE))) { | 555 | if (!(dma_status & (DMASCR_ERROR_MASK | DMASCR_CHAIN_COMPLETE))) { |
585 | /* interrupt wasn't for me ... */ | 556 | /* interrupt wasn't for me ... */ |
586 | return IRQ_NONE; | 557 | return IRQ_NONE; |
587 | } | 558 | } |
588 | 559 | ||
589 | /* clear COMPLETION interrupts */ | 560 | /* clear COMPLETION interrupts */ |
590 | if (card->flags & UM_FLAG_NO_BYTE_STATUS) | 561 | if (card->flags & UM_FLAG_NO_BYTE_STATUS) |
591 | writel(cpu_to_le32(DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE), | 562 | writel(cpu_to_le32(DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE), |
592 | card->csr_remap+ DMA_STATUS_CTRL); | 563 | card->csr_remap + DMA_STATUS_CTRL); |
593 | else | 564 | else |
594 | writeb((DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE) >> 16, | 565 | writeb((DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE) >> 16, |
595 | card->csr_remap+ DMA_STATUS_CTRL + 2); | 566 | card->csr_remap + DMA_STATUS_CTRL + 2); |
596 | 567 | ||
597 | /* log errors and clear interrupt status */ | 568 | /* log errors and clear interrupt status */ |
598 | if (dma_status & DMASCR_ANY_ERR) { | 569 | if (dma_status & DMASCR_ANY_ERR) { |
@@ -602,9 +573,12 @@ HW_TRACE(0x30); | |||
602 | 573 | ||
603 | stat = readb(card->csr_remap + MEMCTRLCMD_ERRSTATUS); | 574 | stat = readb(card->csr_remap + MEMCTRLCMD_ERRSTATUS); |
604 | 575 | ||
605 | data_log1 = le32_to_cpu(readl(card->csr_remap + ERROR_DATA_LOG)); | 576 | data_log1 = le32_to_cpu(readl(card->csr_remap + |
606 | data_log2 = le32_to_cpu(readl(card->csr_remap + ERROR_DATA_LOG + 4)); | 577 | ERROR_DATA_LOG)); |
607 | addr_log1 = le32_to_cpu(readl(card->csr_remap + ERROR_ADDR_LOG)); | 578 | data_log2 = le32_to_cpu(readl(card->csr_remap + |
579 | ERROR_DATA_LOG + 4)); | ||
580 | addr_log1 = le32_to_cpu(readl(card->csr_remap + | ||
581 | ERROR_ADDR_LOG)); | ||
608 | addr_log2 = readb(card->csr_remap + ERROR_ADDR_LOG + 4); | 582 | addr_log2 = readb(card->csr_remap + ERROR_ADDR_LOG + 4); |
609 | 583 | ||
610 | count = readb(card->csr_remap + ERROR_COUNT); | 584 | count = readb(card->csr_remap + ERROR_COUNT); |
@@ -671,11 +645,7 @@ HW_TRACE(0x36); | |||
671 | 645 | ||
672 | return IRQ_HANDLED; | 646 | return IRQ_HANDLED; |
673 | } | 647 | } |
674 | /* | 648 | |
675 | ----------------------------------------------------------------------------------- | ||
676 | -- set_fault_to_battery_status | ||
677 | ----------------------------------------------------------------------------------- | ||
678 | */ | ||
679 | /* | 649 | /* |
680 | * If both batteries are good, no LED | 650 | * If both batteries are good, no LED |
681 | * If either battery has been warned, solid LED | 651 | * If either battery has been warned, solid LED |
@@ -696,12 +666,6 @@ static void set_fault_to_battery_status(struct cardinfo *card) | |||
696 | 666 | ||
697 | static void init_battery_timer(void); | 667 | static void init_battery_timer(void); |
698 | 668 | ||
699 | |||
700 | /* | ||
701 | ----------------------------------------------------------------------------------- | ||
702 | -- check_battery | ||
703 | ----------------------------------------------------------------------------------- | ||
704 | */ | ||
705 | static int check_battery(struct cardinfo *card, int battery, int status) | 669 | static int check_battery(struct cardinfo *card, int battery, int status) |
706 | { | 670 | { |
707 | if (status != card->battery[battery].good) { | 671 | if (status != card->battery[battery].good) { |
@@ -730,11 +694,7 @@ static int check_battery(struct cardinfo *card, int battery, int status) | |||
730 | 694 | ||
731 | return 0; | 695 | return 0; |
732 | } | 696 | } |
733 | /* | 697 | |
734 | ----------------------------------------------------------------------------------- | ||
735 | -- check_batteries | ||
736 | ----------------------------------------------------------------------------------- | ||
737 | */ | ||
738 | static void check_batteries(struct cardinfo *card) | 698 | static void check_batteries(struct cardinfo *card) |
739 | { | 699 | { |
740 | /* NOTE: this must *never* be called while the card | 700 | /* NOTE: this must *never* be called while the card |
@@ -775,11 +735,7 @@ static void check_all_batteries(unsigned long ptr) | |||
775 | 735 | ||
776 | init_battery_timer(); | 736 | init_battery_timer(); |
777 | } | 737 | } |
778 | /* | 738 | |
779 | ----------------------------------------------------------------------------------- | ||
780 | -- init_battery_timer | ||
781 | ----------------------------------------------------------------------------------- | ||
782 | */ | ||
783 | static void init_battery_timer(void) | 739 | static void init_battery_timer(void) |
784 | { | 740 | { |
785 | init_timer(&battery_timer); | 741 | init_timer(&battery_timer); |
@@ -787,20 +743,12 @@ static void init_battery_timer(void) | |||
787 | battery_timer.expires = jiffies + (HZ * 60); | 743 | battery_timer.expires = jiffies + (HZ * 60); |
788 | add_timer(&battery_timer); | 744 | add_timer(&battery_timer); |
789 | } | 745 | } |
790 | /* | 746 | |
791 | ----------------------------------------------------------------------------------- | ||
792 | -- del_battery_timer | ||
793 | ----------------------------------------------------------------------------------- | ||
794 | */ | ||
795 | static void del_battery_timer(void) | 747 | static void del_battery_timer(void) |
796 | { | 748 | { |
797 | del_timer(&battery_timer); | 749 | del_timer(&battery_timer); |
798 | } | 750 | } |
799 | /* | 751 | |
800 | ----------------------------------------------------------------------------------- | ||
801 | -- mm_revalidate | ||
802 | ----------------------------------------------------------------------------------- | ||
803 | */ | ||
804 | /* | 752 | /* |
805 | * Note no locks taken out here. In a worst case scenario, we could drop | 753 | * Note no locks taken out here. In a worst case scenario, we could drop |
806 | * a chunk of system memory. But that should never happen, since validation | 754 | * a chunk of system memory. But that should never happen, since validation |
@@ -833,33 +781,23 @@ static int mm_getgeo(struct block_device *bdev, struct hd_geometry *geo) | |||
833 | } | 781 | } |
834 | 782 | ||
835 | /* | 783 | /* |
836 | ----------------------------------------------------------------------------------- | 784 | * Future support for removable devices |
837 | -- mm_check_change | 785 | */ |
838 | ----------------------------------------------------------------------------------- | ||
839 | Future support for removable devices | ||
840 | */ | ||
841 | static int mm_check_change(struct gendisk *disk) | 786 | static int mm_check_change(struct gendisk *disk) |
842 | { | 787 | { |
843 | /* struct cardinfo *dev = disk->private_data; */ | 788 | /* struct cardinfo *dev = disk->private_data; */ |
844 | return 0; | 789 | return 0; |
845 | } | 790 | } |
846 | /* | 791 | |
847 | ----------------------------------------------------------------------------------- | ||
848 | -- mm_fops | ||
849 | ----------------------------------------------------------------------------------- | ||
850 | */ | ||
851 | static struct block_device_operations mm_fops = { | 792 | static struct block_device_operations mm_fops = { |
852 | .owner = THIS_MODULE, | 793 | .owner = THIS_MODULE, |
853 | .getgeo = mm_getgeo, | 794 | .getgeo = mm_getgeo, |
854 | .revalidate_disk= mm_revalidate, | 795 | .revalidate_disk = mm_revalidate, |
855 | .media_changed = mm_check_change, | 796 | .media_changed = mm_check_change, |
856 | }; | 797 | }; |
857 | /* | 798 | |
858 | ----------------------------------------------------------------------------------- | 799 | static int __devinit mm_pci_probe(struct pci_dev *dev, |
859 | -- mm_pci_probe | 800 | const struct pci_device_id *id) |
860 | ----------------------------------------------------------------------------------- | ||
861 | */ | ||
862 | static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) | ||
863 | { | 801 | { |
864 | int ret = -ENODEV; | 802 | int ret = -ENODEV; |
865 | struct cardinfo *card = &cards[num_cards]; | 803 | struct cardinfo *card = &cards[num_cards]; |
@@ -889,7 +827,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i | |||
889 | return -ENODEV; | 827 | return -ENODEV; |
890 | 828 | ||
891 | dev_printk(KERN_INFO, &dev->dev, | 829 | dev_printk(KERN_INFO, &dev->dev, |
892 | "Micro Memory(tm) controller found (PCI Mem Module (Battery Backup))\n"); | 830 | "Micro Memory(tm) controller found (PCI Mem Module (Battery Backup))\n"); |
893 | 831 | ||
894 | if (pci_set_dma_mask(dev, DMA_64BIT_MASK) && | 832 | if (pci_set_dma_mask(dev, DMA_64BIT_MASK) && |
895 | pci_set_dma_mask(dev, DMA_32BIT_MASK)) { | 833 | pci_set_dma_mask(dev, DMA_32BIT_MASK)) { |
@@ -917,7 +855,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i | |||
917 | "CSR 0x%08lx -> 0x%p (0x%lx)\n", | 855 | "CSR 0x%08lx -> 0x%p (0x%lx)\n", |
918 | csr_base, card->csr_remap, csr_len); | 856 | csr_base, card->csr_remap, csr_len); |
919 | 857 | ||
920 | switch(card->dev->device) { | 858 | switch (card->dev->device) { |
921 | case 0x5415: | 859 | case 0x5415: |
922 | card->flags |= UM_FLAG_NO_BYTE_STATUS | UM_FLAG_NO_BATTREG; | 860 | card->flags |= UM_FLAG_NO_BYTE_STATUS | UM_FLAG_NO_BATTREG; |
923 | magic_number = 0x59; | 861 | magic_number = 0x59; |
@@ -929,7 +867,8 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i | |||
929 | break; | 867 | break; |
930 | 868 | ||
931 | case 0x6155: | 869 | case 0x6155: |
932 | card->flags |= UM_FLAG_NO_BYTE_STATUS | UM_FLAG_NO_BATTREG | UM_FLAG_NO_BATT; | 870 | card->flags |= UM_FLAG_NO_BYTE_STATUS | |
871 | UM_FLAG_NO_BATTREG | UM_FLAG_NO_BATT; | ||
933 | magic_number = 0x99; | 872 | magic_number = 0x99; |
934 | break; | 873 | break; |
935 | 874 | ||
@@ -945,11 +884,11 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i | |||
945 | } | 884 | } |
946 | 885 | ||
947 | card->mm_pages[0].desc = pci_alloc_consistent(card->dev, | 886 | card->mm_pages[0].desc = pci_alloc_consistent(card->dev, |
948 | PAGE_SIZE*2, | 887 | PAGE_SIZE * 2, |
949 | &card->mm_pages[0].page_dma); | 888 | &card->mm_pages[0].page_dma); |
950 | card->mm_pages[1].desc = pci_alloc_consistent(card->dev, | 889 | card->mm_pages[1].desc = pci_alloc_consistent(card->dev, |
951 | PAGE_SIZE*2, | 890 | PAGE_SIZE * 2, |
952 | &card->mm_pages[1].page_dma); | 891 | &card->mm_pages[1].page_dma); |
953 | if (card->mm_pages[0].desc == NULL || | 892 | if (card->mm_pages[0].desc == NULL || |
954 | card->mm_pages[1].desc == NULL) { | 893 | card->mm_pages[1].desc == NULL) { |
955 | dev_printk(KERN_ERR, &card->dev->dev, "alloc failed\n"); | 894 | dev_printk(KERN_ERR, &card->dev->dev, "alloc failed\n"); |
@@ -1013,9 +952,9 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i | |||
1013 | dev_printk(KERN_INFO, &card->dev->dev, | 952 | dev_printk(KERN_INFO, &card->dev->dev, |
1014 | "Size %d KB, Battery 1 %s (%s), Battery 2 %s (%s)\n", | 953 | "Size %d KB, Battery 1 %s (%s), Battery 2 %s (%s)\n", |
1015 | card->mm_size, | 954 | card->mm_size, |
1016 | (batt_status & BATTERY_1_DISABLED ? "Disabled" : "Enabled"), | 955 | batt_status & BATTERY_1_DISABLED ? "Disabled" : "Enabled", |
1017 | card->battery[0].good ? "OK" : "FAILURE", | 956 | card->battery[0].good ? "OK" : "FAILURE", |
1018 | (batt_status & BATTERY_2_DISABLED ? "Disabled" : "Enabled"), | 957 | batt_status & BATTERY_2_DISABLED ? "Disabled" : "Enabled", |
1019 | card->battery[1].good ? "OK" : "FAILURE"); | 958 | card->battery[1].good ? "OK" : "FAILURE"); |
1020 | 959 | ||
1021 | set_fault_to_battery_status(card); | 960 | set_fault_to_battery_status(card); |
@@ -1030,18 +969,18 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i | |||
1030 | data = ~data; | 969 | data = ~data; |
1031 | data += 1; | 970 | data += 1; |
1032 | 971 | ||
1033 | if (request_irq(dev->irq, mm_interrupt, IRQF_SHARED, DRIVER_NAME, card)) { | 972 | if (request_irq(dev->irq, mm_interrupt, IRQF_SHARED, DRIVER_NAME, |
973 | card)) { | ||
1034 | dev_printk(KERN_ERR, &card->dev->dev, | 974 | dev_printk(KERN_ERR, &card->dev->dev, |
1035 | "Unable to allocate IRQ\n"); | 975 | "Unable to allocate IRQ\n"); |
1036 | ret = -ENODEV; | 976 | ret = -ENODEV; |
1037 | |||
1038 | goto failed_req_irq; | 977 | goto failed_req_irq; |
1039 | } | 978 | } |
1040 | 979 | ||
1041 | dev_printk(KERN_INFO, &card->dev->dev, | 980 | dev_printk(KERN_INFO, &card->dev->dev, |
1042 | "Window size %d bytes, IRQ %d\n", data, dev->irq); | 981 | "Window size %d bytes, IRQ %d\n", data, dev->irq); |
1043 | 982 | ||
1044 | spin_lock_init(&card->lock); | 983 | spin_lock_init(&card->lock); |
1045 | 984 | ||
1046 | pci_set_drvdata(dev, card); | 985 | pci_set_drvdata(dev, card); |
1047 | 986 | ||
@@ -1060,7 +999,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i | |||
1060 | 999 | ||
1061 | if (!get_userbit(card, MEMORY_INITIALIZED)) { | 1000 | if (!get_userbit(card, MEMORY_INITIALIZED)) { |
1062 | dev_printk(KERN_INFO, &card->dev->dev, | 1001 | dev_printk(KERN_INFO, &card->dev->dev, |
1063 | "memory NOT initialized. Consider over-writing whole device.\n"); | 1002 | "memory NOT initialized. Consider over-writing whole device.\n"); |
1064 | card->init_size = 0; | 1003 | card->init_size = 0; |
1065 | } else { | 1004 | } else { |
1066 | dev_printk(KERN_INFO, &card->dev->dev, | 1005 | dev_printk(KERN_INFO, &card->dev->dev, |
@@ -1091,11 +1030,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i | |||
1091 | 1030 | ||
1092 | return ret; | 1031 | return ret; |
1093 | } | 1032 | } |
1094 | /* | 1033 | |
1095 | ----------------------------------------------------------------------------------- | ||
1096 | -- mm_pci_remove | ||
1097 | ----------------------------------------------------------------------------------- | ||
1098 | */ | ||
1099 | static void mm_pci_remove(struct pci_dev *dev) | 1034 | static void mm_pci_remove(struct pci_dev *dev) |
1100 | { | 1035 | { |
1101 | struct cardinfo *card = pci_get_drvdata(dev); | 1036 | struct cardinfo *card = pci_get_drvdata(dev); |
@@ -1119,16 +1054,16 @@ static void mm_pci_remove(struct pci_dev *dev) | |||
1119 | } | 1054 | } |
1120 | 1055 | ||
1121 | static const struct pci_device_id mm_pci_ids[] = { | 1056 | static const struct pci_device_id mm_pci_ids[] = { |
1122 | {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY,PCI_DEVICE_ID_MICRO_MEMORY_5415CN)}, | 1057 | {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY, PCI_DEVICE_ID_MICRO_MEMORY_5415CN)}, |
1123 | {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY,PCI_DEVICE_ID_MICRO_MEMORY_5425CN)}, | 1058 | {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY, PCI_DEVICE_ID_MICRO_MEMORY_5425CN)}, |
1124 | {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY,PCI_DEVICE_ID_MICRO_MEMORY_6155)}, | 1059 | {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY, PCI_DEVICE_ID_MICRO_MEMORY_6155)}, |
1125 | { | 1060 | { |
1126 | .vendor = 0x8086, | 1061 | .vendor = 0x8086, |
1127 | .device = 0xB555, | 1062 | .device = 0xB555, |
1128 | .subvendor= 0x1332, | 1063 | .subvendor = 0x1332, |
1129 | .subdevice= 0x5460, | 1064 | .subdevice = 0x5460, |
1130 | .class = 0x050000, | 1065 | .class = 0x050000, |
1131 | .class_mask= 0, | 1066 | .class_mask = 0, |
1132 | }, { /* end: all zeroes */ } | 1067 | }, { /* end: all zeroes */ } |
1133 | }; | 1068 | }; |
1134 | 1069 | ||
@@ -1141,12 +1076,6 @@ static struct pci_driver mm_pci_driver = { | |||
1141 | .remove = mm_pci_remove, | 1076 | .remove = mm_pci_remove, |
1142 | }; | 1077 | }; |
1143 | 1078 | ||
1144 | /* | ||
1145 | ----------------------------------------------------------------------------------- | ||
1146 | -- mm_init | ||
1147 | ----------------------------------------------------------------------------------- | ||
1148 | */ | ||
1149 | |||
1150 | static int __init mm_init(void) | 1079 | static int __init mm_init(void) |
1151 | { | 1080 | { |
1152 | int retval, i; | 1081 | int retval, i; |
@@ -1193,18 +1122,14 @@ out: | |||
1193 | put_disk(mm_gendisk[i]); | 1122 | put_disk(mm_gendisk[i]); |
1194 | return -ENOMEM; | 1123 | return -ENOMEM; |
1195 | } | 1124 | } |
1196 | /* | 1125 | |
1197 | ----------------------------------------------------------------------------------- | ||
1198 | -- mm_cleanup | ||
1199 | ----------------------------------------------------------------------------------- | ||
1200 | */ | ||
1201 | static void __exit mm_cleanup(void) | 1126 | static void __exit mm_cleanup(void) |
1202 | { | 1127 | { |
1203 | int i; | 1128 | int i; |
1204 | 1129 | ||
1205 | del_battery_timer(); | 1130 | del_battery_timer(); |
1206 | 1131 | ||
1207 | for (i=0; i < num_cards ; i++) { | 1132 | for (i = 0; i < num_cards ; i++) { |
1208 | del_gendisk(mm_gendisk[i]); | 1133 | del_gendisk(mm_gendisk[i]); |
1209 | put_disk(mm_gendisk[i]); | 1134 | put_disk(mm_gendisk[i]); |
1210 | } | 1135 | } |
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 9b6fbf044fd8..3fa7c77d9bd9 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig | |||
@@ -269,7 +269,7 @@ config DM_MULTIPATH_RDAC | |||
269 | 269 | ||
270 | config DM_MULTIPATH_HP | 270 | config DM_MULTIPATH_HP |
271 | tristate "HP MSA multipath support (EXPERIMENTAL)" | 271 | tristate "HP MSA multipath support (EXPERIMENTAL)" |
272 | depends on DM_MULTIPATH && BLK_DEV_DM && EXPERIMENTAL | 272 | depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL |
273 | ---help--- | 273 | ---help--- |
274 | Multipath support for HP MSA (Active/Passive) series hardware. | 274 | Multipath support for HP MSA (Active/Passive) series hardware. |
275 | 275 | ||
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 28c6ae095c56..6b66ee46b87d 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -398,7 +398,8 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) | |||
398 | struct bio *clone; | 398 | struct bio *clone; |
399 | unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; | 399 | unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
400 | gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; | 400 | gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; |
401 | unsigned int i; | 401 | unsigned i, len; |
402 | struct page *page; | ||
402 | 403 | ||
403 | clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs); | 404 | clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs); |
404 | if (!clone) | 405 | if (!clone) |
@@ -407,10 +408,8 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) | |||
407 | clone_init(io, clone); | 408 | clone_init(io, clone); |
408 | 409 | ||
409 | for (i = 0; i < nr_iovecs; i++) { | 410 | for (i = 0; i < nr_iovecs; i++) { |
410 | struct bio_vec *bv = bio_iovec_idx(clone, i); | 411 | page = mempool_alloc(cc->page_pool, gfp_mask); |
411 | 412 | if (!page) | |
412 | bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask); | ||
413 | if (!bv->bv_page) | ||
414 | break; | 413 | break; |
415 | 414 | ||
416 | /* | 415 | /* |
@@ -421,15 +420,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) | |||
421 | if (i == (MIN_BIO_PAGES - 1)) | 420 | if (i == (MIN_BIO_PAGES - 1)) |
422 | gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; | 421 | gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; |
423 | 422 | ||
424 | bv->bv_offset = 0; | 423 | len = (size > PAGE_SIZE) ? PAGE_SIZE : size; |
425 | if (size > PAGE_SIZE) | 424 | |
426 | bv->bv_len = PAGE_SIZE; | 425 | if (!bio_add_page(clone, page, len, 0)) { |
427 | else | 426 | mempool_free(page, cc->page_pool); |
428 | bv->bv_len = size; | 427 | break; |
428 | } | ||
429 | 429 | ||
430 | clone->bi_size += bv->bv_len; | 430 | size -= len; |
431 | clone->bi_vcnt++; | ||
432 | size -= bv->bv_len; | ||
433 | } | 431 | } |
434 | 432 | ||
435 | if (!clone->bi_size) { | 433 | if (!clone->bi_size) { |
@@ -511,6 +509,9 @@ static void crypt_endio(struct bio *clone, int error) | |||
511 | struct crypt_config *cc = io->target->private; | 509 | struct crypt_config *cc = io->target->private; |
512 | unsigned read_io = bio_data_dir(clone) == READ; | 510 | unsigned read_io = bio_data_dir(clone) == READ; |
513 | 511 | ||
512 | if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error)) | ||
513 | error = -EIO; | ||
514 | |||
514 | /* | 515 | /* |
515 | * free the processed pages | 516 | * free the processed pages |
516 | */ | 517 | */ |
@@ -519,10 +520,8 @@ static void crypt_endio(struct bio *clone, int error) | |||
519 | goto out; | 520 | goto out; |
520 | } | 521 | } |
521 | 522 | ||
522 | if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) { | 523 | if (unlikely(error)) |
523 | error = -EIO; | ||
524 | goto out; | 524 | goto out; |
525 | } | ||
526 | 525 | ||
527 | bio_put(clone); | 526 | bio_put(clone); |
528 | kcryptd_queue_crypt(io); | 527 | kcryptd_queue_crypt(io); |
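The crypt_alloc_buffer() change above stops poking bio_vec fields directly and lets bio_add_page() keep bi_vcnt and bi_size consistent. A hedged sketch of the resulting allocation loop, assuming a mempool-backed page allocator like dm-crypt's cc->page_pool:

	unsigned int i, len;
	struct page *page;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page)
			break;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		/* bio_add_page() returns 0 when the page cannot be appended
		 * (e.g. the bio is already full); on success it updates
		 * bi_vcnt and bi_size itself */
		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

The crypt_endio() hunks then translate a missing BIO_UPTODATE flag into -EIO up front, so the later tests only need to look at the error value.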
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 138200bf5e0b..9627fa0f9470 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c | |||
@@ -332,6 +332,8 @@ static int dm_hash_rename(const char *old, const char *new) | |||
332 | dm_table_put(table); | 332 | dm_table_put(table); |
333 | } | 333 | } |
334 | 334 | ||
335 | dm_kobject_uevent(hc->md); | ||
336 | |||
335 | dm_put(hc->md); | 337 | dm_put(hc->md); |
336 | up_write(&_hash_lock); | 338 | up_write(&_hash_lock); |
337 | kfree(old_name); | 339 | kfree(old_name); |
@@ -1250,21 +1252,17 @@ static int target_message(struct dm_ioctl *param, size_t param_size) | |||
1250 | if (!table) | 1252 | if (!table) |
1251 | goto out_argv; | 1253 | goto out_argv; |
1252 | 1254 | ||
1253 | if (tmsg->sector >= dm_table_get_size(table)) { | 1255 | ti = dm_table_find_target(table, tmsg->sector); |
1256 | if (!dm_target_is_valid(ti)) { | ||
1254 | DMWARN("Target message sector outside device."); | 1257 | DMWARN("Target message sector outside device."); |
1255 | r = -EINVAL; | 1258 | r = -EINVAL; |
1256 | goto out_table; | 1259 | } else if (ti->type->message) |
1257 | } | ||
1258 | |||
1259 | ti = dm_table_find_target(table, tmsg->sector); | ||
1260 | if (ti->type->message) | ||
1261 | r = ti->type->message(ti, argc, argv); | 1260 | r = ti->type->message(ti, argc, argv); |
1262 | else { | 1261 | else { |
1263 | DMWARN("Target type does not support messages"); | 1262 | DMWARN("Target type does not support messages"); |
1264 | r = -EINVAL; | 1263 | r = -EINVAL; |
1265 | } | 1264 | } |
1266 | 1265 | ||
1267 | out_table: | ||
1268 | dm_table_put(table); | 1266 | dm_table_put(table); |
1269 | out_argv: | 1267 | out_argv: |
1270 | kfree(argv); | 1268 | kfree(argv); |
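target_message() above no longer compares the sector against dm_table_get_size(); it looks the target up and checks the result, which also lets the out_table label go away. A sketch of the calling convention every dm_table_find_target() caller is now expected to follow:

	struct dm_target *ti;

	ti = dm_table_find_target(table, sector);
	if (!dm_target_is_valid(ti)) {
		/* sector lies beyond the last target */
		r = -EINVAL;
	} else if (ti->type->message)
		r = ti->type->message(ti, argc, argv);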
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index e298d8d11f24..47818d8249cb 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
@@ -99,6 +99,9 @@ static void combine_restrictions_low(struct io_restrictions *lhs, | |||
99 | lhs->max_segment_size = | 99 | lhs->max_segment_size = |
100 | min_not_zero(lhs->max_segment_size, rhs->max_segment_size); | 100 | min_not_zero(lhs->max_segment_size, rhs->max_segment_size); |
101 | 101 | ||
102 | lhs->max_hw_sectors = | ||
103 | min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors); | ||
104 | |||
102 | lhs->seg_boundary_mask = | 105 | lhs->seg_boundary_mask = |
103 | min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask); | 106 | min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask); |
104 | 107 | ||
@@ -189,8 +192,10 @@ static int alloc_targets(struct dm_table *t, unsigned int num) | |||
189 | 192 | ||
190 | /* | 193 | /* |
191 | * Allocate both the target array and offset array at once. | 194 | * Allocate both the target array and offset array at once. |
195 | * Append an empty entry to catch sectors beyond the end of | ||
196 | * the device. | ||
192 | */ | 197 | */ |
193 | n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) + | 198 | n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) + |
194 | sizeof(sector_t)); | 199 | sizeof(sector_t)); |
195 | if (!n_highs) | 200 | if (!n_highs) |
196 | return -ENOMEM; | 201 | return -ENOMEM; |
@@ -564,6 +569,9 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) | |||
564 | rs->max_segment_size = | 569 | rs->max_segment_size = |
565 | min_not_zero(rs->max_segment_size, q->max_segment_size); | 570 | min_not_zero(rs->max_segment_size, q->max_segment_size); |
566 | 571 | ||
572 | rs->max_hw_sectors = | ||
573 | min_not_zero(rs->max_hw_sectors, q->max_hw_sectors); | ||
574 | |||
567 | rs->seg_boundary_mask = | 575 | rs->seg_boundary_mask = |
568 | min_not_zero(rs->seg_boundary_mask, | 576 | min_not_zero(rs->seg_boundary_mask, |
569 | q->seg_boundary_mask); | 577 | q->seg_boundary_mask); |
@@ -701,6 +709,8 @@ static void check_for_valid_limits(struct io_restrictions *rs) | |||
701 | { | 709 | { |
702 | if (!rs->max_sectors) | 710 | if (!rs->max_sectors) |
703 | rs->max_sectors = SAFE_MAX_SECTORS; | 711 | rs->max_sectors = SAFE_MAX_SECTORS; |
712 | if (!rs->max_hw_sectors) | ||
713 | rs->max_hw_sectors = SAFE_MAX_SECTORS; | ||
704 | if (!rs->max_phys_segments) | 714 | if (!rs->max_phys_segments) |
705 | rs->max_phys_segments = MAX_PHYS_SEGMENTS; | 715 | rs->max_phys_segments = MAX_PHYS_SEGMENTS; |
706 | if (!rs->max_hw_segments) | 716 | if (!rs->max_hw_segments) |
@@ -867,6 +877,9 @@ struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index) | |||
867 | 877 | ||
868 | /* | 878 | /* |
869 | * Search the btree for the correct target. | 879 | * Search the btree for the correct target. |
880 | * | ||
881 | * Caller should check returned pointer with dm_target_is_valid() | ||
882 | * to trap I/O beyond end of device. | ||
870 | */ | 883 | */ |
871 | struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) | 884 | struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) |
872 | { | 885 | { |
@@ -896,6 +909,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q) | |||
896 | q->max_hw_segments = t->limits.max_hw_segments; | 909 | q->max_hw_segments = t->limits.max_hw_segments; |
897 | q->hardsect_size = t->limits.hardsect_size; | 910 | q->hardsect_size = t->limits.hardsect_size; |
898 | q->max_segment_size = t->limits.max_segment_size; | 911 | q->max_segment_size = t->limits.max_segment_size; |
912 | q->max_hw_sectors = t->limits.max_hw_sectors; | ||
899 | q->seg_boundary_mask = t->limits.seg_boundary_mask; | 913 | q->seg_boundary_mask = t->limits.seg_boundary_mask; |
900 | q->bounce_pfn = t->limits.bounce_pfn; | 914 | q->bounce_pfn = t->limits.bounce_pfn; |
901 | if (t->limits.no_cluster) | 915 | if (t->limits.no_cluster) |
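The dm-table.c changes thread the new max_hw_sectors restriction through the same places the other limits already pass: combine_restrictions_low(), dm_set_device_limits() and dm_table_set_restrictions(), with check_for_valid_limits() supplying a default. All of them rely on the min_not_zero() idiom, which treats zero as "no limit configured". A standalone sketch of that idiom (the kernel already defines it; this only restates the semantics):

	/* take the smaller of two limits, where 0 means "unset" */
	#define min_not_zero(l, r) \
		((l) == 0 ? (r) : ((r) == 0 ? (l) : min((l), (r))))

Combining an unset limit with a set one keeps the set one; combining two set limits keeps the stricter.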
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 07cbbb8eb3e0..88c0fd657825 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -672,13 +672,19 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector, | |||
672 | return clone; | 672 | return clone; |
673 | } | 673 | } |
674 | 674 | ||
675 | static void __clone_and_map(struct clone_info *ci) | 675 | static int __clone_and_map(struct clone_info *ci) |
676 | { | 676 | { |
677 | struct bio *clone, *bio = ci->bio; | 677 | struct bio *clone, *bio = ci->bio; |
678 | struct dm_target *ti = dm_table_find_target(ci->map, ci->sector); | 678 | struct dm_target *ti; |
679 | sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti); | 679 | sector_t len = 0, max; |
680 | struct dm_target_io *tio; | 680 | struct dm_target_io *tio; |
681 | 681 | ||
682 | ti = dm_table_find_target(ci->map, ci->sector); | ||
683 | if (!dm_target_is_valid(ti)) | ||
684 | return -EIO; | ||
685 | |||
686 | max = max_io_len(ci->md, ci->sector, ti); | ||
687 | |||
682 | /* | 688 | /* |
683 | * Allocate a target io object. | 689 | * Allocate a target io object. |
684 | */ | 690 | */ |
@@ -736,6 +742,9 @@ static void __clone_and_map(struct clone_info *ci) | |||
736 | do { | 742 | do { |
737 | if (offset) { | 743 | if (offset) { |
738 | ti = dm_table_find_target(ci->map, ci->sector); | 744 | ti = dm_table_find_target(ci->map, ci->sector); |
745 | if (!dm_target_is_valid(ti)) | ||
746 | return -EIO; | ||
747 | |||
739 | max = max_io_len(ci->md, ci->sector, ti); | 748 | max = max_io_len(ci->md, ci->sector, ti); |
740 | 749 | ||
741 | tio = alloc_tio(ci->md); | 750 | tio = alloc_tio(ci->md); |
@@ -759,6 +768,8 @@ static void __clone_and_map(struct clone_info *ci) | |||
759 | 768 | ||
760 | ci->idx++; | 769 | ci->idx++; |
761 | } | 770 | } |
771 | |||
772 | return 0; | ||
762 | } | 773 | } |
763 | 774 | ||
764 | /* | 775 | /* |
@@ -767,6 +778,7 @@ static void __clone_and_map(struct clone_info *ci) | |||
767 | static int __split_bio(struct mapped_device *md, struct bio *bio) | 778 | static int __split_bio(struct mapped_device *md, struct bio *bio) |
768 | { | 779 | { |
769 | struct clone_info ci; | 780 | struct clone_info ci; |
781 | int error = 0; | ||
770 | 782 | ||
771 | ci.map = dm_get_table(md); | 783 | ci.map = dm_get_table(md); |
772 | if (unlikely(!ci.map)) | 784 | if (unlikely(!ci.map)) |
@@ -784,11 +796,11 @@ static int __split_bio(struct mapped_device *md, struct bio *bio) | |||
784 | ci.idx = bio->bi_idx; | 796 | ci.idx = bio->bi_idx; |
785 | 797 | ||
786 | start_io_acct(ci.io); | 798 | start_io_acct(ci.io); |
787 | while (ci.sector_count) | 799 | while (ci.sector_count && !error) |
788 | __clone_and_map(&ci); | 800 | error = __clone_and_map(&ci); |
789 | 801 | ||
790 | /* drop the extra reference count */ | 802 | /* drop the extra reference count */ |
791 | dec_pending(ci.io, 0); | 803 | dec_pending(ci.io, error); |
792 | dm_table_put(ci.map); | 804 | dm_table_put(ci.map); |
793 | 805 | ||
794 | return 0; | 806 | return 0; |
@@ -1502,7 +1514,7 @@ int dm_resume(struct mapped_device *md) | |||
1502 | 1514 | ||
1503 | dm_table_unplug_all(map); | 1515 | dm_table_unplug_all(map); |
1504 | 1516 | ||
1505 | kobject_uevent(&md->disk->kobj, KOBJ_CHANGE); | 1517 | dm_kobject_uevent(md); |
1506 | 1518 | ||
1507 | r = 0; | 1519 | r = 0; |
1508 | 1520 | ||
@@ -1516,6 +1528,11 @@ out: | |||
1516 | /*----------------------------------------------------------------- | 1528 | /*----------------------------------------------------------------- |
1517 | * Event notification. | 1529 | * Event notification. |
1518 | *---------------------------------------------------------------*/ | 1530 | *---------------------------------------------------------------*/ |
1531 | void dm_kobject_uevent(struct mapped_device *md) | ||
1532 | { | ||
1533 | kobject_uevent(&md->disk->kobj, KOBJ_CHANGE); | ||
1534 | } | ||
1535 | |||
1519 | uint32_t dm_next_uevent_seq(struct mapped_device *md) | 1536 | uint32_t dm_next_uevent_seq(struct mapped_device *md) |
1520 | { | 1537 | { |
1521 | return atomic_add_return(1, &md->uevent_seq); | 1538 | return atomic_add_return(1, &md->uevent_seq); |
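Two independent things happen in dm.c above: __split_bio() learns to stop on the first error returned by __clone_and_map(), and the KOBJ_CHANGE uevent moves behind a dm_kobject_uevent() helper so the dm-ioctl rename path can reuse it. The error-propagation half, sketched with the names the patch introduces:

	static int __split_bio(struct mapped_device *md, struct bio *bio)
	{
		struct clone_info ci;
		int error = 0;

		/* ... set up ci as before ... */

		while (ci.sector_count && !error)
			error = __clone_and_map(&ci);	/* -EIO past end of device */

		/* dec_pending() records the error and completes the original
		 * bio with it once every outstanding clone has finished */
		dec_pending(ci.io, error);
		dm_table_put(ci.map);
		return 0;
	}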
diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 4b3faa45277e..b4584a39383b 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h | |||
@@ -112,6 +112,11 @@ int dm_table_resume_targets(struct dm_table *t); | |||
112 | int dm_table_any_congested(struct dm_table *t, int bdi_bits); | 112 | int dm_table_any_congested(struct dm_table *t, int bdi_bits); |
113 | void dm_table_unplug_all(struct dm_table *t); | 113 | void dm_table_unplug_all(struct dm_table *t); |
114 | 114 | ||
115 | /* | ||
116 | * To check the return value from dm_table_find_target(). | ||
117 | */ | ||
118 | #define dm_target_is_valid(t) ((t)->table) | ||
119 | |||
115 | /*----------------------------------------------------------------- | 120 | /*----------------------------------------------------------------- |
116 | * A registry of target types. | 121 | * A registry of target types. |
117 | *---------------------------------------------------------------*/ | 122 | *---------------------------------------------------------------*/ |
@@ -182,4 +187,6 @@ union map_info *dm_get_mapinfo(struct bio *bio); | |||
182 | int dm_open_count(struct mapped_device *md); | 187 | int dm_open_count(struct mapped_device *md); |
183 | int dm_lock_for_deletion(struct mapped_device *md); | 188 | int dm_lock_for_deletion(struct mapped_device *md); |
184 | 189 | ||
190 | void dm_kobject_uevent(struct mapped_device *md); | ||
191 | |||
185 | #endif | 192 | #endif |
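Why the macro is just ((t)->table): alloc_targets() in dm-table.c now allocates num + 1 entries and the extra one stays zeroed, so a lookup for a sector beyond the end of the device lands on that sentinel, whose ->table back-pointer is NULL. A hedged illustration of a caller:

	struct dm_target *ti = dm_table_find_target(map, sector);

	if (!dm_target_is_valid(ti))	/* ti->table == NULL: past the end */
		return -EIO;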
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index ff59d2e0475b..785bbdcf4a58 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
@@ -7,6 +7,10 @@ | |||
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation; either version 2 of the License, or (at | 8 | * the Free Software Foundation; either version 2 of the License, or (at |
9 | * your option) any later version. | 9 | * your option) any later version. |
10 | * | ||
11 | * Thanks to the following companies for their support: | ||
12 | * | ||
13 | * - JMicron (hardware and technical support) | ||
10 | */ | 14 | */ |
11 | 15 | ||
12 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
@@ -26,13 +30,29 @@ | |||
26 | 30 | ||
27 | static unsigned int debug_quirks = 0; | 31 | static unsigned int debug_quirks = 0; |
28 | 32 | ||
33 | /* | ||
34 | * Different quirks to handle when the hardware deviates from a strict | ||
35 | * interpretation of the SDHCI specification. | ||
36 | */ | ||
37 | |||
38 | /* Controller doesn't honor resets unless we touch the clock register */ | ||
29 | #define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0) | 39 | #define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0) |
40 | /* Controller has bad caps bits, but really supports DMA */ | ||
30 | #define SDHCI_QUIRK_FORCE_DMA (1<<1) | 41 | #define SDHCI_QUIRK_FORCE_DMA (1<<1) |
31 | /* Controller doesn't like some resets when there is no card inserted. */ | 42 | /* Controller doesn't like some resets when there is no card inserted. */ |
32 | #define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2) | 43 | #define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2) |
44 | /* Controller doesn't like clearing the power reg before a change */ | ||
33 | #define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3) | 45 | #define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3) |
46 | /* Controller has flaky internal state so reset it on each ios change */ | ||
34 | #define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4) | 47 | #define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4) |
48 | /* Controller has an unusable DMA engine */ | ||
35 | #define SDHCI_QUIRK_BROKEN_DMA (1<<5) | 49 | #define SDHCI_QUIRK_BROKEN_DMA (1<<5) |
50 | /* Controller can only DMA from 32-bit aligned addresses */ | ||
51 | #define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<6) | ||
52 | /* Controller can only DMA chunk sizes that are a multiple of 32 bits */ | ||
53 | #define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<7) | ||
54 | /* Controller needs to be reset after each request to stay stable */ | ||
55 | #define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<8) | ||
36 | 56 | ||
37 | static const struct pci_device_id pci_ids[] __devinitdata = { | 57 | static const struct pci_device_id pci_ids[] __devinitdata = { |
38 | { | 58 | { |
@@ -97,6 +117,16 @@ static const struct pci_device_id pci_ids[] __devinitdata = { | |||
97 | SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS, | 117 | SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS, |
98 | }, | 118 | }, |
99 | 119 | ||
120 | { | ||
121 | .vendor = PCI_VENDOR_ID_JMICRON, | ||
122 | .device = PCI_DEVICE_ID_JMICRON_JMB38X_SD, | ||
123 | .subvendor = PCI_ANY_ID, | ||
124 | .subdevice = PCI_ANY_ID, | ||
125 | .driver_data = SDHCI_QUIRK_32BIT_DMA_ADDR | | ||
126 | SDHCI_QUIRK_32BIT_DMA_SIZE | | ||
127 | SDHCI_QUIRK_RESET_AFTER_REQUEST, | ||
128 | }, | ||
129 | |||
100 | { /* Generic SD host controller */ | 130 | { /* Generic SD host controller */ |
101 | PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) | 131 | PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) |
102 | }, | 132 | }, |
@@ -419,7 +449,29 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) | |||
419 | 449 | ||
420 | writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL); | 450 | writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL); |
421 | 451 | ||
422 | if (host->flags & SDHCI_USE_DMA) { | 452 | if (host->flags & SDHCI_USE_DMA) |
453 | host->flags |= SDHCI_REQ_USE_DMA; | ||
454 | |||
455 | if (unlikely((host->flags & SDHCI_REQ_USE_DMA) && | ||
456 | (host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) && | ||
457 | ((data->blksz * data->blocks) & 0x3))) { | ||
458 | DBG("Reverting to PIO because of transfer size (%d)\n", | ||
459 | data->blksz * data->blocks); | ||
460 | host->flags &= ~SDHCI_REQ_USE_DMA; | ||
461 | } | ||
462 | |||
463 | /* | ||
464 | * The assumption here being that alignment is the same after | ||
465 | * translation to device address space. | ||
466 | */ | ||
467 | if (unlikely((host->flags & SDHCI_REQ_USE_DMA) && | ||
468 | (host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) && | ||
469 | (data->sg->offset & 0x3))) { | ||
470 | DBG("Reverting to PIO because of bad alignment\n"); | ||
471 | host->flags &= ~SDHCI_REQ_USE_DMA; | ||
472 | } | ||
473 | |||
474 | if (host->flags & SDHCI_REQ_USE_DMA) { | ||
423 | int count; | 475 | int count; |
424 | 476 | ||
425 | count = pci_map_sg(host->chip->pdev, data->sg, data->sg_len, | 477 | count = pci_map_sg(host->chip->pdev, data->sg, data->sg_len, |
@@ -456,7 +508,7 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host, | |||
456 | mode |= SDHCI_TRNS_MULTI; | 508 | mode |= SDHCI_TRNS_MULTI; |
457 | if (data->flags & MMC_DATA_READ) | 509 | if (data->flags & MMC_DATA_READ) |
458 | mode |= SDHCI_TRNS_READ; | 510 | mode |= SDHCI_TRNS_READ; |
459 | if (host->flags & SDHCI_USE_DMA) | 511 | if (host->flags & SDHCI_REQ_USE_DMA) |
460 | mode |= SDHCI_TRNS_DMA; | 512 | mode |= SDHCI_TRNS_DMA; |
461 | 513 | ||
462 | writew(mode, host->ioaddr + SDHCI_TRANSFER_MODE); | 514 | writew(mode, host->ioaddr + SDHCI_TRANSFER_MODE); |
@@ -472,7 +524,7 @@ static void sdhci_finish_data(struct sdhci_host *host) | |||
472 | data = host->data; | 524 | data = host->data; |
473 | host->data = NULL; | 525 | host->data = NULL; |
474 | 526 | ||
475 | if (host->flags & SDHCI_USE_DMA) { | 527 | if (host->flags & SDHCI_REQ_USE_DMA) { |
476 | pci_unmap_sg(host->chip->pdev, data->sg, data->sg_len, | 528 | pci_unmap_sg(host->chip->pdev, data->sg, data->sg_len, |
477 | (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE); | 529 | (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE); |
478 | } | 530 | } |
@@ -886,7 +938,8 @@ static void sdhci_tasklet_finish(unsigned long param) | |||
886 | */ | 938 | */ |
887 | if (mrq->cmd->error || | 939 | if (mrq->cmd->error || |
888 | (mrq->data && (mrq->data->error || | 940 | (mrq->data && (mrq->data->error || |
889 | (mrq->data->stop && mrq->data->stop->error)))) { | 941 | (mrq->data->stop && mrq->data->stop->error))) || |
942 | (host->chip->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)) { | ||
890 | 943 | ||
891 | /* Some controllers need this kick or reset won't work here */ | 944 | /* Some controllers need this kick or reset won't work here */ |
892 | if (host->chip->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) { | 945 | if (host->chip->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) { |
@@ -1284,7 +1337,7 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) | |||
1284 | 1337 | ||
1285 | version = readw(host->ioaddr + SDHCI_HOST_VERSION); | 1338 | version = readw(host->ioaddr + SDHCI_HOST_VERSION); |
1286 | version = (version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; | 1339 | version = (version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; |
1287 | if (version != 0) { | 1340 | if (version > 1) { |
1288 | printk(KERN_ERR "%s: Unknown controller version (%d). " | 1341 | printk(KERN_ERR "%s: Unknown controller version (%d). " |
1289 | "You may experience problems.\n", host->slot_descr, | 1342 | "You may experience problems.\n", host->slot_descr, |
1290 | version); | 1343 | version); |
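The sdhci.c changes split "the host can do DMA" (SDHCI_USE_DMA) from "this request will use DMA" (SDHCI_REQ_USE_DMA) so the JMicron JMB38x quirks can demote individual transfers to PIO. A condensed sketch of the per-request decision in sdhci_prepare_data(), using the driver's names:

	if (host->flags & SDHCI_USE_DMA)
		host->flags |= SDHCI_REQ_USE_DMA;

	/* the controller can only move whole 32-bit words ... */
	if ((host->flags & SDHCI_REQ_USE_DMA) &&
	    (host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) &&
	    ((data->blksz * data->blocks) & 0x3))
		host->flags &= ~SDHCI_REQ_USE_DMA;

	/* ... and only from 32-bit aligned addresses */
	if ((host->flags & SDHCI_REQ_USE_DMA) &&
	    (host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
	    (data->sg->offset & 0x3))
		host->flags &= ~SDHCI_REQ_USE_DMA;

	/* sdhci_set_transfer_mode() and sdhci_finish_data() now test
	 * SDHCI_REQ_USE_DMA, so a demoted request is handled as PIO
	 * end to end */

The third new quirk, SDHCI_QUIRK_RESET_AFTER_REQUEST, is handled in sdhci_tasklet_finish() by forcing the same reset path that command and data errors already take.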
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 05195ea900f4..e4d77b038bfa 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h | |||
@@ -171,7 +171,8 @@ struct sdhci_host { | |||
171 | spinlock_t lock; /* Mutex */ | 171 | spinlock_t lock; /* Mutex */ |
172 | 172 | ||
173 | int flags; /* Host attributes */ | 173 | int flags; /* Host attributes */ |
174 | #define SDHCI_USE_DMA (1<<0) | 174 | #define SDHCI_USE_DMA (1<<0) /* Host is DMA capable */ |
175 | #define SDHCI_REQ_USE_DMA (1<<1) /* Use DMA for this req. */ | ||
175 | 176 | ||
176 | unsigned int max_clk; /* Max possible freq (MHz) */ | 177 | unsigned int max_clk; /* Max possible freq (MHz) */ |
177 | unsigned int timeout_clk; /* Timeout freq (KHz) */ | 178 | unsigned int timeout_clk; /* Timeout freq (KHz) */ |
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 70f48a1a6d58..b31d1c95c9fb 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c | |||
@@ -906,8 +906,7 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev | |||
906 | } | 906 | } |
907 | 907 | ||
908 | pci_set_master(pDev); | 908 | pci_set_master(pDev); |
909 | if (pci_set_dma_mask(pDev, DMA_64BIT_MASK) && | 909 | if (pci_set_dma_mask(pDev, DMA_32BIT_MASK)) |
910 | pci_set_dma_mask(pDev, DMA_32BIT_MASK)) | ||
911 | return -EINVAL; | 910 | return -EINVAL; |
912 | 911 | ||
913 | base_addr0_phys = pci_resource_start(pDev,0); | 912 | base_addr0_phys = pci_resource_start(pDev,0); |
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c index 4c4465d39a1d..01bf0189367d 100644 --- a/drivers/scsi/initio.c +++ b/drivers/scsi/initio.c | |||
@@ -2616,6 +2616,7 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c | |||
2616 | scsi_for_each_sg(cmnd, sglist, cblk->sglen, i) { | 2616 | scsi_for_each_sg(cmnd, sglist, cblk->sglen, i) { |
2617 | sg->data = cpu_to_le32((u32)sg_dma_address(sglist)); | 2617 | sg->data = cpu_to_le32((u32)sg_dma_address(sglist)); |
2618 | total_len += sg->len = cpu_to_le32((u32)sg_dma_len(sglist)); | 2618 | total_len += sg->len = cpu_to_le32((u32)sg_dma_len(sglist)); |
2619 | ++sg; | ||
2619 | } | 2620 | } |
2620 | 2621 | ||
2621 | cblk->buflen = (scsi_bufflen(cmnd) > total_len) ? | 2622 | cblk->buflen = (scsi_bufflen(cmnd) > total_len) ? |
@@ -2867,6 +2868,7 @@ static int initio_probe_one(struct pci_dev *pdev, | |||
2867 | } | 2868 | } |
2868 | host = (struct initio_host *)shost->hostdata; | 2869 | host = (struct initio_host *)shost->hostdata; |
2869 | memset(host, 0, sizeof(struct initio_host)); | 2870 | memset(host, 0, sizeof(struct initio_host)); |
2871 | host->addr = pci_resource_start(pdev, 0); | ||
2870 | 2872 | ||
2871 | if (!request_region(host->addr, 256, "i91u")) { | 2873 | if (!request_region(host->addr, 256, "i91u")) { |
2872 | printk(KERN_WARNING "initio: I/O port range 0x%x is busy.\n", host->addr); | 2874 | printk(KERN_WARNING "initio: I/O port range 0x%x is busy.\n", host->addr); |
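Two initio fixes above: initio_build_scb() was writing every DMA segment into the same hardware SG slot, because scsi_for_each_sg() only advances the source scatterlist, not the destination pointer; and initio_probe_one() requested an I/O region before host->addr had been filled in. The SG loop, roughly (the destination element type name is approximate):

	struct sg_entry *sg = cblk->sglist;	/* hardware SG table */
	struct scatterlist *sglist;
	unsigned int total_len = 0;
	int i;

	scsi_for_each_sg(cmnd, sglist, cblk->sglen, i) {
		sg->data = cpu_to_le32((u32)sg_dma_address(sglist));
		total_len += sg->len = cpu_to_le32((u32)sg_dma_len(sglist));
		++sg;	/* the missing advance: move to the next hardware slot */
	}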
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 98dfd6ea209c..328c47c6aeb1 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
@@ -3611,6 +3611,7 @@ static struct st_buffer * | |||
3611 | 3611 | ||
3612 | tb->dma = need_dma; | 3612 | tb->dma = need_dma; |
3613 | tb->buffer_size = got; | 3613 | tb->buffer_size = got; |
3614 | sg_init_table(tb->sg, max_sg); | ||
3614 | 3615 | ||
3615 | return tb; | 3616 | return tb; |
3616 | } | 3617 | } |
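The st.c hunk matters because of the 2.6.24 scatterlist-chaining rework: a driver-allocated scatterlist array must be initialised with sg_init_table() before first use so the end-of-table marker (and, with CONFIG_DEBUG_SG, the magic values) are set. A minimal sketch, assuming a kmalloc'ed table:

	struct scatterlist *sg;

	sg = kmalloc(max_sg * sizeof(*sg), GFP_KERNEL);
	if (sg)
		sg_init_table(sg, max_sg);	/* marks sg[max_sg - 1] as the end */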
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 0f74aba5b237..9e0908d1981a 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c | |||
@@ -1243,7 +1243,7 @@ static void sym_free_resources(struct sym_hcb *np, struct pci_dev *pdev) | |||
1243 | * Free O/S specific resources. | 1243 | * Free O/S specific resources. |
1244 | */ | 1244 | */ |
1245 | if (pdev->irq) | 1245 | if (pdev->irq) |
1246 | free_irq(pdev->irq, np); | 1246 | free_irq(pdev->irq, np->s.host); |
1247 | if (np->s.ioaddr) | 1247 | if (np->s.ioaddr) |
1248 | pci_iounmap(pdev, np->s.ioaddr); | 1248 | pci_iounmap(pdev, np->s.ioaddr); |
1249 | if (np->s.ramaddr) | 1249 | if (np->s.ramaddr) |
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index 463f119f20e9..254bdaeb35ff 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c | |||
@@ -2791,7 +2791,7 @@ irqreturn_t sym_interrupt(struct Scsi_Host *shost) | |||
2791 | istat = INB(np, nc_istat); | 2791 | istat = INB(np, nc_istat); |
2792 | if (istat & INTF) { | 2792 | if (istat & INTF) { |
2793 | OUTB(np, nc_istat, (istat & SIGP) | INTF | np->istat_sem); | 2793 | OUTB(np, nc_istat, (istat & SIGP) | INTF | np->istat_sem); |
2794 | istat = INB(np, nc_istat); /* DUMMY READ */ | 2794 | istat |= INB(np, nc_istat); /* DUMMY READ */ |
2795 | if (DEBUG_FLAGS & DEBUG_TINY) printf ("F "); | 2795 | if (DEBUG_FLAGS & DEBUG_TINY) printf ("F "); |
2796 | sym_wakeup_done(np); | 2796 | sym_wakeup_done(np); |
2797 | } | 2797 | } |
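The one-character sym_hipd.c change is easy to miss: the dummy read that flushes the INTF acknowledgement used to overwrite istat, discarding bits from the first read that the rest of sym_interrupt() still examines. OR-ing the dummy read in keeps both. In context, roughly:

	istat = INB(np, nc_istat);
	if (istat & INTF) {
		OUTB(np, nc_istat, (istat & SIGP) | INTF | np->istat_sem);
		istat |= INB(np, nc_istat);	/* dummy read, but keep the earlier bits */
		sym_wakeup_done(np);
	}
	/* istat is consulted again further down the handler, so it must
	 * not be clobbered here */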
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c index 9c56c492a693..b3128903d673 100644 --- a/drivers/video/ps3fb.c +++ b/drivers/video/ps3fb.c | |||
@@ -51,7 +51,8 @@ | |||
51 | #define L1GPU_DISPLAY_SYNC_HSYNC 1 | 51 | #define L1GPU_DISPLAY_SYNC_HSYNC 1 |
52 | #define L1GPU_DISPLAY_SYNC_VSYNC 2 | 52 | #define L1GPU_DISPLAY_SYNC_VSYNC 2 |
53 | 53 | ||
54 | #define GPU_CMD_BUF_SIZE (64 * 1024) | 54 | #define GPU_CMD_BUF_SIZE (2 * 1024 * 1024) |
55 | #define GPU_FB_START (64 * 1024) | ||
55 | #define GPU_IOIF (0x0d000000UL) | 56 | #define GPU_IOIF (0x0d000000UL) |
56 | #define GPU_ALIGN_UP(x) _ALIGN_UP((x), 64) | 57 | #define GPU_ALIGN_UP(x) _ALIGN_UP((x), 64) |
57 | #define GPU_MAX_LINE_LENGTH (65536 - 64) | 58 | #define GPU_MAX_LINE_LENGTH (65536 - 64) |
@@ -406,6 +407,7 @@ static void ps3fb_sync_image(struct device *dev, u64 frame_offset, | |||
406 | if (src_line_length != dst_line_length) | 407 | if (src_line_length != dst_line_length) |
407 | line_length |= (u64)src_line_length << 32; | 408 | line_length |= (u64)src_line_length << 32; |
408 | 409 | ||
410 | src_offset += GPU_FB_START; | ||
409 | status = lv1_gpu_context_attribute(ps3fb.context_handle, | 411 | status = lv1_gpu_context_attribute(ps3fb.context_handle, |
410 | L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, | 412 | L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, |
411 | dst_offset, GPU_IOIF + src_offset, | 413 | dst_offset, GPU_IOIF + src_offset, |
@@ -976,9 +978,8 @@ static int ps3fb_xdr_settings(u64 xdr_lpar, struct device *dev) | |||
976 | 978 | ||
977 | status = lv1_gpu_context_attribute(ps3fb.context_handle, | 979 | status = lv1_gpu_context_attribute(ps3fb.context_handle, |
978 | L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP, | 980 | L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP, |
979 | xdr_lpar + ps3fb.xdr_size, | 981 | xdr_lpar, GPU_CMD_BUF_SIZE, |
980 | GPU_CMD_BUF_SIZE, | 982 | GPU_IOIF, 0); |
981 | GPU_IOIF + ps3fb.xdr_size, 0); | ||
982 | if (status) { | 983 | if (status) { |
983 | dev_err(dev, | 984 | dev_err(dev, |
984 | "%s: lv1_gpu_context_attribute FB_SETUP failed: %d\n", | 985 | "%s: lv1_gpu_context_attribute FB_SETUP failed: %d\n", |
@@ -1061,6 +1062,11 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev) | |||
1061 | struct task_struct *task; | 1062 | struct task_struct *task; |
1062 | unsigned long max_ps3fb_size; | 1063 | unsigned long max_ps3fb_size; |
1063 | 1064 | ||
1065 | if (ps3fb_videomemory.size < GPU_CMD_BUF_SIZE) { | ||
1066 | dev_err(&dev->core, "%s: Not enough video memory\n", __func__); | ||
1067 | return -ENOMEM; | ||
1068 | } | ||
1069 | |||
1064 | status = ps3_open_hv_device(dev); | 1070 | status = ps3_open_hv_device(dev); |
1065 | if (status) { | 1071 | if (status) { |
1066 | dev_err(&dev->core, "%s: ps3_open_hv_device failed\n", | 1072 | dev_err(&dev->core, "%s: ps3_open_hv_device failed\n", |
@@ -1131,8 +1137,14 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev) | |||
1131 | /* Clear memory to prevent kernel info leakage into userspace */ | 1137 | /* Clear memory to prevent kernel info leakage into userspace */ |
1132 | memset(ps3fb.xdr_ea, 0, ps3fb_videomemory.size); | 1138 | memset(ps3fb.xdr_ea, 0, ps3fb_videomemory.size); |
1133 | 1139 | ||
1134 | /* The GPU command buffer is at the end of video memory */ | 1140 | /* |
1135 | ps3fb.xdr_size = ps3fb_videomemory.size - GPU_CMD_BUF_SIZE; | 1141 | * The GPU command buffer is at the start of video memory |
1142 | * As we don't use the full command buffer, we can put the actual | ||
1143 | * frame buffer at offset GPU_FB_START and save some precious XDR | ||
1144 | * memory | ||
1145 | */ | ||
1146 | ps3fb.xdr_ea += GPU_FB_START; | ||
1147 | ps3fb.xdr_size = ps3fb_videomemory.size - GPU_FB_START; | ||
1136 | 1148 | ||
1137 | retval = ps3fb_xdr_settings(xdr_lpar, &dev->core); | 1149 | retval = ps3fb_xdr_settings(xdr_lpar, &dev->core); |
1138 | if (retval) | 1150 | if (retval) |
@@ -1200,7 +1212,7 @@ err_fb_dealloc: | |||
1200 | err_framebuffer_release: | 1212 | err_framebuffer_release: |
1201 | framebuffer_release(info); | 1213 | framebuffer_release(info); |
1202 | err_free_irq: | 1214 | err_free_irq: |
1203 | free_irq(ps3fb.irq_no, dev); | 1215 | free_irq(ps3fb.irq_no, &dev->core); |
1204 | ps3_irq_plug_destroy(ps3fb.irq_no); | 1216 | ps3_irq_plug_destroy(ps3fb.irq_no); |
1205 | err_iounmap_dinfo: | 1217 | err_iounmap_dinfo: |
1206 | iounmap((u8 __iomem *)ps3fb.dinfo); | 1218 | iounmap((u8 __iomem *)ps3fb.dinfo); |
@@ -1235,7 +1247,7 @@ static int ps3fb_shutdown(struct ps3_system_bus_device *dev) | |||
1235 | kthread_stop(task); | 1247 | kthread_stop(task); |
1236 | } | 1248 | } |
1237 | if (ps3fb.irq_no) { | 1249 | if (ps3fb.irq_no) { |
1238 | free_irq(ps3fb.irq_no, dev); | 1250 | free_irq(ps3fb.irq_no, &dev->core); |
1239 | ps3_irq_plug_destroy(ps3fb.irq_no); | 1251 | ps3_irq_plug_destroy(ps3fb.irq_no); |
1240 | } | 1252 | } |
1241 | iounmap((u8 __iomem *)ps3fb.dinfo); | 1253 | iounmap((u8 __iomem *)ps3fb.dinfo); |
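The ps3fb.c layout change reads more easily as arithmetic. The GPU command buffer now sits at the start of XDR video memory and the frame buffer follows at GPU_FB_START, since the full 2 MiB command-buffer window is not actually used; the free_irq cookie fix is independent. A sketch of the resulting layout, using the patch's constants:

	/*
	 *  xdr_lpar                                  + ps3fb_videomemory.size
	 *  |<- GPU_FB_START (64 KiB) ->|<-------- frame buffer -------->|
	 *  |  GPU command buffer ...   |
	 */
	ps3fb.xdr_ea   += GPU_FB_START;
	ps3fb.xdr_size  = ps3fb_videomemory.size - GPU_FB_START;

	/* blits into the frame buffer must be offset the same way */
	src_offset += GPU_FB_START;

The probe now also bails out early with -ENOMEM when ps3fb_videomemory.size cannot even hold GPU_CMD_BUF_SIZE.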
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c index e176d195e7e5..7596e1e94cde 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c | |||
@@ -319,7 +319,6 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) | |||
319 | current->mm->free_area_cache = current->mm->mmap_base; | 319 | current->mm->free_area_cache = current->mm->mmap_base; |
320 | current->mm->cached_hole_size = 0; | 320 | current->mm->cached_hole_size = 0; |
321 | 321 | ||
322 | current->mm->mmap = NULL; | ||
323 | compute_creds(bprm); | 322 | compute_creds(bprm); |
324 | current->flags &= ~PF_FORKNOEXEC; | 323 | current->flags &= ~PF_FORKNOEXEC; |
325 | #ifdef __sparc__ | 324 | #ifdef __sparc__ |
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h index a977affaebec..a1b9719f5fbb 100644 --- a/include/asm-ia64/bitops.h +++ b/include/asm-ia64/bitops.h | |||
@@ -124,10 +124,21 @@ clear_bit_unlock (int nr, volatile void *addr) | |||
124 | /** | 124 | /** |
125 | * __clear_bit_unlock - Non-atomically clear a bit with release | 125 | * __clear_bit_unlock - Non-atomically clear a bit with release |
126 | * | 126 | * |
127 | * This is like clear_bit_unlock, but the implementation may use a non-atomic | 127 | * This is like clear_bit_unlock, but the implementation uses a store |
128 | * store (this one uses an atomic, however). | 128 | * with release semantics. See also __raw_spin_unlock(). |
129 | */ | 129 | */ |
130 | #define __clear_bit_unlock clear_bit_unlock | 130 | static __inline__ void |
131 | __clear_bit_unlock(int nr, volatile void *addr) | ||
132 | { | ||
133 | __u32 mask, new; | ||
134 | volatile __u32 *m; | ||
135 | |||
136 | m = (volatile __u32 *)addr + (nr >> 5); | ||
137 | mask = ~(1 << (nr & 31)); | ||
138 | new = *m & mask; | ||
139 | barrier(); | ||
140 | ia64_st4_rel_nta(m, new); | ||
141 | } | ||
131 | 142 | ||
132 | /** | 143 | /** |
133 | * __clear_bit - Clears a bit in memory (non-atomic version) | 144 | * __clear_bit - Clears a bit in memory (non-atomic version) |
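The new ia64 __clear_bit_unlock() trades the old atomic path for a plain load, mask and st4.rel.nta store: release ordering comes from the store itself, the same trick __raw_spin_unlock() uses. That is only safe when no other CPU can be modifying the word concurrently, which callers of this interface are expected to guarantee. A hedged usage sketch (the lock word and bit number are hypothetical):

	#include <linux/bit_spinlock.h>

	unsigned long flags_word;	/* bit 0 used as a lock */

	bit_spin_lock(0, &flags_word);		/* atomic acquire */
	/* ... critical section: only the holder touches flags_word ... */
	__bit_spin_unlock(0, &flags_word);	/* ends up in __clear_bit_unlock() */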
diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h index 4fb4e439b05c..e58d3298fa10 100644 --- a/include/asm-ia64/gcc_intrin.h +++ b/include/asm-ia64/gcc_intrin.h | |||
@@ -191,6 +191,11 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__; | |||
191 | asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \ | 191 | asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \ |
192 | }) | 192 | }) |
193 | 193 | ||
194 | #define ia64_st4_rel_nta(m, val) \ | ||
195 | ({ \ | ||
196 | asm volatile ("st4.rel.nta [%0] = %1\n\t" :: "r"(m), "r"(val)); \ | ||
197 | }) | ||
198 | |||
194 | #define ia64_stfs(x, regnum) \ | 199 | #define ia64_stfs(x, regnum) \ |
195 | ({ \ | 200 | ({ \ |
196 | register double __f__ asm ("f"#regnum); \ | 201 | register double __f__ asm ("f"#regnum); \ |
diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h index bba5baa3c7fc..7e6e3779670a 100644 --- a/include/asm-ia64/hw_irq.h +++ b/include/asm-ia64/hw_irq.h | |||
@@ -63,7 +63,7 @@ extern int ia64_last_device_vector; | |||
63 | #define IA64_NUM_DEVICE_VECTORS (IA64_LAST_DEVICE_VECTOR - IA64_FIRST_DEVICE_VECTOR + 1) | 63 | #define IA64_NUM_DEVICE_VECTORS (IA64_LAST_DEVICE_VECTOR - IA64_FIRST_DEVICE_VECTOR + 1) |
64 | 64 | ||
65 | #define IA64_MCA_RENDEZ_VECTOR 0xe8 /* MCA rendez interrupt */ | 65 | #define IA64_MCA_RENDEZ_VECTOR 0xe8 /* MCA rendez interrupt */ |
66 | #define IA64_PERFMON_VECTOR 0xee /* performanc monitor interrupt vector */ | 66 | #define IA64_PERFMON_VECTOR 0xee /* performance monitor interrupt vector */ |
67 | #define IA64_TIMER_VECTOR 0xef /* use highest-prio group 15 interrupt for timer */ | 67 | #define IA64_TIMER_VECTOR 0xef /* use highest-prio group 15 interrupt for timer */ |
68 | #define IA64_MCA_WAKEUP_VECTOR 0xf0 /* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */ | 68 | #define IA64_MCA_WAKEUP_VECTOR 0xf0 /* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */ |
69 | #define IA64_IPI_LOCAL_TLB_FLUSH 0xfc /* SMP flush local TLB */ | 69 | #define IA64_IPI_LOCAL_TLB_FLUSH 0xfc /* SMP flush local TLB */ |
diff --git a/include/asm-ia64/intel_intrin.h b/include/asm-ia64/intel_intrin.h index d069b6acddce..a520d103d808 100644 --- a/include/asm-ia64/intel_intrin.h +++ b/include/asm-ia64/intel_intrin.h | |||
@@ -110,6 +110,9 @@ | |||
110 | #define ia64_st4_rel __st4_rel | 110 | #define ia64_st4_rel __st4_rel |
111 | #define ia64_st8_rel __st8_rel | 111 | #define ia64_st8_rel __st8_rel |
112 | 112 | ||
113 | /* FIXME: need st4.rel.nta intrinsic */ | ||
114 | #define ia64_st4_rel_nta __st4_rel | ||
115 | |||
113 | #define ia64_ld1_acq __ld1_acq | 116 | #define ia64_ld1_acq __ld1_acq |
114 | #define ia64_ld2_acq __ld2_acq | 117 | #define ia64_ld2_acq __ld2_acq |
115 | #define ia64_ld4_acq __ld4_acq | 118 | #define ia64_ld4_acq __ld4_acq |
diff --git a/include/asm-ia64/sn/bte.h b/include/asm-ia64/sn/bte.h index 5335d87ca5f8..a0d214f43115 100644 --- a/include/asm-ia64/sn/bte.h +++ b/include/asm-ia64/sn/bte.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved. | 6 | * Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | 9 | ||
@@ -150,6 +150,35 @@ typedef enum { | |||
150 | BTEFAIL_NOTAVAIL, /* BTE not available */ | 150 | BTEFAIL_NOTAVAIL, /* BTE not available */ |
151 | } bte_result_t; | 151 | } bte_result_t; |
152 | 152 | ||
153 | #define BTEFAIL_SH2_RESP_SHORT 0x1 /* bit 000001 */ | ||
154 | #define BTEFAIL_SH2_RESP_LONG 0x2 /* bit 000010 */ | ||
155 | #define BTEFAIL_SH2_RESP_DSP 0x4 /* bit 000100 */ | ||
156 | #define BTEFAIL_SH2_RESP_ACCESS 0x8 /* bit 001000 */ | ||
157 | #define BTEFAIL_SH2_CRB_TO 0x10 /* bit 010000 */ | ||
158 | #define BTEFAIL_SH2_NACK_LIMIT 0x20 /* bit 100000 */ | ||
159 | #define BTEFAIL_SH2_ALL 0x3F /* bit 111111 */ | ||
160 | |||
161 | #define BTE_ERR_BITS 0x3FUL | ||
162 | #define BTE_ERR_SHIFT 36 | ||
163 | #define BTE_ERR_MASK (BTE_ERR_BITS << BTE_ERR_SHIFT) | ||
164 | |||
165 | #define BTE_ERROR_RETRY(value) \ | ||
166 | (is_shub2() ? (value != BTEFAIL_SH2_CRB_TO) \ | ||
167 | : (value != BTEFAIL_TOUT)) | ||
168 | |||
169 | /* | ||
170 | * On shub1 BTE_ERR_MASK will always be false, so no need for is_shub2() | ||
171 | */ | ||
172 | #define BTE_SHUB2_ERROR(_status) \ | ||
173 | ((_status & BTE_ERR_MASK) \ | ||
174 | ? (((_status >> BTE_ERR_SHIFT) & BTE_ERR_BITS) | IBLS_ERROR) \ | ||
175 | : _status) | ||
176 | |||
177 | #define BTE_GET_ERROR_STATUS(_status) \ | ||
178 | (BTE_SHUB2_ERROR(_status) & ~IBLS_ERROR) | ||
179 | |||
180 | #define BTE_VALID_SH2_ERROR(value) \ | ||
181 | ((value >= BTEFAIL_SH2_RESP_SHORT) && (value <= BTEFAIL_SH2_ALL)) | ||
153 | 182 | ||
154 | /* | 183 | /* |
155 | * Structure defining a bte. An instance of this | 184 | * Structure defining a bte. An instance of this |
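Note on the bte.h hunk: the new constants describe the 6-bit error field SHUB2 reports in bits 41:36 of the BTE interface status (BTE_ERR_SHIFT/BTE_ERR_BITS); BTE_SHUB2_ERROR()/BTE_GET_ERROR_STATUS() fold that field into the numeric space the SHUB1 code already used, and BTE_ERROR_RETRY() treats everything except a (CRB) timeout as worth retrying. A hedged sketch of how a caller might use the retry predicate, mirroring the xp_bte_copy() change further down (helper name is hypothetical):

    static bte_result_t bte_copy_with_retry(u64 src, u64 dst, u64 len, u64 mode)
    {
            bte_result_t ret;

            ret = bte_copy(src, dst, len, mode, NULL);
            if (ret != BTE_SUCCESS && BTE_ERROR_RETRY(ret)) {
                    /* not a timeout, so the failure is assumed transient */
                    ret = bte_copy(src, dst, len, mode, NULL);
            }
            return ret;
    }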
diff --git a/include/asm-ia64/sn/xp.h b/include/asm-ia64/sn/xp.h index 6f807e0193b7..f7711b308e48 100644 --- a/include/asm-ia64/sn/xp.h +++ b/include/asm-ia64/sn/xp.h | |||
@@ -86,7 +86,7 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification) | |||
86 | BUG_ON(REGION_NUMBER(vdst) != RGN_KERNEL); | 86 | BUG_ON(REGION_NUMBER(vdst) != RGN_KERNEL); |
87 | 87 | ||
88 | ret = bte_copy(src, pdst, len, mode, notification); | 88 | ret = bte_copy(src, pdst, len, mode, notification); |
89 | if (ret != BTE_SUCCESS) { | 89 | if ((ret != BTE_SUCCESS) && BTE_ERROR_RETRY(ret)) { |
90 | if (!in_interrupt()) { | 90 | if (!in_interrupt()) { |
91 | cond_resched(); | 91 | cond_resched(); |
92 | } | 92 | } |
@@ -244,7 +244,30 @@ enum xpc_retval { | |||
244 | 244 | ||
245 | xpcDisconnected, /* 51: channel disconnected (closed) */ | 245 | xpcDisconnected, /* 51: channel disconnected (closed) */ |
246 | 246 | ||
247 | xpcUnknownReason /* 52: unknown reason -- must be last in list */ | 247 | xpcBteSh2Start, /* 52: BTE CRB timeout */ |
248 | |||
249 | /* 53: 0x1 BTE Error Response Short */ | ||
250 | xpcBteSh2RspShort = xpcBteSh2Start + BTEFAIL_SH2_RESP_SHORT, | ||
251 | |||
252 | /* 54: 0x2 BTE Error Response Long */ | ||
253 | xpcBteSh2RspLong = xpcBteSh2Start + BTEFAIL_SH2_RESP_LONG, | ||
254 | |||
255 | /* 56: 0x4 BTE Error Response DSB */ | ||
256 | xpcBteSh2RspDSB = xpcBteSh2Start + BTEFAIL_SH2_RESP_DSP, | ||
257 | |||
258 | /* 60: 0x8 BTE Error Response Access */ | ||
259 | xpcBteSh2RspAccess = xpcBteSh2Start + BTEFAIL_SH2_RESP_ACCESS, | ||
260 | |||
261 | /* 68: 0x10 BTE Error CRB timeout */ | ||
262 | xpcBteSh2CRBTO = xpcBteSh2Start + BTEFAIL_SH2_CRB_TO, | ||
263 | |||
264 | /* 84: 0x20 BTE Error NACK limit */ | ||
265 | xpcBteSh2NACKLimit = xpcBteSh2Start + BTEFAIL_SH2_NACK_LIMIT, | ||
266 | |||
267 | /* 115: BTE end */ | ||
268 | xpcBteSh2End = xpcBteSh2Start + BTEFAIL_SH2_ALL, | ||
269 | |||
270 | xpcUnknownReason /* 116: unknown reason -- must be last in list */ | ||
248 | }; | 271 | }; |
249 | 272 | ||
250 | 273 | ||
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h index e52b8508083b..8e5d7de9c632 100644 --- a/include/asm-ia64/sn/xpc.h +++ b/include/asm-ia64/sn/xpc.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved. | 6 | * Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | 9 | ||
@@ -1211,6 +1211,12 @@ xpc_IPI_init(int index) | |||
1211 | static inline enum xpc_retval | 1211 | static inline enum xpc_retval |
1212 | xpc_map_bte_errors(bte_result_t error) | 1212 | xpc_map_bte_errors(bte_result_t error) |
1213 | { | 1213 | { |
1214 | if (is_shub2()) { | ||
1215 | if (BTE_VALID_SH2_ERROR(error)) | ||
1216 | return xpcBteSh2Start + error; | ||
1217 | else | ||
1218 | return xpcBteUnmappedError; | ||
1219 | } | ||
1214 | switch (error) { | 1220 | switch (error) { |
1215 | case BTE_SUCCESS: return xpcSuccess; | 1221 | case BTE_SUCCESS: return xpcSuccess; |
1216 | case BTEFAIL_DIR: return xpcBteDirectoryError; | 1222 | case BTEFAIL_DIR: return xpcBteDirectoryError; |
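On SHUB2 the mapping in xpc_map_bte_errors() is pure arithmetic: the raw 6-bit error value is added to xpcBteSh2Start (52). Worked example: a CRB timeout is BTEFAIL_SH2_CRB_TO = 0x10 = 16, so the function returns 52 + 16 = 68, which is exactly xpcBteSh2CRBTO per the enum comments in xp.h above. A compile-time check along these lines could confirm the offsets (hypothetical helper, not part of the patch):

    static inline void xpc_check_sh2_error_map(void)
    {
            /* 52 + 0x10 == 68 == xpcBteSh2CRBTO */
            BUILD_BUG_ON(xpcBteSh2Start + BTEFAIL_SH2_CRB_TO != xpcBteSh2CRBTO);
    }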
diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h index 80bcb0a38e8a..7774a1cac0cc 100644 --- a/include/asm-ia64/tlbflush.h +++ b/include/asm-ia64/tlbflush.h | |||
@@ -92,6 +92,10 @@ void smp_local_flush_tlb(void); | |||
92 | #define smp_local_flush_tlb() | 92 | #define smp_local_flush_tlb() |
93 | #endif | 93 | #endif |
94 | 94 | ||
95 | #define flush_tlb_kernel_range(start, end) flush_tlb_all() /* XXX fix me */ | 95 | static inline void flush_tlb_kernel_range(unsigned long start, |
96 | unsigned long end) | ||
97 | { | ||
98 | flush_tlb_all(); /* XXX fix me */ | ||
99 | } | ||
96 | 100 | ||
97 | #endif /* _ASM_IA64_TLBFLUSH_H */ | 101 | #endif /* _ASM_IA64_TLBFLUSH_H */ |
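Turning the flush_tlb_kernel_range() macro into a static inline does not change behaviour on ia64 (the whole TLB is still flushed), but the arguments are now type-checked and actually consumed, so callers whose variables exist only for this call no longer look like they hold unused values. A hypothetical caller that benefits:

    static void unmap_kernel_area(unsigned long start, unsigned long end)
    {
            /* ... tear the kernel mappings down ... */
            flush_tlb_kernel_range(start, end);     /* args now referenced and checked */
    }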
diff --git a/include/asm-x86/kprobes_32.h b/include/asm-x86/kprobes_32.h index b772d5b38685..9fe8f3bddfd5 100644 --- a/include/asm-x86/kprobes_32.h +++ b/include/asm-x86/kprobes_32.h | |||
@@ -73,7 +73,7 @@ struct kprobe_ctlblk { | |||
73 | unsigned long kprobe_status; | 73 | unsigned long kprobe_status; |
74 | unsigned long kprobe_old_eflags; | 74 | unsigned long kprobe_old_eflags; |
75 | unsigned long kprobe_saved_eflags; | 75 | unsigned long kprobe_saved_eflags; |
76 | long *jprobe_saved_esp; | 76 | unsigned long *jprobe_saved_esp; |
77 | struct pt_regs jprobe_saved_regs; | 77 | struct pt_regs jprobe_saved_regs; |
78 | kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; | 78 | kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; |
79 | struct prev_kprobe prev_kprobe; | 79 | struct prev_kprobe prev_kprobe; |
diff --git a/include/asm-x86/kprobes_64.h b/include/asm-x86/kprobes_64.h index 53f4d8507354..743d76218fc9 100644 --- a/include/asm-x86/kprobes_64.h +++ b/include/asm-x86/kprobes_64.h | |||
@@ -66,7 +66,7 @@ struct kprobe_ctlblk { | |||
66 | unsigned long kprobe_status; | 66 | unsigned long kprobe_status; |
67 | unsigned long kprobe_old_rflags; | 67 | unsigned long kprobe_old_rflags; |
68 | unsigned long kprobe_saved_rflags; | 68 | unsigned long kprobe_saved_rflags; |
69 | long *jprobe_saved_rsp; | 69 | unsigned long *jprobe_saved_rsp; |
70 | struct pt_regs jprobe_saved_regs; | 70 | struct pt_regs jprobe_saved_regs; |
71 | kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; | 71 | kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; |
72 | struct prev_kprobe prev_kprobe; | 72 | struct prev_kprobe prev_kprobe; |
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h index 4cb23848d460..6e9e4841a2da 100644 --- a/include/asm-x86/system_64.h +++ b/include/asm-x86/system_64.h | |||
@@ -7,6 +7,13 @@ | |||
7 | 7 | ||
8 | #ifdef __KERNEL__ | 8 | #ifdef __KERNEL__ |
9 | 9 | ||
10 | /* entries in ARCH_DLINFO: */ | ||
11 | #ifdef CONFIG_IA32_EMULATION | ||
12 | # define AT_VECTOR_SIZE_ARCH 2 | ||
13 | #else | ||
14 | # define AT_VECTOR_SIZE_ARCH 1 | ||
15 | #endif | ||
16 | |||
10 | #define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t" | 17 | #define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t" |
11 | #define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t" | 18 | #define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t" |
12 | 19 | ||
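AT_VECTOR_SIZE_ARCH tells the generic ELF loader how many architecture-specific auxv entries ARCH_DLINFO will emit, so mm->saved_auxv can be sized large enough; native x86-64 adds one entry (the vDSO AT_SYSINFO_EHDR), and IA32 emulation can add AT_SYSINFO as well, hence 2. Roughly how the generic side consumes the constant (paraphrased from <linux/auxvec.h> of that era; treat the exact formula as an assumption):

    /* assumption: room for base entries + arch entries + the terminating
     * AT_NULL pair, two words (a_type, a_val) each */
    #define AT_VECTOR_SIZE  (2 * (AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))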
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index b8b7c51389fe..e765e191663d 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
@@ -115,6 +115,7 @@ struct io_restrictions { | |||
115 | unsigned short max_hw_segments; | 115 | unsigned short max_hw_segments; |
116 | unsigned short hardsect_size; | 116 | unsigned short hardsect_size; |
117 | unsigned int max_segment_size; | 117 | unsigned int max_segment_size; |
118 | unsigned int max_hw_sectors; | ||
118 | unsigned long seg_boundary_mask; | 119 | unsigned long seg_boundary_mask; |
119 | unsigned long bounce_pfn; | 120 | unsigned long bounce_pfn; |
120 | unsigned char no_cluster; /* inverted so that 0 is default */ | 121 | unsigned char no_cluster; /* inverted so that 0 is default */ |
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index e8f42133a616..639624b55fbe 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
@@ -119,7 +119,7 @@ extern void elv_put_request(struct request_queue *, struct request *); | |||
119 | /* | 119 | /* |
120 | * io scheduler registration | 120 | * io scheduler registration |
121 | */ | 121 | */ |
122 | extern int elv_register(struct elevator_type *); | 122 | extern void elv_register(struct elevator_type *); |
123 | extern void elv_unregister(struct elevator_type *); | 123 | extern void elv_unregister(struct elevator_type *); |
124 | 124 | ||
125 | /* | 125 | /* |
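With elv_register() no longer able to report failure, I/O scheduler modules stop propagating its return value. A hypothetical init function showing the caller-side effect (scheduler name illustrative):

    static int __init noop_iosched_init(void)
    {
            elv_register(&elevator_noop);   /* was: return elv_register(&elevator_noop); */
            return 0;
    }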
diff --git a/include/linux/irq.h b/include/linux/irq.h index efc88538b2ba..4669be080617 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -339,6 +339,13 @@ extern void | |||
339 | __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | 339 | __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, |
340 | const char *name); | 340 | const char *name); |
341 | 341 | ||
342 | /* caller has locked the irq_desc and both params are valid */ | ||
343 | static inline void __set_irq_handler_unlocked(int irq, | ||
344 | irq_flow_handler_t handler) | ||
345 | { | ||
346 | irq_desc[irq].handle_irq = handler; | ||
347 | } | ||
348 | |||
342 | /* | 349 | /* |
343 | * Set a highlevel flow handler for a given IRQ: | 350 | * Set a highlevel flow handler for a given IRQ: |
344 | */ | 351 | */ |
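__set_irq_handler_unlocked() is for code paths that already run under desc->lock, typically an irq_chip ->set_type() implementation that wants to switch between the level and edge flow handlers without re-taking the lock through set_irq_handler(). A hedged sketch of such a driver hook (driver name is hypothetical):

    #include <linux/irq.h>

    static int mychip_irq_set_type(unsigned int irq, unsigned int flow_type)
    {
            /* called from the core with irq_desc[irq].lock already held */
            if (flow_type & IRQ_TYPE_EDGE_BOTH)
                    __set_irq_handler_unlocked(irq, handle_edge_irq);
            else
                    __set_irq_handler_unlocked(irq, handle_level_irq);
            /* ... program the hardware trigger mode ... */
            return 0;
    }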
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 125eee1407ff..7ab962fa1d73 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
@@ -118,10 +118,6 @@ struct mmc_host { | |||
118 | unsigned int removed:1; /* host is being removed */ | 118 | unsigned int removed:1; /* host is being removed */ |
119 | #endif | 119 | #endif |
120 | 120 | ||
121 | unsigned int mode; /* current card mode of host */ | ||
122 | #define MMC_MODE_MMC 0 | ||
123 | #define MMC_MODE_SD 1 | ||
124 | |||
125 | struct mmc_card *card; /* device attached to this host */ | 121 | struct mmc_card *card; /* device attached to this host */ |
126 | 122 | ||
127 | wait_queue_head_t wq; | 123 | wait_queue_head_t wq; |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 111aa10f1136..023656d2f1da 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -2148,6 +2148,7 @@ | |||
2148 | #define PCI_DEVICE_ID_JMICRON_JMB365 0x2365 | 2148 | #define PCI_DEVICE_ID_JMICRON_JMB365 0x2365 |
2149 | #define PCI_DEVICE_ID_JMICRON_JMB366 0x2366 | 2149 | #define PCI_DEVICE_ID_JMICRON_JMB366 0x2366 |
2150 | #define PCI_DEVICE_ID_JMICRON_JMB368 0x2368 | 2150 | #define PCI_DEVICE_ID_JMICRON_JMB368 0x2368 |
2151 | #define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381 | ||
2151 | 2152 | ||
2152 | #define PCI_VENDOR_ID_KORENIX 0x1982 | 2153 | #define PCI_VENDOR_ID_KORENIX 0x1982 |
2153 | #define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600 | 2154 | #define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600 |
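The new ID is presumably consumed by a PCI probe table for the JMicron JMB38x SD controller; a minimal hypothetical match table (driver and table names illustrative):

    #include <linux/module.h>
    #include <linux/pci.h>

    static const struct pci_device_id jmb38x_sd_ids[] = {
            { PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_SD) },
            { }     /* terminating entry */
    };
    MODULE_DEVICE_TABLE(pci, jmb38x_sd_ids);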
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 9b5dff6b3f6a..44019ce30a14 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -297,18 +297,13 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc) | |||
297 | 297 | ||
298 | if (unlikely(desc->status & IRQ_INPROGRESS)) | 298 | if (unlikely(desc->status & IRQ_INPROGRESS)) |
299 | goto out_unlock; | 299 | goto out_unlock; |
300 | desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); | ||
300 | kstat_cpu(cpu).irqs[irq]++; | 301 | kstat_cpu(cpu).irqs[irq]++; |
301 | 302 | ||
302 | action = desc->action; | 303 | action = desc->action; |
303 | if (unlikely(!action || (desc->status & IRQ_DISABLED))) { | 304 | if (unlikely(!action || (desc->status & IRQ_DISABLED))) |
304 | if (desc->chip->mask) | ||
305 | desc->chip->mask(irq); | ||
306 | desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); | ||
307 | desc->status |= IRQ_PENDING; | ||
308 | goto out_unlock; | 305 | goto out_unlock; |
309 | } | ||
310 | 306 | ||
311 | desc->status &= ~(IRQ_REPLAY | IRQ_WAITING | IRQ_PENDING); | ||
312 | desc->status |= IRQ_INPROGRESS; | 307 | desc->status |= IRQ_INPROGRESS; |
313 | spin_unlock(&desc->lock); | 308 | spin_unlock(&desc->lock); |
314 | 309 | ||
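handle_simple_irq() serves interrupts with no hardware ack/mask handshake at all (for instance sub-interrupts demultiplexed purely in software), so the removed branch that tried to mask the line and mark the IRQ pending could never do anything useful; the REPLAY/WAITING flags are now cleared unconditionally before the action check. A hypothetical demux driver wiring a sub-IRQ to this flow:

    #include <linux/irq.h>

    static void wire_sub_irq(unsigned int irq)
    {
            /* no real chip behind it, so the dummy chip plus the simple flow */
            set_irq_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
    }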
diff --git a/kernel/panic.c b/kernel/panic.c index 6f6e03e91595..da4d6bac270e 100644 --- a/kernel/panic.c +++ b/kernel/panic.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/nmi.h> | 19 | #include <linux/nmi.h> |
20 | #include <linux/kexec.h> | 20 | #include <linux/kexec.h> |
21 | #include <linux/debug_locks.h> | 21 | #include <linux/debug_locks.h> |
22 | #include <linux/random.h> | ||
22 | 23 | ||
23 | int panic_on_oops; | 24 | int panic_on_oops; |
24 | int tainted; | 25 | int tainted; |
@@ -266,12 +267,29 @@ void oops_enter(void) | |||
266 | } | 267 | } |
267 | 268 | ||
268 | /* | 269 | /* |
270 | * 64-bit random ID for oopses: | ||
271 | */ | ||
272 | static u64 oops_id; | ||
273 | |||
274 | static int init_oops_id(void) | ||
275 | { | ||
276 | if (!oops_id) | ||
277 | get_random_bytes(&oops_id, sizeof(oops_id)); | ||
278 | |||
279 | return 0; | ||
280 | } | ||
281 | late_initcall(init_oops_id); | ||
282 | |||
283 | /* | ||
269 | * Called when the architecture exits its oops handler, after printing | 284 | * Called when the architecture exits its oops handler, after printing |
270 | * everything. | 285 | * everything. |
271 | */ | 286 | */ |
272 | void oops_exit(void) | 287 | void oops_exit(void) |
273 | { | 288 | { |
274 | do_oops_enter_exit(); | 289 | do_oops_enter_exit(); |
290 | init_oops_id(); | ||
291 | printk(KERN_WARNING "---[ end trace %016llx ]---\n", | ||
292 | (unsigned long long)oops_id); | ||
275 | } | 293 | } |
276 | 294 | ||
277 | #ifdef CONFIG_CC_STACKPROTECTOR | 295 | #ifdef CONFIG_CC_STACKPROTECTOR |
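Every oops now ends with a trailer carrying a 64-bit ID that is randomized once per boot (normally at late_initcall time; oops_exit() also calls init_oops_id() so an oops that happens earlier still gets a usable value). That makes it possible to tell whether several traces in a collected log came from the same boot. The emitted line looks like the following, with a purely illustrative value:

    ---[ end trace ca95b3b4a929ad4c ]---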
diff --git a/kernel/rwsem.c b/kernel/rwsem.c index 1ec620c03064..cae050b05f5e 100644 --- a/kernel/rwsem.c +++ b/kernel/rwsem.c | |||
@@ -6,6 +6,7 @@ | |||
6 | 6 | ||
7 | #include <linux/types.h> | 7 | #include <linux/types.h> |
8 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
9 | #include <linux/sched.h> | ||
9 | #include <linux/module.h> | 10 | #include <linux/module.h> |
10 | #include <linux/rwsem.h> | 11 | #include <linux/rwsem.h> |
11 | 12 | ||
@@ -15,7 +16,7 @@ | |||
15 | /* | 16 | /* |
16 | * lock for reading | 17 | * lock for reading |
17 | */ | 18 | */ |
18 | void down_read(struct rw_semaphore *sem) | 19 | void __sched down_read(struct rw_semaphore *sem) |
19 | { | 20 | { |
20 | might_sleep(); | 21 | might_sleep(); |
21 | rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); | 22 | rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); |
@@ -42,7 +43,7 @@ EXPORT_SYMBOL(down_read_trylock); | |||
42 | /* | 43 | /* |
43 | * lock for writing | 44 | * lock for writing |
44 | */ | 45 | */ |
45 | void down_write(struct rw_semaphore *sem) | 46 | void __sched down_write(struct rw_semaphore *sem) |
46 | { | 47 | { |
47 | might_sleep(); | 48 | might_sleep(); |
48 | rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_); | 49 | rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_); |
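The __sched annotation moves these functions into the .sched.text section, which the wchan and blocked-task backtrace code skips, so a task sleeping in down_read()/down_write() is reported at its real caller rather than inside the locking primitive (the lib/rwsem.c hunk below does the same for the slow path). Roughly how the annotation was defined at the time (paraphrased from <linux/sched.h>; treat as an assumption):

    /* ignore functions in this section when reporting wchan / blocked traces */
    #define __sched         __attribute__((__section__(".sched.text")))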
diff --git a/kernel/sched.c b/kernel/sched.c index c6e551de795b..3df84ea6aba9 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -508,10 +508,15 @@ EXPORT_SYMBOL_GPL(cpu_clock); | |||
508 | # define finish_arch_switch(prev) do { } while (0) | 508 | # define finish_arch_switch(prev) do { } while (0) |
509 | #endif | 509 | #endif |
510 | 510 | ||
511 | static inline int task_current(struct rq *rq, struct task_struct *p) | ||
512 | { | ||
513 | return rq->curr == p; | ||
514 | } | ||
515 | |||
511 | #ifndef __ARCH_WANT_UNLOCKED_CTXSW | 516 | #ifndef __ARCH_WANT_UNLOCKED_CTXSW |
512 | static inline int task_running(struct rq *rq, struct task_struct *p) | 517 | static inline int task_running(struct rq *rq, struct task_struct *p) |
513 | { | 518 | { |
514 | return rq->curr == p; | 519 | return task_current(rq, p); |
515 | } | 520 | } |
516 | 521 | ||
517 | static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) | 522 | static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) |
@@ -540,7 +545,7 @@ static inline int task_running(struct rq *rq, struct task_struct *p) | |||
540 | #ifdef CONFIG_SMP | 545 | #ifdef CONFIG_SMP |
541 | return p->oncpu; | 546 | return p->oncpu; |
542 | #else | 547 | #else |
543 | return rq->curr == p; | 548 | return task_current(rq, p); |
544 | #endif | 549 | #endif |
545 | } | 550 | } |
546 | 551 | ||
@@ -663,6 +668,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns) | |||
663 | struct rq *rq = cpu_rq(smp_processor_id()); | 668 | struct rq *rq = cpu_rq(smp_processor_id()); |
664 | u64 now = sched_clock(); | 669 | u64 now = sched_clock(); |
665 | 670 | ||
671 | touch_softlockup_watchdog(); | ||
666 | rq->idle_clock += delta_ns; | 672 | rq->idle_clock += delta_ns; |
667 | /* | 673 | /* |
668 | * Override the previous timestamp and ignore all | 674 | * Override the previous timestamp and ignore all |
@@ -3334,7 +3340,7 @@ unsigned long long task_sched_runtime(struct task_struct *p) | |||
3334 | 3340 | ||
3335 | rq = task_rq_lock(p, &flags); | 3341 | rq = task_rq_lock(p, &flags); |
3336 | ns = p->se.sum_exec_runtime; | 3342 | ns = p->se.sum_exec_runtime; |
3337 | if (rq->curr == p) { | 3343 | if (task_current(rq, p)) { |
3338 | update_rq_clock(rq); | 3344 | update_rq_clock(rq); |
3339 | delta_exec = rq->clock - p->se.exec_start; | 3345 | delta_exec = rq->clock - p->se.exec_start; |
3340 | if ((s64)delta_exec > 0) | 3346 | if ((s64)delta_exec > 0) |
@@ -4021,7 +4027,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio) | |||
4021 | 4027 | ||
4022 | oldprio = p->prio; | 4028 | oldprio = p->prio; |
4023 | on_rq = p->se.on_rq; | 4029 | on_rq = p->se.on_rq; |
4024 | running = task_running(rq, p); | 4030 | running = task_current(rq, p); |
4025 | if (on_rq) { | 4031 | if (on_rq) { |
4026 | dequeue_task(rq, p, 0); | 4032 | dequeue_task(rq, p, 0); |
4027 | if (running) | 4033 | if (running) |
@@ -4332,7 +4338,7 @@ recheck: | |||
4332 | } | 4338 | } |
4333 | update_rq_clock(rq); | 4339 | update_rq_clock(rq); |
4334 | on_rq = p->se.on_rq; | 4340 | on_rq = p->se.on_rq; |
4335 | running = task_running(rq, p); | 4341 | running = task_current(rq, p); |
4336 | if (on_rq) { | 4342 | if (on_rq) { |
4337 | deactivate_task(rq, p, 0); | 4343 | deactivate_task(rq, p, 0); |
4338 | if (running) | 4344 | if (running) |
@@ -7101,7 +7107,7 @@ void sched_move_task(struct task_struct *tsk) | |||
7101 | 7107 | ||
7102 | update_rq_clock(rq); | 7108 | update_rq_clock(rq); |
7103 | 7109 | ||
7104 | running = task_running(rq, tsk); | 7110 | running = task_current(rq, tsk); |
7105 | on_rq = tsk->se.on_rq; | 7111 | on_rq = tsk->se.on_rq; |
7106 | 7112 | ||
7107 | if (on_rq) { | 7113 | if (on_rq) { |
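task_current() makes the intent explicit: "is p the task this runqueue has elected as current", which is what the priority-change, runtime-accounting and group-move paths actually ask. task_running() keeps answering the subtly different question "is p on a CPU right now", which diverges from rq->curr == p on architectures using __ARCH_WANT_UNLOCKED_CTXSW, where p->oncpu can lag across a context switch. An abridged, hypothetical sketch of the requeue pattern the changed call sites follow (locking and resched handling omitted):

    on_rq = p->se.on_rq;
    running = task_current(rq, p);  /* not task_running(): the question is "is p rq->curr?" */
    if (on_rq)
            dequeue_task(rq, p, 0);
    if (running)
            p->sched_class->put_prev_task(rq, p);
    /* ... change p's priority, policy or group ... */
    if (running)
            p->sched_class->set_curr_task(rq);
    if (on_rq)
            enqueue_task(rq, p, 0);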
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index c33f0ceb3de9..da7c061e7206 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -511,8 +511,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) | |||
511 | 511 | ||
512 | if (!initial) { | 512 | if (!initial) { |
513 | /* sleeps upto a single latency don't count. */ | 513 | /* sleeps upto a single latency don't count. */ |
514 | if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se) && | 514 | if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se)) |
515 | task_of(se)->policy != SCHED_BATCH) | ||
516 | vruntime -= sysctl_sched_latency; | 515 | vruntime -= sysctl_sched_latency; |
517 | 516 | ||
518 | /* ensure we never gain time by being placed backwards. */ | 517 | /* ensure we never gain time by being placed backwards. */ |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index ee9c8b6529e9..9ba3daa03475 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -208,6 +208,8 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
208 | 208 | ||
209 | static void task_tick_rt(struct rq *rq, struct task_struct *p) | 209 | static void task_tick_rt(struct rq *rq, struct task_struct *p) |
210 | { | 210 | { |
211 | update_curr_rt(rq); | ||
212 | |||
211 | /* | 213 | /* |
212 | * RR tasks need a special form of timeslice management. | 214 | * RR tasks need a special form of timeslice management. |
213 | * FIFO tasks have no timeslices. | 215 | * FIFO tasks have no timeslices. |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 1135de730872..c68f68dcc605 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -225,10 +225,10 @@ static struct ctl_table root_table[] = { | |||
225 | }; | 225 | }; |
226 | 226 | ||
227 | #ifdef CONFIG_SCHED_DEBUG | 227 | #ifdef CONFIG_SCHED_DEBUG |
228 | static unsigned long min_sched_granularity_ns = 100000; /* 100 usecs */ | 228 | static int min_sched_granularity_ns = 100000; /* 100 usecs */ |
229 | static unsigned long max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */ | 229 | static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */ |
230 | static unsigned long min_wakeup_granularity_ns; /* 0 usecs */ | 230 | static int min_wakeup_granularity_ns; /* 0 usecs */ |
231 | static unsigned long max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */ | 231 | static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */ |
232 | #endif | 232 | #endif |
233 | 233 | ||
234 | static struct ctl_table kern_table[] = { | 234 | static struct ctl_table kern_table[] = { |
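These bounds are handed to proc_dointvec_minmax() through the .extra1/.extra2 fields of the corresponding ctl_table entries, and that handler dereferences them as int *. Declaring them unsigned long therefore handed the comparison an object of the wrong size on 64-bit (working only by accident on little-endian), so they become plain ints. An abridged sketch of the kind of entry that consumes them (fields omitted; names believed to match kern_table of the time):

    {
            .procname       = "sched_min_granularity_ns",
            .data           = &sysctl_sched_min_granularity,
            .maxlen         = sizeof(unsigned int),
            .mode           = 0644,
            .proc_handler   = &proc_dointvec_minmax,
            .extra1         = &min_sched_granularity_ns,    /* must point at an int */
            .extra2         = &max_sched_granularity_ns,
    },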
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index aa82d7bf478a..5b86698faa0b 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -384,45 +384,19 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc) | |||
384 | } | 384 | } |
385 | 385 | ||
386 | /* | 386 | /* |
387 | * Reprogram the broadcast device: | ||
388 | * | ||
389 | * Called with tick_broadcast_lock held and interrupts disabled. | ||
390 | */ | ||
391 | static int tick_broadcast_reprogram(void) | ||
392 | { | ||
393 | ktime_t expires = { .tv64 = KTIME_MAX }; | ||
394 | struct tick_device *td; | ||
395 | int cpu; | ||
396 | |||
397 | /* | ||
398 | * Find the event which expires next: | ||
399 | */ | ||
400 | for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS; | ||
401 | cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) { | ||
402 | td = &per_cpu(tick_cpu_device, cpu); | ||
403 | if (td->evtdev->next_event.tv64 < expires.tv64) | ||
404 | expires = td->evtdev->next_event; | ||
405 | } | ||
406 | |||
407 | if (expires.tv64 == KTIME_MAX) | ||
408 | return 0; | ||
409 | |||
410 | return tick_broadcast_set_event(expires, 0); | ||
411 | } | ||
412 | |||
413 | /* | ||
414 | * Handle oneshot mode broadcasting | 387 | * Handle oneshot mode broadcasting |
415 | */ | 388 | */ |
416 | static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) | 389 | static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) |
417 | { | 390 | { |
418 | struct tick_device *td; | 391 | struct tick_device *td; |
419 | cpumask_t mask; | 392 | cpumask_t mask; |
420 | ktime_t now; | 393 | ktime_t now, next_event; |
421 | int cpu; | 394 | int cpu; |
422 | 395 | ||
423 | spin_lock(&tick_broadcast_lock); | 396 | spin_lock(&tick_broadcast_lock); |
424 | again: | 397 | again: |
425 | dev->next_event.tv64 = KTIME_MAX; | 398 | dev->next_event.tv64 = KTIME_MAX; |
399 | next_event.tv64 = KTIME_MAX; | ||
426 | mask = CPU_MASK_NONE; | 400 | mask = CPU_MASK_NONE; |
427 | now = ktime_get(); | 401 | now = ktime_get(); |
428 | /* Find all expired events */ | 402 | /* Find all expired events */ |
@@ -431,19 +405,31 @@ again: | |||
431 | td = &per_cpu(tick_cpu_device, cpu); | 405 | td = &per_cpu(tick_cpu_device, cpu); |
432 | if (td->evtdev->next_event.tv64 <= now.tv64) | 406 | if (td->evtdev->next_event.tv64 <= now.tv64) |
433 | cpu_set(cpu, mask); | 407 | cpu_set(cpu, mask); |
408 | else if (td->evtdev->next_event.tv64 < next_event.tv64) | ||
409 | next_event.tv64 = td->evtdev->next_event.tv64; | ||
434 | } | 410 | } |
435 | 411 | ||
436 | /* | 412 | /* |
437 | * Wakeup the cpus which have an expired event. The broadcast | 413 | * Wakeup the cpus which have an expired event. |
438 | * device is reprogrammed in the return from idle code. | 414 | */ |
415 | tick_do_broadcast(mask); | ||
416 | |||
417 | /* | ||
418 | * Two reasons for reprogram: | ||
419 | * | ||
420 | * - The global event did not expire any CPU local | ||
421 | * events. This happens in dyntick mode, as the maximum PIT | ||
422 | * delta is quite small. | ||
423 | * | ||
424 | * - There are pending events on sleeping CPUs which were not | ||
425 | * in the event mask | ||
439 | */ | 426 | */ |
440 | if (!tick_do_broadcast(mask)) { | 427 | if (next_event.tv64 != KTIME_MAX) { |
441 | /* | 428 | /* |
442 | * The global event did not expire any CPU local | 429 | * Rearm the broadcast device. If event expired, |
443 | * events. This happens in dyntick mode, as the | 430 | * repeat the above |
444 | * maximum PIT delta is quite small. | ||
445 | */ | 431 | */ |
446 | if (tick_broadcast_reprogram()) | 432 | if (tick_broadcast_set_event(next_event, 0)) |
447 | goto again; | 433 | goto again; |
448 | } | 434 | } |
449 | spin_unlock(&tick_broadcast_lock); | 435 | spin_unlock(&tick_broadcast_lock); |
diff --git a/kernel/timer.c b/kernel/timer.c index a05817c021d6..d4527dcef1af 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -1219,11 +1219,11 @@ asmlinkage long sys_sysinfo(struct sysinfo __user *info) | |||
1219 | */ | 1219 | */ |
1220 | static struct lock_class_key base_lock_keys[NR_CPUS]; | 1220 | static struct lock_class_key base_lock_keys[NR_CPUS]; |
1221 | 1221 | ||
1222 | static int __devinit init_timers_cpu(int cpu) | 1222 | static int __cpuinit init_timers_cpu(int cpu) |
1223 | { | 1223 | { |
1224 | int j; | 1224 | int j; |
1225 | tvec_base_t *base; | 1225 | tvec_base_t *base; |
1226 | static char __devinitdata tvec_base_done[NR_CPUS]; | 1226 | static char __cpuinitdata tvec_base_done[NR_CPUS]; |
1227 | 1227 | ||
1228 | if (!tvec_base_done[cpu]) { | 1228 | if (!tvec_base_done[cpu]) { |
1229 | static char boot_done; | 1229 | static char boot_done; |
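init_timers_cpu() is reached from the CPU hotplug notifier, so it must not be discarded after boot when CONFIG_HOTPLUG_CPU is enabled; __devinit/__devinitdata key off device hotplug (CONFIG_HOTPLUG), which is the wrong condition here. Roughly how the replacement annotations resolved at the time (paraphrased from <linux/init.h>; treat as an assumption):

    #ifdef CONFIG_HOTPLUG_CPU
    #define __cpuinit                       /* keep: may run again when a CPU comes online */
    #define __cpuinitdata
    #else
    #define __cpuinit       __init          /* boot-only: discarded after init */
    #define __cpuinitdata   __initdata
    #endif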
diff --git a/lib/rwsem.c b/lib/rwsem.c index cdb4e3d05607..7d02700a4b0e 100644 --- a/lib/rwsem.c +++ b/lib/rwsem.c | |||
@@ -146,7 +146,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading) | |||
146 | /* | 146 | /* |
147 | * wait for a lock to be granted | 147 | * wait for a lock to be granted |
148 | */ | 148 | */ |
149 | static struct rw_semaphore * | 149 | static struct rw_semaphore __sched * |
150 | rwsem_down_failed_common(struct rw_semaphore *sem, | 150 | rwsem_down_failed_common(struct rw_semaphore *sem, |
151 | struct rwsem_waiter *waiter, signed long adjustment) | 151 | struct rwsem_waiter *waiter, signed long adjustment) |
152 | { | 152 | { |
diff --git a/mm/filemap.c b/mm/filemap.c index 188cf5fd3e8d..f4d0cded0e10 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -124,6 +124,18 @@ void __remove_from_page_cache(struct page *page) | |||
124 | mapping->nrpages--; | 124 | mapping->nrpages--; |
125 | __dec_zone_page_state(page, NR_FILE_PAGES); | 125 | __dec_zone_page_state(page, NR_FILE_PAGES); |
126 | BUG_ON(page_mapped(page)); | 126 | BUG_ON(page_mapped(page)); |
127 | |||
128 | /* | ||
129 | * Some filesystems seem to re-dirty the page even after | ||
130 | * the VM has canceled the dirty bit (eg ext3 journaling). | ||
131 | * | ||
132 | * Fix it up by doing a final dirty accounting check after | ||
133 | * having removed the page entirely. | ||
134 | */ | ||
135 | if (PageDirty(page) && mapping_cap_account_dirty(mapping)) { | ||
136 | dec_zone_page_state(page, NR_FILE_DIRTY); | ||
137 | dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); | ||
138 | } | ||
127 | } | 139 | } |
128 | 140 | ||
129 | void remove_from_page_cache(struct page *page) | 141 | void remove_from_page_cache(struct page *page) |