53 files changed, 629 insertions, 310 deletions
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 43268f15004e..6d422979ebaf 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -142,6 +142,11 @@ static inline const char *eeh_pci_name(struct pci_dev *pdev)
 	return pdev ? pci_name(pdev) : "<null>";
 }
 
+static inline const char *eeh_driver_name(struct pci_dev *pdev)
+{
+	return (pdev && pdev->driver) ? pdev->driver->name : "<null>";
+}
+
 #endif /* CONFIG_EEH */
 
 #else /* CONFIG_PCI */
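The helper added above is what lets the EEH code print a driver name without dereferencing a NULL pdev->driver; the call site in the pseries/eeh.c hunk further down reads:

	printk (KERN_ERR "EEH: Might be infinite loop in %s driver\n",
		eeh_driver_name(dev));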
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 78a205162fd7..84cc7840cd18 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -83,8 +83,18 @@ struct pt_regs {
 
 #ifndef __ASSEMBLY__
 
-#define instruction_pointer(regs)	((regs)->nip)
-#define user_stack_pointer(regs)	((regs)->gpr[1])
+#define GET_IP(regs)		((regs)->nip)
+#define GET_USP(regs)		((regs)->gpr[1])
+#define GET_FP(regs)		(0)
+#define SET_FP(regs, val)
+
+#ifdef CONFIG_SMP
+extern unsigned long profile_pc(struct pt_regs *regs);
+#define profile_pc profile_pc
+#endif
+
+#include <asm-generic/ptrace.h>
+
 #define kernel_stack_pointer(regs)	((regs)->gpr[1])
 static inline int is_syscall_success(struct pt_regs *regs)
 {
@@ -99,12 +109,6 @@ static inline long regs_return_value(struct pt_regs *regs)
 	return -regs->gpr[3];
 }
 
-#ifdef CONFIG_SMP
-extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
-
 #ifdef __powerpc64__
 #define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
 #else
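For context on the GET_IP/GET_USP/GET_FP/SET_FP macros above: <asm-generic/ptrace.h> builds the generic accessors from those per-arch macros and only falls back to a default profile_pc() when the architecture has not provided one, which is why the SMP-only prototype is followed by "#define profile_pc profile_pc". A paraphrased sketch of the generic header (not the verbatim file):

	static inline unsigned long instruction_pointer(struct pt_regs *regs)
	{
		return GET_IP(regs);
	}

	static inline unsigned long user_stack_pointer(struct pt_regs *regs)
	{
		return GET_USP(regs);
	}

	#ifndef profile_pc
	#define profile_pc(regs) instruction_pointer(regs)
	#endif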
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 701d4aceb4f4..01e2877e8e04 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -118,10 +118,14 @@ static inline notrace void set_soft_enabled(unsigned long enable)
 static inline notrace void decrementer_check_overflow(void)
 {
 	u64 now = get_tb_or_rtc();
-	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+	u64 *next_tb;
+
+	preempt_disable();
+	next_tb = &__get_cpu_var(decrementers_next_tb);
 
 	if (now >= *next_tb)
 		set_dec(1);
+	preempt_enable();
 }
 
 notrace void arch_local_irq_restore(unsigned long en)
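Read as a whole, the function after this change keeps preemption disabled for the entire window in which the per-CPU pointer from __get_cpu_var() is used, so the task cannot migrate between looking up decrementers_next_tb and programming the decrementer:

	static inline notrace void decrementer_check_overflow(void)
	{
		u64 now = get_tb_or_rtc();
		u64 *next_tb;

		preempt_disable();	/* stay on this CPU while the per-CPU value is in use */
		next_tb = &__get_cpu_var(decrementers_next_tb);
		if (now >= *next_tb)
			set_dec(1);
		preempt_enable();
	}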
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 517b1d8f455b..9f843cdfee9e 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -716,7 +716,6 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
 	int cpu;
 
 	slb_set_size(SLB_MIN_SIZE);
-	stop_topology_update();
 	printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());
 
 	while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
@@ -732,7 +731,6 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
 	rc = atomic_read(&data->error);
 
 	atomic_set(&data->error, rc);
-	start_topology_update();
 	pSeries_coalesce_init();
 
 	if (wake_when_done) {
@@ -846,6 +844,7 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
 	atomic_set(&data.error, 0);
 	data.token = rtas_token("ibm,suspend-me");
 	data.complete = &done;
+	stop_topology_update();
 
 	/* Call function on all CPUs. One of us will make the
 	 * rtas call
@@ -858,6 +857,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
 	if (atomic_read(&data.error) != 0)
 		printk(KERN_ERR "Error doing global join\n");
 
+	start_topology_update();
+
 	return atomic_read(&data.error);
 }
 #else /* CONFIG_PPC_PSERIES */
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index a70bc1e385eb..f92b9ef7340e 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -52,32 +52,38 @@ static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type)
 
 static unsigned int pnv_get_one_msi(struct pnv_phb *phb)
 {
-	unsigned int id;
+	unsigned long flags;
+	unsigned int id, rc;
+
+	spin_lock_irqsave(&phb->lock, flags);
 
-	spin_lock(&phb->lock);
 	id = find_next_zero_bit(phb->msi_map, phb->msi_count, phb->msi_next);
 	if (id >= phb->msi_count && phb->msi_next)
 		id = find_next_zero_bit(phb->msi_map, phb->msi_count, 0);
 	if (id >= phb->msi_count) {
-		spin_unlock(&phb->lock);
-		return 0;
+		rc = 0;
+		goto out;
 	}
 	__set_bit(id, phb->msi_map);
-	spin_unlock(&phb->lock);
-	return id + phb->msi_base;
+	rc = id + phb->msi_base;
+out:
+	spin_unlock_irqrestore(&phb->lock, flags);
+	return rc;
 }
 
 static void pnv_put_msi(struct pnv_phb *phb, unsigned int hwirq)
 {
+	unsigned long flags;
 	unsigned int id;
 
 	if (WARN_ON(hwirq < phb->msi_base ||
 		    hwirq >= (phb->msi_base + phb->msi_count)))
 		return;
 	id = hwirq - phb->msi_base;
-	spin_lock(&phb->lock);
+
+	spin_lock_irqsave(&phb->lock, flags);
 	__clear_bit(id, phb->msi_map);
-	spin_unlock(&phb->lock);
+	spin_unlock_irqrestore(&phb->lock, flags);
 }
 
 static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
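The switch to the _irqsave variants makes phb->lock safe to take regardless of the caller's interrupt state: the flags word preserves and restores the previous IRQ setting instead of unconditionally re-enabling interrupts on unlock. A condensed sketch of the resulting allocate path (hypothetical function name; the wrap-around retry from the real function is omitted):

	static unsigned int example_get_one_msi(struct pnv_phb *phb)
	{
		unsigned long flags;
		unsigned int id, rc = 0;

		spin_lock_irqsave(&phb->lock, flags);
		id = find_next_zero_bit(phb->msi_map, phb->msi_count, phb->msi_next);
		if (id < phb->msi_count) {
			__set_bit(id, phb->msi_map);	/* reserve the MSI slot */
			rc = id + phb->msi_base;
		}
		spin_unlock_irqrestore(&phb->lock, flags);
		return rc;
	}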
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 565869022e3d..c0b40af4ce4f 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -551,9 +551,9 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
 		printk (KERN_ERR "EEH: %d reads ignored for recovering device at "
 			"location=%s driver=%s pci addr=%s\n",
 			pdn->eeh_check_count, location,
-			dev->driver->name, eeh_pci_name(dev));
+			eeh_driver_name(dev), eeh_pci_name(dev));
 		printk (KERN_ERR "EEH: Might be infinite loop in %s driver\n",
-			dev->driver->name);
+			eeh_driver_name(dev));
 		dump_stack();
 	}
 	goto dn_unlock;
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index b84a8b2238dd..47226e04126d 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -24,6 +24,7 @@
 #include <asm/machdep.h>
 #include <asm/mmu.h>
 #include <asm/rtas.h>
+#include <asm/topology.h>
 
 static u64 stream_id;
 static struct device suspend_dev;
@@ -138,8 +139,11 @@ static ssize_t store_hibernate(struct device *dev,
 		ssleep(1);
 	} while (rc == -EAGAIN);
 
-	if (!rc)
+	if (!rc) {
+		stop_topology_update();
 		rc = pm_suspend(PM_SUSPEND_MEM);
+		start_topology_update();
+	}
 
 	stream_id = 0;
 
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c
index 576874392543..97fe82ee8633 100644
--- a/arch/powerpc/platforms/wsp/ics.c
+++ b/arch/powerpc/platforms/wsp/ics.c
@@ -346,7 +346,7 @@ static int wsp_chip_set_affinity(struct irq_data *d,
 	 * For the moment only implement delivery to all cpus or one cpu.
 	 * Get current irq_server for the given irq
 	 */
-	ret = cache_hwirq_map(ics, d->irq, cpumask);
+	ret = cache_hwirq_map(ics, hw_irq, cpumask);
 	if (ret == -1) {
 		char cpulist[128];
 		cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
diff --git a/arch/powerpc/platforms/wsp/wsp_pci.c b/arch/powerpc/platforms/wsp/wsp_pci.c
index e0262cd0e2d3..d24b3acf858e 100644
--- a/arch/powerpc/platforms/wsp/wsp_pci.c
+++ b/arch/powerpc/platforms/wsp/wsp_pci.c
@@ -468,15 +468,15 @@ static void __init wsp_pcie_configure_hw(struct pci_controller *hose)
 #define DUMP_REG(x) \
 	pr_debug("%-30s : 0x%016llx\n", #x, in_be64(hose->cfg_data + x))
 
-#ifdef CONFIG_WSP_DD1_WORKAROUND_BAD_PCIE_CLASS
-	/* WSP DD1 has a bogus class code by default in the PCI-E
-	 * root complex's built-in P2P bridge */
+	/*
+	 * Some WSP variants has a bogus class code by default in the PCI-E
+	 * root complex's built-in P2P bridge
+	 */
 	val = in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1);
 	pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", val);
 	out_be64(hose->cfg_data + PCIE_REG_SYS_CFG1,
 		 (val & ~PCIE_REG_SYS_CFG1_CLASS_CODE) | (PCI_CLASS_BRIDGE_PCI << 8));
 	pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1));
-#endif /* CONFIG_WSP_DD1_WORKAROUND_BAD_PCIE_CLASS */
 
 #ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
 	/* XXX Disable TCE caching, it doesn't work on DD1 */
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index a29571821b99..a850b4d8d14d 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -29,8 +29,8 @@ extern unsigned int sig_xstate_size;
 extern void fpu_init(void);
 extern void mxcsr_feature_mask_init(void);
 extern int init_fpu(struct task_struct *child);
+extern void __math_state_restore(struct task_struct *);
 extern void math_state_restore(void);
-extern void __math_state_restore(void);
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
 
 extern user_regset_active_fn fpregs_active, xfpregs_active;
@@ -212,19 +212,11 @@ static inline void fpu_fxsave(struct fpu *fpu)
 
 #endif /* CONFIG_X86_64 */
 
-/* We need a safe address that is cheap to find and that is already
-   in L1 during context switch. The best choices are unfortunately
-   different for UP and SMP */
-#ifdef CONFIG_SMP
-#define safe_address (__per_cpu_offset[0])
-#else
-#define safe_address (__get_cpu_var(kernel_cpustat).cpustat[CPUTIME_USER])
-#endif
-
 /*
- * These must be called with preempt disabled
+ * These must be called with preempt disabled. Returns
+ * 'true' if the FPU state is still intact.
  */
-static inline void fpu_save_init(struct fpu *fpu)
+static inline int fpu_save_init(struct fpu *fpu)
 {
 	if (use_xsave()) {
 		fpu_xsave(fpu);
@@ -233,33 +225,33 @@ static inline void fpu_save_init(struct fpu *fpu)
 		 * xsave header may indicate the init state of the FP.
 		 */
 		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
-			return;
+			return 1;
 	} else if (use_fxsr()) {
 		fpu_fxsave(fpu);
 	} else {
 		asm volatile("fnsave %[fx]; fwait"
 			     : [fx] "=m" (fpu->state->fsave));
-		return;
+		return 0;
 	}
 
-	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
+	/*
+	 * If exceptions are pending, we need to clear them so
+	 * that we don't randomly get exceptions later.
+	 *
+	 * FIXME! Is this perhaps only true for the old-style
+	 * irq13 case? Maybe we could leave the x87 state
+	 * intact otherwise?
+	 */
+	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
 		asm volatile("fnclex");
-
-	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
-	   is pending.  Clear the x87 state here by setting it to fixed
-	   values. safe_address is a random variable that should be in L1 */
-	alternative_input(
-		ASM_NOP8 ASM_NOP2,
-		"emms\n\t"		/* clear stack tags */
-		"fildl %P[addr]",	/* set F?P to defined value */
-		X86_FEATURE_FXSAVE_LEAK,
-		[addr] "m" (safe_address));
+		return 0;
+	}
+	return 1;
 }
 
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline int __save_init_fpu(struct task_struct *tsk)
 {
-	fpu_save_init(&tsk->thread.fpu);
-	task_thread_info(tsk)->status &= ~TS_USEDFPU;
+	return fpu_save_init(&tsk->thread.fpu);
 }
 
 static inline int fpu_fxrstor_checking(struct fpu *fpu)
@@ -281,29 +273,128 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
 }
 
 /*
- * Signal frame handlers...
+ * Software FPU state helpers. Careful: these need to
+ * be preemption protection *and* they need to be
+ * properly paired with the CR0.TS changes!
  */
-extern int save_i387_xstate(void __user *buf);
-extern int restore_i387_xstate(void __user *buf);
+static inline int __thread_has_fpu(struct task_struct *tsk)
+{
+	return tsk->thread.has_fpu;
+}
 
-static inline void __unlazy_fpu(struct task_struct *tsk)
+/* Must be paired with an 'stts' after! */
+static inline void __thread_clear_has_fpu(struct task_struct *tsk)
 {
-	if (task_thread_info(tsk)->status & TS_USEDFPU) {
-		__save_init_fpu(tsk);
-		stts();
-	} else
-		tsk->fpu_counter = 0;
+	tsk->thread.has_fpu = 0;
+}
+
+/* Must be paired with a 'clts' before! */
+static inline void __thread_set_has_fpu(struct task_struct *tsk)
+{
+	tsk->thread.has_fpu = 1;
+}
+
+/*
+ * Encapsulate the CR0.TS handling together with the
+ * software flag.
+ *
+ * These generally need preemption protection to work,
+ * do try to avoid using these on their own.
+ */
+static inline void __thread_fpu_end(struct task_struct *tsk)
+{
+	__thread_clear_has_fpu(tsk);
+	stts();
+}
+
+static inline void __thread_fpu_begin(struct task_struct *tsk)
+{
+	clts();
+	__thread_set_has_fpu(tsk);
+}
+
+/*
+ * FPU state switching for scheduling.
+ *
+ * This is a two-stage process:
+ *
+ *  - switch_fpu_prepare() saves the old state and
+ *    sets the new state of the CR0.TS bit. This is
+ *    done within the context of the old process.
+ *
+ *  - switch_fpu_finish() restores the new state as
+ *    necessary.
+ */
+typedef struct { int preload; } fpu_switch_t;
+
+/*
+ * FIXME! We could do a totally lazy restore, but we need to
+ * add a per-cpu "this was the task that last touched the FPU
+ * on this CPU" variable, and the task needs to have a "I last
+ * touched the FPU on this CPU" and check them.
+ *
+ * We don't do that yet, so "fpu_lazy_restore()" always returns
+ * false, but some day..
+ */
+#define fpu_lazy_restore(tsk) (0)
+#define fpu_lazy_state_intact(tsk) do { } while (0)
+
+static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new)
+{
+	fpu_switch_t fpu;
+
+	fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
+	if (__thread_has_fpu(old)) {
+		if (__save_init_fpu(old))
+			fpu_lazy_state_intact(old);
+		__thread_clear_has_fpu(old);
+		old->fpu_counter++;
+
+		/* Don't change CR0.TS if we just switch! */
+		if (fpu.preload) {
+			__thread_set_has_fpu(new);
+			prefetch(new->thread.fpu.state);
+		} else
+			stts();
+	} else {
+		old->fpu_counter = 0;
+		if (fpu.preload) {
+			if (fpu_lazy_restore(new))
+				fpu.preload = 0;
+			else
+				prefetch(new->thread.fpu.state);
+			__thread_fpu_begin(new);
+		}
+	}
+	return fpu;
 }
 
+/*
+ * By the time this gets called, we've already cleared CR0.TS and
+ * given the process the FPU if we are going to preload the FPU
+ * state - all we need to do is to conditionally restore the register
+ * state itself.
+ */
+static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
+{
+	if (fpu.preload)
+		__math_state_restore(new);
+}
+
+/*
+ * Signal frame handlers...
+ */
+extern int save_i387_xstate(void __user *buf);
+extern int restore_i387_xstate(void __user *buf);
+
 static inline void __clear_fpu(struct task_struct *tsk)
 {
-	if (task_thread_info(tsk)->status & TS_USEDFPU) {
+	if (__thread_has_fpu(tsk)) {
 		/* Ignore delayed exceptions from user space */
 		asm volatile("1: fwait\n"
 			     "2:\n"
 			     _ASM_EXTABLE(1b, 2b));
-		task_thread_info(tsk)->status &= ~TS_USEDFPU;
-		stts();
+		__thread_fpu_end(tsk);
 	}
 }
 
@@ -311,14 +402,14 @@ static inline void __clear_fpu(struct task_struct *tsk)
 * Were we in an interrupt that interrupted kernel mode?
 *
 * We can do a kernel_fpu_begin/end() pair *ONLY* if that
- * pair does nothing at all: TS_USEDFPU must be clear (so
+ * pair does nothing at all: the thread must not have fpu (so
 * that we don't try to save the FPU state), and TS must
 * be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 */
 static inline bool interrupted_kernel_fpu_idle(void)
 {
-	return !(current_thread_info()->status & TS_USEDFPU) &&
+	return !__thread_has_fpu(current) &&
 		(read_cr0() & X86_CR0_TS);
 }
 
@@ -352,13 +443,15 @@ static inline bool irq_fpu_usable(void)
 
 static inline void kernel_fpu_begin(void)
 {
-	struct thread_info *me = current_thread_info();
+	struct task_struct *me = current;
 
 	WARN_ON_ONCE(!irq_fpu_usable());
 	preempt_disable();
-	if (me->status & TS_USEDFPU)
-		__save_init_fpu(me->task);
-	else
+	if (__thread_has_fpu(me)) {
+		__save_init_fpu(me);
+		__thread_clear_has_fpu(me);
+		/* We do 'stts()' in kernel_fpu_end() */
+	} else
 		clts();
 }
 
@@ -400,21 +493,64 @@ static inline void irq_ts_restore(int TS_state)
 }
 
 /*
+ * The question "does this thread have fpu access?"
+ * is slightly racy, since preemption could come in
+ * and revoke it immediately after the test.
+ *
+ * However, even in that very unlikely scenario,
+ * we can just assume we have FPU access - typically
+ * to save the FP state - we'll just take a #NM
+ * fault and get the FPU access back.
+ *
+ * The actual user_fpu_begin/end() functions
+ * need to be preemption-safe, though.
+ *
+ * NOTE! user_fpu_end() must be used only after you
+ * have saved the FP state, and user_fpu_begin() must
+ * be used only immediately before restoring it.
+ * These functions do not do any save/restore on
+ * their own.
+ */
+static inline int user_has_fpu(void)
+{
+	return __thread_has_fpu(current);
+}
+
+static inline void user_fpu_end(void)
+{
+	preempt_disable();
+	__thread_fpu_end(current);
+	preempt_enable();
+}
+
+static inline void user_fpu_begin(void)
+{
+	preempt_disable();
+	if (!user_has_fpu())
+		__thread_fpu_begin(current);
+	preempt_enable();
+}
+
+/*
 * These disable preemption on their own and are safe
 */
 static inline void save_init_fpu(struct task_struct *tsk)
 {
-	WARN_ON_ONCE(task_thread_info(tsk)->status & TS_USEDFPU);
+	WARN_ON_ONCE(!__thread_has_fpu(tsk));
 	preempt_disable();
 	__save_init_fpu(tsk);
-	stts();
+	__thread_fpu_end(tsk);
 	preempt_enable();
 }
 
 static inline void unlazy_fpu(struct task_struct *tsk)
 {
 	preempt_disable();
-	__unlazy_fpu(tsk);
+	if (__thread_has_fpu(tsk)) {
+		__save_init_fpu(tsk);
+		__thread_fpu_end(tsk);
+	} else
+		tsk->fpu_counter = 0;
 	preempt_enable();
 }
 
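The new fpu_switch_t API splits the FPU hand-off into a prepare step that runs while the old task is still current (saving its state and setting CR0.TS as needed) and a finish step that runs once the new task's other state is in place (restoring registers if a preload was decided on). The process_32.c and process_64.c hunks below use it in exactly this shape; condensed:

	__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	{
		fpu_switch_t fpu;

		fpu = switch_fpu_prepare(prev_p, next_p);	/* runs in the old task's context */
		/* ... reload esp0, TLS, segment registers, etc. ... */
		switch_fpu_finish(next_p, fpu);			/* conditionally restore FPU registers */
		return prev_p;
	}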
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index aa9088c26931..f7c89e231c6c 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -454,6 +454,7 @@ struct thread_struct {
 	unsigned long		trap_no;
 	unsigned long		error_code;
 	/* floating point and extended processor state */
+	unsigned long		has_fpu;
 	struct fpu		fpu;
 #ifdef CONFIG_X86_32
 	/* Virtual 86 mode info */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index bc817cd8b443..cfd8144d5527 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -247,8 +247,6 @@ static inline struct thread_info *current_thread_info(void)
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
-#define TS_USEDFPU		0x0001	/* FPU was used by this task
-					   this quantum (SMP) */
 #define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/
 #define TS_POLLING		0x0004	/* idle task polling need_resched,
 					   skip sending interrupt */
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 485204f58cda..80bfe1ab0031 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -299,22 +299,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 				 *next = &next_p->thread;
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
-	bool preload_fpu;
+	fpu_switch_t fpu;
 
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
-	/*
-	 * If the task has used fpu the last 5 timeslices, just do a full
-	 * restore of the math state immediately to avoid the trap; the
-	 * chances of needing FPU soon are obviously high now
-	 */
-	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
-
-	__unlazy_fpu(prev_p);
-
-	/* we're going to use this soon, after a few expensive things */
-	if (preload_fpu)
-		prefetch(next->fpu.state);
+	fpu = switch_fpu_prepare(prev_p, next_p);
 
 	/*
 	 * Reload esp0.
@@ -354,11 +343,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		       task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
 		__switch_to_xtra(prev_p, next_p, tss);
 
-	/* If we're going to preload the fpu context, make sure clts
-	   is run while we're batching the cpu state updates. */
-	if (preload_fpu)
-		clts();
-
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
 	 * This must be done before restoring TLS segments so
@@ -368,15 +352,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	arch_end_context_switch(next_p);
 
-	if (preload_fpu)
-		__math_state_restore();
-
 	/*
 	 * Restore %gs if needed (which is common)
 	 */
 	if (prev->gs | next->gs)
 		lazy_load_gs(next->gs);
 
+	switch_fpu_finish(next_p, fpu);
+
 	percpu_write(current_task, next_p);
 
 	return prev_p;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9b9fe4a85c87..1fd94bc4279d 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -386,18 +386,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
 	unsigned fsindex, gsindex;
-	bool preload_fpu;
+	fpu_switch_t fpu;
 
-	/*
-	 * If the task has used fpu the last 5 timeslices, just do a full
-	 * restore of the math state immediately to avoid the trap; the
-	 * chances of needing FPU soon are obviously high now
-	 */
-	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
-
-	/* we're going to use this soon, after a few expensive things */
-	if (preload_fpu)
-		prefetch(next->fpu.state);
+	fpu = switch_fpu_prepare(prev_p, next_p);
 
 	/*
 	 * Reload esp0, LDT and the page table pointer:
@@ -427,13 +418,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	load_TLS(next, cpu);
 
-	/* Must be after DS reload */
-	__unlazy_fpu(prev_p);
-
-	/* Make sure cpu is ready for new context */
-	if (preload_fpu)
-		clts();
-
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
 	 * This must be done before restoring TLS segments so
@@ -474,6 +458,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
 	prev->gsindex = gsindex;
 
+	switch_fpu_finish(next_p, fpu);
+
 	/*
 	 * Switch the PDA and FPU contexts.
 	 */
@@ -492,13 +478,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		       task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
 		__switch_to_xtra(prev_p, next_p, tss);
 
-	/*
-	 * Preload the FPU context, now that we've determined that the
-	 * task is likely to be using it.
-	 */
-	if (preload_fpu)
-		__math_state_restore();
-
 	return prev_p;
 }
 
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 8ba27dbc107a..77da5b475ad2 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -571,25 +571,34 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
 }
 
 /*
- * __math_state_restore assumes that cr0.TS is already clear and the
- * fpu state is all ready for use.  Used during context switch.
+ * This gets called with the process already owning the
+ * FPU state, and with CR0.TS cleared. It just needs to
+ * restore the FPU register state.
 */
-void __math_state_restore(void)
+void __math_state_restore(struct task_struct *tsk)
 {
-	struct thread_info *thread = current_thread_info();
-	struct task_struct *tsk = thread->task;
+	/* We need a safe address that is cheap to find and that is already
+	   in L1. We've just brought in "tsk->thread.has_fpu", so use that */
+#define safe_address (tsk->thread.has_fpu)
+
+	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
+	   is pending.  Clear the x87 state here by setting it to fixed
+	   values. safe_address is a random variable that should be in L1 */
+	alternative_input(
+		ASM_NOP8 ASM_NOP2,
+		"emms\n\t"		/* clear stack tags */
+		"fildl %P[addr]",	/* set F?P to defined value */
+		X86_FEATURE_FXSAVE_LEAK,
+		[addr] "m" (safe_address));
 
 	/*
	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
	 */
 	if (unlikely(restore_fpu_checking(tsk))) {
-		stts();
+		__thread_fpu_end(tsk);
 		force_sig(SIGSEGV, tsk);
 		return;
 	}
-
-	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
-	tsk->fpu_counter++;
 }
 
 /*
@@ -604,8 +613,7 @@ void __math_state_restore(void)
 */
 void math_state_restore(void)
 {
-	struct thread_info *thread = current_thread_info();
-	struct task_struct *tsk = thread->task;
+	struct task_struct *tsk = current;
 
 	if (!tsk_used_math(tsk)) {
 		local_irq_enable();
@@ -622,16 +630,16 @@ void math_state_restore(void)
 		local_irq_disable();
 	}
 
-	clts();				/* Allow maths ops (or we recurse) */
+	__thread_fpu_begin(tsk);
+	__math_state_restore(tsk);
 
-	__math_state_restore();
+	tsk->fpu_counter++;
 }
 EXPORT_SYMBOL_GPL(math_state_restore);
 
 dotraplinkage void __kprobes
 do_device_not_available(struct pt_regs *regs, long error_code)
 {
-	WARN_ON_ONCE(!user_mode_vm(regs));
 #ifdef CONFIG_MATH_EMULATION
 	if (read_cr0() & X86_CR0_EM) {
 		struct math_emu_info info = { };
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index a3911343976b..711091114119 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -47,7 +47,7 @@ void __sanitize_i387_state(struct task_struct *tsk)
 	if (!fx)
 		return;
 
-	BUG_ON(task_thread_info(tsk)->status & TS_USEDFPU);
+	BUG_ON(__thread_has_fpu(tsk));
 
 	xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv;
 
@@ -168,7 +168,7 @@ int save_i387_xstate(void __user *buf)
 	if (!used_math())
 		return 0;
 
-	if (task_thread_info(tsk)->status & TS_USEDFPU) {
+	if (user_has_fpu()) {
 		if (use_xsave())
 			err = xsave_user(buf);
 		else
@@ -176,8 +176,7 @@ int save_i387_xstate(void __user *buf)
 
 		if (err)
 			return err;
-		task_thread_info(tsk)->status &= ~TS_USEDFPU;
-		stts();
+		user_fpu_end();
 	} else {
 		sanitize_i387_state(tsk);
 		if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave,
@@ -292,10 +291,7 @@ int restore_i387_xstate(void __user *buf)
 			return err;
 	}
 
-	if (!(task_thread_info(current)->status & TS_USEDFPU)) {
-		clts();
-		task_thread_info(current)->status |= TS_USEDFPU;
-	}
+	user_fpu_begin();
 	if (use_xsave())
 		err = restore_user_xstate(buf);
 	else
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d29216c462b3..3b4c8d8ad906 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1457,7 +1457,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 #ifdef CONFIG_X86_64
 	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
 #endif
-	if (current_thread_info()->status & TS_USEDFPU)
+	if (__thread_has_fpu(current))
 		clts();
 	load_gdt(&__get_cpu_var(host_gdt));
 }
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 492ade8c978e..d99346ea8fdb 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -374,7 +374,7 @@ int __init pci_xen_init(void)
 
 int __init pci_xen_hvm_init(void)
 {
-	if (!xen_feature(XENFEAT_hvm_pirqs))
+	if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
 		return 0;
 
 #ifdef CONFIG_ACPI
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 041d4fe9dfe4..501d4e0244ba 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -409,6 +409,13 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
 	play_dead_common();
 	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
 	cpu_bringup();
+	/*
+	 * Balance out the preempt calls - as we are running in cpu_idle
+	 * loop which has been called at bootup from cpu_bringup_and_idle.
+	 * The cpucpu_bringup_and_idle called cpu_bringup which made a
+	 * preempt_disable() So this preempt_enable will balance it out.
+	 */
+	preempt_enable();
 }
 
 #else /* !CONFIG_HOTPLUG_CPU */
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index f04af931a682..107f6f7be5e1 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -31,11 +31,6 @@ static inline u64 Maj(u64 x, u64 y, u64 z)
 	return (x & y) | (z & (x | y));
 }
 
-static inline u64 RORu64(u64 x, u64 y)
-{
-	return (x >> y) | (x << (64 - y));
-}
-
 static const u64 sha512_K[80] = {
 	0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL,
 	0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
@@ -66,10 +61,10 @@ static const u64 sha512_K[80] = {
 	0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL,
 };
 
-#define e0(x)       (RORu64(x,28) ^ RORu64(x,34) ^ RORu64(x,39))
-#define e1(x)       (RORu64(x,14) ^ RORu64(x,18) ^ RORu64(x,41))
-#define s0(x)       (RORu64(x, 1) ^ RORu64(x, 8) ^ (x >> 7))
-#define s1(x)       (RORu64(x,19) ^ RORu64(x,61) ^ (x >> 6))
+#define e0(x)       (ror64(x,28) ^ ror64(x,34) ^ ror64(x,39))
+#define e1(x)       (ror64(x,14) ^ ror64(x,18) ^ ror64(x,41))
+#define s0(x)       (ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7))
+#define s1(x)       (ror64(x,19) ^ ror64(x,61) ^ (x >> 6))
 
 static inline void LOAD_OP(int I, u64 *W, const u8 *input)
 {
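ror64() comes from the generic bit helpers and performs the same 64-bit right-rotate the removed RORu64() implemented, so the round macros are unchanged in behaviour. Roughly (paraphrased, not the verbatim header):

	static inline __u64 ror64(__u64 word, unsigned int shift)
	{
		return (word >> shift) | (word << (64 - shift));
	}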
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 0cad48a284a8..c6a383d0244d 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1694,6 +1694,7 @@ static int mmc_add_disk(struct mmc_blk_data *md)
 
 		md->power_ro_lock.show = power_ro_lock_show;
 		md->power_ro_lock.store = power_ro_lock_store;
+		sysfs_attr_init(&md->power_ro_lock.attr);
 		md->power_ro_lock.attr.mode = mode;
 		md->power_ro_lock.attr.name =
 					"ro_lock_until_next_power_on";
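sysfs_attr_init() gives a dynamically created attribute its own lockdep key; without it, lockdep-enabled builds warn when the attribute is registered. The resulting initialization order, taken from the hunk above:

	sysfs_attr_init(&md->power_ro_lock.attr);	/* must come before the attribute is registered */
	md->power_ro_lock.attr.mode = mode;
	md->power_ro_lock.attr.name = "ro_lock_until_next_power_on";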
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index f545a3e6eb80..690255c7d4dc 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -290,8 +290,11 @@ static void mmc_wait_for_req_done(struct mmc_host *host,
 static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
		 bool is_first_req)
 {
-	if (host->ops->pre_req)
+	if (host->ops->pre_req) {
+		mmc_host_clk_hold(host);
 		host->ops->pre_req(host, mrq, is_first_req);
+		mmc_host_clk_release(host);
+	}
 }
 
 /**
@@ -306,8 +309,11 @@ static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
 static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
 {
-	if (host->ops->post_req)
+	if (host->ops->post_req) {
+		mmc_host_clk_hold(host);
 		host->ops->post_req(host, mrq, err);
+		mmc_host_clk_release(host);
+	}
 }
 
 /**
@@ -620,7 +626,9 @@ int mmc_host_enable(struct mmc_host *host)
 	int err;
 
 	host->en_dis_recurs = 1;
+	mmc_host_clk_hold(host);
 	err = host->ops->enable(host);
+	mmc_host_clk_release(host);
 	host->en_dis_recurs = 0;
 
 	if (err) {
@@ -640,7 +648,9 @@ static int mmc_host_do_disable(struct mmc_host *host, int lazy)
 	int err;
 
 	host->en_dis_recurs = 1;
+	mmc_host_clk_hold(host);
 	err = host->ops->disable(host, lazy);
+	mmc_host_clk_release(host);
 	host->en_dis_recurs = 0;
 
 	if (err < 0) {
@@ -1121,6 +1131,10 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
	 * might not allow this operation
	 */
	voltage = regulator_get_voltage(supply);
+
+	if (mmc->caps2 & MMC_CAP2_BROKEN_VOLTAGE)
+		min_uV = max_uV = voltage;
+
	if (voltage < 0)
		result = voltage;
	else if (voltage < min_uV || voltage > max_uV)
@@ -1203,8 +1217,11 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11
 
	host->ios.signal_voltage = signal_voltage;
 
-	if (host->ops->start_signal_voltage_switch)
+	if (host->ops->start_signal_voltage_switch) {
+		mmc_host_clk_hold(host);
		err = host->ops->start_signal_voltage_switch(host, &host->ios);
+		mmc_host_clk_release(host);
+	}
 
	return err;
 }
@@ -1239,6 +1256,7 @@ static void mmc_poweroff_notify(struct mmc_host *host)
	int err = 0;
 
	card = host->card;
+	mmc_claim_host(host);
 
	/*
	 * Send power notify command only if card
@@ -1269,6 +1287,7 @@ static void mmc_poweroff_notify(struct mmc_host *host)
		/* Set the card state to no notification after the poweroff */
		card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
	}
+	mmc_release_host(host);
 }
 
 /*
@@ -1327,12 +1346,28 @@ static void mmc_power_up(struct mmc_host *host)
 
 void mmc_power_off(struct mmc_host *host)
 {
+	int err = 0;
	mmc_host_clk_hold(host);
 
	host->ios.clock = 0;
	host->ios.vdd = 0;
 
-	mmc_poweroff_notify(host);
+	/*
+	 * For eMMC 4.5 device send AWAKE command before
+	 * POWER_OFF_NOTIFY command, because in sleep state
+	 * eMMC 4.5 devices respond to only RESET and AWAKE cmd
+	 */
+	if (host->card && mmc_card_is_sleep(host->card) &&
+		host->bus_ops->resume) {
+		err = host->bus_ops->resume(host);
+
+		if (!err)
+			mmc_poweroff_notify(host);
+		else
+			pr_warning("%s: error %d during resume "
+				   "(continue with poweroff sequence)\n",
+				   mmc_hostname(host), err);
+	}
 
	/*
	 * Reset ocr mask to be the highest possible voltage supported for
@@ -2386,12 +2421,6 @@ int mmc_suspend_host(struct mmc_host *host)
	 */
	if (mmc_try_claim_host(host)) {
		if (host->bus_ops->suspend) {
-			/*
-			 * For eMMC 4.5 device send notify command
-			 * before sleep, because in sleep state eMMC 4.5
-			 * devices respond to only RESET and AWAKE cmd
-			 */
-			mmc_poweroff_notify(host);
			err = host->bus_ops->suspend(host);
		}
		mmc_do_release_host(host);
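Every host-ops callback touched in the core.c hunks is now bracketed by mmc_host_clk_hold()/mmc_host_clk_release(), so aggressive clock gating cannot switch the controller clock off while a driver callback is running. The wrapper shape, using the pre_req hook from the first hunk as the example:

	if (host->ops->pre_req) {
		mmc_host_clk_hold(host);	/* make sure the host clock is running */
		host->ops->pre_req(host, mrq, is_first_req);
		mmc_host_clk_release(host);	/* allow gating again once the op is done */
	}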
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index fb8a5cd2e4a1..08a7852ade44 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -14,27 +14,6 @@
 
 int mmc_register_host_class(void);
 void mmc_unregister_host_class(void);
-
-#ifdef CONFIG_MMC_CLKGATE
-void mmc_host_clk_hold(struct mmc_host *host);
-void mmc_host_clk_release(struct mmc_host *host);
-unsigned int mmc_host_clk_rate(struct mmc_host *host);
-
-#else
-static inline void mmc_host_clk_hold(struct mmc_host *host)
-{
-}
-
-static inline void mmc_host_clk_release(struct mmc_host *host)
-{
-}
-
-static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
-{
-	return host->ios.clock;
-}
-#endif
-
 void mmc_host_deeper_disable(struct work_struct *work);
 
 #endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 59b9ba52e66a..a48066344fa8 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
| @@ -376,7 +376,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
| 376 | } | 376 | } |
| 377 | 377 | ||
| 378 | card->ext_csd.raw_hc_erase_gap_size = | 378 | card->ext_csd.raw_hc_erase_gap_size = |
| 379 | ext_csd[EXT_CSD_PARTITION_ATTRIBUTE]; | 379 | ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; |
| 380 | card->ext_csd.raw_sec_trim_mult = | 380 | card->ext_csd.raw_sec_trim_mult = |
| 381 | ext_csd[EXT_CSD_SEC_TRIM_MULT]; | 381 | ext_csd[EXT_CSD_SEC_TRIM_MULT]; |
| 382 | card->ext_csd.raw_sec_erase_mult = | 382 | card->ext_csd.raw_sec_erase_mult = |
| @@ -551,7 +551,7 @@ static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width) | |||
| 551 | goto out; | 551 | goto out; |
| 552 | 552 | ||
| 553 | /* only compare read only fields */ | 553 | /* only compare read only fields */ |
| 554 | err = (!(card->ext_csd.raw_partition_support == | 554 | err = !((card->ext_csd.raw_partition_support == |
| 555 | bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) && | 555 | bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) && |
| 556 | (card->ext_csd.raw_erased_mem_count == | 556 | (card->ext_csd.raw_erased_mem_count == |
| 557 | bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) && | 557 | bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) && |
| @@ -1006,7 +1006,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
| 1006 | err = mmc_select_hs200(card); | 1006 | err = mmc_select_hs200(card); |
| 1007 | else if (host->caps & MMC_CAP_MMC_HIGHSPEED) | 1007 | else if (host->caps & MMC_CAP_MMC_HIGHSPEED) |
| 1008 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | 1008 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
| 1009 | EXT_CSD_HS_TIMING, 1, 0); | 1009 | EXT_CSD_HS_TIMING, 1, |
| 1010 | card->ext_csd.generic_cmd6_time); | ||
| 1010 | 1011 | ||
| 1011 | if (err && err != -EBADMSG) | 1012 | if (err && err != -EBADMSG) |
| 1012 | goto free_card; | 1013 | goto free_card; |
| @@ -1116,7 +1117,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
| 1116 | * Activate wide bus and DDR (if supported). | 1117 | * Activate wide bus and DDR (if supported). |
| 1117 | */ | 1118 | */ |
| 1118 | if (!mmc_card_hs200(card) && | 1119 | if (!mmc_card_hs200(card) && |
| 1119 | (card->csd.mmca_vsn >= CSD_SPEC_VER_3) && | 1120 | (card->csd.mmca_vsn >= CSD_SPEC_VER_4) && |
| 1120 | (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) { | 1121 | (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) { |
| 1121 | static unsigned ext_csd_bits[][2] = { | 1122 | static unsigned ext_csd_bits[][2] = { |
| 1122 | { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 }, | 1123 | { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 }, |
| @@ -1315,11 +1316,13 @@ static int mmc_suspend(struct mmc_host *host) | |||
| 1315 | BUG_ON(!host->card); | 1316 | BUG_ON(!host->card); |
| 1316 | 1317 | ||
| 1317 | mmc_claim_host(host); | 1318 | mmc_claim_host(host); |
| 1318 | if (mmc_card_can_sleep(host)) | 1319 | if (mmc_card_can_sleep(host)) { |
| 1319 | err = mmc_card_sleep(host); | 1320 | err = mmc_card_sleep(host); |
| 1320 | else if (!mmc_host_is_spi(host)) | 1321 | if (!err) |
| 1322 | mmc_card_set_sleep(host->card); | ||
| 1323 | } else if (!mmc_host_is_spi(host)) | ||
| 1321 | mmc_deselect_cards(host); | 1324 | mmc_deselect_cards(host); |
| 1322 | host->card->state &= ~MMC_STATE_HIGHSPEED; | 1325 | host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); |
| 1323 | mmc_release_host(host); | 1326 | mmc_release_host(host); |
| 1324 | 1327 | ||
| 1325 | return err; | 1328 | return err; |
| @@ -1339,7 +1342,11 @@ static int mmc_resume(struct mmc_host *host) | |||
| 1339 | BUG_ON(!host->card); | 1342 | BUG_ON(!host->card); |
| 1340 | 1343 | ||
| 1341 | mmc_claim_host(host); | 1344 | mmc_claim_host(host); |
| 1342 | err = mmc_init_card(host, host->ocr, host->card); | 1345 | if (mmc_card_is_sleep(host->card)) { |
| 1346 | err = mmc_card_awake(host); | ||
| 1347 | mmc_card_clr_sleep(host->card); | ||
| 1348 | } else | ||
| 1349 | err = mmc_init_card(host, host->ocr, host->card); | ||
| 1343 | mmc_release_host(host); | 1350 | mmc_release_host(host); |
| 1344 | 1351 | ||
| 1345 | return err; | 1352 | return err; |
| @@ -1349,7 +1356,8 @@ static int mmc_power_restore(struct mmc_host *host) | |||
| 1349 | { | 1356 | { |
| 1350 | int ret; | 1357 | int ret; |
| 1351 | 1358 | ||
| 1352 | host->card->state &= ~MMC_STATE_HIGHSPEED; | 1359 | host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); |
| 1360 | mmc_card_clr_sleep(host->card); | ||
| 1353 | mmc_claim_host(host); | 1361 | mmc_claim_host(host); |
| 1354 | ret = mmc_init_card(host, host->ocr, host->card); | 1362 | ret = mmc_init_card(host, host->ocr, host->card); |
| 1355 | mmc_release_host(host); | 1363 | mmc_release_host(host); |
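Taken together, the mmc_suspend()/mmc_resume() hunks give eMMC a fast resume path: if the card was successfully put to sleep at suspend time, the new MMC_STATE_SLEEP flag (added to include/linux/mmc/card.h later in this diff) is set, and resume only has to issue the awake command instead of running the whole mmc_init_card() sequence. A condensed, hedged sketch of that pairing; the helpers named here are the core's own static functions, so this is an outline of the control flow rather than a drop-in:

    /* Sketch of the suspend/resume pairing introduced above. */
    static int example_suspend(struct mmc_host *host)
    {
            int err = 0;

            if (mmc_card_can_sleep(host)) {
                    err = mmc_card_sleep(host);          /* CMD5: enter sleep   */
                    if (!err)
                            mmc_card_set_sleep(host->card);
            } else if (!mmc_host_is_spi(host)) {
                    mmc_deselect_cards(host);
            }
            return err;
    }

    static int example_resume(struct mmc_host *host)
    {
            if (mmc_card_is_sleep(host->card)) {
                    int err = mmc_card_awake(host);      /* CMD5: wake up       */

                    mmc_card_clr_sleep(host->card);
                    return err;
            }
            return mmc_init_card(host, host->ocr, host->card);  /* full re-init */
    }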
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index c63ad03c29c7..5017f9354ce2 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c | |||
| @@ -451,9 +451,11 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status) | |||
| 451 | * information and let the hardware specific code | 451 | * information and let the hardware specific code |
| 452 | * return what is possible given the options | 452 | * return what is possible given the options |
| 453 | */ | 453 | */ |
| 454 | mmc_host_clk_hold(card->host); | ||
| 454 | drive_strength = card->host->ops->select_drive_strength( | 455 | drive_strength = card->host->ops->select_drive_strength( |
| 455 | card->sw_caps.uhs_max_dtr, | 456 | card->sw_caps.uhs_max_dtr, |
| 456 | host_drv_type, card_drv_type); | 457 | host_drv_type, card_drv_type); |
| 458 | mmc_host_clk_release(card->host); | ||
| 457 | 459 | ||
| 458 | err = mmc_sd_switch(card, 1, 2, drive_strength, status); | 460 | err = mmc_sd_switch(card, 1, 2, drive_strength, status); |
| 459 | if (err) | 461 | if (err) |
| @@ -660,9 +662,12 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card) | |||
| 660 | goto out; | 662 | goto out; |
| 661 | 663 | ||
| 662 | /* SPI mode doesn't define CMD19 */ | 664 | /* SPI mode doesn't define CMD19 */ |
| 663 | if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning) | 665 | if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning) { |
| 666 | mmc_host_clk_hold(card->host); | ||
| 664 | err = card->host->ops->execute_tuning(card->host, | 667 | err = card->host->ops->execute_tuning(card->host, |
| 665 | MMC_SEND_TUNING_BLOCK); | 668 | MMC_SEND_TUNING_BLOCK); |
| 669 | mmc_host_clk_release(card->host); | ||
| 670 | } | ||
| 666 | 671 | ||
| 667 | out: | 672 | out: |
| 668 | kfree(status); | 673 | kfree(status); |
| @@ -850,8 +855,11 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card, | |||
| 850 | if (!reinit) { | 855 | if (!reinit) { |
| 851 | int ro = -1; | 856 | int ro = -1; |
| 852 | 857 | ||
| 853 | if (host->ops->get_ro) | 858 | if (host->ops->get_ro) { |
| 859 | mmc_host_clk_hold(card->host); | ||
| 854 | ro = host->ops->get_ro(host); | 860 | ro = host->ops->get_ro(host); |
| 861 | mmc_host_clk_release(card->host); | ||
| 862 | } | ||
| 855 | 863 | ||
| 856 | if (ro < 0) { | 864 | if (ro < 0) { |
| 857 | pr_warning("%s: host does not " | 865 | pr_warning("%s: host does not " |
| @@ -967,8 +975,11 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, | |||
| 967 | * Since initialization is now complete, enable preset | 975 | * Since initialization is now complete, enable preset |
| 968 | * value registers for UHS-I cards. | 976 | * value registers for UHS-I cards. |
| 969 | */ | 977 | */ |
| 970 | if (host->ops->enable_preset_value) | 978 | if (host->ops->enable_preset_value) { |
| 979 | mmc_host_clk_hold(card->host); | ||
| 971 | host->ops->enable_preset_value(host, true); | 980 | host->ops->enable_preset_value(host, true); |
| 981 | mmc_host_clk_release(card->host); | ||
| 982 | } | ||
| 972 | } else { | 983 | } else { |
| 973 | /* | 984 | /* |
| 974 | * Attempt to change to high-speed (if supported) | 985 | * Attempt to change to high-speed (if supported) |
| @@ -1151,8 +1162,11 @@ int mmc_attach_sd(struct mmc_host *host) | |||
| 1151 | return err; | 1162 | return err; |
| 1152 | 1163 | ||
| 1153 | /* Disable preset value enable if already set since last time */ | 1164 | /* Disable preset value enable if already set since last time */ |
| 1154 | if (host->ops->enable_preset_value) | 1165 | if (host->ops->enable_preset_value) { |
| 1166 | mmc_host_clk_hold(host); | ||
| 1155 | host->ops->enable_preset_value(host, false); | 1167 | host->ops->enable_preset_value(host, false); |
| 1168 | mmc_host_clk_release(host); | ||
| 1169 | } | ||
| 1156 | 1170 | ||
| 1157 | err = mmc_send_app_op_cond(host, 0, &ocr); | 1171 | err = mmc_send_app_op_cond(host, 0, &ocr); |
| 1158 | if (err) | 1172 | if (err) |
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index bd7bacc950dc..12cde6ee17f5 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c | |||
| @@ -98,10 +98,11 @@ fail: | |||
| 98 | return ret; | 98 | return ret; |
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | static int sdio_read_cccr(struct mmc_card *card) | 101 | static int sdio_read_cccr(struct mmc_card *card, u32 ocr) |
| 102 | { | 102 | { |
| 103 | int ret; | 103 | int ret; |
| 104 | int cccr_vsn; | 104 | int cccr_vsn; |
| 105 | int uhs = ocr & R4_18V_PRESENT; | ||
| 105 | unsigned char data; | 106 | unsigned char data; |
| 106 | unsigned char speed; | 107 | unsigned char speed; |
| 107 | 108 | ||
| @@ -149,7 +150,7 @@ static int sdio_read_cccr(struct mmc_card *card) | |||
| 149 | card->scr.sda_spec3 = 0; | 150 | card->scr.sda_spec3 = 0; |
| 150 | card->sw_caps.sd3_bus_mode = 0; | 151 | card->sw_caps.sd3_bus_mode = 0; |
| 151 | card->sw_caps.sd3_drv_type = 0; | 152 | card->sw_caps.sd3_drv_type = 0; |
| 152 | if (cccr_vsn >= SDIO_CCCR_REV_3_00) { | 153 | if (cccr_vsn >= SDIO_CCCR_REV_3_00 && uhs) { |
| 153 | card->scr.sda_spec3 = 1; | 154 | card->scr.sda_spec3 = 1; |
| 154 | ret = mmc_io_rw_direct(card, 0, 0, | 155 | ret = mmc_io_rw_direct(card, 0, 0, |
| 155 | SDIO_CCCR_UHS, 0, &data); | 156 | SDIO_CCCR_UHS, 0, &data); |
| @@ -712,7 +713,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, | |||
| 712 | /* | 713 | /* |
| 713 | * Read the common registers. | 714 | * Read the common registers. |
| 714 | */ | 715 | */ |
| 715 | err = sdio_read_cccr(card); | 716 | err = sdio_read_cccr(card, ocr); |
| 716 | if (err) | 717 | if (err) |
| 717 | goto remove; | 718 | goto remove; |
| 718 | 719 | ||
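sdio_read_cccr() now receives the OCR from initialization so it can check the S18A ("1.8 V accepted") bit of the R4 response before bothering with the UHS-I capability and driver-strength registers, since UHS modes are only usable with 1.8 V signalling. A small stand-alone version of the gating test; the bit position used here is an assumption written out locally rather than taken from the kernel header:

    #include <stdio.h>

    /* Assumption for this demo: S18A is bit 24 of the R4/OCR word. */
    #define R4_18V_PRESENT_DEMO     (1u << 24)

    int main(void)
    {
            unsigned int ocr = 0x00ff8000 | R4_18V_PRESENT_DEMO;
            int cccr_vsn_ge_3 = 1;          /* pretend the CCCR revision is >= 3.00 */

            if (cccr_vsn_ge_3 && (ocr & R4_18V_PRESENT_DEMO))
                    printf("read SDIO_CCCR_UHS and the driver-strength register\n");
            else
                    printf("skip UHS-I probing\n");
            return 0;
    }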
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c index 68f81b9ee0fb..f573e7f9f740 100644 --- a/drivers/mmc/core/sdio_irq.c +++ b/drivers/mmc/core/sdio_irq.c | |||
| @@ -146,15 +146,21 @@ static int sdio_irq_thread(void *_host) | |||
| 146 | } | 146 | } |
| 147 | 147 | ||
| 148 | set_current_state(TASK_INTERRUPTIBLE); | 148 | set_current_state(TASK_INTERRUPTIBLE); |
| 149 | if (host->caps & MMC_CAP_SDIO_IRQ) | 149 | if (host->caps & MMC_CAP_SDIO_IRQ) { |
| 150 | mmc_host_clk_hold(host); | ||
| 150 | host->ops->enable_sdio_irq(host, 1); | 151 | host->ops->enable_sdio_irq(host, 1); |
| 152 | mmc_host_clk_release(host); | ||
| 153 | } | ||
| 151 | if (!kthread_should_stop()) | 154 | if (!kthread_should_stop()) |
| 152 | schedule_timeout(period); | 155 | schedule_timeout(period); |
| 153 | set_current_state(TASK_RUNNING); | 156 | set_current_state(TASK_RUNNING); |
| 154 | } while (!kthread_should_stop()); | 157 | } while (!kthread_should_stop()); |
| 155 | 158 | ||
| 156 | if (host->caps & MMC_CAP_SDIO_IRQ) | 159 | if (host->caps & MMC_CAP_SDIO_IRQ) { |
| 160 | mmc_host_clk_hold(host); | ||
| 157 | host->ops->enable_sdio_irq(host, 0); | 161 | host->ops->enable_sdio_irq(host, 0); |
| 162 | mmc_host_clk_release(host); | ||
| 163 | } | ||
| 158 | 164 | ||
| 159 | pr_debug("%s: IRQ thread exiting with code %d\n", | 165 | pr_debug("%s: IRQ thread exiting with code %d\n", |
| 160 | mmc_hostname(host), ret); | 166 | mmc_hostname(host), ret); |
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index fcfe1eb5acc8..6985cdb0bb26 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c | |||
| @@ -969,11 +969,14 @@ static void atmci_start_request(struct atmel_mci *host, | |||
| 969 | host->data_status = 0; | 969 | host->data_status = 0; |
| 970 | 970 | ||
| 971 | if (host->need_reset) { | 971 | if (host->need_reset) { |
| 972 | iflags = atmci_readl(host, ATMCI_IMR); | ||
| 973 | iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB); | ||
| 972 | atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); | 974 | atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); |
| 973 | atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); | 975 | atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); |
| 974 | atmci_writel(host, ATMCI_MR, host->mode_reg); | 976 | atmci_writel(host, ATMCI_MR, host->mode_reg); |
| 975 | if (host->caps.has_cfg_reg) | 977 | if (host->caps.has_cfg_reg) |
| 976 | atmci_writel(host, ATMCI_CFG, host->cfg_reg); | 978 | atmci_writel(host, ATMCI_CFG, host->cfg_reg); |
| 979 | atmci_writel(host, ATMCI_IER, iflags); | ||
| 977 | host->need_reset = false; | 980 | host->need_reset = false; |
| 978 | } | 981 | } |
| 979 | atmci_writel(host, ATMCI_SDCR, slot->sdc_reg); | 982 | atmci_writel(host, ATMCI_SDCR, slot->sdc_reg); |
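The atmel-mci change keeps SDIO interrupts alive across error recovery: the ATMCI_CR_SWRST soft reset also clears the interrupt mask, so the code now samples ATMCI_IMR first, keeps only the SDIO-IRQ bits, and rewrites them through ATMCI_IER once the mode and config registers have been restored. The save/filter/restore idiom in a tiny self-contained form, using a toy register file that merely stands in for the controller:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy model of a controller whose soft reset wipes its registers. */
    struct regs { uint32_t imr; uint32_t mode; };
    #define WANTED_IRQS 0x3u                            /* e.g. the two SDIO IRQ bits */

    static void soft_reset(struct regs *r) { r->imr = 0; r->mode = 0; }

    int main(void)
    {
            struct regs r = { .imr = 0x3, .mode = 0x10 };
            uint32_t saved_irqs = r.imr & WANTED_IRQS;  /* save before the reset   */
            uint32_t saved_mode = r.mode;

            soft_reset(&r);                             /* reset clears everything */
            r.mode = saved_mode;                        /* restore configuration   */
            r.imr |= saved_irqs;                        /* re-enable the kept bits */

            printf("imr after restore: 0x%x\n", r.imr);
            return 0;
    }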
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 0e342793ff14..8bec1c36b159 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
| @@ -22,7 +22,6 @@ | |||
| 22 | #include <linux/ioport.h> | 22 | #include <linux/ioport.h> |
| 23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
| 24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
| 25 | #include <linux/scatterlist.h> | ||
| 26 | #include <linux/seq_file.h> | 25 | #include <linux/seq_file.h> |
| 27 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
| 28 | #include <linux/stat.h> | 27 | #include <linux/stat.h> |
| @@ -502,8 +501,14 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) | |||
| 502 | host->dir_status = DW_MCI_SEND_STATUS; | 501 | host->dir_status = DW_MCI_SEND_STATUS; |
| 503 | 502 | ||
| 504 | if (dw_mci_submit_data_dma(host, data)) { | 503 | if (dw_mci_submit_data_dma(host, data)) { |
| 504 | int flags = SG_MITER_ATOMIC; | ||
| 505 | if (host->data->flags & MMC_DATA_READ) | ||
| 506 | flags |= SG_MITER_TO_SG; | ||
| 507 | else | ||
| 508 | flags |= SG_MITER_FROM_SG; | ||
| 509 | |||
| 510 | sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); | ||
| 505 | host->sg = data->sg; | 511 | host->sg = data->sg; |
| 506 | host->pio_offset = 0; | ||
| 507 | host->part_buf_start = 0; | 512 | host->part_buf_start = 0; |
| 508 | host->part_buf_count = 0; | 513 | host->part_buf_count = 0; |
| 509 | 514 | ||
| @@ -972,6 +977,7 @@ static void dw_mci_tasklet_func(unsigned long priv) | |||
| 972 | * generates a block interrupt, hence setting | 977 | * generates a block interrupt, hence setting |
| 973 | * the scatter-gather pointer to NULL. | 978 | * the scatter-gather pointer to NULL. |
| 974 | */ | 979 | */ |
| 980 | sg_miter_stop(&host->sg_miter); | ||
| 975 | host->sg = NULL; | 981 | host->sg = NULL; |
| 976 | ctrl = mci_readl(host, CTRL); | 982 | ctrl = mci_readl(host, CTRL); |
| 977 | ctrl |= SDMMC_CTRL_FIFO_RESET; | 983 | ctrl |= SDMMC_CTRL_FIFO_RESET; |
| @@ -1311,54 +1317,44 @@ static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) | |||
| 1311 | 1317 | ||
| 1312 | static void dw_mci_read_data_pio(struct dw_mci *host) | 1318 | static void dw_mci_read_data_pio(struct dw_mci *host) |
| 1313 | { | 1319 | { |
| 1314 | struct scatterlist *sg = host->sg; | 1320 | struct sg_mapping_iter *sg_miter = &host->sg_miter; |
| 1315 | void *buf = sg_virt(sg); | 1321 | void *buf; |
| 1316 | unsigned int offset = host->pio_offset; | 1322 | unsigned int offset; |
| 1317 | struct mmc_data *data = host->data; | 1323 | struct mmc_data *data = host->data; |
| 1318 | int shift = host->data_shift; | 1324 | int shift = host->data_shift; |
| 1319 | u32 status; | 1325 | u32 status; |
| 1320 | unsigned int nbytes = 0, len; | 1326 | unsigned int nbytes = 0, len; |
| 1327 | unsigned int remain, fcnt; | ||
| 1321 | 1328 | ||
| 1322 | do { | 1329 | do { |
| 1323 | len = host->part_buf_count + | 1330 | if (!sg_miter_next(sg_miter)) |
| 1324 | (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift); | 1331 | goto done; |
| 1325 | if (offset + len <= sg->length) { | 1332 | |
| 1333 | host->sg = sg_miter->__sg; | ||
| 1334 | buf = sg_miter->addr; | ||
| 1335 | remain = sg_miter->length; | ||
| 1336 | offset = 0; | ||
| 1337 | |||
| 1338 | do { | ||
| 1339 | fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS)) | ||
| 1340 | << shift) + host->part_buf_count; | ||
| 1341 | len = min(remain, fcnt); | ||
| 1342 | if (!len) | ||
| 1343 | break; | ||
| 1326 | dw_mci_pull_data(host, (void *)(buf + offset), len); | 1344 | dw_mci_pull_data(host, (void *)(buf + offset), len); |
| 1327 | |||
| 1328 | offset += len; | 1345 | offset += len; |
| 1329 | nbytes += len; | 1346 | nbytes += len; |
| 1330 | 1347 | remain -= len; | |
| 1331 | if (offset == sg->length) { | 1348 | } while (remain); |
| 1332 | flush_dcache_page(sg_page(sg)); | 1349 | sg_miter->consumed = offset; |
| 1333 | host->sg = sg = sg_next(sg); | ||
| 1334 | if (!sg) | ||
| 1335 | goto done; | ||
| 1336 | |||
| 1337 | offset = 0; | ||
| 1338 | buf = sg_virt(sg); | ||
| 1339 | } | ||
| 1340 | } else { | ||
| 1341 | unsigned int remaining = sg->length - offset; | ||
| 1342 | dw_mci_pull_data(host, (void *)(buf + offset), | ||
| 1343 | remaining); | ||
| 1344 | nbytes += remaining; | ||
| 1345 | |||
| 1346 | flush_dcache_page(sg_page(sg)); | ||
| 1347 | host->sg = sg = sg_next(sg); | ||
| 1348 | if (!sg) | ||
| 1349 | goto done; | ||
| 1350 | |||
| 1351 | offset = len - remaining; | ||
| 1352 | buf = sg_virt(sg); | ||
| 1353 | dw_mci_pull_data(host, buf, offset); | ||
| 1354 | nbytes += offset; | ||
| 1355 | } | ||
| 1356 | 1350 | ||
| 1357 | status = mci_readl(host, MINTSTS); | 1351 | status = mci_readl(host, MINTSTS); |
| 1358 | mci_writel(host, RINTSTS, SDMMC_INT_RXDR); | 1352 | mci_writel(host, RINTSTS, SDMMC_INT_RXDR); |
| 1359 | if (status & DW_MCI_DATA_ERROR_FLAGS) { | 1353 | if (status & DW_MCI_DATA_ERROR_FLAGS) { |
| 1360 | host->data_status = status; | 1354 | host->data_status = status; |
| 1361 | data->bytes_xfered += nbytes; | 1355 | data->bytes_xfered += nbytes; |
| 1356 | sg_miter_stop(sg_miter); | ||
| 1357 | host->sg = NULL; | ||
| 1362 | smp_wmb(); | 1358 | smp_wmb(); |
| 1363 | 1359 | ||
| 1364 | set_bit(EVENT_DATA_ERROR, &host->pending_events); | 1360 | set_bit(EVENT_DATA_ERROR, &host->pending_events); |
| @@ -1367,65 +1363,66 @@ static void dw_mci_read_data_pio(struct dw_mci *host) | |||
| 1367 | return; | 1363 | return; |
| 1368 | } | 1364 | } |
| 1369 | } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ | 1365 | } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ |
| 1370 | host->pio_offset = offset; | ||
| 1371 | data->bytes_xfered += nbytes; | 1366 | data->bytes_xfered += nbytes; |
| 1367 | |||
| 1368 | if (!remain) { | ||
| 1369 | if (!sg_miter_next(sg_miter)) | ||
| 1370 | goto done; | ||
| 1371 | sg_miter->consumed = 0; | ||
| 1372 | } | ||
| 1373 | sg_miter_stop(sg_miter); | ||
| 1372 | return; | 1374 | return; |
| 1373 | 1375 | ||
| 1374 | done: | 1376 | done: |
| 1375 | data->bytes_xfered += nbytes; | 1377 | data->bytes_xfered += nbytes; |
| 1378 | sg_miter_stop(sg_miter); | ||
| 1379 | host->sg = NULL; | ||
| 1376 | smp_wmb(); | 1380 | smp_wmb(); |
| 1377 | set_bit(EVENT_XFER_COMPLETE, &host->pending_events); | 1381 | set_bit(EVENT_XFER_COMPLETE, &host->pending_events); |
| 1378 | } | 1382 | } |
| 1379 | 1383 | ||
| 1380 | static void dw_mci_write_data_pio(struct dw_mci *host) | 1384 | static void dw_mci_write_data_pio(struct dw_mci *host) |
| 1381 | { | 1385 | { |
| 1382 | struct scatterlist *sg = host->sg; | 1386 | struct sg_mapping_iter *sg_miter = &host->sg_miter; |
| 1383 | void *buf = sg_virt(sg); | 1387 | void *buf; |
| 1384 | unsigned int offset = host->pio_offset; | 1388 | unsigned int offset; |
| 1385 | struct mmc_data *data = host->data; | 1389 | struct mmc_data *data = host->data; |
| 1386 | int shift = host->data_shift; | 1390 | int shift = host->data_shift; |
| 1387 | u32 status; | 1391 | u32 status; |
| 1388 | unsigned int nbytes = 0, len; | 1392 | unsigned int nbytes = 0, len; |
| 1393 | unsigned int fifo_depth = host->fifo_depth; | ||
| 1394 | unsigned int remain, fcnt; | ||
| 1389 | 1395 | ||
| 1390 | do { | 1396 | do { |
| 1391 | len = ((host->fifo_depth - | 1397 | if (!sg_miter_next(sg_miter)) |
| 1392 | SDMMC_GET_FCNT(mci_readl(host, STATUS))) << shift) | 1398 | goto done; |
| 1393 | - host->part_buf_count; | 1399 | |
| 1394 | if (offset + len <= sg->length) { | 1400 | host->sg = sg_miter->__sg; |
| 1401 | buf = sg_miter->addr; | ||
| 1402 | remain = sg_miter->length; | ||
| 1403 | offset = 0; | ||
| 1404 | |||
| 1405 | do { | ||
| 1406 | fcnt = ((fifo_depth - | ||
| 1407 | SDMMC_GET_FCNT(mci_readl(host, STATUS))) | ||
| 1408 | << shift) - host->part_buf_count; | ||
| 1409 | len = min(remain, fcnt); | ||
| 1410 | if (!len) | ||
| 1411 | break; | ||
| 1395 | host->push_data(host, (void *)(buf + offset), len); | 1412 | host->push_data(host, (void *)(buf + offset), len); |
| 1396 | |||
| 1397 | offset += len; | 1413 | offset += len; |
| 1398 | nbytes += len; | 1414 | nbytes += len; |
| 1399 | if (offset == sg->length) { | 1415 | remain -= len; |
| 1400 | host->sg = sg = sg_next(sg); | 1416 | } while (remain); |
| 1401 | if (!sg) | 1417 | sg_miter->consumed = offset; |
| 1402 | goto done; | ||
| 1403 | |||
| 1404 | offset = 0; | ||
| 1405 | buf = sg_virt(sg); | ||
| 1406 | } | ||
| 1407 | } else { | ||
| 1408 | unsigned int remaining = sg->length - offset; | ||
| 1409 | |||
| 1410 | host->push_data(host, (void *)(buf + offset), | ||
| 1411 | remaining); | ||
| 1412 | nbytes += remaining; | ||
| 1413 | |||
| 1414 | host->sg = sg = sg_next(sg); | ||
| 1415 | if (!sg) | ||
| 1416 | goto done; | ||
| 1417 | |||
| 1418 | offset = len - remaining; | ||
| 1419 | buf = sg_virt(sg); | ||
| 1420 | host->push_data(host, (void *)buf, offset); | ||
| 1421 | nbytes += offset; | ||
| 1422 | } | ||
| 1423 | 1418 | ||
| 1424 | status = mci_readl(host, MINTSTS); | 1419 | status = mci_readl(host, MINTSTS); |
| 1425 | mci_writel(host, RINTSTS, SDMMC_INT_TXDR); | 1420 | mci_writel(host, RINTSTS, SDMMC_INT_TXDR); |
| 1426 | if (status & DW_MCI_DATA_ERROR_FLAGS) { | 1421 | if (status & DW_MCI_DATA_ERROR_FLAGS) { |
| 1427 | host->data_status = status; | 1422 | host->data_status = status; |
| 1428 | data->bytes_xfered += nbytes; | 1423 | data->bytes_xfered += nbytes; |
| 1424 | sg_miter_stop(sg_miter); | ||
| 1425 | host->sg = NULL; | ||
| 1429 | 1426 | ||
| 1430 | smp_wmb(); | 1427 | smp_wmb(); |
| 1431 | 1428 | ||
| @@ -1435,12 +1432,20 @@ static void dw_mci_write_data_pio(struct dw_mci *host) | |||
| 1435 | return; | 1432 | return; |
| 1436 | } | 1433 | } |
| 1437 | } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ | 1434 | } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ |
| 1438 | host->pio_offset = offset; | ||
| 1439 | data->bytes_xfered += nbytes; | 1435 | data->bytes_xfered += nbytes; |
| 1436 | |||
| 1437 | if (!remain) { | ||
| 1438 | if (!sg_miter_next(sg_miter)) | ||
| 1439 | goto done; | ||
| 1440 | sg_miter->consumed = 0; | ||
| 1441 | } | ||
| 1442 | sg_miter_stop(sg_miter); | ||
| 1440 | return; | 1443 | return; |
| 1441 | 1444 | ||
| 1442 | done: | 1445 | done: |
| 1443 | data->bytes_xfered += nbytes; | 1446 | data->bytes_xfered += nbytes; |
| 1447 | sg_miter_stop(sg_miter); | ||
| 1448 | host->sg = NULL; | ||
| 1444 | smp_wmb(); | 1449 | smp_wmb(); |
| 1445 | set_bit(EVENT_XFER_COMPLETE, &host->pending_events); | 1450 | set_bit(EVENT_XFER_COMPLETE, &host->pending_events); |
| 1446 | } | 1451 | } |
| @@ -1643,6 +1648,7 @@ static void dw_mci_work_routine_card(struct work_struct *work) | |||
| 1643 | * block interrupt, hence setting the | 1648 | * block interrupt, hence setting the |
| 1644 | * scatter-gather pointer to NULL. | 1649 | * scatter-gather pointer to NULL. |
| 1645 | */ | 1650 | */ |
| 1651 | sg_miter_stop(&host->sg_miter); | ||
| 1646 | host->sg = NULL; | 1652 | host->sg = NULL; |
| 1647 | 1653 | ||
| 1648 | ctrl = mci_readl(host, CTRL); | 1654 | ctrl = mci_readl(host, CTRL); |
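The dw_mmc PIO rework replaces the hand-rolled scatterlist walk (sg_virt(), pio_offset, flush_dcache_page(), sg_next()) with the generic sg_mapping_iter, which takes care of mapping, cache maintenance and advancing through the list, including highmem pages that sg_virt() cannot reach. The usage pattern is: start the iterator with a direction flag, map a chunk with sg_miter_next(), consume up to sg_miter.length bytes at sg_miter.addr, record the amount in sg_miter.consumed, and stop the iterator when finished or before sleeping. A hedged sketch of a PIO read loop on that API; copy_from_fifo() is a placeholder for the device-specific FIFO drain, not a dw_mmc function:

    #include <linux/scatterlist.h>

    /* Sketch only: drain a FIFO into a scatterlist via sg_mapping_iter. */
    static void example_pio_read(struct scatterlist *sgl, unsigned int nents,
                                 size_t (*copy_from_fifo)(void *buf, size_t len))
    {
            struct sg_mapping_iter miter;

            sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);

            while (sg_miter_next(&miter)) {
                    /* miter.addr/miter.length describe the currently mapped chunk */
                    miter.consumed = copy_from_fifo(miter.addr, miter.length);
                    if (miter.consumed < miter.length)
                            break;                  /* FIFO ran dry for now */
            }

            sg_miter_stop(&miter);                  /* unmap and flush the last chunk */
    }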
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c index ab66f2454dc4..1534b582c419 100644 --- a/drivers/mmc/host/of_mmc_spi.c +++ b/drivers/mmc/host/of_mmc_spi.c | |||
| @@ -113,8 +113,8 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi) | |||
| 113 | const int j = i * 2; | 113 | const int j = i * 2; |
| 114 | u32 mask; | 114 | u32 mask; |
| 115 | 115 | ||
| 116 | mask = mmc_vddrange_to_ocrmask(voltage_ranges[j], | 116 | mask = mmc_vddrange_to_ocrmask(be32_to_cpu(voltage_ranges[j]), |
| 117 | voltage_ranges[j + 1]); | 117 | be32_to_cpu(voltage_ranges[j + 1])); |
| 118 | if (!mask) { | 118 | if (!mask) { |
| 119 | ret = -EINVAL; | 119 | ret = -EINVAL; |
| 120 | dev_err(dev, "OF: voltage-range #%d is invalid\n", i); | 120 | dev_err(dev, "OF: voltage-range #%d is invalid\n", i); |
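The of_mmc_spi fix is an endianness one: of_get_property() hands back the raw device-tree cells, which are big-endian __be32 values, so each cell has to go through be32_to_cpu() before it is used as a number; on little-endian CPUs the old code fed byte-swapped voltages to mmc_vddrange_to_ocrmask(). A hedged sketch of reading a two-cells-per-entry property the same way:

    #include <linux/of.h>

    /* Sketch: parse a hypothetical "voltage-ranges"-style property. */
    static void example_parse_ranges(struct device_node *np)
    {
            const __be32 *ranges;
            int len, i, num;

            ranges = of_get_property(np, "voltage-ranges", &len);
            if (!ranges || len % (2 * sizeof(*ranges)))
                    return;

            num = len / (2 * sizeof(*ranges));
            for (i = 0; i < num; i++) {
                    u32 min_mv = be32_to_cpu(ranges[2 * i]);
                    u32 max_mv = be32_to_cpu(ranges[2 * i + 1]);

                    /* min_mv/max_mv are now CPU-endian and safe to compute with */
                    pr_debug("range %d: %u..%u mV\n", i, min_mv, max_mv);
            }
    }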
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index ff4adc018041..5d876ff86f37 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c | |||
| @@ -38,6 +38,23 @@ static u8 esdhc_readb(struct sdhci_host *host, int reg) | |||
| 38 | int base = reg & ~0x3; | 38 | int base = reg & ~0x3; |
| 39 | int shift = (reg & 0x3) * 8; | 39 | int shift = (reg & 0x3) * 8; |
| 40 | u8 ret = (in_be32(host->ioaddr + base) >> shift) & 0xff; | 40 | u8 ret = (in_be32(host->ioaddr + base) >> shift) & 0xff; |
| 41 | |||
| 42 | /* | ||
| 43 | * "DMA select" locates at offset 0x28 in SD specification, but on | ||
| 44 | * P5020 or P3041, it locates at 0x29. | ||
| 45 | */ | ||
| 46 | if (reg == SDHCI_HOST_CONTROL) { | ||
| 47 | u32 dma_bits; | ||
| 48 | |||
| 49 | dma_bits = in_be32(host->ioaddr + reg); | ||
| 50 | /* DMA select is 22,23 bits in Protocol Control Register */ | ||
| 51 | dma_bits = (dma_bits >> 5) & SDHCI_CTRL_DMA_MASK; | ||
| 52 | |||
| 53 | /* fixup the result */ | ||
| 54 | ret &= ~SDHCI_CTRL_DMA_MASK; | ||
| 55 | ret |= dma_bits; | ||
| 56 | } | ||
| 57 | |||
| 41 | return ret; | 58 | return ret; |
| 42 | } | 59 | } |
| 43 | 60 | ||
| @@ -56,6 +73,21 @@ static void esdhc_writew(struct sdhci_host *host, u16 val, int reg) | |||
| 56 | 73 | ||
| 57 | static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) | 74 | static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) |
| 58 | { | 75 | { |
| 76 | /* | ||
| 77 | * "DMA select" location is offset 0x28 in SD specification, but on | ||
| 78 | * P5020 or P3041, it's located at 0x29. | ||
| 79 | */ | ||
| 80 | if (reg == SDHCI_HOST_CONTROL) { | ||
| 81 | u32 dma_bits; | ||
| 82 | |||
| 83 | /* DMA select is 22,23 bits in Protocol Control Register */ | ||
| 84 | dma_bits = (val & SDHCI_CTRL_DMA_MASK) << 5; | ||
| 85 | clrsetbits_be32(host->ioaddr + reg, SDHCI_CTRL_DMA_MASK << 5, | ||
| 86 | dma_bits); | ||
| 87 | val &= ~SDHCI_CTRL_DMA_MASK; | ||
| 88 | val |= in_be32(host->ioaddr + reg) & SDHCI_CTRL_DMA_MASK; | ||
| 89 | } | ||
| 90 | |||
| 59 | /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */ | 91 | /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */ |
| 60 | if (reg == SDHCI_HOST_CONTROL) | 92 | if (reg == SDHCI_HOST_CONTROL) |
| 61 | val &= ~ESDHC_HOST_CONTROL_RES; | 93 | val &= ~ESDHC_HOST_CONTROL_RES; |
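The new esdhc_readb()/esdhc_writeb() logic works around a layout quirk: on the P5020/P3041 the two "DMA select" bits of the SDHCI host-control byte live five bit positions higher, inside the 32-bit Protocol Control Register, so byte accesses to SDHCI_HOST_CONTROL have that field extracted from, or re-inserted into, its real location. The underlying operation is plain mask-shift-merge; a stand-alone demonstration with made-up values:

    #include <stdio.h>
    #include <stdint.h>

    #define FIELD_MASK  0x18u   /* two-bit field at bits 3..4 (spec position)       */
    #define RELOC_SHIFT 5       /* this hardware keeps it 5 bits higher (bits 8..9) */

    int main(void)
    {
            uint32_t hw_reg = 0x00000100;       /* field already set in the shifted spot */

            /* read fixup: move the field from its real location to the spec one */
            uint8_t  byte  = 0x02;              /* what a naive byte read would return   */
            uint32_t field = (hw_reg >> RELOC_SHIFT) & FIELD_MASK;

            byte = (uint8_t)((byte & ~FIELD_MASK) | field);

            /* write fixup: place the spec-position field at the hardware location */
            uint8_t  val      = byte | 0x10;    /* caller asks for DMA select = 0b10 */
            uint32_t new_bits = (uint32_t)(val & FIELD_MASK) << RELOC_SHIFT;

            hw_reg = (hw_reg & ~(FIELD_MASK << RELOC_SHIFT)) | new_bits;

            printf("byte=0x%02x hw_reg=0x%08x\n", byte, hw_reg);
            return 0;
    }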
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index 7165e6a09274..6ebdc4010e7c 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c | |||
| @@ -250,7 +250,7 @@ static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot) | |||
| 250 | 250 | ||
| 251 | static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot) | 251 | static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot) |
| 252 | { | 252 | { |
| 253 | slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD; | 253 | slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE; |
| 254 | return 0; | 254 | return 0; |
| 255 | } | 255 | } |
| 256 | 256 | ||
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c index 03970bcb3495..c5c2a48bdd94 100644 --- a/drivers/mmc/host/sdhci-pltfm.c +++ b/drivers/mmc/host/sdhci-pltfm.c | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | * sdhci-pltfm.c Support for SDHCI platform devices | 2 | * sdhci-pltfm.c Support for SDHCI platform devices |
| 3 | * Copyright (c) 2009 Intel Corporation | 3 | * Copyright (c) 2009 Intel Corporation |
| 4 | * | 4 | * |
| 5 | * Copyright (c) 2007 Freescale Semiconductor, Inc. | 5 | * Copyright (c) 2007, 2011 Freescale Semiconductor, Inc. |
| 6 | * Copyright (c) 2009 MontaVista Software, Inc. | 6 | * Copyright (c) 2009 MontaVista Software, Inc. |
| 7 | * | 7 | * |
| 8 | * Authors: Xiaobo Xie <X.Xie@freescale.com> | 8 | * Authors: Xiaobo Xie <X.Xie@freescale.com> |
| @@ -71,6 +71,14 @@ void sdhci_get_of_property(struct platform_device *pdev) | |||
| 71 | if (sdhci_of_wp_inverted(np)) | 71 | if (sdhci_of_wp_inverted(np)) |
| 72 | host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT; | 72 | host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT; |
| 73 | 73 | ||
| 74 | if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc")) | ||
| 75 | host->quirks |= SDHCI_QUIRK_BROKEN_DMA; | ||
| 76 | |||
| 77 | if (of_device_is_compatible(np, "fsl,p2020-esdhc") || | ||
| 78 | of_device_is_compatible(np, "fsl,p1010-esdhc") || | ||
| 79 | of_device_is_compatible(np, "fsl,mpc8536-esdhc")) | ||
| 80 | host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; | ||
| 81 | |||
| 74 | clk = of_get_property(np, "clock-frequency", &size); | 82 | clk = of_get_property(np, "clock-frequency", &size); |
| 75 | if (clk && size == sizeof(*clk) && *clk) | 83 | if (clk && size == sizeof(*clk) && *clk) |
| 76 | pltfm_host->clock = be32_to_cpup(clk); | 84 | pltfm_host->clock = be32_to_cpup(clk); |
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index f5d8b53be333..352d4797865b 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c | |||
| @@ -1327,7 +1327,7 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev) | |||
| 1327 | if (ret < 0) | 1327 | if (ret < 0) |
| 1328 | goto clean_up2; | 1328 | goto clean_up2; |
| 1329 | 1329 | ||
| 1330 | mmc_add_host(mmc); | 1330 | INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work); |
| 1331 | 1331 | ||
| 1332 | sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); | 1332 | sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); |
| 1333 | 1333 | ||
| @@ -1338,22 +1338,24 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev) | |||
| 1338 | } | 1338 | } |
| 1339 | ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host); | 1339 | ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host); |
| 1340 | if (ret) { | 1340 | if (ret) { |
| 1341 | free_irq(irq[0], host); | ||
| 1342 | dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n"); | 1341 | dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n"); |
| 1343 | goto clean_up3; | 1342 | goto clean_up4; |
| 1344 | } | 1343 | } |
| 1345 | 1344 | ||
| 1346 | INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work); | 1345 | ret = mmc_add_host(mmc); |
| 1347 | 1346 | if (ret < 0) | |
| 1348 | mmc_detect_change(host->mmc, 0); | 1347 | goto clean_up5; |
| 1349 | 1348 | ||
| 1350 | dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION); | 1349 | dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION); |
| 1351 | dev_dbg(&pdev->dev, "chip ver H'%04x\n", | 1350 | dev_dbg(&pdev->dev, "chip ver H'%04x\n", |
| 1352 | sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff); | 1351 | sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff); |
| 1353 | return ret; | 1352 | return ret; |
| 1354 | 1353 | ||
| 1354 | clean_up5: | ||
| 1355 | free_irq(irq[1], host); | ||
| 1356 | clean_up4: | ||
| 1357 | free_irq(irq[0], host); | ||
| 1355 | clean_up3: | 1358 | clean_up3: |
| 1356 | mmc_remove_host(mmc); | ||
| 1357 | pm_runtime_suspend(&pdev->dev); | 1359 | pm_runtime_suspend(&pdev->dev); |
| 1358 | clean_up2: | 1360 | clean_up2: |
| 1359 | pm_runtime_disable(&pdev->dev); | 1361 | pm_runtime_disable(&pdev->dev); |
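The sh_mmcif_probe() rework is about ordering and unwinding: the timeout work and both IRQ handlers must be in place before mmc_add_host() publishes the host (otherwise a card-detect event can race into a half-initialized device), and the error path now releases exactly what was acquired, in reverse order, through the new clean_up5/clean_up4 labels instead of calling mmc_remove_host() for a host that was never added. The general shape of that idiom, sketched with placeholder helpers rather than the driver's real ones:

    #include <linux/platform_device.h>

    /*
     * Sketch of the acquire-in-order / release-in-reverse probe idiom.
     * setup_resources(), request_irqs() and publish_host() are placeholders,
     * not sh_mmcif functions.
     */
    static int example_probe(struct platform_device *pdev)
    {
            int ret;

            ret = setup_resources(pdev);    /* clocks, ioremap, delayed work, ...     */
            if (ret)
                    return ret;

            ret = request_irqs(pdev);       /* handlers ready before anyone calls in  */
            if (ret)
                    goto err_resources;

            ret = publish_host(pdev);       /* the mmc_add_host() step: strictly last */
            if (ret)
                    goto err_irqs;

            return 0;

    err_irqs:
            free_irqs(pdev);
    err_resources:
            release_resources(pdev);
            return ret;
    }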
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index a95e6d901726..f96c536d130a 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h | |||
| @@ -20,8 +20,8 @@ | |||
| 20 | #include <linux/mmc/tmio.h> | 20 | #include <linux/mmc/tmio.h> |
| 21 | #include <linux/mutex.h> | 21 | #include <linux/mutex.h> |
| 22 | #include <linux/pagemap.h> | 22 | #include <linux/pagemap.h> |
| 23 | #include <linux/spinlock.h> | ||
| 24 | #include <linux/scatterlist.h> | 23 | #include <linux/scatterlist.h> |
| 24 | #include <linux/spinlock.h> | ||
| 25 | 25 | ||
| 26 | /* Definitions for values the CTRL_SDIO_STATUS register can take. */ | 26 | /* Definitions for values the CTRL_SDIO_STATUS register can take. */ |
| 27 | #define TMIO_SDIO_STAT_IOIRQ 0x0001 | 27 | #define TMIO_SDIO_STAT_IOIRQ 0x0001 |
| @@ -120,6 +120,7 @@ void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data); | |||
| 120 | void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable); | 120 | void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable); |
| 121 | void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata); | 121 | void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata); |
| 122 | void tmio_mmc_release_dma(struct tmio_mmc_host *host); | 122 | void tmio_mmc_release_dma(struct tmio_mmc_host *host); |
| 123 | void tmio_mmc_abort_dma(struct tmio_mmc_host *host); | ||
| 123 | #else | 124 | #else |
| 124 | static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host, | 125 | static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host, |
| 125 | struct mmc_data *data) | 126 | struct mmc_data *data) |
| @@ -140,6 +141,10 @@ static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host, | |||
| 140 | static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host) | 141 | static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host) |
| 141 | { | 142 | { |
| 142 | } | 143 | } |
| 144 | |||
| 145 | static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host) | ||
| 146 | { | ||
| 147 | } | ||
| 143 | #endif | 148 | #endif |
| 144 | 149 | ||
| 145 | #ifdef CONFIG_PM | 150 | #ifdef CONFIG_PM |
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c index 7a6e6cc8f8b8..8253ec12003e 100644 --- a/drivers/mmc/host/tmio_mmc_dma.c +++ b/drivers/mmc/host/tmio_mmc_dma.c | |||
| @@ -34,6 +34,18 @@ void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) | |||
| 34 | #endif | 34 | #endif |
| 35 | } | 35 | } |
| 36 | 36 | ||
| 37 | void tmio_mmc_abort_dma(struct tmio_mmc_host *host) | ||
| 38 | { | ||
| 39 | tmio_mmc_enable_dma(host, false); | ||
| 40 | |||
| 41 | if (host->chan_rx) | ||
| 42 | dmaengine_terminate_all(host->chan_rx); | ||
| 43 | if (host->chan_tx) | ||
| 44 | dmaengine_terminate_all(host->chan_tx); | ||
| 45 | |||
| 46 | tmio_mmc_enable_dma(host, true); | ||
| 47 | } | ||
| 48 | |||
| 37 | static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) | 49 | static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) |
| 38 | { | 50 | { |
| 39 | struct scatterlist *sg = host->sg_ptr, *sg_tmp; | 51 | struct scatterlist *sg = host->sg_ptr, *sg_tmp; |
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c index abad01b37cfb..5f9ad74fbf80 100644 --- a/drivers/mmc/host/tmio_mmc_pio.c +++ b/drivers/mmc/host/tmio_mmc_pio.c | |||
| @@ -41,8 +41,8 @@ | |||
| 41 | #include <linux/platform_device.h> | 41 | #include <linux/platform_device.h> |
| 42 | #include <linux/pm_runtime.h> | 42 | #include <linux/pm_runtime.h> |
| 43 | #include <linux/scatterlist.h> | 43 | #include <linux/scatterlist.h> |
| 44 | #include <linux/workqueue.h> | ||
| 45 | #include <linux/spinlock.h> | 44 | #include <linux/spinlock.h> |
| 45 | #include <linux/workqueue.h> | ||
| 46 | 46 | ||
| 47 | #include "tmio_mmc.h" | 47 | #include "tmio_mmc.h" |
| 48 | 48 | ||
| @@ -246,6 +246,7 @@ static void tmio_mmc_reset_work(struct work_struct *work) | |||
| 246 | /* Ready for new calls */ | 246 | /* Ready for new calls */ |
| 247 | host->mrq = NULL; | 247 | host->mrq = NULL; |
| 248 | 248 | ||
| 249 | tmio_mmc_abort_dma(host); | ||
| 249 | mmc_request_done(host->mmc, mrq); | 250 | mmc_request_done(host->mmc, mrq); |
| 250 | } | 251 | } |
| 251 | 252 | ||
| @@ -272,6 +273,9 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host) | |||
| 272 | host->mrq = NULL; | 273 | host->mrq = NULL; |
| 273 | spin_unlock_irqrestore(&host->lock, flags); | 274 | spin_unlock_irqrestore(&host->lock, flags); |
| 274 | 275 | ||
| 276 | if (mrq->cmd->error || (mrq->data && mrq->data->error)) | ||
| 277 | tmio_mmc_abort_dma(host); | ||
| 278 | |||
| 275 | mmc_request_done(host->mmc, mrq); | 279 | mmc_request_done(host->mmc, mrq); |
| 276 | } | 280 | } |
| 277 | 281 | ||
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index 7cf3d2fcf56a..1620088a0e7e 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c | |||
| @@ -189,7 +189,7 @@ static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn, | |||
| 189 | 189 | ||
| 190 | if (verbose_request) | 190 | if (verbose_request) |
| 191 | dev_info(&pdev->xdev->dev, | 191 | dev_info(&pdev->xdev->dev, |
| 192 | "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n", | 192 | "read dev=%04x:%02x:%02x.%d - offset %x size %d\n", |
| 193 | pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), | 193 | pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), |
| 194 | PCI_FUNC(devfn), where, size); | 194 | PCI_FUNC(devfn), where, size); |
| 195 | 195 | ||
| @@ -228,7 +228,7 @@ static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn, | |||
| 228 | 228 | ||
| 229 | if (verbose_request) | 229 | if (verbose_request) |
| 230 | dev_info(&pdev->xdev->dev, | 230 | dev_info(&pdev->xdev->dev, |
| 231 | "write dev=%04x:%02x:%02x.%01x - " | 231 | "write dev=%04x:%02x:%02x.%d - " |
| 232 | "offset %x size %d val %x\n", | 232 | "offset %x size %d val %x\n", |
| 233 | pci_domain_nr(bus), bus->number, | 233 | pci_domain_nr(bus), bus->number, |
| 234 | PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val); | 234 | PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val); |
| @@ -432,7 +432,7 @@ static int __devinit pcifront_scan_bus(struct pcifront_device *pdev, | |||
| 432 | d = pci_scan_single_device(b, devfn); | 432 | d = pci_scan_single_device(b, devfn); |
| 433 | if (d) | 433 | if (d) |
| 434 | dev_info(&pdev->xdev->dev, "New device on " | 434 | dev_info(&pdev->xdev->dev, "New device on " |
| 435 | "%04x:%02x:%02x.%02x found.\n", domain, bus, | 435 | "%04x:%02x:%02x.%d found.\n", domain, bus, |
| 436 | PCI_SLOT(devfn), PCI_FUNC(devfn)); | 436 | PCI_SLOT(devfn), PCI_FUNC(devfn)); |
| 437 | } | 437 | } |
| 438 | 438 | ||
| @@ -1041,7 +1041,7 @@ static int pcifront_detach_devices(struct pcifront_device *pdev) | |||
| 1041 | pci_dev = pci_get_slot(pci_bus, PCI_DEVFN(slot, func)); | 1041 | pci_dev = pci_get_slot(pci_bus, PCI_DEVFN(slot, func)); |
| 1042 | if (!pci_dev) { | 1042 | if (!pci_dev) { |
| 1043 | dev_dbg(&pdev->xdev->dev, | 1043 | dev_dbg(&pdev->xdev->dev, |
| 1044 | "Cannot get PCI device %04x:%02x:%02x.%02x\n", | 1044 | "Cannot get PCI device %04x:%02x:%02x.%d\n", |
| 1045 | domain, bus, slot, func); | 1045 | domain, bus, slot, func); |
| 1046 | continue; | 1046 | continue; |
| 1047 | } | 1047 | } |
| @@ -1049,7 +1049,7 @@ static int pcifront_detach_devices(struct pcifront_device *pdev) | |||
| 1049 | pci_dev_put(pci_dev); | 1049 | pci_dev_put(pci_dev); |
| 1050 | 1050 | ||
| 1051 | dev_dbg(&pdev->xdev->dev, | 1051 | dev_dbg(&pdev->xdev->dev, |
| 1052 | "PCI device %04x:%02x:%02x.%02x removed.\n", | 1052 | "PCI device %04x:%02x:%02x.%d removed.\n", |
| 1053 | domain, bus, slot, func); | 1053 | domain, bus, slot, func); |
| 1054 | } | 1054 | } |
| 1055 | 1055 | ||
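The Xen format-string tweaks in this series all make the same point: a PCI function number is 0-7, so printing it as a zero-padded hex field is misleading, and the conventional dddd:bb:ss.f notation renders it as a single decimal digit (the one deliberate exception is the xenbus wire format below, which keeps %02x for protocol compatibility; the sscanf() in xen-pciback's pci_stub.c gets the matching "%d" change). A quick stand-alone check of the formatting:

    #include <stdio.h>

    #define PCI_DEVFN(slot, func)  ((((slot) & 0x1f) << 3) | ((func) & 0x07))
    #define PCI_SLOT(devfn)        (((devfn) >> 3) & 0x1f)
    #define PCI_FUNC(devfn)        ((devfn) & 0x07)

    int main(void)
    {
            unsigned int domain = 0, bus = 0x03, devfn = PCI_DEVFN(0x1d, 7);

            /* prints 0000:03:1d.7, matching lspci-style BDF notation */
            printf("%04x:%02x:%02x.%d\n", domain, bus,
                   PCI_SLOT(devfn), PCI_FUNC(devfn));
            return 0;
    }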
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c index b06a2399587c..d0e1180ad961 100644 --- a/drivers/regulator/max8649.c +++ b/drivers/regulator/max8649.c | |||
| @@ -150,7 +150,7 @@ static int max8649_enable_time(struct regulator_dev *rdev) | |||
| 150 | if (ret != 0) | 150 | if (ret != 0) |
| 151 | return ret; | 151 | return ret; |
| 152 | val &= MAX8649_VOL_MASK; | 152 | val &= MAX8649_VOL_MASK; |
| 153 | voltage = max8649_list_voltage(rdev, (unsigned char)ret); /* uV */ | 153 | voltage = max8649_list_voltage(rdev, (unsigned char)val); /* uV */ |
| 154 | 154 | ||
| 155 | /* get rate */ | 155 | /* get rate */ |
| 156 | ret = regmap_read(info->regmap, MAX8649_RAMP, &val); | 156 | ret = regmap_read(info->regmap, MAX8649_RAMP, &val); |
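The max8649 fix is a classic slip between a status code and a value: regmap_read() reports success or failure through its return value and the register contents through the val argument, so indexing the voltage table with ret always selected entry 0 on success. A minimal reproduction of the bug shape outside the kernel, with a mock read function and a made-up table:

    #include <stdio.h>

    static const int table_uV[] = { 750000, 800000, 850000, 900000 };

    /* Mock of a regmap_read()-style API: status via return, data via *val. */
    static int mock_read(unsigned int reg, unsigned int *val)
    {
            (void)reg;
            *val = 3;       /* pretend the hardware has index 3 selected */
            return 0;       /* success */
    }

    int main(void)
    {
            unsigned int val;
            int ret = mock_read(0x02, &val);

            if (ret)
                    return ret;

            printf("buggy:   %d uV\n", table_uV[ret]);  /* status code -> index 0 */
            printf("correct: %d uV\n", table_uV[val]);  /* value read  -> index 3 */
            return 0;
    }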
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c index 80ecafef1bc3..62dcd0a432bb 100644 --- a/drivers/regulator/mc13xxx-regulator-core.c +++ b/drivers/regulator/mc13xxx-regulator-core.c | |||
| @@ -254,6 +254,7 @@ int __devinit mc13xxx_get_num_regulators_dt(struct platform_device *pdev) | |||
| 254 | 254 | ||
| 255 | return num; | 255 | return num; |
| 256 | } | 256 | } |
| 257 | EXPORT_SYMBOL_GPL(mc13xxx_get_num_regulators_dt); | ||
| 257 | 258 | ||
| 258 | struct mc13xxx_regulator_init_data * __devinit mc13xxx_parse_regulators_dt( | 259 | struct mc13xxx_regulator_init_data * __devinit mc13xxx_parse_regulators_dt( |
| 259 | struct platform_device *pdev, struct mc13xxx_regulator *regulators, | 260 | struct platform_device *pdev, struct mc13xxx_regulator *regulators, |
| @@ -291,6 +292,7 @@ struct mc13xxx_regulator_init_data * __devinit mc13xxx_parse_regulators_dt( | |||
| 291 | 292 | ||
| 292 | return data; | 293 | return data; |
| 293 | } | 294 | } |
| 295 | EXPORT_SYMBOL_GPL(mc13xxx_parse_regulators_dt); | ||
| 294 | #endif | 296 | #endif |
| 295 | 297 | ||
| 296 | MODULE_LICENSE("GPL v2"); | 298 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c index 14e2d995e958..4dcfced107f5 100644 --- a/drivers/xen/cpu_hotplug.c +++ b/drivers/xen/cpu_hotplug.c | |||
| @@ -30,7 +30,8 @@ static int vcpu_online(unsigned int cpu) | |||
| 30 | sprintf(dir, "cpu/%u", cpu); | 30 | sprintf(dir, "cpu/%u", cpu); |
| 31 | err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state); | 31 | err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state); |
| 32 | if (err != 1) { | 32 | if (err != 1) { |
| 33 | printk(KERN_ERR "XENBUS: Unable to read cpu state\n"); | 33 | if (!xen_initial_domain()) |
| 34 | printk(KERN_ERR "XENBUS: Unable to read cpu state\n"); | ||
| 34 | return err; | 35 | return err; |
| 35 | } | 36 | } |
| 36 | 37 | ||
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index 7944a17f5cbf..19834d1c7c36 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c | |||
| @@ -884,7 +884,7 @@ static inline int str_to_quirk(const char *buf, int *domain, int *bus, int | |||
| 884 | int err; | 884 | int err; |
| 885 | 885 | ||
| 886 | err = | 886 | err = |
| 887 | sscanf(buf, " %04x:%02x:%02x.%1x-%08x:%1x:%08x", domain, bus, slot, | 887 | sscanf(buf, " %04x:%02x:%02x.%d-%08x:%1x:%08x", domain, bus, slot, |
| 888 | func, reg, size, mask); | 888 | func, reg, size, mask); |
| 889 | if (err == 7) | 889 | if (err == 7) |
| 890 | return 0; | 890 | return 0; |
| @@ -904,7 +904,7 @@ static int pcistub_device_id_add(int domain, int bus, int slot, int func) | |||
| 904 | pci_dev_id->bus = bus; | 904 | pci_dev_id->bus = bus; |
| 905 | pci_dev_id->devfn = PCI_DEVFN(slot, func); | 905 | pci_dev_id->devfn = PCI_DEVFN(slot, func); |
| 906 | 906 | ||
| 907 | pr_debug(DRV_NAME ": wants to seize %04x:%02x:%02x.%01x\n", | 907 | pr_debug(DRV_NAME ": wants to seize %04x:%02x:%02x.%d\n", |
| 908 | domain, bus, slot, func); | 908 | domain, bus, slot, func); |
| 909 | 909 | ||
| 910 | spin_lock_irqsave(&device_ids_lock, flags); | 910 | spin_lock_irqsave(&device_ids_lock, flags); |
| @@ -934,7 +934,7 @@ static int pcistub_device_id_remove(int domain, int bus, int slot, int func) | |||
| 934 | 934 | ||
| 935 | err = 0; | 935 | err = 0; |
| 936 | 936 | ||
| 937 | pr_debug(DRV_NAME ": removed %04x:%02x:%02x.%01x from " | 937 | pr_debug(DRV_NAME ": removed %04x:%02x:%02x.%d from " |
| 938 | "seize list\n", domain, bus, slot, func); | 938 | "seize list\n", domain, bus, slot, func); |
| 939 | } | 939 | } |
| 940 | } | 940 | } |
| @@ -1029,7 +1029,7 @@ static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf) | |||
| 1029 | break; | 1029 | break; |
| 1030 | 1030 | ||
| 1031 | count += scnprintf(buf + count, PAGE_SIZE - count, | 1031 | count += scnprintf(buf + count, PAGE_SIZE - count, |
| 1032 | "%04x:%02x:%02x.%01x\n", | 1032 | "%04x:%02x:%02x.%d\n", |
| 1033 | pci_dev_id->domain, pci_dev_id->bus, | 1033 | pci_dev_id->domain, pci_dev_id->bus, |
| 1034 | PCI_SLOT(pci_dev_id->devfn), | 1034 | PCI_SLOT(pci_dev_id->devfn), |
| 1035 | PCI_FUNC(pci_dev_id->devfn)); | 1035 | PCI_FUNC(pci_dev_id->devfn)); |
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c index d5dcf8d5d3d9..64b11f99eacc 100644 --- a/drivers/xen/xen-pciback/xenbus.c +++ b/drivers/xen/xen-pciback/xenbus.c | |||
| @@ -206,6 +206,7 @@ static int xen_pcibk_publish_pci_dev(struct xen_pcibk_device *pdev, | |||
| 206 | goto out; | 206 | goto out; |
| 207 | } | 207 | } |
| 208 | 208 | ||
| 209 | /* Note: The PV protocol uses %02x, don't change it */ | ||
| 209 | err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str, | 210 | err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str, |
| 210 | "%04x:%02x:%02x.%02x", domain, bus, | 211 | "%04x:%02x:%02x.%02x", domain, bus, |
| 211 | PCI_SLOT(devfn), PCI_FUNC(devfn)); | 212 | PCI_SLOT(devfn), PCI_FUNC(devfn)); |
| @@ -229,7 +230,7 @@ static int xen_pcibk_export_device(struct xen_pcibk_device *pdev, | |||
| 229 | err = -EINVAL; | 230 | err = -EINVAL; |
| 230 | xenbus_dev_fatal(pdev->xdev, err, | 231 | xenbus_dev_fatal(pdev->xdev, err, |
| 231 | "Couldn't locate PCI device " | 232 | "Couldn't locate PCI device " |
| 232 | "(%04x:%02x:%02x.%01x)! " | 233 | "(%04x:%02x:%02x.%d)! " |
| 233 | "perhaps already in-use?", | 234 | "perhaps already in-use?", |
| 234 | domain, bus, slot, func); | 235 | domain, bus, slot, func); |
| 235 | goto out; | 236 | goto out; |
| @@ -274,7 +275,7 @@ static int xen_pcibk_remove_device(struct xen_pcibk_device *pdev, | |||
| 274 | if (!dev) { | 275 | if (!dev) { |
| 275 | err = -EINVAL; | 276 | err = -EINVAL; |
| 276 | dev_dbg(&pdev->xdev->dev, "Couldn't locate PCI device " | 277 | dev_dbg(&pdev->xdev->dev, "Couldn't locate PCI device " |
| 277 | "(%04x:%02x:%02x.%01x)! not owned by this domain\n", | 278 | "(%04x:%02x:%02x.%d)! not owned by this domain\n", |
| 278 | domain, bus, slot, func); | 279 | domain, bus, slot, func); |
| 279 | goto out; | 280 | goto out; |
| 280 | } | 281 | } |
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 527dc2a3b89f..89f76252a16f 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c | |||
| @@ -369,6 +369,10 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u) | |||
| 369 | goto out; | 369 | goto out; |
| 370 | } | 370 | } |
| 371 | token++; | 371 | token++; |
| 372 | if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) { | ||
| 373 | rc = -EILSEQ; | ||
| 374 | goto out; | ||
| 375 | } | ||
| 372 | 376 | ||
| 373 | if (msg_type == XS_WATCH) { | 377 | if (msg_type == XS_WATCH) { |
| 374 | watch = alloc_watch_adapter(path, token); | 378 | watch = alloc_watch_adapter(path, token); |
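The xenbus_write_watch() check closes a hole where a watch token supplied from user space might not be NUL-terminated within the message; the kernel would later treat it as a C string and read past the end of the buffer. memchr() over the remaining bytes proves a terminator exists before anything is done with the token, and -EILSEQ rejects the malformed message. The same validation in stand-alone form:

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    /* Return 0 if buf[0..len) contains a NUL terminator, -EILSEQ otherwise. */
    static int check_terminated(const char *buf, size_t len)
    {
            return memchr(buf, 0, len) ? 0 : -EILSEQ;
    }

    int main(void)
    {
            char good[8] = "token";                      /* includes the trailing '\0' */
            char bad[5]  = { 't', 'o', 'k', 'e', 'n' };  /* no terminator at all       */

            printf("good: %d\n", check_terminated(good, sizeof(good)));
            printf("bad:  %d\n", check_terminated(bad, sizeof(bad)));
            return 0;
    }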
diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 3c1063acb2ab..94300fe46cce 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h | |||
| @@ -56,6 +56,26 @@ static inline unsigned long hweight_long(unsigned long w) | |||
| 56 | } | 56 | } |
| 57 | 57 | ||
| 58 | /** | 58 | /** |
| 59 | * rol64 - rotate a 64-bit value left | ||
| 60 | * @word: value to rotate | ||
| 61 | * @shift: bits to roll | ||
| 62 | */ | ||
| 63 | static inline __u64 rol64(__u64 word, unsigned int shift) | ||
| 64 | { | ||
| 65 | return (word << shift) | (word >> (64 - shift)); | ||
| 66 | } | ||
| 67 | |||
| 68 | /** | ||
| 69 | * ror64 - rotate a 64-bit value right | ||
| 70 | * @word: value to rotate | ||
| 71 | * @shift: bits to roll | ||
| 72 | */ | ||
| 73 | static inline __u64 ror64(__u64 word, unsigned int shift) | ||
| 74 | { | ||
| 75 | return (word >> shift) | (word << (64 - shift)); | ||
| 76 | } | ||
| 77 | |||
| 78 | /** | ||
| 59 | * rol32 - rotate a 32-bit value left | 79 | * rol32 - rotate a 32-bit value left |
| 60 | * @word: value to rotate | 80 | * @word: value to rotate |
| 61 | * @shift: bits to roll | 81 | * @shift: bits to roll |
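rol64()/ror64() fill out the existing rol32/ror32/rol16/... family with the same mask-free formulation; as with the 32-bit versions, the expression assumes 0 < shift < 64, because shifting a 64-bit value by 64 bits is undefined in C. A quick user-space exercise of both helpers (re-declared locally for the demo):

    #include <stdio.h>
    #include <stdint.h>

    /* Same formulation as the new kernel helpers; shift must be 1..63 here. */
    static inline uint64_t demo_rol64(uint64_t word, unsigned int shift)
    {
            return (word << shift) | (word >> (64 - shift));
    }

    static inline uint64_t demo_ror64(uint64_t word, unsigned int shift)
    {
            return (word >> shift) | (word << (64 - shift));
    }

    int main(void)
    {
            uint64_t x = 0x0123456789abcdefULL;

            printf("rol64(x, 8) = %016llx\n", (unsigned long long)demo_rol64(x, 8));
            printf("ror64(x, 8) = %016llx\n", (unsigned long long)demo_ror64(x, 8));
            printf("round trip  = %016llx\n",
                   (unsigned long long)demo_ror64(demo_rol64(x, 13), 13));
            return 0;
    }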
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 9f22ba572de0..19a41d1737af 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h | |||
| @@ -217,6 +217,7 @@ struct mmc_card { | |||
| 217 | #define MMC_CARD_SDXC (1<<6) /* card is SDXC */ | 217 | #define MMC_CARD_SDXC (1<<6) /* card is SDXC */ |
| 218 | #define MMC_CARD_REMOVED (1<<7) /* card has been removed */ | 218 | #define MMC_CARD_REMOVED (1<<7) /* card has been removed */ |
| 219 | #define MMC_STATE_HIGHSPEED_200 (1<<8) /* card is in HS200 mode */ | 219 | #define MMC_STATE_HIGHSPEED_200 (1<<8) /* card is in HS200 mode */ |
| 220 | #define MMC_STATE_SLEEP (1<<9) /* card is in sleep state */ | ||
| 220 | unsigned int quirks; /* card quirks */ | 221 | unsigned int quirks; /* card quirks */ |
| 221 | #define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ | 222 | #define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ |
| 222 | #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ | 223 | #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ |
| @@ -382,6 +383,7 @@ static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data) | |||
| 382 | #define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED) | 383 | #define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED) |
| 383 | #define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC) | 384 | #define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC) |
| 384 | #define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED)) | 385 | #define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED)) |
| 386 | #define mmc_card_is_sleep(c) ((c)->state & MMC_STATE_SLEEP) | ||
| 385 | 387 | ||
| 386 | #define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) | 388 | #define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) |
| 387 | #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) | 389 | #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) |
| @@ -393,7 +395,9 @@ static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data) | |||
| 393 | #define mmc_sd_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED) | 395 | #define mmc_sd_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED) |
| 394 | #define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC) | 396 | #define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC) |
| 395 | #define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED) | 397 | #define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED) |
| 398 | #define mmc_card_set_sleep(c) ((c)->state |= MMC_STATE_SLEEP) | ||
| 396 | 399 | ||
| 400 | #define mmc_card_clr_sleep(c) ((c)->state &= ~MMC_STATE_SLEEP) | ||
| 397 | /* | 401 | /* |
| 398 | * Quirk add/remove for MMC products. | 402 | * Quirk add/remove for MMC products. |
| 399 | */ | 403 | */ |
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index e8779c6d1759..aae5d1f1bb39 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h | |||
| @@ -14,6 +14,8 @@ | |||
| 14 | #ifndef LINUX_MMC_DW_MMC_H | 14 | #ifndef LINUX_MMC_DW_MMC_H |
| 15 | #define LINUX_MMC_DW_MMC_H | 15 | #define LINUX_MMC_DW_MMC_H |
| 16 | 16 | ||
| 17 | #include <linux/scatterlist.h> | ||
| 18 | |||
| 17 | #define MAX_MCI_SLOTS 2 | 19 | #define MAX_MCI_SLOTS 2 |
| 18 | 20 | ||
| 19 | enum dw_mci_state { | 21 | enum dw_mci_state { |
| @@ -40,7 +42,7 @@ struct mmc_data; | |||
| 40 | * @lock: Spinlock protecting the queue and associated data. | 42 | * @lock: Spinlock protecting the queue and associated data. |
| 41 | * @regs: Pointer to MMIO registers. | 43 | * @regs: Pointer to MMIO registers. |
| 42 | * @sg: Scatterlist entry currently being processed by PIO code, if any. | 44 | * @sg: Scatterlist entry currently being processed by PIO code, if any. |
| 43 | * @pio_offset: Offset into the current scatterlist entry. | 45 | * @sg_miter: PIO mapping scatterlist iterator. |
| 44 | * @cur_slot: The slot which is currently using the controller. | 46 | * @cur_slot: The slot which is currently using the controller. |
| 45 | * @mrq: The request currently being processed on @cur_slot, | 47 | * @mrq: The request currently being processed on @cur_slot, |
| 46 | * or NULL if the controller is idle. | 48 | * or NULL if the controller is idle. |
| @@ -115,7 +117,7 @@ struct dw_mci { | |||
| 115 | void __iomem *regs; | 117 | void __iomem *regs; |
| 116 | 118 | ||
| 117 | struct scatterlist *sg; | 119 | struct scatterlist *sg; |
| 118 | unsigned int pio_offset; | 120 | struct sg_mapping_iter sg_miter; |
| 119 | 121 | ||
| 120 | struct dw_mci_slot *cur_slot; | 122 | struct dw_mci_slot *cur_slot; |
| 121 | struct mmc_request *mrq; | 123 | struct mmc_request *mrq; |
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 0beba1e5e1ed..ee2b0363c040 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
| @@ -257,6 +257,7 @@ struct mmc_host { | |||
| 257 | #define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */ | 257 | #define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */ |
| 258 | #define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \ | 258 | #define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \ |
| 259 | MMC_CAP2_HS200_1_2V_SDR) | 259 | MMC_CAP2_HS200_1_2V_SDR) |
| 260 | #define MMC_CAP2_BROKEN_VOLTAGE (1 << 7) /* Use the broken voltage */ | ||
| 260 | 261 | ||
| 261 | mmc_pm_flag_t pm_caps; /* supported pm features */ | 262 | mmc_pm_flag_t pm_caps; /* supported pm features */ |
| 262 | unsigned int power_notify_type; | 263 | unsigned int power_notify_type; |
| @@ -444,4 +445,23 @@ static inline int mmc_boot_partition_access(struct mmc_host *host) | |||
| 444 | return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC); | 445 | return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC); |
| 445 | } | 446 | } |
| 446 | 447 | ||
| 448 | #ifdef CONFIG_MMC_CLKGATE | ||
| 449 | void mmc_host_clk_hold(struct mmc_host *host); | ||
| 450 | void mmc_host_clk_release(struct mmc_host *host); | ||
| 451 | unsigned int mmc_host_clk_rate(struct mmc_host *host); | ||
| 452 | |||
| 453 | #else | ||
| 454 | static inline void mmc_host_clk_hold(struct mmc_host *host) | ||
| 455 | { | ||
| 456 | } | ||
| 457 | |||
| 458 | static inline void mmc_host_clk_release(struct mmc_host *host) | ||
| 459 | { | ||
| 460 | } | ||
| 461 | |||
| 462 | static inline unsigned int mmc_host_clk_rate(struct mmc_host *host) | ||
| 463 | { | ||
| 464 | return host->ios.clock; | ||
| 465 | } | ||
| 466 | #endif | ||
| 447 | #endif /* LINUX_MMC_HOST_H */ | 467 | #endif /* LINUX_MMC_HOST_H */ |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 9350f3c3bdf8..1358987c49d8 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -4374,6 +4374,7 @@ enum { | |||
| 4374 | ALC882_FIXUP_ACER_ASPIRE_8930G, | 4374 | ALC882_FIXUP_ACER_ASPIRE_8930G, |
| 4375 | ALC882_FIXUP_ASPIRE_8930G_VERBS, | 4375 | ALC882_FIXUP_ASPIRE_8930G_VERBS, |
| 4376 | ALC885_FIXUP_MACPRO_GPIO, | 4376 | ALC885_FIXUP_MACPRO_GPIO, |
| 4377 | ALC889_FIXUP_DAC_ROUTE, | ||
| 4377 | }; | 4378 | }; |
| 4378 | 4379 | ||
| 4379 | static void alc889_fixup_coef(struct hda_codec *codec, | 4380 | static void alc889_fixup_coef(struct hda_codec *codec, |
| @@ -4427,6 +4428,23 @@ static void alc885_fixup_macpro_gpio(struct hda_codec *codec, | |||
| 4427 | alc882_gpio_mute(codec, 1, 0); | 4428 | alc882_gpio_mute(codec, 1, 0); |
| 4428 | } | 4429 | } |
| 4429 | 4430 | ||
| 4431 | /* Fix the connection of some pins for ALC889: | ||
| 4432 | * At least on the Acer Aspire 5935, the connections to DAC3/4 don't | ||
| 4433 | * work correctly (bko#42740). | ||
| 4434 | */ | ||
| 4435 | static void alc889_fixup_dac_route(struct hda_codec *codec, | ||
| 4436 | const struct alc_fixup *fix, int action) | ||
| 4437 | { | ||
| 4438 | if (action == ALC_FIXUP_ACT_PRE_PROBE) { | ||
| 4439 | hda_nid_t conn1[2] = { 0x0c, 0x0d }; | ||
| 4440 | hda_nid_t conn2[2] = { 0x0e, 0x0f }; | ||
| 4441 | snd_hda_override_conn_list(codec, 0x14, 2, conn1); | ||
| 4442 | snd_hda_override_conn_list(codec, 0x15, 2, conn1); | ||
| 4443 | snd_hda_override_conn_list(codec, 0x18, 2, conn2); | ||
| 4444 | snd_hda_override_conn_list(codec, 0x1a, 2, conn2); | ||
| 4445 | } | ||
| 4446 | } | ||
| 4447 | |||
| 4430 | static const struct alc_fixup alc882_fixups[] = { | 4448 | static const struct alc_fixup alc882_fixups[] = { |
| 4431 | [ALC882_FIXUP_ABIT_AW9D_MAX] = { | 4449 | [ALC882_FIXUP_ABIT_AW9D_MAX] = { |
| 4432 | .type = ALC_FIXUP_PINS, | 4450 | .type = ALC_FIXUP_PINS, |
| @@ -4574,6 +4592,10 @@ static const struct alc_fixup alc882_fixups[] = { | |||
| 4574 | .type = ALC_FIXUP_FUNC, | 4592 | .type = ALC_FIXUP_FUNC, |
| 4575 | .v.func = alc885_fixup_macpro_gpio, | 4593 | .v.func = alc885_fixup_macpro_gpio, |
| 4576 | }, | 4594 | }, |
| 4595 | [ALC889_FIXUP_DAC_ROUTE] = { | ||
| 4596 | .type = ALC_FIXUP_FUNC, | ||
| 4597 | .v.func = alc889_fixup_dac_route, | ||
| 4598 | }, | ||
| 4577 | }; | 4599 | }; |
| 4578 | 4600 | ||
| 4579 | static const struct snd_pci_quirk alc882_fixup_tbl[] = { | 4601 | static const struct snd_pci_quirk alc882_fixup_tbl[] = { |
| @@ -4598,6 +4620,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { | |||
| 4598 | SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G", | 4620 | SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G", |
| 4599 | ALC882_FIXUP_ACER_ASPIRE_4930G), | 4621 | ALC882_FIXUP_ACER_ASPIRE_4930G), |
| 4600 | SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210), | 4622 | SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210), |
| 4623 | SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE), | ||
| 4601 | SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", ALC882_FIXUP_ACER_ASPIRE_7736), | 4624 | SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", ALC882_FIXUP_ACER_ASPIRE_7736), |
| 4602 | SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_FIXUP_EAPD), | 4625 | SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_FIXUP_EAPD), |
| 4603 | SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V), | 4626 | SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V), |
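The Realtek change above follows the usual three-step fixup pattern: add an enum index, describe the fixup in alc882_fixups[], and match the board by PCI SSID in alc882_fixup_tbl[]. The sketch below mirrors that pattern with made-up identifiers (ALC882_FIXUP_EXAMPLE_BOARD and subdevice 0x9999 are invented for illustration); it is not part of the patch.

    /* Sketch only -- hypothetical IDs, same shape as the entries above. */
    enum { ALC882_FIXUP_EXAMPLE_BOARD };

    static const struct alc_fixup example_fixups[] = {
    	[ALC882_FIXUP_EXAMPLE_BOARD] = {
    		.type = ALC_FIXUP_FUNC,
    		.v.func = alc889_fixup_dac_route,	/* reuse the new hook */
    	},
    };

    static const struct snd_pci_quirk example_fixup_tbl[] = {
    	/* 0x1025 = Acer; 0x9999 is a made-up subdevice id */
    	SND_PCI_QUIRK(0x1025, 0x9999, "Example board", ALC882_FIXUP_EXAMPLE_BOARD),
    	{}
    };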
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 948f0be2f4f3..6345df131a00 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
| @@ -5078,9 +5078,9 @@ static int stac92xx_update_led_status(struct hda_codec *codec) | |||
| 5078 | spec->gpio_dir, spec->gpio_data); | 5078 | spec->gpio_dir, spec->gpio_data); |
| 5079 | } else { | 5079 | } else { |
| 5080 | notmtd_lvl = spec->gpio_led_polarity ? | 5080 | notmtd_lvl = spec->gpio_led_polarity ? |
| 5081 | AC_PINCTL_VREF_HIZ : AC_PINCTL_VREF_GRD; | 5081 | AC_PINCTL_VREF_50 : AC_PINCTL_VREF_GRD; |
| 5082 | muted_lvl = spec->gpio_led_polarity ? | 5082 | muted_lvl = spec->gpio_led_polarity ? |
| 5083 | AC_PINCTL_VREF_GRD : AC_PINCTL_VREF_HIZ; | 5083 | AC_PINCTL_VREF_GRD : AC_PINCTL_VREF_50; |
| 5084 | spec->vref_led = muted ? muted_lvl : notmtd_lvl; | 5084 | spec->vref_led = muted ? muted_lvl : notmtd_lvl; |
| 5085 | stac_vrefout_set(codec, spec->vref_mute_led_nid, | 5085 | stac_vrefout_set(codec, spec->vref_mute_led_nid, |
| 5086 | spec->vref_led); | 5086 | spec->vref_led); |
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index 284e311040fe..dff9a00ee8fb 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c | |||
| @@ -666,6 +666,9 @@ static void via_auto_init_analog_input(struct hda_codec *codec) | |||
| 666 | /* init input-src */ | 666 | /* init input-src */ |
| 667 | for (i = 0; i < spec->num_adc_nids; i++) { | 667 | for (i = 0; i < spec->num_adc_nids; i++) { |
| 668 | int adc_idx = spec->inputs[spec->cur_mux[i]].adc_idx; | 668 | int adc_idx = spec->inputs[spec->cur_mux[i]].adc_idx; |
| 669 | /* secondary ADCs must have the unique MUX */ | ||
| 670 | if (i > 0 && !spec->mux_nids[i]) | ||
| 671 | break; | ||
| 669 | if (spec->mux_nids[adc_idx]) { | 672 | if (spec->mux_nids[adc_idx]) { |
| 670 | int mux_idx = spec->inputs[spec->cur_mux[i]].mux_idx; | 673 | int mux_idx = spec->inputs[spec->cur_mux[i]].mux_idx; |
| 671 | snd_hda_codec_write(codec, spec->mux_nids[adc_idx], 0, | 674 | snd_hda_codec_write(codec, spec->mux_nids[adc_idx], 0, |
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c index 9f3b01bb72c8..e0a4263baa20 100644 --- a/sound/pci/intel8x0.c +++ b/sound/pci/intel8x0.c | |||
| @@ -2102,6 +2102,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = { | |||
| 2102 | }, | 2102 | }, |
| 2103 | { | 2103 | { |
| 2104 | .subvendor = 0x161f, | 2104 | .subvendor = 0x161f, |
| 2105 | .subdevice = 0x202f, | ||
| 2106 | .name = "Gateway M520", | ||
| 2107 | .type = AC97_TUNE_INV_EAPD | ||
| 2108 | }, | ||
| 2109 | { | ||
| 2110 | .subvendor = 0x161f, | ||
| 2105 | .subdevice = 0x203a, | 2111 | .subdevice = 0x203a, |
| 2106 | .name = "Gateway 4525GZ", /* AD1981B */ | 2112 | .name = "Gateway 4525GZ", /* AD1981B */ |
| 2107 | .type = AC97_TUNE_INV_EAPD | 2113 | .type = AC97_TUNE_INV_EAPD |
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c index db6c89a28bda..ea4a82d01160 100644 --- a/sound/soc/sh/fsi.c +++ b/sound/soc/sh/fsi.c | |||
| @@ -1152,12 +1152,8 @@ static snd_pcm_uframes_t fsi_pointer(struct snd_pcm_substream *substream) | |||
| 1152 | { | 1152 | { |
| 1153 | struct fsi_priv *fsi = fsi_get_priv(substream); | 1153 | struct fsi_priv *fsi = fsi_get_priv(substream); |
| 1154 | struct fsi_stream *io = fsi_get_stream(fsi, fsi_is_play(substream)); | 1154 | struct fsi_stream *io = fsi_get_stream(fsi, fsi_is_play(substream)); |
| 1155 | int samples_pos = io->buff_sample_pos - 1; | ||
| 1156 | 1155 | ||
| 1157 | if (samples_pos < 0) | 1156 | return fsi_sample2frame(fsi, io->buff_sample_pos); |
| 1158 | samples_pos = 0; | ||
| 1159 | |||
| 1160 | return fsi_sample2frame(fsi, samples_pos); | ||
| 1161 | } | 1157 | } |
| 1162 | 1158 | ||
| 1163 | static struct snd_pcm_ops fsi_pcm_ops = { | 1159 | static struct snd_pcm_ops fsi_pcm_ops = { |
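The fsi.c hunk drops the "back off by one sample" adjustment so the .pointer callback reports the frame for buff_sample_pos directly. A rough sketch of the conversion as understood here (assuming fsi_sample2frame() simply divides the sample count by the channel count; the helper itself is not shown in this diff):

    #include <sound/pcm.h>

    /* Hypothetical illustration of the samples -> ALSA frames conversion
     * assumed above; not code from this patch. */
    static snd_pcm_uframes_t example_pointer(unsigned int buff_sample_pos,
    					 unsigned int channels)
    {
    	return buff_sample_pos / channels;	/* samples -> frames */
    }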
