Diffstat (limited to 'drivers')
114 files changed, 1322 insertions, 807 deletions
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 1d661b5c3287..eb6fd233764b 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
| @@ -28,23 +28,33 @@ | |||
| 28 | #include "internal.h" | 28 | #include "internal.h" |
| 29 | #include "sleep.h" | 29 | #include "sleep.h" |
| 30 | 30 | ||
| 31 | u8 wake_sleep_flags = ACPI_NO_OPTIONAL_METHODS; | ||
| 31 | static unsigned int gts, bfs; | 32 | static unsigned int gts, bfs; |
| 32 | module_param(gts, uint, 0644); | 33 | static int set_param_wake_flag(const char *val, struct kernel_param *kp) |
| 33 | module_param(bfs, uint, 0644); | ||
| 34 | MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend."); | ||
| 35 | MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".); | ||
| 36 | |||
| 37 | static u8 wake_sleep_flags(void) | ||
| 38 | { | 34 | { |
| 39 | u8 flags = ACPI_NO_OPTIONAL_METHODS; | 35 | int ret = param_set_int(val, kp); |
| 40 | 36 | ||
| 41 | if (gts) | 37 | if (ret) |
| 42 | flags |= ACPI_EXECUTE_GTS; | 38 | return ret; |
| 43 | if (bfs) | ||
| 44 | flags |= ACPI_EXECUTE_BFS; | ||
| 45 | 39 | ||
| 46 | return flags; | 40 | if (kp->arg == (const char *)&gts) { | ||
| 41 | if (gts) | ||
| 42 | wake_sleep_flags |= ACPI_EXECUTE_GTS; | ||
| 43 | else | ||
| 44 | wake_sleep_flags &= ~ACPI_EXECUTE_GTS; | ||
| 45 | } | ||
| 46 | if (kp->arg == (const char *)&bfs) { | ||
| 47 | if (bfs) | ||
| 48 | wake_sleep_flags |= ACPI_EXECUTE_BFS; | ||
| 49 | else | ||
| 50 | wake_sleep_flags &= ~ACPI_EXECUTE_BFS; | ||
| 51 | } | ||
| 52 | return ret; | ||
| 47 | } | 53 | } |
| 54 | module_param_call(gts, set_param_wake_flag, param_get_int, &gts, 0644); | ||
| 55 | module_param_call(bfs, set_param_wake_flag, param_get_int, &bfs, 0644); | ||
| 56 | MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend."); | ||
| 57 | MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".); | ||
| 48 | 58 | ||
| 49 | static u8 sleep_states[ACPI_S_STATE_COUNT]; | 59 | static u8 sleep_states[ACPI_S_STATE_COUNT]; |
| 50 | 60 | ||
| @@ -263,7 +273,6 @@ static int acpi_suspend_enter(suspend_state_t pm_state) | |||
| 263 | { | 273 | { |
| 264 | acpi_status status = AE_OK; | 274 | acpi_status status = AE_OK; |
| 265 | u32 acpi_state = acpi_target_sleep_state; | 275 | u32 acpi_state = acpi_target_sleep_state; |
| 266 | u8 flags = wake_sleep_flags(); | ||
| 267 | int error; | 276 | int error; |
| 268 | 277 | ||
| 269 | ACPI_FLUSH_CPU_CACHE(); | 278 | ACPI_FLUSH_CPU_CACHE(); |
| @@ -271,7 +280,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state) | |||
| 271 | switch (acpi_state) { | 280 | switch (acpi_state) { |
| 272 | case ACPI_STATE_S1: | 281 | case ACPI_STATE_S1: |
| 273 | barrier(); | 282 | barrier(); |
| 274 | status = acpi_enter_sleep_state(acpi_state, flags); | 283 | status = acpi_enter_sleep_state(acpi_state, wake_sleep_flags); |
| 275 | break; | 284 | break; |
| 276 | 285 | ||
| 277 | case ACPI_STATE_S3: | 286 | case ACPI_STATE_S3: |
| @@ -286,7 +295,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state) | |||
| 286 | acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); | 295 | acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); |
| 287 | 296 | ||
| 288 | /* Reprogram control registers and execute _BFS */ | 297 | /* Reprogram control registers and execute _BFS */ |
| 289 | acpi_leave_sleep_state_prep(acpi_state, flags); | 298 | acpi_leave_sleep_state_prep(acpi_state, wake_sleep_flags); |
| 290 | 299 | ||
| 291 | /* ACPI 3.0 specs (P62) says that it's the responsibility | 300 | /* ACPI 3.0 specs (P62) says that it's the responsibility |
| 292 | * of the OSPM to clear the status bit [ implying that the | 301 | * of the OSPM to clear the status bit [ implying that the |
| @@ -550,30 +559,27 @@ static int acpi_hibernation_begin(void) | |||
| 550 | 559 | ||
| 551 | static int acpi_hibernation_enter(void) | 560 | static int acpi_hibernation_enter(void) |
| 552 | { | 561 | { |
| 553 | u8 flags = wake_sleep_flags(); | ||
| 554 | acpi_status status = AE_OK; | 562 | acpi_status status = AE_OK; |
| 555 | 563 | ||
| 556 | ACPI_FLUSH_CPU_CACHE(); | 564 | ACPI_FLUSH_CPU_CACHE(); |
| 557 | 565 | ||
| 558 | /* This shouldn't return. If it returns, we have a problem */ | 566 | /* This shouldn't return. If it returns, we have a problem */ |
| 559 | status = acpi_enter_sleep_state(ACPI_STATE_S4, flags); | 567 | status = acpi_enter_sleep_state(ACPI_STATE_S4, wake_sleep_flags); |
| 560 | /* Reprogram control registers and execute _BFS */ | 568 | /* Reprogram control registers and execute _BFS */ |
| 561 | acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags); | 569 | acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags); |
| 562 | 570 | ||
| 563 | return ACPI_SUCCESS(status) ? 0 : -EFAULT; | 571 | return ACPI_SUCCESS(status) ? 0 : -EFAULT; |
| 564 | } | 572 | } |
| 565 | 573 | ||
| 566 | static void acpi_hibernation_leave(void) | 574 | static void acpi_hibernation_leave(void) |
| 567 | { | 575 | { |
| 568 | u8 flags = wake_sleep_flags(); | ||
| 569 | |||
| 570 | /* | 576 | /* |
| 571 | * If ACPI is not enabled by the BIOS and the boot kernel, we need to | 577 | * If ACPI is not enabled by the BIOS and the boot kernel, we need to |
| 572 | * enable it here. | 578 | * enable it here. |
| 573 | */ | 579 | */ |
| 574 | acpi_enable(); | 580 | acpi_enable(); |
| 575 | /* Reprogram control registers and execute _BFS */ | 581 | /* Reprogram control registers and execute _BFS */ |
| 576 | acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags); | 582 | acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags); |
| 577 | /* Check the hardware signature */ | 583 | /* Check the hardware signature */ |
| 578 | if (facs && s4_hardware_signature != facs->hardware_signature) { | 584 | if (facs && s4_hardware_signature != facs->hardware_signature) { |
| 579 | printk(KERN_EMERG "ACPI: Hardware changed while hibernated, " | 585 | printk(KERN_EMERG "ACPI: Hardware changed while hibernated, " |
| @@ -828,12 +834,10 @@ static void acpi_power_off_prepare(void) | |||
| 828 | 834 | ||
| 829 | static void acpi_power_off(void) | 835 | static void acpi_power_off(void) |
| 830 | { | 836 | { |
| 831 | u8 flags = wake_sleep_flags(); | ||
| 832 | |||
| 833 | /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ | 837 | /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ |
| 834 | printk(KERN_DEBUG "%s called\n", __func__); | 838 | printk(KERN_DEBUG "%s called\n", __func__); |
| 835 | local_irq_disable(); | 839 | local_irq_disable(); |
| 836 | acpi_enter_sleep_state(ACPI_STATE_S5, flags); | 840 | acpi_enter_sleep_state(ACPI_STATE_S5, wake_sleep_flags); |
| 837 | } | 841 | } |
| 838 | 842 | ||
| 839 | /* | 843 | /* |
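The sleep.c hunk above replaces the plain gts/bfs module_param() definitions with module_param_call(), so a write to either parameter immediately folds the result into the shared wake_sleep_flags value instead of recomputing the flags on every suspend path. A minimal, self-contained sketch of that pattern follows; the parameter name, flag variable and bit are invented for illustration and are not part of the driver:

        #include <linux/module.h>
        #include <linux/moduleparam.h>

        static unsigned int fast_mode;          /* 0/1 knob exposed via sysfs */
        static u8 feature_flags;                /* derived value kept in sync */
        #define FEATURE_FAST 0x01

        /* Custom setter: parse the integer, then refresh the derived flags. */
        static int set_fast_mode(const char *val, struct kernel_param *kp)
        {
                int ret = param_set_int(val, kp);

                if (ret)
                        return ret;

                if (fast_mode)
                        feature_flags |= FEATURE_FAST;
                else
                        feature_flags &= ~FEATURE_FAST;
                return 0;
        }

        module_param_call(fast_mode, set_fast_mode, param_get_int, &fast_mode, 0644);
        MODULE_PARM_DESC(fast_mode, "Enable the (hypothetical) fast mode.");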
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 93dabdcd2cbe..22226350cd0c 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
| @@ -3399,7 +3399,8 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht) | |||
| 3399 | */ | 3399 | */ |
| 3400 | shost->max_host_blocked = 1; | 3400 | shost->max_host_blocked = 1; |
| 3401 | 3401 | ||
| 3402 | rc = scsi_add_host(ap->scsi_host, &ap->tdev); | 3402 | rc = scsi_add_host_with_dma(ap->scsi_host, |
| 3403 | &ap->tdev, ap->host->dev); | ||
| 3403 | if (rc) | 3404 | if (rc) |
| 3404 | goto err_add; | 3405 | goto err_add; |
| 3405 | } | 3406 | } |
| @@ -3838,18 +3839,25 @@ void ata_sas_port_stop(struct ata_port *ap) | |||
| 3838 | } | 3839 | } |
| 3839 | EXPORT_SYMBOL_GPL(ata_sas_port_stop); | 3840 | EXPORT_SYMBOL_GPL(ata_sas_port_stop); |
| 3840 | 3841 | ||
| 3841 | int ata_sas_async_port_init(struct ata_port *ap) | 3842 | /** |
| 3843 | * ata_sas_async_probe - simply schedule probing and return | ||
| 3844 | * @ap: Port to probe | ||
| 3845 | * | ||
| 3846 | * For batch scheduling of probe for sas attached ata devices, assumes | ||
| 3847 | * the port has already been through ata_sas_port_init() | ||
| 3848 | */ | ||
| 3849 | void ata_sas_async_probe(struct ata_port *ap) | ||
| 3842 | { | 3850 | { |
| 3843 | int rc = ap->ops->port_start(ap); | 3851 | __ata_port_probe(ap); |
| 3844 | 3852 | } | |
| 3845 | if (!rc) { | 3853 | EXPORT_SYMBOL_GPL(ata_sas_async_probe); |
| 3846 | ap->print_id = atomic_inc_return(&ata_print_id); | ||
| 3847 | __ata_port_probe(ap); | ||
| 3848 | } | ||
| 3849 | 3854 | ||
| 3850 | return rc; | 3855 | int ata_sas_sync_probe(struct ata_port *ap) |
| 3856 | { | ||
| 3857 | return ata_port_probe(ap); | ||
| 3851 | } | 3858 | } |
| 3852 | EXPORT_SYMBOL_GPL(ata_sas_async_port_init); | 3859 | EXPORT_SYMBOL_GPL(ata_sas_sync_probe); |
| 3860 | |||
| 3853 | 3861 | ||
| 3854 | /** | 3862 | /** |
| 3855 | * ata_sas_port_init - Initialize a SATA device | 3863 | * ata_sas_port_init - Initialize a SATA device |
| @@ -3866,12 +3874,10 @@ int ata_sas_port_init(struct ata_port *ap) | |||
| 3866 | { | 3874 | { |
| 3867 | int rc = ap->ops->port_start(ap); | 3875 | int rc = ap->ops->port_start(ap); |
| 3868 | 3876 | ||
| 3869 | if (!rc) { | 3877 | if (rc) |
| 3870 | ap->print_id = atomic_inc_return(&ata_print_id); | 3878 | return rc; |
| 3871 | rc = ata_port_probe(ap); | 3879 | ap->print_id = atomic_inc_return(&ata_print_id); |
| 3872 | } | 3880 | return 0; |
| 3873 | |||
| 3874 | return rc; | ||
| 3875 | } | 3881 | } |
| 3876 | EXPORT_SYMBOL_GPL(ata_sas_port_init); | 3882 | EXPORT_SYMBOL_GPL(ata_sas_port_init); |
| 3877 | 3883 | ||
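The libata change splits probing out of ata_sas_port_init(): init now only starts the port and assigns print_id, ata_sas_async_probe() schedules the probe, and ata_sas_sync_probe() runs it synchronously. A hypothetical caller showing the ordering the new kernel-doc describes (init every port first, then batch the probes); only the function names come from the patch, the helper and its prototypes in <linux/libata.h> are assumed:

        #include <linux/libata.h>

        /* Hypothetical libsas-side caller; only the call ordering matters. */
        static int example_bring_up_ports(struct ata_port **aps, int n)
        {
                int i, rc;

                for (i = 0; i < n; i++) {
                        rc = ata_sas_port_init(aps[i]); /* start port, assign print_id */
                        if (rc)
                                return rc;
                }

                for (i = 0; i < n; i++)
                        ata_sas_async_probe(aps[i]);    /* schedule probe, return at once */

                return 0;
        }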
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index cdcf75c0954f..3e2a6002aae6 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
| @@ -404,16 +404,19 @@ int bcma_sprom_get(struct bcma_bus *bus) | |||
| 404 | return -EOPNOTSUPP; | 404 | return -EOPNOTSUPP; |
| 405 | 405 | ||
| 406 | if (!bcma_sprom_ext_available(bus)) { | 406 | if (!bcma_sprom_ext_available(bus)) { |
| 407 | bool sprom_onchip; | ||
| 408 | |||
| 407 | /* | 409 | /* |
| 408 | * External SPROM takes precedence so check | 410 | * External SPROM takes precedence so check |
| 409 | * on-chip OTP only when no external SPROM | 411 | * on-chip OTP only when no external SPROM |
| 410 | * is present. | 412 | * is present. |
| 411 | */ | 413 | */ |
| 412 | if (bcma_sprom_onchip_available(bus)) { | 414 | sprom_onchip = bcma_sprom_onchip_available(bus); |
| 415 | if (sprom_onchip) { | ||
| 413 | /* determine offset */ | 416 | /* determine offset */ |
| 414 | offset = bcma_sprom_onchip_offset(bus); | 417 | offset = bcma_sprom_onchip_offset(bus); |
| 415 | } | 418 | } |
| 416 | if (!offset) { | 419 | if (!offset || !sprom_onchip) { |
| 417 | /* | 420 | /* |
| 418 | * Maybe there is no SPROM on the device? | 421 | * Maybe there is no SPROM on the device? |
| 419 | * Now we ask the arch code if there is some sprom | 422 | * Now we ask the arch code if there is some sprom |
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index c301a8ec31aa..3d704abd7912 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
| @@ -1429,6 +1429,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
| 1429 | * signal | 1429 | * signal |
| 1430 | */ | 1430 | */ |
| 1431 | release_phy_channel(plchan); | 1431 | release_phy_channel(plchan); |
| 1432 | plchan->phychan_hold = 0; | ||
| 1432 | } | 1433 | } |
| 1433 | /* Dequeue jobs and free LLIs */ | 1434 | /* Dequeue jobs and free LLIs */ |
| 1434 | if (plchan->at) { | 1435 | if (plchan->at) { |
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 7aa58d204892..445fdf811695 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
| @@ -221,10 +221,6 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) | |||
| 221 | 221 | ||
| 222 | vdbg_dump_regs(atchan); | 222 | vdbg_dump_regs(atchan); |
| 223 | 223 | ||
| 224 | /* clear any pending interrupt */ | ||
| 225 | while (dma_readl(atdma, EBCISR)) | ||
| 226 | cpu_relax(); | ||
| 227 | |||
| 228 | channel_writel(atchan, SADDR, 0); | 224 | channel_writel(atchan, SADDR, 0); |
| 229 | channel_writel(atchan, DADDR, 0); | 225 | channel_writel(atchan, DADDR, 0); |
| 230 | channel_writel(atchan, CTRLA, 0); | 226 | channel_writel(atchan, CTRLA, 0); |
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index a45b5d2a5987..bb787d8e1529 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
| @@ -571,11 +571,14 @@ static void imxdma_tasklet(unsigned long data) | |||
| 571 | if (desc->desc.callback) | 571 | if (desc->desc.callback) |
| 572 | desc->desc.callback(desc->desc.callback_param); | 572 | desc->desc.callback(desc->desc.callback_param); |
| 573 | 573 | ||
| 574 | dma_cookie_complete(&desc->desc); | 574 | /* If we are dealing with a cyclic descriptor keep it on ld_active |
| 575 | 575 | * and dont mark the descripor as complete. | |
| 576 | /* If we are dealing with a cyclic descriptor keep it on ld_active */ | 576 | * Only in non-cyclic cases it would be marked as complete |
| 577 | */ | ||
| 577 | if (imxdma_chan_is_doing_cyclic(imxdmac)) | 578 | if (imxdma_chan_is_doing_cyclic(imxdmac)) |
| 578 | goto out; | 579 | goto out; |
| 580 | else | ||
| 581 | dma_cookie_complete(&desc->desc); | ||
| 579 | 582 | ||
| 580 | /* Free 2D slot if it was an interleaved transfer */ | 583 | /* Free 2D slot if it was an interleaved transfer */ |
| 581 | if (imxdmac->enabled_2d) { | 584 | if (imxdmac->enabled_2d) { |
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index c81ef7e10e08..655d4ce6ed0d 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
| @@ -201,10 +201,6 @@ static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) | |||
| 201 | 201 | ||
| 202 | static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) | 202 | static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) |
| 203 | { | 203 | { |
| 204 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan); | ||
| 205 | |||
| 206 | mxs_dma_enable_chan(mxs_chan); | ||
| 207 | |||
| 208 | return dma_cookie_assign(tx); | 204 | return dma_cookie_assign(tx); |
| 209 | } | 205 | } |
| 210 | 206 | ||
| @@ -558,9 +554,9 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, | |||
| 558 | 554 | ||
| 559 | static void mxs_dma_issue_pending(struct dma_chan *chan) | 555 | static void mxs_dma_issue_pending(struct dma_chan *chan) |
| 560 | { | 556 | { |
| 561 | /* | 557 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); |
| 562 | * Nothing to do. We only have a single descriptor. | 558 | |
| 563 | */ | 559 | mxs_dma_enable_chan(mxs_chan); |
| 564 | } | 560 | } |
| 565 | 561 | ||
| 566 | static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) | 562 | static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) |
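In mxs-dma the channel is no longer started from tx_submit(); submitting only assigns a cookie and the hardware is kicked from issue_pending(), which is the ordering the dmaengine API asks of its clients. A generic client-side sketch of that contract (not code from this driver):

        #include <linux/dmaengine.h>
        #include <linux/scatterlist.h>
        #include <linux/errno.h>

        /* Descriptors queued by dmaengine_submit() must not run until
         * dma_async_issue_pending() is called on the channel. */
        static int example_start_tx(struct dma_chan *chan, struct scatterlist *sgl,
                                    unsigned int sg_len)
        {
                struct dma_async_tx_descriptor *desc;
                dma_cookie_t cookie;

                desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
                                               DMA_PREP_INTERRUPT);
                if (!desc)
                        return -ENOMEM;

                cookie = dmaengine_submit(desc);   /* only assigns a cookie */
                if (dma_submit_error(cookie))
                        return -EIO;

                dma_async_issue_pending(chan);     /* channel actually starts here */
                return 0;
        }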
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 282caf118be8..2ee6e23930ad 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
| @@ -2225,12 +2225,9 @@ static inline void free_desc_list(struct list_head *list) | |||
| 2225 | { | 2225 | { |
| 2226 | struct dma_pl330_dmac *pdmac; | 2226 | struct dma_pl330_dmac *pdmac; |
| 2227 | struct dma_pl330_desc *desc; | 2227 | struct dma_pl330_desc *desc; |
| 2228 | struct dma_pl330_chan *pch; | 2228 | struct dma_pl330_chan *pch = NULL; |
| 2229 | unsigned long flags; | 2229 | unsigned long flags; |
| 2230 | 2230 | ||
| 2231 | if (list_empty(list)) | ||
| 2232 | return; | ||
| 2233 | |||
| 2234 | /* Finish off the work list */ | 2231 | /* Finish off the work list */ |
| 2235 | list_for_each_entry(desc, list, node) { | 2232 | list_for_each_entry(desc, list, node) { |
| 2236 | dma_async_tx_callback callback; | 2233 | dma_async_tx_callback callback; |
| @@ -2247,6 +2244,10 @@ static inline void free_desc_list(struct list_head *list) | |||
| 2247 | desc->pchan = NULL; | 2244 | desc->pchan = NULL; |
| 2248 | } | 2245 | } |
| 2249 | 2246 | ||
| 2247 | /* pch will be unset if list was empty */ | ||
| 2248 | if (!pch) | ||
| 2249 | return; | ||
| 2250 | |||
| 2250 | pdmac = pch->dmac; | 2251 | pdmac = pch->dmac; |
| 2251 | 2252 | ||
| 2252 | spin_lock_irqsave(&pdmac->pool_lock, flags); | 2253 | spin_lock_irqsave(&pdmac->pool_lock, flags); |
| @@ -2257,12 +2258,9 @@ static inline void free_desc_list(struct list_head *list) | |||
| 2257 | static inline void handle_cyclic_desc_list(struct list_head *list) | 2258 | static inline void handle_cyclic_desc_list(struct list_head *list) |
| 2258 | { | 2259 | { |
| 2259 | struct dma_pl330_desc *desc; | 2260 | struct dma_pl330_desc *desc; |
| 2260 | struct dma_pl330_chan *pch; | 2261 | struct dma_pl330_chan *pch = NULL; |
| 2261 | unsigned long flags; | 2262 | unsigned long flags; |
| 2262 | 2263 | ||
| 2263 | if (list_empty(list)) | ||
| 2264 | return; | ||
| 2265 | |||
| 2266 | list_for_each_entry(desc, list, node) { | 2264 | list_for_each_entry(desc, list, node) { |
| 2267 | dma_async_tx_callback callback; | 2265 | dma_async_tx_callback callback; |
| 2268 | 2266 | ||
| @@ -2274,6 +2272,10 @@ static inline void handle_cyclic_desc_list(struct list_head *list) | |||
| 2274 | callback(desc->txd.callback_param); | 2272 | callback(desc->txd.callback_param); |
| 2275 | } | 2273 | } |
| 2276 | 2274 | ||
| 2275 | /* pch will be unset if list was empty */ | ||
| 2276 | if (!pch) | ||
| 2277 | return; | ||
| 2278 | |||
| 2277 | spin_lock_irqsave(&pch->lock, flags); | 2279 | spin_lock_irqsave(&pch->lock, flags); |
| 2278 | list_splice_tail_init(list, &pch->work_list); | 2280 | list_splice_tail_init(list, &pch->work_list); |
| 2279 | spin_unlock_irqrestore(&pch->lock, flags); | 2281 | spin_unlock_irqrestore(&pch->lock, flags); |
| @@ -2926,8 +2928,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 2926 | INIT_LIST_HEAD(&pd->channels); | 2928 | INIT_LIST_HEAD(&pd->channels); |
| 2927 | 2929 | ||
| 2928 | /* Initialize channel parameters */ | 2930 | /* Initialize channel parameters */ |
| 2929 | num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri, | 2931 | if (pdat) |
| 2930 | (u8)pi->pcfg.num_chan); | 2932 | num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan); |
| 2933 | else | ||
| 2934 | num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan); | ||
| 2935 | |||
| 2931 | pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); | 2936 | pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); |
| 2932 | 2937 | ||
| 2933 | for (i = 0; i < num_chan; i++) { | 2938 | for (i = 0; i < num_chan; i++) { |
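The two pl330 list helpers above now cope with an empty list by remembering the owning channel in a pointer that is still NULL after the loop if it never ran, and the channel count is computed with max_t(int, ...) so the platform value is no longer truncated to u8 before the comparison. The empty-list idiom in generic form, with made-up types:

        #include <linux/list.h>

        struct owner { int id; };
        struct item  { struct list_head node; struct owner *owner; };

        /* Walk the list doing per-item work; 'owner' stays NULL when the list
         * was empty, so the per-owner cleanup is skipped without an up-front
         * list_empty() check. */
        static void drain_list(struct list_head *list)
        {
                struct item *it;
                struct owner *owner = NULL;

                list_for_each_entry(it, list, node) {
                        /* per-item completion work would go here */
                        owner = it->owner;
                }

                if (!owner)
                        return;

                /* per-owner cleanup that must only run when items were present */
        }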
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index bdd41d4bfa8d..2ed1ac3513f3 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/pm_runtime.h> | 18 | #include <linux/pm_runtime.h> |
| 19 | #include <linux/err.h> | 19 | #include <linux/err.h> |
| 20 | #include <linux/amba/bus.h> | 20 | #include <linux/amba/bus.h> |
| 21 | #include <linux/regulator/consumer.h> | ||
| 21 | 22 | ||
| 22 | #include <plat/ste_dma40.h> | 23 | #include <plat/ste_dma40.h> |
| 23 | 24 | ||
| @@ -69,6 +70,22 @@ enum d40_command { | |||
| 69 | }; | 70 | }; |
| 70 | 71 | ||
| 71 | /* | 72 | /* |
| 73 | * enum d40_events - The different Event Enables for the event lines. | ||
| 74 | * | ||
| 75 | * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan. | ||
| 76 | * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan. | ||
| 77 | * @D40_SUSPEND_REQ_EVENTLINE: Requesting for suspending a event line. | ||
| 78 | * @D40_ROUND_EVENTLINE: Status check for event line. | ||
| 79 | */ | ||
| 80 | |||
| 81 | enum d40_events { | ||
| 82 | D40_DEACTIVATE_EVENTLINE = 0, | ||
| 83 | D40_ACTIVATE_EVENTLINE = 1, | ||
| 84 | D40_SUSPEND_REQ_EVENTLINE = 2, | ||
| 85 | D40_ROUND_EVENTLINE = 3 | ||
| 86 | }; | ||
| 87 | |||
| 88 | /* | ||
| 72 | * These are the registers that has to be saved and later restored | 89 | * These are the registers that has to be saved and later restored |
| 73 | * when the DMA hw is powered off. | 90 | * when the DMA hw is powered off. |
| 74 | * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works. | 91 | * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works. |
| @@ -870,8 +887,8 @@ static void d40_save_restore_registers(struct d40_base *base, bool save) | |||
| 870 | } | 887 | } |
| 871 | #endif | 888 | #endif |
| 872 | 889 | ||
| 873 | static int d40_channel_execute_command(struct d40_chan *d40c, | 890 | static int __d40_execute_command_phy(struct d40_chan *d40c, |
| 874 | enum d40_command command) | 891 | enum d40_command command) |
| 875 | { | 892 | { |
| 876 | u32 status; | 893 | u32 status; |
| 877 | int i; | 894 | int i; |
| @@ -880,6 +897,12 @@ static int d40_channel_execute_command(struct d40_chan *d40c, | |||
| 880 | unsigned long flags; | 897 | unsigned long flags; |
| 881 | u32 wmask; | 898 | u32 wmask; |
| 882 | 899 | ||
| 900 | if (command == D40_DMA_STOP) { | ||
| 901 | ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ); | ||
| 902 | if (ret) | ||
| 903 | return ret; | ||
| 904 | } | ||
| 905 | |||
| 883 | spin_lock_irqsave(&d40c->base->execmd_lock, flags); | 906 | spin_lock_irqsave(&d40c->base->execmd_lock, flags); |
| 884 | 907 | ||
| 885 | if (d40c->phy_chan->num % 2 == 0) | 908 | if (d40c->phy_chan->num % 2 == 0) |
| @@ -973,67 +996,109 @@ static void d40_term_all(struct d40_chan *d40c) | |||
| 973 | } | 996 | } |
| 974 | 997 | ||
| 975 | d40c->pending_tx = 0; | 998 | d40c->pending_tx = 0; |
| 976 | d40c->busy = false; | ||
| 977 | } | 999 | } |
| 978 | 1000 | ||
| 979 | static void __d40_config_set_event(struct d40_chan *d40c, bool enable, | 1001 | static void __d40_config_set_event(struct d40_chan *d40c, |
| 980 | u32 event, int reg) | 1002 | enum d40_events event_type, u32 event, |
| 1003 | int reg) | ||
| 981 | { | 1004 | { |
| 982 | void __iomem *addr = chan_base(d40c) + reg; | 1005 | void __iomem *addr = chan_base(d40c) + reg; |
| 983 | int tries; | 1006 | int tries; |
| 1007 | u32 status; | ||
| 1008 | |||
| 1009 | switch (event_type) { | ||
| 1010 | |||
| 1011 | case D40_DEACTIVATE_EVENTLINE: | ||
| 984 | 1012 | ||
| 985 | if (!enable) { | ||
| 986 | writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) | 1013 | writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) |
| 987 | | ~D40_EVENTLINE_MASK(event), addr); | 1014 | | ~D40_EVENTLINE_MASK(event), addr); |
| 988 | return; | 1015 | break; |
| 989 | } | 1016 | |
| 1017 | case D40_SUSPEND_REQ_EVENTLINE: | ||
| 1018 | status = (readl(addr) & D40_EVENTLINE_MASK(event)) >> | ||
| 1019 | D40_EVENTLINE_POS(event); | ||
| 1020 | |||
| 1021 | if (status == D40_DEACTIVATE_EVENTLINE || | ||
| 1022 | status == D40_SUSPEND_REQ_EVENTLINE) | ||
| 1023 | break; | ||
| 990 | 1024 | ||
| 1025 | writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event)) | ||
| 1026 | | ~D40_EVENTLINE_MASK(event), addr); | ||
| 1027 | |||
| 1028 | for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) { | ||
| 1029 | |||
| 1030 | status = (readl(addr) & D40_EVENTLINE_MASK(event)) >> | ||
| 1031 | D40_EVENTLINE_POS(event); | ||
| 1032 | |||
| 1033 | cpu_relax(); | ||
| 1034 | /* | ||
| 1035 | * Reduce the number of bus accesses while | ||
| 1036 | * waiting for the DMA to suspend. | ||
| 1037 | */ | ||
| 1038 | udelay(3); | ||
| 1039 | |||
| 1040 | if (status == D40_DEACTIVATE_EVENTLINE) | ||
| 1041 | break; | ||
| 1042 | } | ||
| 1043 | |||
| 1044 | if (tries == D40_SUSPEND_MAX_IT) { | ||
| 1045 | chan_err(d40c, | ||
| 1046 | "unable to stop the event_line chl %d (log: %d)" | ||
| 1047 | "status %x\n", d40c->phy_chan->num, | ||
| 1048 | d40c->log_num, status); | ||
| 1049 | } | ||
| 1050 | break; | ||
| 1051 | |||
| 1052 | case D40_ACTIVATE_EVENTLINE: | ||
| 991 | /* | 1053 | /* |
| 992 | * The hardware sometimes doesn't register the enable when src and dst | 1054 | * The hardware sometimes doesn't register the enable when src and dst |
| 993 | * event lines are active on the same logical channel. Retry to ensure | 1055 | * event lines are active on the same logical channel. Retry to ensure |
| 994 | * it does. Usually only one retry is sufficient. | 1056 | * it does. Usually only one retry is sufficient. |
| 995 | */ | 1057 | */ |
| 996 | tries = 100; | 1058 | tries = 100; |
| 997 | while (--tries) { | 1059 | while (--tries) { |
| 998 | writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) | 1060 | writel((D40_ACTIVATE_EVENTLINE << |
| 999 | | ~D40_EVENTLINE_MASK(event), addr); | 1061 | D40_EVENTLINE_POS(event)) | |
| 1062 | ~D40_EVENTLINE_MASK(event), addr); | ||
| 1000 | 1063 | ||
| 1001 | if (readl(addr) & D40_EVENTLINE_MASK(event)) | 1064 | if (readl(addr) & D40_EVENTLINE_MASK(event)) |
| 1002 | break; | 1065 | break; |
| 1003 | } | 1066 | } |
| 1004 | 1067 | ||
| 1005 | if (tries != 99) | 1068 | if (tries != 99) |
| 1006 | dev_dbg(chan2dev(d40c), | 1069 | dev_dbg(chan2dev(d40c), |
| 1007 | "[%s] workaround enable S%cLNK (%d tries)\n", | 1070 | "[%s] workaround enable S%cLNK (%d tries)\n", |
| 1008 | __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D', | 1071 | __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D', |
| 1009 | 100 - tries); | 1072 | 100 - tries); |
| 1010 | 1073 | ||
| 1011 | WARN_ON(!tries); | 1074 | WARN_ON(!tries); |
| 1012 | } | 1075 | break; |
| 1013 | 1076 | ||
| 1014 | static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) | 1077 | case D40_ROUND_EVENTLINE: |
| 1015 | { | 1078 | BUG(); |
| 1016 | unsigned long flags; | 1079 | break; |
| 1017 | 1080 | ||
| 1018 | spin_lock_irqsave(&d40c->phy_chan->lock, flags); | 1081 | } |
| 1082 | } | ||
| 1019 | 1083 | ||
| 1084 | static void d40_config_set_event(struct d40_chan *d40c, | ||
| 1085 | enum d40_events event_type) | ||
| 1086 | { | ||
| 1020 | /* Enable event line connected to device (or memcpy) */ | 1087 | /* Enable event line connected to device (or memcpy) */ |
| 1021 | if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || | 1088 | if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || |
| 1022 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { | 1089 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { |
| 1023 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | 1090 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); |
| 1024 | 1091 | ||
| 1025 | __d40_config_set_event(d40c, do_enable, event, | 1092 | __d40_config_set_event(d40c, event_type, event, |
| 1026 | D40_CHAN_REG_SSLNK); | 1093 | D40_CHAN_REG_SSLNK); |
| 1027 | } | 1094 | } |
| 1028 | 1095 | ||
| 1029 | if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { | 1096 | if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { |
| 1030 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | 1097 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); |
| 1031 | 1098 | ||
| 1032 | __d40_config_set_event(d40c, do_enable, event, | 1099 | __d40_config_set_event(d40c, event_type, event, |
| 1033 | D40_CHAN_REG_SDLNK); | 1100 | D40_CHAN_REG_SDLNK); |
| 1034 | } | 1101 | } |
| 1035 | |||
| 1036 | spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); | ||
| 1037 | } | 1102 | } |
| 1038 | 1103 | ||
| 1039 | static u32 d40_chan_has_events(struct d40_chan *d40c) | 1104 | static u32 d40_chan_has_events(struct d40_chan *d40c) |
| @@ -1047,6 +1112,64 @@ static u32 d40_chan_has_events(struct d40_chan *d40c) | |||
| 1047 | return val; | 1112 | return val; |
| 1048 | } | 1113 | } |
| 1049 | 1114 | ||
| 1115 | static int | ||
| 1116 | __d40_execute_command_log(struct d40_chan *d40c, enum d40_command command) | ||
| 1117 | { | ||
| 1118 | unsigned long flags; | ||
| 1119 | int ret = 0; | ||
| 1120 | u32 active_status; | ||
| 1121 | void __iomem *active_reg; | ||
| 1122 | |||
| 1123 | if (d40c->phy_chan->num % 2 == 0) | ||
| 1124 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; | ||
| 1125 | else | ||
| 1126 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; | ||
| 1127 | |||
| 1128 | |||
| 1129 | spin_lock_irqsave(&d40c->phy_chan->lock, flags); | ||
| 1130 | |||
| 1131 | switch (command) { | ||
| 1132 | case D40_DMA_STOP: | ||
| 1133 | case D40_DMA_SUSPEND_REQ: | ||
| 1134 | |||
| 1135 | active_status = (readl(active_reg) & | ||
| 1136 | D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> | ||
| 1137 | D40_CHAN_POS(d40c->phy_chan->num); | ||
| 1138 | |||
| 1139 | if (active_status == D40_DMA_RUN) | ||
| 1140 | d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE); | ||
| 1141 | else | ||
| 1142 | d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE); | ||
| 1143 | |||
| 1144 | if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP)) | ||
| 1145 | ret = __d40_execute_command_phy(d40c, command); | ||
| 1146 | |||
| 1147 | break; | ||
| 1148 | |||
| 1149 | case D40_DMA_RUN: | ||
| 1150 | |||
| 1151 | d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE); | ||
| 1152 | ret = __d40_execute_command_phy(d40c, command); | ||
| 1153 | break; | ||
| 1154 | |||
| 1155 | case D40_DMA_SUSPENDED: | ||
| 1156 | BUG(); | ||
| 1157 | break; | ||
| 1158 | } | ||
| 1159 | |||
| 1160 | spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); | ||
| 1161 | return ret; | ||
| 1162 | } | ||
| 1163 | |||
| 1164 | static int d40_channel_execute_command(struct d40_chan *d40c, | ||
| 1165 | enum d40_command command) | ||
| 1166 | { | ||
| 1167 | if (chan_is_logical(d40c)) | ||
| 1168 | return __d40_execute_command_log(d40c, command); | ||
| 1169 | else | ||
| 1170 | return __d40_execute_command_phy(d40c, command); | ||
| 1171 | } | ||
| 1172 | |||
| 1050 | static u32 d40_get_prmo(struct d40_chan *d40c) | 1173 | static u32 d40_get_prmo(struct d40_chan *d40c) |
| 1051 | { | 1174 | { |
| 1052 | static const unsigned int phy_map[] = { | 1175 | static const unsigned int phy_map[] = { |
| @@ -1149,15 +1272,7 @@ static int d40_pause(struct d40_chan *d40c) | |||
| 1149 | spin_lock_irqsave(&d40c->lock, flags); | 1272 | spin_lock_irqsave(&d40c->lock, flags); |
| 1150 | 1273 | ||
| 1151 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 1274 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); |
| 1152 | if (res == 0) { | 1275 | |
| 1153 | if (chan_is_logical(d40c)) { | ||
| 1154 | d40_config_set_event(d40c, false); | ||
| 1155 | /* Resume the other logical channels if any */ | ||
| 1156 | if (d40_chan_has_events(d40c)) | ||
| 1157 | res = d40_channel_execute_command(d40c, | ||
| 1158 | D40_DMA_RUN); | ||
| 1159 | } | ||
| 1160 | } | ||
| 1161 | pm_runtime_mark_last_busy(d40c->base->dev); | 1276 | pm_runtime_mark_last_busy(d40c->base->dev); |
| 1162 | pm_runtime_put_autosuspend(d40c->base->dev); | 1277 | pm_runtime_put_autosuspend(d40c->base->dev); |
| 1163 | spin_unlock_irqrestore(&d40c->lock, flags); | 1278 | spin_unlock_irqrestore(&d40c->lock, flags); |
| @@ -1174,45 +1289,17 @@ static int d40_resume(struct d40_chan *d40c) | |||
| 1174 | 1289 | ||
| 1175 | spin_lock_irqsave(&d40c->lock, flags); | 1290 | spin_lock_irqsave(&d40c->lock, flags); |
| 1176 | pm_runtime_get_sync(d40c->base->dev); | 1291 | pm_runtime_get_sync(d40c->base->dev); |
| 1177 | if (d40c->base->rev == 0) | ||
| 1178 | if (chan_is_logical(d40c)) { | ||
| 1179 | res = d40_channel_execute_command(d40c, | ||
| 1180 | D40_DMA_SUSPEND_REQ); | ||
| 1181 | goto no_suspend; | ||
| 1182 | } | ||
| 1183 | 1292 | ||
| 1184 | /* If bytes left to transfer or linked tx resume job */ | 1293 | /* If bytes left to transfer or linked tx resume job */ |
| 1185 | if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { | 1294 | if (d40_residue(d40c) || d40_tx_is_linked(d40c)) |
| 1186 | |||
| 1187 | if (chan_is_logical(d40c)) | ||
| 1188 | d40_config_set_event(d40c, true); | ||
| 1189 | |||
| 1190 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); | 1295 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); |
| 1191 | } | ||
| 1192 | 1296 | ||
| 1193 | no_suspend: | ||
| 1194 | pm_runtime_mark_last_busy(d40c->base->dev); | 1297 | pm_runtime_mark_last_busy(d40c->base->dev); |
| 1195 | pm_runtime_put_autosuspend(d40c->base->dev); | 1298 | pm_runtime_put_autosuspend(d40c->base->dev); |
| 1196 | spin_unlock_irqrestore(&d40c->lock, flags); | 1299 | spin_unlock_irqrestore(&d40c->lock, flags); |
| 1197 | return res; | 1300 | return res; |
| 1198 | } | 1301 | } |
| 1199 | 1302 | ||
| 1200 | static int d40_terminate_all(struct d40_chan *chan) | ||
| 1201 | { | ||
| 1202 | unsigned long flags; | ||
| 1203 | int ret = 0; | ||
| 1204 | |||
| 1205 | ret = d40_pause(chan); | ||
| 1206 | if (!ret && chan_is_physical(chan)) | ||
| 1207 | ret = d40_channel_execute_command(chan, D40_DMA_STOP); | ||
| 1208 | |||
| 1209 | spin_lock_irqsave(&chan->lock, flags); | ||
| 1210 | d40_term_all(chan); | ||
| 1211 | spin_unlock_irqrestore(&chan->lock, flags); | ||
| 1212 | |||
| 1213 | return ret; | ||
| 1214 | } | ||
| 1215 | |||
| 1216 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | 1303 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) |
| 1217 | { | 1304 | { |
| 1218 | struct d40_chan *d40c = container_of(tx->chan, | 1305 | struct d40_chan *d40c = container_of(tx->chan, |
| @@ -1232,20 +1319,6 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | |||
| 1232 | 1319 | ||
| 1233 | static int d40_start(struct d40_chan *d40c) | 1320 | static int d40_start(struct d40_chan *d40c) |
| 1234 | { | 1321 | { |
| 1235 | if (d40c->base->rev == 0) { | ||
| 1236 | int err; | ||
| 1237 | |||
| 1238 | if (chan_is_logical(d40c)) { | ||
| 1239 | err = d40_channel_execute_command(d40c, | ||
| 1240 | D40_DMA_SUSPEND_REQ); | ||
| 1241 | if (err) | ||
| 1242 | return err; | ||
| 1243 | } | ||
| 1244 | } | ||
| 1245 | |||
| 1246 | if (chan_is_logical(d40c)) | ||
| 1247 | d40_config_set_event(d40c, true); | ||
| 1248 | |||
| 1249 | return d40_channel_execute_command(d40c, D40_DMA_RUN); | 1322 | return d40_channel_execute_command(d40c, D40_DMA_RUN); |
| 1250 | } | 1323 | } |
| 1251 | 1324 | ||
| @@ -1258,10 +1331,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c) | |||
| 1258 | d40d = d40_first_queued(d40c); | 1331 | d40d = d40_first_queued(d40c); |
| 1259 | 1332 | ||
| 1260 | if (d40d != NULL) { | 1333 | if (d40d != NULL) { |
| 1261 | if (!d40c->busy) | 1334 | if (!d40c->busy) { |
| 1262 | d40c->busy = true; | 1335 | d40c->busy = true; |
| 1263 | 1336 | pm_runtime_get_sync(d40c->base->dev); | |
| 1264 | pm_runtime_get_sync(d40c->base->dev); | 1337 | } |
| 1265 | 1338 | ||
| 1266 | /* Remove from queue */ | 1339 | /* Remove from queue */ |
| 1267 | d40_desc_remove(d40d); | 1340 | d40_desc_remove(d40d); |
| @@ -1388,8 +1461,8 @@ static void dma_tasklet(unsigned long data) | |||
| 1388 | 1461 | ||
| 1389 | return; | 1462 | return; |
| 1390 | 1463 | ||
| 1391 | err: | 1464 | err: |
| 1392 | /* Rescue manoeuvre if receiving double interrupts */ | 1465 | /* Rescue manouver if receiving double interrupts */ |
| 1393 | if (d40c->pending_tx > 0) | 1466 | if (d40c->pending_tx > 0) |
| 1394 | d40c->pending_tx--; | 1467 | d40c->pending_tx--; |
| 1395 | spin_unlock_irqrestore(&d40c->lock, flags); | 1468 | spin_unlock_irqrestore(&d40c->lock, flags); |
| @@ -1770,7 +1843,6 @@ static int d40_config_memcpy(struct d40_chan *d40c) | |||
| 1770 | return 0; | 1843 | return 0; |
| 1771 | } | 1844 | } |
| 1772 | 1845 | ||
| 1773 | |||
| 1774 | static int d40_free_dma(struct d40_chan *d40c) | 1846 | static int d40_free_dma(struct d40_chan *d40c) |
| 1775 | { | 1847 | { |
| 1776 | 1848 | ||
| @@ -1806,43 +1878,18 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
| 1806 | } | 1878 | } |
| 1807 | 1879 | ||
| 1808 | pm_runtime_get_sync(d40c->base->dev); | 1880 | pm_runtime_get_sync(d40c->base->dev); |
| 1809 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 1881 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); |
| 1810 | if (res) { | 1882 | if (res) { |
| 1811 | chan_err(d40c, "suspend failed\n"); | 1883 | chan_err(d40c, "stop failed\n"); |
| 1812 | goto out; | 1884 | goto out; |
| 1813 | } | 1885 | } |
| 1814 | 1886 | ||
| 1815 | if (chan_is_logical(d40c)) { | 1887 | d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0); |
| 1816 | /* Release logical channel, deactivate the event line */ | ||
| 1817 | 1888 | ||
| 1818 | d40_config_set_event(d40c, false); | 1889 | if (chan_is_logical(d40c)) |
| 1819 | d40c->base->lookup_log_chans[d40c->log_num] = NULL; | 1890 | d40c->base->lookup_log_chans[d40c->log_num] = NULL; |
| 1820 | 1891 | else | |
| 1821 | /* | 1892 | d40c->base->lookup_phy_chans[phy->num] = NULL; |
| 1822 | * Check if there are more logical allocation | ||
| 1823 | * on this phy channel. | ||
| 1824 | */ | ||
| 1825 | if (!d40_alloc_mask_free(phy, is_src, event)) { | ||
| 1826 | /* Resume the other logical channels if any */ | ||
| 1827 | if (d40_chan_has_events(d40c)) { | ||
| 1828 | res = d40_channel_execute_command(d40c, | ||
| 1829 | D40_DMA_RUN); | ||
| 1830 | if (res) | ||
| 1831 | chan_err(d40c, | ||
| 1832 | "Executing RUN command\n"); | ||
| 1833 | } | ||
| 1834 | goto out; | ||
| 1835 | } | ||
| 1836 | } else { | ||
| 1837 | (void) d40_alloc_mask_free(phy, is_src, 0); | ||
| 1838 | } | ||
| 1839 | |||
| 1840 | /* Release physical channel */ | ||
| 1841 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); | ||
| 1842 | if (res) { | ||
| 1843 | chan_err(d40c, "Failed to stop channel\n"); | ||
| 1844 | goto out; | ||
| 1845 | } | ||
| 1846 | 1893 | ||
| 1847 | if (d40c->busy) { | 1894 | if (d40c->busy) { |
| 1848 | pm_runtime_mark_last_busy(d40c->base->dev); | 1895 | pm_runtime_mark_last_busy(d40c->base->dev); |
| @@ -1852,7 +1899,6 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
| 1852 | d40c->busy = false; | 1899 | d40c->busy = false; |
| 1853 | d40c->phy_chan = NULL; | 1900 | d40c->phy_chan = NULL; |
| 1854 | d40c->configured = false; | 1901 | d40c->configured = false; |
| 1855 | d40c->base->lookup_phy_chans[phy->num] = NULL; | ||
| 1856 | out: | 1902 | out: |
| 1857 | 1903 | ||
| 1858 | pm_runtime_mark_last_busy(d40c->base->dev); | 1904 | pm_runtime_mark_last_busy(d40c->base->dev); |
| @@ -2070,7 +2116,7 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, | |||
| 2070 | if (sg_next(&sg_src[sg_len - 1]) == sg_src) | 2116 | if (sg_next(&sg_src[sg_len - 1]) == sg_src) |
| 2071 | desc->cyclic = true; | 2117 | desc->cyclic = true; |
| 2072 | 2118 | ||
| 2073 | if (direction != DMA_NONE) { | 2119 | if (direction != DMA_TRANS_NONE) { |
| 2074 | dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); | 2120 | dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); |
| 2075 | 2121 | ||
| 2076 | if (direction == DMA_DEV_TO_MEM) | 2122 | if (direction == DMA_DEV_TO_MEM) |
| @@ -2371,6 +2417,31 @@ static void d40_issue_pending(struct dma_chan *chan) | |||
| 2371 | spin_unlock_irqrestore(&d40c->lock, flags); | 2417 | spin_unlock_irqrestore(&d40c->lock, flags); |
| 2372 | } | 2418 | } |
| 2373 | 2419 | ||
| 2420 | static void d40_terminate_all(struct dma_chan *chan) | ||
| 2421 | { | ||
| 2422 | unsigned long flags; | ||
| 2423 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | ||
| 2424 | int ret; | ||
| 2425 | |||
| 2426 | spin_lock_irqsave(&d40c->lock, flags); | ||
| 2427 | |||
| 2428 | pm_runtime_get_sync(d40c->base->dev); | ||
| 2429 | ret = d40_channel_execute_command(d40c, D40_DMA_STOP); | ||
| 2430 | if (ret) | ||
| 2431 | chan_err(d40c, "Failed to stop channel\n"); | ||
| 2432 | |||
| 2433 | d40_term_all(d40c); | ||
| 2434 | pm_runtime_mark_last_busy(d40c->base->dev); | ||
| 2435 | pm_runtime_put_autosuspend(d40c->base->dev); | ||
| 2436 | if (d40c->busy) { | ||
| 2437 | pm_runtime_mark_last_busy(d40c->base->dev); | ||
| 2438 | pm_runtime_put_autosuspend(d40c->base->dev); | ||
| 2439 | } | ||
| 2440 | d40c->busy = false; | ||
| 2441 | |||
| 2442 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
| 2443 | } | ||
| 2444 | |||
| 2374 | static int | 2445 | static int |
| 2375 | dma40_config_to_halfchannel(struct d40_chan *d40c, | 2446 | dma40_config_to_halfchannel(struct d40_chan *d40c, |
| 2376 | struct stedma40_half_channel_info *info, | 2447 | struct stedma40_half_channel_info *info, |
| @@ -2551,7 +2622,8 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
| 2551 | 2622 | ||
| 2552 | switch (cmd) { | 2623 | switch (cmd) { |
| 2553 | case DMA_TERMINATE_ALL: | 2624 | case DMA_TERMINATE_ALL: |
| 2554 | return d40_terminate_all(d40c); | 2625 | d40_terminate_all(chan); |
| 2626 | return 0; | ||
| 2555 | case DMA_PAUSE: | 2627 | case DMA_PAUSE: |
| 2556 | return d40_pause(d40c); | 2628 | return d40_pause(d40c); |
| 2557 | case DMA_RESUME: | 2629 | case DMA_RESUME: |
| @@ -2908,6 +2980,12 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
| 2908 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", | 2980 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", |
| 2909 | rev, res->start); | 2981 | rev, res->start); |
| 2910 | 2982 | ||
| 2983 | if (rev < 2) { | ||
| 2984 | d40_err(&pdev->dev, "hardware revision: %d is not supported", | ||
| 2985 | rev); | ||
| 2986 | goto failure; | ||
| 2987 | } | ||
| 2988 | |||
| 2911 | plat_data = pdev->dev.platform_data; | 2989 | plat_data = pdev->dev.platform_data; |
| 2912 | 2990 | ||
| 2913 | /* Count the number of logical channels in use */ | 2991 | /* Count the number of logical channels in use */ |
| @@ -2998,6 +3076,7 @@ failure: | |||
| 2998 | 3076 | ||
| 2999 | if (base) { | 3077 | if (base) { |
| 3000 | kfree(base->lcla_pool.alloc_map); | 3078 | kfree(base->lcla_pool.alloc_map); |
| 3079 | kfree(base->reg_val_backup_chan); | ||
| 3001 | kfree(base->lookup_log_chans); | 3080 | kfree(base->lookup_log_chans); |
| 3002 | kfree(base->lookup_phy_chans); | 3081 | kfree(base->lookup_phy_chans); |
| 3003 | kfree(base->phy_res); | 3082 | kfree(base->phy_res); |
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 8d3d490968a3..51e8e5396e9b 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
| @@ -62,8 +62,6 @@ | |||
| 62 | #define D40_SREG_ELEM_LOG_LIDX_MASK (0xFF << D40_SREG_ELEM_LOG_LIDX_POS) | 62 | #define D40_SREG_ELEM_LOG_LIDX_MASK (0xFF << D40_SREG_ELEM_LOG_LIDX_POS) |
| 63 | 63 | ||
| 64 | /* Link register */ | 64 | /* Link register */ |
| 65 | #define D40_DEACTIVATE_EVENTLINE 0x0 | ||
| 66 | #define D40_ACTIVATE_EVENTLINE 0x1 | ||
| 67 | #define D40_EVENTLINE_POS(i) (2 * i) | 65 | #define D40_EVENTLINE_POS(i) (2 * i) |
| 68 | #define D40_EVENTLINE_MASK(i) (0x3 << D40_EVENTLINE_POS(i)) | 66 | #define D40_EVENTLINE_MASK(i) (0x3 << D40_EVENTLINE_POS(i)) |
| 69 | 67 | ||
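With D40_DEACTIVATE_EVENTLINE and D40_ACTIVATE_EVENTLINE moved into the new d40_events enum in ste_dma40.c, this header keeps only the packing macros: each event line owns a 2-bit field at bit position 2*i of the link register. A small sketch of how such a field is read and written, mirroring the readl()/writel() pattern in __d40_config_set_event() above; the helper names are invented and the macros are copied from this header:

        #include <linux/io.h>
        #include <linux/types.h>

        #define D40_EVENTLINE_POS(i)  (2 * i)
        #define D40_EVENTLINE_MASK(i) (0x3 << D40_EVENTLINE_POS(i))

        static u32 eventline_get(void __iomem *addr, u32 event)
        {
                return (readl(addr) & D40_EVENTLINE_MASK(event)) >>
                       D40_EVENTLINE_POS(event);
        }

        static void eventline_set(void __iomem *addr, u32 event, u32 state)
        {
                /* The new state lands in this event's 2-bit field; every other
                 * field is written as 0x3, which the driver appears to rely on
                 * as the "no change"/status encoding (D40_ROUND_EVENTLINE). */
                writel((state << D40_EVENTLINE_POS(event)) |
                       ~D40_EVENTLINE_MASK(event), addr);
        }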
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index d25599f2a3f8..891e4674d29b 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
| @@ -191,6 +191,176 @@ utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len) | |||
| 191 | } | 191 | } |
| 192 | } | 192 | } |
| 193 | 193 | ||
| 194 | static bool | ||
| 195 | validate_device_path(struct efi_variable *var, int match, u8 *buffer, int len) | ||
| 196 | { | ||
| 197 | struct efi_generic_dev_path *node; | ||
| 198 | int offset = 0; | ||
| 199 | |||
| 200 | node = (struct efi_generic_dev_path *)buffer; | ||
| 201 | |||
| 202 | while (offset < len) { | ||
| 203 | offset += node->length; | ||
| 204 | |||
| 205 | if (offset > len) | ||
| 206 | return false; | ||
| 207 | |||
| 208 | if ((node->type == EFI_DEV_END_PATH || | ||
| 209 | node->type == EFI_DEV_END_PATH2) && | ||
| 210 | node->sub_type == EFI_DEV_END_ENTIRE) | ||
| 211 | return true; | ||
| 212 | |||
| 213 | node = (struct efi_generic_dev_path *)(buffer + offset); | ||
| 214 | } | ||
| 215 | |||
| 216 | /* | ||
| 217 | * If we're here then either node->length pointed past the end | ||
| 218 | * of the buffer or we reached the end of the buffer without | ||
| 219 | * finding a device path end node. | ||
| 220 | */ | ||
| 221 | return false; | ||
| 222 | } | ||
| 223 | |||
| 224 | static bool | ||
| 225 | validate_boot_order(struct efi_variable *var, int match, u8 *buffer, int len) | ||
| 226 | { | ||
| 227 | /* An array of 16-bit integers */ | ||
| 228 | if ((len % 2) != 0) | ||
| 229 | return false; | ||
| 230 | |||
| 231 | return true; | ||
| 232 | } | ||
| 233 | |||
| 234 | static bool | ||
| 235 | validate_load_option(struct efi_variable *var, int match, u8 *buffer, int len) | ||
| 236 | { | ||
| 237 | u16 filepathlength; | ||
| 238 | int i, desclength = 0; | ||
| 239 | |||
| 240 | /* Either "Boot" or "Driver" followed by four digits of hex */ | ||
| 241 | for (i = match; i < match+4; i++) { | ||
| 242 | if (hex_to_bin(var->VariableName[i] & 0xff) < 0) | ||
| 243 | return true; | ||
| 244 | } | ||
| 245 | |||
| 246 | /* A valid entry must be at least 6 bytes */ | ||
| 247 | if (len < 6) | ||
| 248 | return false; | ||
| 249 | |||
| 250 | filepathlength = buffer[4] | buffer[5] << 8; | ||
| 251 | |||
| 252 | /* | ||
| 253 | * There's no stored length for the description, so it has to be | ||
| 254 | * found by hand | ||
| 255 | */ | ||
| 256 | desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len) + 2; | ||
| 257 | |||
| 258 | /* Each boot entry must have a descriptor */ | ||
| 259 | if (!desclength) | ||
| 260 | return false; | ||
| 261 | |||
| 262 | /* | ||
| 263 | * If the sum of the length of the description, the claimed filepath | ||
| 264 | * length and the original header are greater than the length of the | ||
| 265 | * variable, it's malformed | ||
| 266 | */ | ||
| 267 | if ((desclength + filepathlength + 6) > len) | ||
| 268 | return false; | ||
| 269 | |||
| 270 | /* | ||
| 271 | * And, finally, check the filepath | ||
| 272 | */ | ||
| 273 | return validate_device_path(var, match, buffer + desclength + 6, | ||
| 274 | filepathlength); | ||
| 275 | } | ||
| 276 | |||
| 277 | static bool | ||
| 278 | validate_uint16(struct efi_variable *var, int match, u8 *buffer, int len) | ||
| 279 | { | ||
| 280 | /* A single 16-bit integer */ | ||
| 281 | if (len != 2) | ||
| 282 | return false; | ||
| 283 | |||
| 284 | return true; | ||
| 285 | } | ||
| 286 | |||
| 287 | static bool | ||
| 288 | validate_ascii_string(struct efi_variable *var, int match, u8 *buffer, int len) | ||
| 289 | { | ||
| 290 | int i; | ||
| 291 | |||
| 292 | for (i = 0; i < len; i++) { | ||
| 293 | if (buffer[i] > 127) | ||
| 294 | return false; | ||
| 295 | |||
| 296 | if (buffer[i] == 0) | ||
| 297 | return true; | ||
| 298 | } | ||
| 299 | |||
| 300 | return false; | ||
| 301 | } | ||
| 302 | |||
| 303 | struct variable_validate { | ||
| 304 | char *name; | ||
| 305 | bool (*validate)(struct efi_variable *var, int match, u8 *data, | ||
| 306 | int len); | ||
| 307 | }; | ||
| 308 | |||
| 309 | static const struct variable_validate variable_validate[] = { | ||
| 310 | { "BootNext", validate_uint16 }, | ||
| 311 | { "BootOrder", validate_boot_order }, | ||
| 312 | { "DriverOrder", validate_boot_order }, | ||
| 313 | { "Boot*", validate_load_option }, | ||
| 314 | { "Driver*", validate_load_option }, | ||
| 315 | { "ConIn", validate_device_path }, | ||
| 316 | { "ConInDev", validate_device_path }, | ||
| 317 | { "ConOut", validate_device_path }, | ||
| 318 | { "ConOutDev", validate_device_path }, | ||
| 319 | { "ErrOut", validate_device_path }, | ||
| 320 | { "ErrOutDev", validate_device_path }, | ||
| 321 | { "Timeout", validate_uint16 }, | ||
| 322 | { "Lang", validate_ascii_string }, | ||
| 323 | { "PlatformLang", validate_ascii_string }, | ||
| 324 | { "", NULL }, | ||
| 325 | }; | ||
| 326 | |||
| 327 | static bool | ||
| 328 | validate_var(struct efi_variable *var, u8 *data, int len) | ||
| 329 | { | ||
| 330 | int i; | ||
| 331 | u16 *unicode_name = var->VariableName; | ||
| 332 | |||
| 333 | for (i = 0; variable_validate[i].validate != NULL; i++) { | ||
| 334 | const char *name = variable_validate[i].name; | ||
| 335 | int match; | ||
| 336 | |||
| 337 | for (match = 0; ; match++) { | ||
| 338 | char c = name[match]; | ||
| 339 | u16 u = unicode_name[match]; | ||
| 340 | |||
| 341 | /* All special variables are plain ascii */ | ||
| 342 | if (u > 127) | ||
| 343 | return true; | ||
| 344 | |||
| 345 | /* Wildcard in the matching name means we've matched */ | ||
| 346 | if (c == '*') | ||
| 347 | return variable_validate[i].validate(var, | ||
| 348 | match, data, len); | ||
| 349 | |||
| 350 | /* Case sensitive match */ | ||
| 351 | if (c != u) | ||
| 352 | break; | ||
| 353 | |||
| 354 | /* Reached the end of the string while matching */ | ||
| 355 | if (!c) | ||
| 356 | return variable_validate[i].validate(var, | ||
| 357 | match, data, len); | ||
| 358 | } | ||
| 359 | } | ||
| 360 | |||
| 361 | return true; | ||
| 362 | } | ||
| 363 | |||
| 194 | static efi_status_t | 364 | static efi_status_t |
| 195 | get_var_data_locked(struct efivars *efivars, struct efi_variable *var) | 365 | get_var_data_locked(struct efivars *efivars, struct efi_variable *var) |
| 196 | { | 366 | { |
| @@ -324,6 +494,12 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) | |||
| 324 | return -EINVAL; | 494 | return -EINVAL; |
| 325 | } | 495 | } |
| 326 | 496 | ||
| 497 | if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 || | ||
| 498 | validate_var(new_var, new_var->Data, new_var->DataSize) == false) { | ||
| 499 | printk(KERN_ERR "efivars: Malformed variable content\n"); | ||
| 500 | return -EINVAL; | ||
| 501 | } | ||
| 502 | |||
| 327 | spin_lock(&efivars->lock); | 503 | spin_lock(&efivars->lock); |
| 328 | status = efivars->ops->set_variable(new_var->VariableName, | 504 | status = efivars->ops->set_variable(new_var->VariableName, |
| 329 | &new_var->VendorGuid, | 505 | &new_var->VendorGuid, |
| @@ -626,6 +802,12 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, | |||
| 626 | if (!capable(CAP_SYS_ADMIN)) | 802 | if (!capable(CAP_SYS_ADMIN)) |
| 627 | return -EACCES; | 803 | return -EACCES; |
| 628 | 804 | ||
| 805 | if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 || | ||
| 806 | validate_var(new_var, new_var->Data, new_var->DataSize) == false) { | ||
| 807 | printk(KERN_ERR "efivars: Malformed variable content\n"); | ||
| 808 | return -EINVAL; | ||
| 809 | } | ||
| 810 | |||
| 629 | spin_lock(&efivars->lock); | 811 | spin_lock(&efivars->lock); |
| 630 | 812 | ||
| 631 | /* | 813 | /* |
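validate_var() above walks the variable_validate[] table, comparing the UTF-16 variable name against each ASCII pattern one character at a time; a trailing '*' (Boot*, Driver*) matches and passes the wildcard offset to the validator, which validate_load_option() uses to check the four hex digits and then the payload at fixed offsets. Those offsets correspond to the UEFI load-option layout, sketched here as an illustrative declaration (not a struct the driver defines):

        #include <linux/types.h>

        /* EFI load option (Boot####/Driver####) payload as checked above. */
        struct efi_load_option {
                u32 attributes;               /* buffer[0..3]                      */
                u16 file_path_list_length;    /* buffer[4..5] -> filepathlength    */
                /* efi_char16_t description[];  NUL-terminated; length is found by
                 *                              hand via utf16_strsize() + 2       */
                /* u8 file_path_list[];         file_path_list_length bytes, then
                 *                              checked by validate_device_path()  */
                /* u8 optional_data[];          whatever remains                   */
        } __packed;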
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 5689ce62fd81..fc3ace3fd4cb 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
| @@ -64,6 +64,7 @@ struct pxa_gpio_chip { | |||
| 64 | unsigned long irq_mask; | 64 | unsigned long irq_mask; |
| 65 | unsigned long irq_edge_rise; | 65 | unsigned long irq_edge_rise; |
| 66 | unsigned long irq_edge_fall; | 66 | unsigned long irq_edge_fall; |
| 67 | int (*set_wake)(unsigned int gpio, unsigned int on); | ||
| 67 | 68 | ||
| 68 | #ifdef CONFIG_PM | 69 | #ifdef CONFIG_PM |
| 69 | unsigned long saved_gplr; | 70 | unsigned long saved_gplr; |
| @@ -269,7 +270,8 @@ static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value) | |||
| 269 | (value ? GPSR_OFFSET : GPCR_OFFSET)); | 270 | (value ? GPSR_OFFSET : GPCR_OFFSET)); |
| 270 | } | 271 | } |
| 271 | 272 | ||
| 272 | static int __devinit pxa_init_gpio_chip(int gpio_end) | 273 | static int __devinit pxa_init_gpio_chip(int gpio_end, |
| 274 | int (*set_wake)(unsigned int, unsigned int)) | ||
| 273 | { | 275 | { |
| 274 | int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1; | 276 | int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1; |
| 275 | struct pxa_gpio_chip *chips; | 277 | struct pxa_gpio_chip *chips; |
| @@ -285,6 +287,7 @@ static int __devinit pxa_init_gpio_chip(int gpio_end) | |||
| 285 | 287 | ||
| 286 | sprintf(chips[i].label, "gpio-%d", i); | 288 | sprintf(chips[i].label, "gpio-%d", i); |
| 287 | chips[i].regbase = gpio_reg_base + BANK_OFF(i); | 289 | chips[i].regbase = gpio_reg_base + BANK_OFF(i); |
| 290 | chips[i].set_wake = set_wake; | ||
| 288 | 291 | ||
| 289 | c->base = gpio; | 292 | c->base = gpio; |
| 290 | c->label = chips[i].label; | 293 | c->label = chips[i].label; |
| @@ -412,6 +415,17 @@ static void pxa_mask_muxed_gpio(struct irq_data *d) | |||
| 412 | writel_relaxed(gfer, c->regbase + GFER_OFFSET); | 415 | writel_relaxed(gfer, c->regbase + GFER_OFFSET); |
| 413 | } | 416 | } |
| 414 | 417 | ||
| 418 | static int pxa_gpio_set_wake(struct irq_data *d, unsigned int on) | ||
| 419 | { | ||
| 420 | int gpio = pxa_irq_to_gpio(d->irq); | ||
| 421 | struct pxa_gpio_chip *c = gpio_to_pxachip(gpio); | ||
| 422 | |||
| 423 | if (c->set_wake) | ||
| 424 | return c->set_wake(gpio, on); | ||
| 425 | else | ||
| 426 | return 0; | ||
| 427 | } | ||
| 428 | |||
| 415 | static void pxa_unmask_muxed_gpio(struct irq_data *d) | 429 | static void pxa_unmask_muxed_gpio(struct irq_data *d) |
| 416 | { | 430 | { |
| 417 | int gpio = pxa_irq_to_gpio(d->irq); | 431 | int gpio = pxa_irq_to_gpio(d->irq); |
| @@ -427,6 +441,7 @@ static struct irq_chip pxa_muxed_gpio_chip = { | |||
| 427 | .irq_mask = pxa_mask_muxed_gpio, | 441 | .irq_mask = pxa_mask_muxed_gpio, |
| 428 | .irq_unmask = pxa_unmask_muxed_gpio, | 442 | .irq_unmask = pxa_unmask_muxed_gpio, |
| 429 | .irq_set_type = pxa_gpio_irq_type, | 443 | .irq_set_type = pxa_gpio_irq_type, |
| 444 | .irq_set_wake = pxa_gpio_set_wake, | ||
| 430 | }; | 445 | }; |
| 431 | 446 | ||
| 432 | static int pxa_gpio_nums(void) | 447 | static int pxa_gpio_nums(void) |
| @@ -471,6 +486,7 @@ static int __devinit pxa_gpio_probe(struct platform_device *pdev) | |||
| 471 | struct pxa_gpio_chip *c; | 486 | struct pxa_gpio_chip *c; |
| 472 | struct resource *res; | 487 | struct resource *res; |
| 473 | struct clk *clk; | 488 | struct clk *clk; |
| 489 | struct pxa_gpio_platform_data *info; | ||
| 474 | int gpio, irq, ret; | 490 | int gpio, irq, ret; |
| 475 | int irq0 = 0, irq1 = 0, irq_mux, gpio_offset = 0; | 491 | int irq0 = 0, irq1 = 0, irq_mux, gpio_offset = 0; |
| 476 | 492 | ||
| @@ -516,7 +532,8 @@ static int __devinit pxa_gpio_probe(struct platform_device *pdev) | |||
| 516 | } | 532 | } |
| 517 | 533 | ||
| 518 | /* Initialize GPIO chips */ | 534 | /* Initialize GPIO chips */ |
| 519 | pxa_init_gpio_chip(pxa_last_gpio); | 535 | info = dev_get_platdata(&pdev->dev); |
| 536 | pxa_init_gpio_chip(pxa_last_gpio, info ? info->gpio_set_wake : NULL); | ||
| 520 | 537 | ||
| 521 | /* clear all GPIO edge detects */ | 538 | /* clear all GPIO edge detects */ |
| 522 | for_each_gpio_chip(gpio, c) { | 539 | for_each_gpio_chip(gpio, c) { |
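The pxa-gpio probe now pulls a wake-up callback out of platform data, and the new pxa_gpio_set_wake() irq_chip hook simply forwards to it, keeping the actual wake-source programming in platform code. A hypothetical board-side hookup; only the gpio_set_wake field name is taken from the probe hunk above, the rest is illustrative:

        /* Board/platform code (sketch); would be handed to the GPIO device
         * through its platform_data pointer. */
        static int board_gpio_set_wake(unsigned int gpio, unsigned int on)
        {
                /* program the SoC or PMIC wake-up source for this GPIO here */
                return 0;
        }

        static struct pxa_gpio_platform_data board_gpio_pdata = {
                .gpio_set_wake = board_gpio_set_wake,
        };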
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 392ce71ed6a1..1dffa8359f88 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
| @@ -149,22 +149,12 @@ static int exynos_drm_gem_map_pages(struct drm_gem_object *obj, | |||
| 149 | unsigned long pfn; | 149 | unsigned long pfn; |
| 150 | 150 | ||
| 151 | if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { | 151 | if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { |
| 152 | unsigned long usize = buf->size; | ||
| 153 | |||
| 154 | if (!buf->pages) | 152 | if (!buf->pages) |
| 155 | return -EINTR; | 153 | return -EINTR; |
| 156 | 154 | ||
| 157 | while (usize > 0) { | 155 | pfn = page_to_pfn(buf->pages[page_offset++]); |
| 158 | pfn = page_to_pfn(buf->pages[page_offset++]); | 156 | } else |
| 159 | vm_insert_mixed(vma, f_vaddr, pfn); | 157 | pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset; |
| 160 | f_vaddr += PAGE_SIZE; | ||
| 161 | usize -= PAGE_SIZE; | ||
| 162 | } | ||
| 163 | |||
| 164 | return 0; | ||
| 165 | } | ||
| 166 | |||
| 167 | pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset; | ||
| 168 | 158 | ||
| 169 | return vm_insert_mixed(vma, f_vaddr, pfn); | 159 | return vm_insert_mixed(vma, f_vaddr, pfn); |
| 170 | } | 160 | } |
| @@ -524,6 +514,8 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp, | |||
| 524 | if (!buffer->pages) | 514 | if (!buffer->pages) |
| 525 | return -EINVAL; | 515 | return -EINVAL; |
| 526 | 516 | ||
| 517 | vma->vm_flags |= VM_MIXEDMAP; | ||
| 518 | |||
| 527 | do { | 519 | do { |
| 528 | ret = vm_insert_page(vma, uaddr, buffer->pages[i++]); | 520 | ret = vm_insert_page(vma, uaddr, buffer->pages[i++]); |
| 529 | if (ret) { | 521 | if (ret) { |
| @@ -710,7 +702,6 @@ int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv, | |||
| 710 | int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 702 | int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
| 711 | { | 703 | { |
| 712 | struct drm_gem_object *obj = vma->vm_private_data; | 704 | struct drm_gem_object *obj = vma->vm_private_data; |
| 713 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); | ||
| 714 | struct drm_device *dev = obj->dev; | 705 | struct drm_device *dev = obj->dev; |
| 715 | unsigned long f_vaddr; | 706 | unsigned long f_vaddr; |
| 716 | pgoff_t page_offset; | 707 | pgoff_t page_offset; |
| @@ -722,21 +713,10 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
| 722 | 713 | ||
| 723 | mutex_lock(&dev->struct_mutex); | 714 | mutex_lock(&dev->struct_mutex); |
| 724 | 715 | ||
| 725 | /* | ||
| 726 | * allocate all pages as desired size if user wants to allocate | ||
| 727 | * physically non-continuous memory. | ||
| 728 | */ | ||
| 729 | if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { | ||
| 730 | ret = exynos_drm_gem_get_pages(obj); | ||
| 731 | if (ret < 0) | ||
| 732 | goto err; | ||
| 733 | } | ||
| 734 | |||
| 735 | ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset); | 716 | ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset); |
| 736 | if (ret < 0) | 717 | if (ret < 0) |
| 737 | DRM_ERROR("failed to map pages.\n"); | 718 | DRM_ERROR("failed to map pages.\n"); |
| 738 | 719 | ||
| 739 | err: | ||
| 740 | mutex_unlock(&dev->struct_mutex); | 720 | mutex_unlock(&dev->struct_mutex); |
| 741 | 721 | ||
| 742 | return convert_to_vm_err_msg(ret); | 722 | return convert_to_vm_err_msg(ret); |
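The exynos hunks drop the per-fault page allocation, flag the mapping with VM_MIXEDMAP, and let the fault handler insert one pfn per fault, taken either from the page array (non-contiguous buffers) or from the contiguous DMA address. A rough sketch of such a handler under the fault API of this kernel generation (vmf->virtual_address), with the buffer type and offset handling simplified:

	#include <linux/mm.h>

	struct demo_buf {
		struct page **pages;	/* set for non-contiguous buffers */
		dma_addr_t dma_addr;	/* base of the contiguous case */
	};

	static int demo_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		struct demo_buf *buf = vma->vm_private_data;
		unsigned long addr = (unsigned long)vmf->virtual_address;
		unsigned long pfn;

		if (buf->pages)
			pfn = page_to_pfn(buf->pages[vmf->pgoff]);
		else
			pfn = (buf->dma_addr >> PAGE_SHIFT) + vmf->pgoff;

		/* requires VM_MIXEDMAP on the VMA, as set in the mmap hunk above */
		return vm_insert_mixed(vma, addr, pfn) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
	}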
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index f51a696486cb..de431942ded4 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
| @@ -1133,6 +1133,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 1133 | return -EINVAL; | 1133 | return -EINVAL; |
| 1134 | } | 1134 | } |
| 1135 | 1135 | ||
| 1136 | if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) { | ||
| 1137 | DRM_DEBUG("execbuf with %u cliprects\n", | ||
| 1138 | args->num_cliprects); | ||
| 1139 | return -EINVAL; | ||
| 1140 | } | ||
| 1136 | cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), | 1141 | cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), |
| 1137 | GFP_KERNEL); | 1142 | GFP_KERNEL); |
| 1138 | if (cliprects == NULL) { | 1143 | if (cliprects == NULL) { |
| @@ -1404,7 +1409,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, | |||
| 1404 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | 1409 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; |
| 1405 | int ret; | 1410 | int ret; |
| 1406 | 1411 | ||
| 1407 | if (args->buffer_count < 1) { | 1412 | if (args->buffer_count < 1 || |
| 1413 | args->buffer_count > UINT_MAX / sizeof(*exec2_list)) { | ||
| 1408 | DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count); | 1414 | DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count); |
| 1409 | return -EINVAL; | 1415 | return -EINVAL; |
| 1410 | } | 1416 | } |
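Both execbuffer hunks guard a user-controlled count before it is multiplied by an element size for kmalloc(): if num_cliprects or buffer_count exceeded UINT_MAX / sizeof(element), the product could wrap and allocate a buffer smaller than what is later written. A sketch of the same check in isolation (helper name invented):

	#include <linux/kernel.h>
	#include <linux/slab.h>

	static void *demo_alloc_array(unsigned int count, size_t elem_size)
	{
		/* reject counts whose size product would wrap, as in the hunks above */
		if (count > UINT_MAX / elem_size)
			return NULL;
		return kmalloc(count * elem_size, GFP_KERNEL);
	}

Later kernels provide kmalloc_array()/kcalloc(), which perform the equivalent overflow check internally.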
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index b4bb1ef77ddc..9d24d65f0c3e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -568,6 +568,7 @@ | |||
| 568 | #define CM0_MASK_SHIFT 16 | 568 | #define CM0_MASK_SHIFT 16 |
| 569 | #define CM0_IZ_OPT_DISABLE (1<<6) | 569 | #define CM0_IZ_OPT_DISABLE (1<<6) |
| 570 | #define CM0_ZR_OPT_DISABLE (1<<5) | 570 | #define CM0_ZR_OPT_DISABLE (1<<5) |
| 571 | #define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5) | ||
| 571 | #define CM0_DEPTH_EVICT_DISABLE (1<<4) | 572 | #define CM0_DEPTH_EVICT_DISABLE (1<<4) |
| 572 | #define CM0_COLOR_EVICT_DISABLE (1<<3) | 573 | #define CM0_COLOR_EVICT_DISABLE (1<<3) |
| 573 | #define CM0_DEPTH_WRITE_DISABLE (1<<1) | 574 | #define CM0_DEPTH_WRITE_DISABLE (1<<1) |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 4d3d736a4f56..90b9793fd5da 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
| @@ -430,8 +430,8 @@ intel_crt_detect(struct drm_connector *connector, bool force) | |||
| 430 | { | 430 | { |
| 431 | struct drm_device *dev = connector->dev; | 431 | struct drm_device *dev = connector->dev; |
| 432 | struct intel_crt *crt = intel_attached_crt(connector); | 432 | struct intel_crt *crt = intel_attached_crt(connector); |
| 433 | struct drm_crtc *crtc; | ||
| 434 | enum drm_connector_status status; | 433 | enum drm_connector_status status; |
| 434 | struct intel_load_detect_pipe tmp; | ||
| 435 | 435 | ||
| 436 | if (I915_HAS_HOTPLUG(dev)) { | 436 | if (I915_HAS_HOTPLUG(dev)) { |
| 437 | if (intel_crt_detect_hotplug(connector)) { | 437 | if (intel_crt_detect_hotplug(connector)) { |
| @@ -450,23 +450,16 @@ intel_crt_detect(struct drm_connector *connector, bool force) | |||
| 450 | return connector->status; | 450 | return connector->status; |
| 451 | 451 | ||
| 452 | /* for pre-945g platforms use load detect */ | 452 | /* for pre-945g platforms use load detect */ |
| 453 | crtc = crt->base.base.crtc; | 453 | if (intel_get_load_detect_pipe(&crt->base, connector, NULL, |
| 454 | if (crtc && crtc->enabled) { | 454 | &tmp)) { |
| 455 | status = intel_crt_load_detect(crt); | 455 | if (intel_crt_detect_ddc(connector)) |
| 456 | } else { | 456 | status = connector_status_connected; |
| 457 | struct intel_load_detect_pipe tmp; | 457 | else |
| 458 | 458 | status = intel_crt_load_detect(crt); | |
| 459 | if (intel_get_load_detect_pipe(&crt->base, connector, NULL, | 459 | intel_release_load_detect_pipe(&crt->base, connector, |
| 460 | &tmp)) { | 460 | &tmp); |
| 461 | if (intel_crt_detect_ddc(connector)) | 461 | } else |
| 462 | status = connector_status_connected; | 462 | status = connector_status_unknown; |
| 463 | else | ||
| 464 | status = intel_crt_load_detect(crt); | ||
| 465 | intel_release_load_detect_pipe(&crt->base, connector, | ||
| 466 | &tmp); | ||
| 467 | } else | ||
| 468 | status = connector_status_unknown; | ||
| 469 | } | ||
| 470 | 463 | ||
| 471 | return status; | 464 | return status; |
| 472 | } | 465 | } |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index f75806e5bff5..80fce51e2f43 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
| @@ -401,6 +401,14 @@ static int init_render_ring(struct intel_ring_buffer *ring) | |||
| 401 | if (INTEL_INFO(dev)->gen >= 6) { | 401 | if (INTEL_INFO(dev)->gen >= 6) { |
| 402 | I915_WRITE(INSTPM, | 402 | I915_WRITE(INSTPM, |
| 403 | INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING); | 403 | INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING); |
| 404 | |||
| 405 | /* From the Sandybridge PRM, volume 1 part 3, page 24: | ||
| 406 | * "If this bit is set, STCunit will have LRA as replacement | ||
| 407 | * policy. [...] This bit must be reset. LRA replacement | ||
| 408 | * policy is not supported." | ||
| 409 | */ | ||
| 410 | I915_WRITE(CACHE_MODE_0, | ||
| 411 | CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT); | ||
| 404 | } | 412 | } |
| 405 | 413 | ||
| 406 | return ret; | 414 | return ret; |
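The new CACHE_MODE_0 write follows what appears to be the masked-register convention of these i915 registers (also visible in the neighbouring INSTPM write): the upper 16 bits are per-bit write enables, so writing only bit << 16 clears that bit and (bit << 16) | bit sets it, without disturbing the rest of the register. Sketched as helpers, purely for illustration:

	#include <linux/types.h>

	/* bits 31:16 select which of bits 15:0 the hardware actually updates */
	static inline u32 demo_masked_bit_enable(u32 bit)
	{
		return (bit << 16) | bit;
	}

	static inline u32 demo_masked_bit_disable(u32 bit)
	{
		return bit << 16;
	}

CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT in the hunk is the disable form, matching the PRM note quoted in the comment.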
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index e36b171c1e7d..232d77d07d8b 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
| @@ -731,6 +731,7 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, | |||
| 731 | uint16_t width, height; | 731 | uint16_t width, height; |
| 732 | uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; | 732 | uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; |
| 733 | uint16_t h_sync_offset, v_sync_offset; | 733 | uint16_t h_sync_offset, v_sync_offset; |
| 734 | int mode_clock; | ||
| 734 | 735 | ||
| 735 | width = mode->crtc_hdisplay; | 736 | width = mode->crtc_hdisplay; |
| 736 | height = mode->crtc_vdisplay; | 737 | height = mode->crtc_vdisplay; |
| @@ -745,7 +746,11 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, | |||
| 745 | h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; | 746 | h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; |
| 746 | v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; | 747 | v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; |
| 747 | 748 | ||
| 748 | dtd->part1.clock = mode->clock / 10; | 749 | mode_clock = mode->clock; |
| 750 | mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1; | ||
| 751 | mode_clock /= 10; | ||
| 752 | dtd->part1.clock = mode_clock; | ||
| 753 | |||
| 749 | dtd->part1.h_active = width & 0xff; | 754 | dtd->part1.h_active = width & 0xff; |
| 750 | dtd->part1.h_blank = h_blank_len & 0xff; | 755 | dtd->part1.h_blank = h_blank_len & 0xff; |
| 751 | dtd->part1.h_high = (((width >> 8) & 0xf) << 4) | | 756 | dtd->part1.h_high = (((width >> 8) & 0xf) << 4) | |
| @@ -996,7 +1001,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
| 996 | struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); | 1001 | struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); |
| 997 | u32 sdvox; | 1002 | u32 sdvox; |
| 998 | struct intel_sdvo_in_out_map in_out; | 1003 | struct intel_sdvo_in_out_map in_out; |
| 999 | struct intel_sdvo_dtd input_dtd; | 1004 | struct intel_sdvo_dtd input_dtd, output_dtd; |
| 1000 | int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); | 1005 | int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); |
| 1001 | int rate; | 1006 | int rate; |
| 1002 | 1007 | ||
| @@ -1021,20 +1026,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
| 1021 | intel_sdvo->attached_output)) | 1026 | intel_sdvo->attached_output)) |
| 1022 | return; | 1027 | return; |
| 1023 | 1028 | ||
| 1024 | /* We have tried to get input timing in mode_fixup, and filled into | 1029 | /* lvds has a special fixed output timing. */ |
| 1025 | * adjusted_mode. | 1030 | if (intel_sdvo->is_lvds) |
| 1026 | */ | 1031 | intel_sdvo_get_dtd_from_mode(&output_dtd, |
| 1027 | if (intel_sdvo->is_tv || intel_sdvo->is_lvds) { | 1032 | intel_sdvo->sdvo_lvds_fixed_mode); |
| 1028 | input_dtd = intel_sdvo->input_dtd; | 1033 | else |
| 1029 | } else { | 1034 | intel_sdvo_get_dtd_from_mode(&output_dtd, mode); |
| 1030 | /* Set the output timing to the screen */ | 1035 | (void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd); |
| 1031 | if (!intel_sdvo_set_target_output(intel_sdvo, | ||
| 1032 | intel_sdvo->attached_output)) | ||
| 1033 | return; | ||
| 1034 | |||
| 1035 | intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); | ||
| 1036 | (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd); | ||
| 1037 | } | ||
| 1038 | 1036 | ||
| 1039 | /* Set the input timing to the screen. Assume always input 0. */ | 1037 | /* Set the input timing to the screen. Assume always input 0. */ |
| 1040 | if (!intel_sdvo_set_target_input(intel_sdvo)) | 1038 | if (!intel_sdvo_set_target_input(intel_sdvo)) |
| @@ -1052,6 +1050,10 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
| 1052 | !intel_sdvo_set_tv_format(intel_sdvo)) | 1050 | !intel_sdvo_set_tv_format(intel_sdvo)) |
| 1053 | return; | 1051 | return; |
| 1054 | 1052 | ||
| 1053 | /* We have tried to get input timing in mode_fixup, and filled into | ||
| 1054 | * adjusted_mode. | ||
| 1055 | */ | ||
| 1056 | intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); | ||
| 1055 | (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); | 1057 | (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); |
| 1056 | 1058 | ||
| 1057 | switch (pixel_multiplier) { | 1059 | switch (pixel_multiplier) { |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index b5ff1f7b6f7e..af1054f8202a 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
| @@ -575,6 +575,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
| 575 | 575 | ||
| 576 | if (rdev->family < CHIP_RV770) | 576 | if (rdev->family < CHIP_RV770) |
| 577 | pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; | 577 | pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; |
| 578 | /* use frac fb div on APUs */ | ||
| 579 | if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) | ||
| 580 | pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; | ||
| 578 | } else { | 581 | } else { |
| 579 | pll->flags |= RADEON_PLL_LEGACY; | 582 | pll->flags |= RADEON_PLL_LEGACY; |
| 580 | 583 | ||
| @@ -955,8 +958,8 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode | |||
| 955 | break; | 958 | break; |
| 956 | } | 959 | } |
| 957 | 960 | ||
| 958 | if (radeon_encoder->active_device & | 961 | if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || |
| 959 | (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) { | 962 | (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) { |
| 960 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 963 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
| 961 | struct drm_connector *connector = | 964 | struct drm_connector *connector = |
| 962 | radeon_get_connector_for_encoder(encoder); | 965 | radeon_get_connector_for_encoder(encoder); |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 8086c96e0b06..0a1d4bd65edc 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
| @@ -533,7 +533,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index) | |||
| 533 | radeon_legacy_init_crtc(dev, radeon_crtc); | 533 | radeon_legacy_init_crtc(dev, radeon_crtc); |
| 534 | } | 534 | } |
| 535 | 535 | ||
| 536 | static const char *encoder_names[36] = { | 536 | static const char *encoder_names[37] = { |
| 537 | "NONE", | 537 | "NONE", |
| 538 | "INTERNAL_LVDS", | 538 | "INTERNAL_LVDS", |
| 539 | "INTERNAL_TMDS1", | 539 | "INTERNAL_TMDS1", |
| @@ -570,6 +570,7 @@ static const char *encoder_names[36] = { | |||
| 570 | "INTERNAL_UNIPHY2", | 570 | "INTERNAL_UNIPHY2", |
| 571 | "NUTMEG", | 571 | "NUTMEG", |
| 572 | "TRAVIS", | 572 | "TRAVIS", |
| 573 | "INTERNAL_VCE" | ||
| 573 | }; | 574 | }; |
| 574 | 575 | ||
| 575 | static const char *connector_names[15] = { | 576 | static const char *connector_names[15] = { |
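The radeon hunk grows a hand-maintained bound from 36 to 37 entries so the new "INTERNAL_VCE" string fits; an undersized bound here would risk reading past the array when a newer encoder id is printed. One common way to avoid keeping the count by hand, shown as a sketch rather than what the driver does:

	#include <linux/kernel.h>	/* ARRAY_SIZE() */

	static const char *encoder_names[] = {
		"NONE",
		"INTERNAL_LVDS",
		/* ... */
		"TRAVIS",
		"INTERNAL_VCE",
	};

	static const char *demo_encoder_name(unsigned int id)
	{
		return id < ARRAY_SIZE(encoder_names) ? encoder_names[id] : "UNKNOWN";
	}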
diff --git a/drivers/hsi/clients/hsi_char.c b/drivers/hsi/clients/hsi_char.c index 88a050df2389..3ad91f6447d8 100644 --- a/drivers/hsi/clients/hsi_char.c +++ b/drivers/hsi/clients/hsi_char.c | |||
| @@ -123,7 +123,7 @@ struct hsc_client_data { | |||
| 123 | static unsigned int hsc_major; | 123 | static unsigned int hsc_major; |
| 124 | /* Maximum buffer size that hsi_char will accept from userspace */ | 124 | /* Maximum buffer size that hsi_char will accept from userspace */ |
| 125 | static unsigned int max_data_size = 0x1000; | 125 | static unsigned int max_data_size = 0x1000; |
| 126 | module_param(max_data_size, uint, S_IRUSR | S_IWUSR); | 126 | module_param(max_data_size, uint, 0); |
| 127 | MODULE_PARM_DESC(max_data_size, "max read/write data size [4,8..65536] (^2)"); | 127 | MODULE_PARM_DESC(max_data_size, "max read/write data size [4,8..65536] (^2)"); |
| 128 | 128 | ||
| 129 | static void hsc_add_tail(struct hsc_channel *channel, struct hsi_msg *msg, | 129 | static void hsc_add_tail(struct hsc_channel *channel, struct hsi_msg *msg, |
diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c index 4e2d79b79334..2d58f939d27f 100644 --- a/drivers/hsi/hsi.c +++ b/drivers/hsi/hsi.c | |||
| @@ -21,26 +21,13 @@ | |||
| 21 | */ | 21 | */ |
| 22 | #include <linux/hsi/hsi.h> | 22 | #include <linux/hsi/hsi.h> |
| 23 | #include <linux/compiler.h> | 23 | #include <linux/compiler.h> |
| 24 | #include <linux/rwsem.h> | ||
| 25 | #include <linux/list.h> | 24 | #include <linux/list.h> |
| 26 | #include <linux/spinlock.h> | ||
| 27 | #include <linux/kobject.h> | 25 | #include <linux/kobject.h> |
| 28 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
| 29 | #include <linux/string.h> | 27 | #include <linux/string.h> |
| 28 | #include <linux/notifier.h> | ||
| 30 | #include "hsi_core.h" | 29 | #include "hsi_core.h" |
| 31 | 30 | ||
| 32 | static struct device_type hsi_ctrl = { | ||
| 33 | .name = "hsi_controller", | ||
| 34 | }; | ||
| 35 | |||
| 36 | static struct device_type hsi_cl = { | ||
| 37 | .name = "hsi_client", | ||
| 38 | }; | ||
| 39 | |||
| 40 | static struct device_type hsi_port = { | ||
| 41 | .name = "hsi_port", | ||
| 42 | }; | ||
| 43 | |||
| 44 | static ssize_t modalias_show(struct device *dev, | 31 | static ssize_t modalias_show(struct device *dev, |
| 45 | struct device_attribute *a __maybe_unused, char *buf) | 32 | struct device_attribute *a __maybe_unused, char *buf) |
| 46 | { | 33 | { |
| @@ -54,8 +41,7 @@ static struct device_attribute hsi_bus_dev_attrs[] = { | |||
| 54 | 41 | ||
| 55 | static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env) | 42 | static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env) |
| 56 | { | 43 | { |
| 57 | if (dev->type == &hsi_cl) | 44 | add_uevent_var(env, "MODALIAS=hsi:%s", dev_name(dev)); |
| 58 | add_uevent_var(env, "MODALIAS=hsi:%s", dev_name(dev)); | ||
| 59 | 45 | ||
| 60 | return 0; | 46 | return 0; |
| 61 | } | 47 | } |
| @@ -80,12 +66,10 @@ static void hsi_client_release(struct device *dev) | |||
| 80 | static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info) | 66 | static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info) |
| 81 | { | 67 | { |
| 82 | struct hsi_client *cl; | 68 | struct hsi_client *cl; |
| 83 | unsigned long flags; | ||
| 84 | 69 | ||
| 85 | cl = kzalloc(sizeof(*cl), GFP_KERNEL); | 70 | cl = kzalloc(sizeof(*cl), GFP_KERNEL); |
| 86 | if (!cl) | 71 | if (!cl) |
| 87 | return; | 72 | return; |
| 88 | cl->device.type = &hsi_cl; | ||
| 89 | cl->tx_cfg = info->tx_cfg; | 73 | cl->tx_cfg = info->tx_cfg; |
| 90 | cl->rx_cfg = info->rx_cfg; | 74 | cl->rx_cfg = info->rx_cfg; |
| 91 | cl->device.bus = &hsi_bus_type; | 75 | cl->device.bus = &hsi_bus_type; |
| @@ -93,14 +77,11 @@ static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info) | |||
| 93 | cl->device.release = hsi_client_release; | 77 | cl->device.release = hsi_client_release; |
| 94 | dev_set_name(&cl->device, info->name); | 78 | dev_set_name(&cl->device, info->name); |
| 95 | cl->device.platform_data = info->platform_data; | 79 | cl->device.platform_data = info->platform_data; |
| 96 | spin_lock_irqsave(&port->clock, flags); | ||
| 97 | list_add_tail(&cl->link, &port->clients); | ||
| 98 | spin_unlock_irqrestore(&port->clock, flags); | ||
| 99 | if (info->archdata) | 80 | if (info->archdata) |
| 100 | cl->device.archdata = *info->archdata; | 81 | cl->device.archdata = *info->archdata; |
| 101 | if (device_register(&cl->device) < 0) { | 82 | if (device_register(&cl->device) < 0) { |
| 102 | pr_err("hsi: failed to register client: %s\n", info->name); | 83 | pr_err("hsi: failed to register client: %s\n", info->name); |
| 103 | kfree(cl); | 84 | put_device(&cl->device); |
| 104 | } | 85 | } |
| 105 | } | 86 | } |
| 106 | 87 | ||
| @@ -120,13 +101,6 @@ static void hsi_scan_board_info(struct hsi_controller *hsi) | |||
| 120 | 101 | ||
| 121 | static int hsi_remove_client(struct device *dev, void *data __maybe_unused) | 102 | static int hsi_remove_client(struct device *dev, void *data __maybe_unused) |
| 122 | { | 103 | { |
| 123 | struct hsi_client *cl = to_hsi_client(dev); | ||
| 124 | struct hsi_port *port = to_hsi_port(dev->parent); | ||
| 125 | unsigned long flags; | ||
| 126 | |||
| 127 | spin_lock_irqsave(&port->clock, flags); | ||
| 128 | list_del(&cl->link); | ||
| 129 | spin_unlock_irqrestore(&port->clock, flags); | ||
| 130 | device_unregister(dev); | 104 | device_unregister(dev); |
| 131 | 105 | ||
| 132 | return 0; | 106 | return 0; |
| @@ -140,12 +114,17 @@ static int hsi_remove_port(struct device *dev, void *data __maybe_unused) | |||
| 140 | return 0; | 114 | return 0; |
| 141 | } | 115 | } |
| 142 | 116 | ||
| 143 | static void hsi_controller_release(struct device *dev __maybe_unused) | 117 | static void hsi_controller_release(struct device *dev) |
| 144 | { | 118 | { |
| 119 | struct hsi_controller *hsi = to_hsi_controller(dev); | ||
| 120 | |||
| 121 | kfree(hsi->port); | ||
| 122 | kfree(hsi); | ||
| 145 | } | 123 | } |
| 146 | 124 | ||
| 147 | static void hsi_port_release(struct device *dev __maybe_unused) | 125 | static void hsi_port_release(struct device *dev) |
| 148 | { | 126 | { |
| 127 | kfree(to_hsi_port(dev)); | ||
| 149 | } | 128 | } |
| 150 | 129 | ||
| 151 | /** | 130 | /** |
| @@ -170,20 +149,12 @@ int hsi_register_controller(struct hsi_controller *hsi) | |||
| 170 | unsigned int i; | 149 | unsigned int i; |
| 171 | int err; | 150 | int err; |
| 172 | 151 | ||
| 173 | hsi->device.type = &hsi_ctrl; | 152 | err = device_add(&hsi->device); |
| 174 | hsi->device.bus = &hsi_bus_type; | ||
| 175 | hsi->device.release = hsi_controller_release; | ||
| 176 | err = device_register(&hsi->device); | ||
| 177 | if (err < 0) | 153 | if (err < 0) |
| 178 | return err; | 154 | return err; |
| 179 | for (i = 0; i < hsi->num_ports; i++) { | 155 | for (i = 0; i < hsi->num_ports; i++) { |
| 180 | hsi->port[i].device.parent = &hsi->device; | 156 | hsi->port[i]->device.parent = &hsi->device; |
| 181 | hsi->port[i].device.bus = &hsi_bus_type; | 157 | err = device_add(&hsi->port[i]->device); |
| 182 | hsi->port[i].device.release = hsi_port_release; | ||
| 183 | hsi->port[i].device.type = &hsi_port; | ||
| 184 | INIT_LIST_HEAD(&hsi->port[i].clients); | ||
| 185 | spin_lock_init(&hsi->port[i].clock); | ||
| 186 | err = device_register(&hsi->port[i].device); | ||
| 187 | if (err < 0) | 158 | if (err < 0) |
| 188 | goto out; | 159 | goto out; |
| 189 | } | 160 | } |
| @@ -192,7 +163,9 @@ int hsi_register_controller(struct hsi_controller *hsi) | |||
| 192 | 163 | ||
| 193 | return 0; | 164 | return 0; |
| 194 | out: | 165 | out: |
| 195 | hsi_unregister_controller(hsi); | 166 | while (i-- > 0) |
| 167 | device_del(&hsi->port[i]->device); | ||
| 168 | device_del(&hsi->device); | ||
| 196 | 169 | ||
| 197 | return err; | 170 | return err; |
| 198 | } | 171 | } |
| @@ -223,6 +196,29 @@ static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused) | |||
| 223 | } | 196 | } |
| 224 | 197 | ||
| 225 | /** | 198 | /** |
| 199 | * hsi_put_controller - Free an HSI controller | ||
| 200 | * | ||
| 201 | * @hsi: Pointer to the HSI controller to freed | ||
| 202 | * | ||
| 203 | * HSI controller drivers should only use this function if they need | ||
| 204 | * to free their allocated hsi_controller structures before a successful | ||
| 205 | * call to hsi_register_controller. Other use is not allowed. | ||
| 206 | */ | ||
| 207 | void hsi_put_controller(struct hsi_controller *hsi) | ||
| 208 | { | ||
| 209 | unsigned int i; | ||
| 210 | |||
| 211 | if (!hsi) | ||
| 212 | return; | ||
| 213 | |||
| 214 | for (i = 0; i < hsi->num_ports; i++) | ||
| 215 | if (hsi->port && hsi->port[i]) | ||
| 216 | put_device(&hsi->port[i]->device); | ||
| 217 | put_device(&hsi->device); | ||
| 218 | } | ||
| 219 | EXPORT_SYMBOL_GPL(hsi_put_controller); | ||
| 220 | |||
| 221 | /** | ||
| 226 | * hsi_alloc_controller - Allocate an HSI controller and its ports | 222 | * hsi_alloc_controller - Allocate an HSI controller and its ports |
| 227 | * @n_ports: Number of ports on the HSI controller | 223 | * @n_ports: Number of ports on the HSI controller |
| 228 | * @flags: Kernel allocation flags | 224 | * @flags: Kernel allocation flags |
| @@ -232,55 +228,52 @@ static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused) | |||
| 232 | struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags) | 228 | struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags) |
| 233 | { | 229 | { |
| 234 | struct hsi_controller *hsi; | 230 | struct hsi_controller *hsi; |
| 235 | struct hsi_port *port; | 231 | struct hsi_port **port; |
| 236 | unsigned int i; | 232 | unsigned int i; |
| 237 | 233 | ||
| 238 | if (!n_ports) | 234 | if (!n_ports) |
| 239 | return NULL; | 235 | return NULL; |
| 240 | 236 | ||
| 241 | port = kzalloc(sizeof(*port)*n_ports, flags); | ||
| 242 | if (!port) | ||
| 243 | return NULL; | ||
| 244 | hsi = kzalloc(sizeof(*hsi), flags); | 237 | hsi = kzalloc(sizeof(*hsi), flags); |
| 245 | if (!hsi) | 238 | if (!hsi) |
| 246 | goto out; | 239 | return NULL; |
| 247 | for (i = 0; i < n_ports; i++) { | 240 | port = kzalloc(sizeof(*port)*n_ports, flags); |
| 248 | dev_set_name(&port[i].device, "port%d", i); | 241 | if (!port) { |
| 249 | port[i].num = i; | 242 | kfree(hsi); |
| 250 | port[i].async = hsi_dummy_msg; | 243 | return NULL; |
| 251 | port[i].setup = hsi_dummy_cl; | ||
| 252 | port[i].flush = hsi_dummy_cl; | ||
| 253 | port[i].start_tx = hsi_dummy_cl; | ||
| 254 | port[i].stop_tx = hsi_dummy_cl; | ||
| 255 | port[i].release = hsi_dummy_cl; | ||
| 256 | mutex_init(&port[i].lock); | ||
| 257 | } | 244 | } |
| 258 | hsi->num_ports = n_ports; | 245 | hsi->num_ports = n_ports; |
| 259 | hsi->port = port; | 246 | hsi->port = port; |
| 247 | hsi->device.release = hsi_controller_release; | ||
| 248 | device_initialize(&hsi->device); | ||
| 249 | |||
| 250 | for (i = 0; i < n_ports; i++) { | ||
| 251 | port[i] = kzalloc(sizeof(**port), flags); | ||
| 252 | if (port[i] == NULL) | ||
| 253 | goto out; | ||
| 254 | port[i]->num = i; | ||
| 255 | port[i]->async = hsi_dummy_msg; | ||
| 256 | port[i]->setup = hsi_dummy_cl; | ||
| 257 | port[i]->flush = hsi_dummy_cl; | ||
| 258 | port[i]->start_tx = hsi_dummy_cl; | ||
| 259 | port[i]->stop_tx = hsi_dummy_cl; | ||
| 260 | port[i]->release = hsi_dummy_cl; | ||
| 261 | mutex_init(&port[i]->lock); | ||
| 262 | ATOMIC_INIT_NOTIFIER_HEAD(&port[i]->n_head); | ||
| 263 | dev_set_name(&port[i]->device, "port%d", i); | ||
| 264 | hsi->port[i]->device.release = hsi_port_release; | ||
| 265 | device_initialize(&hsi->port[i]->device); | ||
| 266 | } | ||
| 260 | 267 | ||
| 261 | return hsi; | 268 | return hsi; |
| 262 | out: | 269 | out: |
| 263 | kfree(port); | 270 | hsi_put_controller(hsi); |
| 264 | 271 | ||
| 265 | return NULL; | 272 | return NULL; |
| 266 | } | 273 | } |
| 267 | EXPORT_SYMBOL_GPL(hsi_alloc_controller); | 274 | EXPORT_SYMBOL_GPL(hsi_alloc_controller); |
| 268 | 275 | ||
| 269 | /** | 276 | /** |
| 270 | * hsi_free_controller - Free an HSI controller | ||
| 271 | * @hsi: Pointer to HSI controller | ||
| 272 | */ | ||
| 273 | void hsi_free_controller(struct hsi_controller *hsi) | ||
| 274 | { | ||
| 275 | if (!hsi) | ||
| 276 | return; | ||
| 277 | |||
| 278 | kfree(hsi->port); | ||
| 279 | kfree(hsi); | ||
| 280 | } | ||
| 281 | EXPORT_SYMBOL_GPL(hsi_free_controller); | ||
| 282 | |||
| 283 | /** | ||
| 284 | * hsi_free_msg - Free an HSI message | 277 | * hsi_free_msg - Free an HSI message |
| 285 | * @msg: Pointer to the HSI message | 278 | * @msg: Pointer to the HSI message |
| 286 | * | 279 | * |
| @@ -414,37 +407,67 @@ void hsi_release_port(struct hsi_client *cl) | |||
| 414 | } | 407 | } |
| 415 | EXPORT_SYMBOL_GPL(hsi_release_port); | 408 | EXPORT_SYMBOL_GPL(hsi_release_port); |
| 416 | 409 | ||
| 417 | static int hsi_start_rx(struct hsi_client *cl, void *data __maybe_unused) | 410 | static int hsi_event_notifier_call(struct notifier_block *nb, |
| 411 | unsigned long event, void *data __maybe_unused) | ||
| 418 | { | 412 | { |
| 419 | if (cl->hsi_start_rx) | 413 | struct hsi_client *cl = container_of(nb, struct hsi_client, nb); |
| 420 | (*cl->hsi_start_rx)(cl); | 414 | |
| 415 | (*cl->ehandler)(cl, event); | ||
| 421 | 416 | ||
| 422 | return 0; | 417 | return 0; |
| 423 | } | 418 | } |
| 424 | 419 | ||
| 425 | static int hsi_stop_rx(struct hsi_client *cl, void *data __maybe_unused) | 420 | /** |
| 421 | * hsi_register_port_event - Register a client to receive port events | ||
| 422 | * @cl: HSI client that wants to receive port events | ||
| 423 | * @cb: Event handler callback | ||
| 424 | * | ||
| 425 | * Clients should register a callback to be able to receive | ||
| 426 | * events from the ports. Registration should happen after | ||
| 427 | * claiming the port. | ||
| 428 | * The handler can be called in interrupt context. | ||
| 429 | * | ||
| 430 | * Returns -errno on error, or 0 on success. | ||
| 431 | */ | ||
| 432 | int hsi_register_port_event(struct hsi_client *cl, | ||
| 433 | void (*handler)(struct hsi_client *, unsigned long)) | ||
| 426 | { | 434 | { |
| 427 | if (cl->hsi_stop_rx) | 435 | struct hsi_port *port = hsi_get_port(cl); |
| 428 | (*cl->hsi_stop_rx)(cl); | ||
| 429 | 436 | ||
| 430 | return 0; | 437 | if (!handler || cl->ehandler) |
| 438 | return -EINVAL; | ||
| 439 | if (!hsi_port_claimed(cl)) | ||
| 440 | return -EACCES; | ||
| 441 | cl->ehandler = handler; | ||
| 442 | cl->nb.notifier_call = hsi_event_notifier_call; | ||
| 443 | |||
| 444 | return atomic_notifier_chain_register(&port->n_head, &cl->nb); | ||
| 431 | } | 445 | } |
| 446 | EXPORT_SYMBOL_GPL(hsi_register_port_event); | ||
| 432 | 447 | ||
| 433 | static int hsi_port_for_each_client(struct hsi_port *port, void *data, | 448 | /** |
| 434 | int (*fn)(struct hsi_client *cl, void *data)) | 449 | * hsi_unregister_port_event - Stop receiving port events for a client |
| 450 | * @cl: HSI client that wants to stop receiving port events | ||
| 451 | * | ||
| 452 | * Clients should call this function before releasing their associated | ||
| 453 | * port. | ||
| 454 | * | ||
| 455 | * Returns -errno on error, or 0 on success. | ||
| 456 | */ | ||
| 457 | int hsi_unregister_port_event(struct hsi_client *cl) | ||
| 435 | { | 458 | { |
| 436 | struct hsi_client *cl; | 459 | struct hsi_port *port = hsi_get_port(cl); |
| 460 | int err; | ||
| 437 | 461 | ||
| 438 | spin_lock(&port->clock); | 462 | WARN_ON(!hsi_port_claimed(cl)); |
| 439 | list_for_each_entry(cl, &port->clients, link) { | ||
| 440 | spin_unlock(&port->clock); | ||
| 441 | (*fn)(cl, data); | ||
| 442 | spin_lock(&port->clock); | ||
| 443 | } | ||
| 444 | spin_unlock(&port->clock); | ||
| 445 | 463 | ||
| 446 | return 0; | 464 | err = atomic_notifier_chain_unregister(&port->n_head, &cl->nb); |
| 465 | if (!err) | ||
| 466 | cl->ehandler = NULL; | ||
| 467 | |||
| 468 | return err; | ||
| 447 | } | 469 | } |
| 470 | EXPORT_SYMBOL_GPL(hsi_unregister_port_event); | ||
| 448 | 471 | ||
| 449 | /** | 472 | /** |
| 450 | * hsi_event -Notifies clients about port events | 473 | * hsi_event -Notifies clients about port events |
| @@ -458,22 +481,12 @@ static int hsi_port_for_each_client(struct hsi_port *port, void *data, | |||
| 458 | * Events: | 481 | * Events: |
| 459 | * HSI_EVENT_START_RX - Incoming wake line high | 482 | * HSI_EVENT_START_RX - Incoming wake line high |
| 460 | * HSI_EVENT_STOP_RX - Incoming wake line down | 483 | * HSI_EVENT_STOP_RX - Incoming wake line down |
| 484 | * | ||
| 485 | * Returns -errno on error, or 0 on success. | ||
| 461 | */ | 486 | */ |
| 462 | void hsi_event(struct hsi_port *port, unsigned int event) | 487 | int hsi_event(struct hsi_port *port, unsigned long event) |
| 463 | { | 488 | { |
| 464 | int (*fn)(struct hsi_client *cl, void *data); | 489 | return atomic_notifier_call_chain(&port->n_head, event, NULL); |
| 465 | |||
| 466 | switch (event) { | ||
| 467 | case HSI_EVENT_START_RX: | ||
| 468 | fn = hsi_start_rx; | ||
| 469 | break; | ||
| 470 | case HSI_EVENT_STOP_RX: | ||
| 471 | fn = hsi_stop_rx; | ||
| 472 | break; | ||
| 473 | default: | ||
| 474 | return; | ||
| 475 | } | ||
| 476 | hsi_port_for_each_client(port, NULL, fn); | ||
| 477 | } | 490 | } |
| 478 | EXPORT_SYMBOL_GPL(hsi_event); | 491 | EXPORT_SYMBOL_GPL(hsi_event); |
| 479 | 492 | ||
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c index ce43642ef03e..f85ce70d9677 100644 --- a/drivers/hwmon/ad7314.c +++ b/drivers/hwmon/ad7314.c | |||
| @@ -47,7 +47,7 @@ struct ad7314_data { | |||
| 47 | u16 rx ____cacheline_aligned; | 47 | u16 rx ____cacheline_aligned; |
| 48 | }; | 48 | }; |
| 49 | 49 | ||
| 50 | static int ad7314_spi_read(struct ad7314_data *chip, s16 *data) | 50 | static int ad7314_spi_read(struct ad7314_data *chip) |
| 51 | { | 51 | { |
| 52 | int ret; | 52 | int ret; |
| 53 | 53 | ||
| @@ -57,9 +57,7 @@ static int ad7314_spi_read(struct ad7314_data *chip, s16 *data) | |||
| 57 | return ret; | 57 | return ret; |
| 58 | } | 58 | } |
| 59 | 59 | ||
| 60 | *data = be16_to_cpu(chip->rx); | 60 | return be16_to_cpu(chip->rx); |
| 61 | |||
| 62 | return ret; | ||
| 63 | } | 61 | } |
| 64 | 62 | ||
| 65 | static ssize_t ad7314_show_temperature(struct device *dev, | 63 | static ssize_t ad7314_show_temperature(struct device *dev, |
| @@ -70,12 +68,12 @@ static ssize_t ad7314_show_temperature(struct device *dev, | |||
| 70 | s16 data; | 68 | s16 data; |
| 71 | int ret; | 69 | int ret; |
| 72 | 70 | ||
| 73 | ret = ad7314_spi_read(chip, &data); | 71 | ret = ad7314_spi_read(chip); |
| 74 | if (ret < 0) | 72 | if (ret < 0) |
| 75 | return ret; | 73 | return ret; |
| 76 | switch (spi_get_device_id(chip->spi_dev)->driver_data) { | 74 | switch (spi_get_device_id(chip->spi_dev)->driver_data) { |
| 77 | case ad7314: | 75 | case ad7314: |
| 78 | data = (data & AD7314_TEMP_MASK) >> AD7314_TEMP_OFFSET; | 76 | data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_OFFSET; |
| 79 | data = (data << 6) >> 6; | 77 | data = (data << 6) >> 6; |
| 80 | 78 | ||
| 81 | return sprintf(buf, "%d\n", 250 * data); | 79 | return sprintf(buf, "%d\n", 250 * data); |
| @@ -86,7 +84,7 @@ static ssize_t ad7314_show_temperature(struct device *dev, | |||
| 86 | * with a sign bit - which is a 14 bit 2's complement | 84 | * with a sign bit - which is a 14 bit 2's complement |
| 87 | * register. 1lsb - 31.25 milli degrees centigrade | 85 | * register. 1lsb - 31.25 milli degrees centigrade |
| 88 | */ | 86 | */ |
| 89 | data &= ADT7301_TEMP_MASK; | 87 | data = ret & ADT7301_TEMP_MASK; |
| 90 | data = (data << 2) >> 2; | 88 | data = (data << 2) >> 2; |
| 91 | 89 | ||
| 92 | return sprintf(buf, "%d\n", | 90 | return sprintf(buf, "%d\n", |
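The ad7314 change folds the output parameter into the return value: the SPI read now returns the raw 16-bit sample as a non-negative int or a negative errno, so callers test ret < 0 and otherwise mask the value directly. The convention works because a 16-bit sample always fits in the positive range of int; a toy version of such a helper:

	#include <linux/errno.h>
	#include <linux/types.h>

	/* negative return is -errno, otherwise the raw 16-bit sample */
	static int demo_read_raw(bool io_ok, u16 hw_value)
	{
		if (!io_ok)
			return -EIO;
		return hw_value;
	}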
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c index 37a8fc92b44a..e8e18cab1fb8 100644 --- a/drivers/hwmon/fam15h_power.c +++ b/drivers/hwmon/fam15h_power.c | |||
| @@ -128,17 +128,20 @@ static bool __devinit fam15h_power_is_internal_node0(struct pci_dev *f4) | |||
| 128 | * counter saturations resulting in bogus power readings. | 128 | * counter saturations resulting in bogus power readings. |
| 129 | * We correct this value ourselves to cope with older BIOSes. | 129 | * We correct this value ourselves to cope with older BIOSes. |
| 130 | */ | 130 | */ |
| 131 | static DEFINE_PCI_DEVICE_TABLE(affected_device) = { | ||
| 132 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, | ||
| 133 | { 0 } | ||
| 134 | }; | ||
| 135 | |||
| 131 | static void __devinit tweak_runavg_range(struct pci_dev *pdev) | 136 | static void __devinit tweak_runavg_range(struct pci_dev *pdev) |
| 132 | { | 137 | { |
| 133 | u32 val; | 138 | u32 val; |
| 134 | const struct pci_device_id affected_device = { | ||
| 135 | PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }; | ||
| 136 | 139 | ||
| 137 | /* | 140 | /* |
| 138 | * let this quirk apply only to the current version of the | 141 | * let this quirk apply only to the current version of the |
| 139 | * northbridge, since future versions may change the behavior | 142 | * northbridge, since future versions may change the behavior |
| 140 | */ | 143 | */ |
| 141 | if (!pci_match_id(&affected_device, pdev)) | 144 | if (!pci_match_id(affected_device, pdev)) |
| 142 | return; | 145 | return; |
| 143 | 146 | ||
| 144 | pci_bus_read_config_dword(pdev->bus, | 147 | pci_bus_read_config_dword(pdev->bus, |
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 426bb7617ec6..b0d0bc8a6fb6 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
| @@ -1854,6 +1854,8 @@ static bool generate_unmatched_resp(struct ib_mad_private *recv, | |||
| 1854 | response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP; | 1854 | response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP; |
| 1855 | response->mad.mad.mad_hdr.status = | 1855 | response->mad.mad.mad_hdr.status = |
| 1856 | cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); | 1856 | cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); |
| 1857 | if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) | ||
| 1858 | response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION; | ||
| 1857 | 1859 | ||
| 1858 | return true; | 1860 | return true; |
| 1859 | } else { | 1861 | } else { |
| @@ -1869,6 +1871,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, | |||
| 1869 | struct ib_mad_list_head *mad_list; | 1871 | struct ib_mad_list_head *mad_list; |
| 1870 | struct ib_mad_agent_private *mad_agent; | 1872 | struct ib_mad_agent_private *mad_agent; |
| 1871 | int port_num; | 1873 | int port_num; |
| 1874 | int ret = IB_MAD_RESULT_SUCCESS; | ||
| 1872 | 1875 | ||
| 1873 | mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; | 1876 | mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; |
| 1874 | qp_info = mad_list->mad_queue->qp_info; | 1877 | qp_info = mad_list->mad_queue->qp_info; |
| @@ -1952,8 +1955,6 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, | |||
| 1952 | local: | 1955 | local: |
| 1953 | /* Give driver "right of first refusal" on incoming MAD */ | 1956 | /* Give driver "right of first refusal" on incoming MAD */ |
| 1954 | if (port_priv->device->process_mad) { | 1957 | if (port_priv->device->process_mad) { |
| 1955 | int ret; | ||
| 1956 | |||
| 1957 | ret = port_priv->device->process_mad(port_priv->device, 0, | 1958 | ret = port_priv->device->process_mad(port_priv->device, 0, |
| 1958 | port_priv->port_num, | 1959 | port_priv->port_num, |
| 1959 | wc, &recv->grh, | 1960 | wc, &recv->grh, |
| @@ -1981,7 +1982,8 @@ local: | |||
| 1981 | * or via recv_handler in ib_mad_complete_recv() | 1982 | * or via recv_handler in ib_mad_complete_recv() |
| 1982 | */ | 1983 | */ |
| 1983 | recv = NULL; | 1984 | recv = NULL; |
| 1984 | } else if (generate_unmatched_resp(recv, response)) { | 1985 | } else if ((ret & IB_MAD_RESULT_SUCCESS) && |
| 1986 | generate_unmatched_resp(recv, response)) { | ||
| 1985 | agent_send_response(&response->mad.mad, &recv->grh, wc, | 1987 | agent_send_response(&response->mad.mad, &recv->grh, wc, |
| 1986 | port_priv->device, port_num, qp_info->qp->qp_num); | 1988 | port_priv->device, port_num, qp_info->qp->qp_num); |
| 1987 | } | 1989 | } |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 669673e81439..b948b6dd5d55 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
| @@ -247,7 +247,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port, | |||
| 247 | err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, | 247 | err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, |
| 248 | NULL, NULL, in_mad, out_mad); | 248 | NULL, NULL, in_mad, out_mad); |
| 249 | if (err) | 249 | if (err) |
| 250 | return err; | 250 | goto out; |
| 251 | 251 | ||
| 252 | /* Checking LinkSpeedActive for FDR-10 */ | 252 | /* Checking LinkSpeedActive for FDR-10 */ |
| 253 | if (out_mad->data[15] & 0x1) | 253 | if (out_mad->data[15] & 0x1) |
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 8081a0a5d602..a4b14a41cbf4 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c | |||
| @@ -274,7 +274,8 @@ static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse) | |||
| 274 | static unsigned char param = 0xc8; | 274 | static unsigned char param = 0xc8; |
| 275 | struct synaptics_data *priv = psmouse->private; | 275 | struct synaptics_data *priv = psmouse->private; |
| 276 | 276 | ||
| 277 | if (!SYN_CAP_ADV_GESTURE(priv->ext_cap_0c)) | 277 | if (!(SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) || |
| 278 | SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c))) | ||
| 278 | return 0; | 279 | return 0; |
| 279 | 280 | ||
| 280 | if (psmouse_sliced_command(psmouse, SYN_QUE_MODEL)) | 281 | if (psmouse_sliced_command(psmouse, SYN_QUE_MODEL)) |
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index b0ba52459ed7..68965e663248 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
| @@ -859,7 +859,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) | |||
| 859 | int ret; | 859 | int ret; |
| 860 | unsigned redundancy = 0; | 860 | unsigned redundancy = 0; |
| 861 | struct raid_dev *dev; | 861 | struct raid_dev *dev; |
| 862 | struct md_rdev *rdev, *freshest; | 862 | struct md_rdev *rdev, *tmp, *freshest; |
| 863 | struct mddev *mddev = &rs->md; | 863 | struct mddev *mddev = &rs->md; |
| 864 | 864 | ||
| 865 | switch (rs->raid_type->level) { | 865 | switch (rs->raid_type->level) { |
| @@ -877,7 +877,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) | |||
| 877 | } | 877 | } |
| 878 | 878 | ||
| 879 | freshest = NULL; | 879 | freshest = NULL; |
| 880 | rdev_for_each(rdev, mddev) { | 880 | rdev_for_each_safe(rdev, tmp, mddev) { |
| 881 | if (!rdev->meta_bdev) | 881 | if (!rdev->meta_bdev) |
| 882 | continue; | 882 | continue; |
| 883 | 883 | ||
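dm-raid switches to rdev_for_each_safe() because the loop body can drop the current rdev from the mddev list; the _safe iterator caches the next element before the body runs, the same contract as list_for_each_entry_safe(). A generic illustration of why the plain iterator would walk freed memory (types invented):

	#include <linux/list.h>
	#include <linux/slab.h>

	struct demo_item {
		struct list_head link;
	};

	static void demo_prune_all(struct list_head *head)
	{
		struct demo_item *it, *tmp;

		/* tmp holds the lookahead, so deleting and freeing 'it' is safe */
		list_for_each_entry_safe(it, tmp, head, link) {
			list_del(&it->link);
			kfree(it);
		}
	}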
diff --git a/drivers/md/md.c b/drivers/md/md.c index b572e1e386ce..477eb2e180c0 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -7560,14 +7560,14 @@ void md_check_recovery(struct mddev *mddev) | |||
| 7560 | * any transients in the value of "sync_action". | 7560 | * any transients in the value of "sync_action". |
| 7561 | */ | 7561 | */ |
| 7562 | set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); | 7562 | set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); |
| 7563 | clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | ||
| 7564 | /* Clear some bits that don't mean anything, but | 7563 | /* Clear some bits that don't mean anything, but |
| 7565 | * might be left set | 7564 | * might be left set |
| 7566 | */ | 7565 | */ |
| 7567 | clear_bit(MD_RECOVERY_INTR, &mddev->recovery); | 7566 | clear_bit(MD_RECOVERY_INTR, &mddev->recovery); |
| 7568 | clear_bit(MD_RECOVERY_DONE, &mddev->recovery); | 7567 | clear_bit(MD_RECOVERY_DONE, &mddev->recovery); |
| 7569 | 7568 | ||
| 7570 | if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) | 7569 | if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || |
| 7570 | test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) | ||
| 7571 | goto unlock; | 7571 | goto unlock; |
| 7572 | /* no recovery is running. | 7572 | /* no recovery is running. |
| 7573 | * remove any failed drives, then | 7573 | * remove any failed drives, then |
| @@ -8140,7 +8140,8 @@ static int md_notify_reboot(struct notifier_block *this, | |||
| 8140 | 8140 | ||
| 8141 | for_each_mddev(mddev, tmp) { | 8141 | for_each_mddev(mddev, tmp) { |
| 8142 | if (mddev_trylock(mddev)) { | 8142 | if (mddev_trylock(mddev)) { |
| 8143 | __md_stop_writes(mddev); | 8143 | if (mddev->pers) |
| 8144 | __md_stop_writes(mddev); | ||
| 8144 | mddev->safemode = 2; | 8145 | mddev->safemode = 2; |
| 8145 | mddev_unlock(mddev); | 8146 | mddev_unlock(mddev); |
| 8146 | } | 8147 | } |
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index b0f2ef988188..e3f5af96ab87 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c | |||
| @@ -363,6 +363,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host) | |||
| 363 | goto out; | 363 | goto out; |
| 364 | 364 | ||
| 365 | dmaengine_submit(desc); | 365 | dmaengine_submit(desc); |
| 366 | dma_async_issue_pending(host->dmach); | ||
| 366 | return; | 367 | return; |
| 367 | 368 | ||
| 368 | out: | 369 | out: |
| @@ -403,6 +404,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host) | |||
| 403 | goto out; | 404 | goto out; |
| 404 | 405 | ||
| 405 | dmaengine_submit(desc); | 406 | dmaengine_submit(desc); |
| 407 | dma_async_issue_pending(host->dmach); | ||
| 406 | return; | 408 | return; |
| 407 | 409 | ||
| 408 | out: | 410 | out: |
| @@ -531,6 +533,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) | |||
| 531 | goto out; | 533 | goto out; |
| 532 | 534 | ||
| 533 | dmaengine_submit(desc); | 535 | dmaengine_submit(desc); |
| 536 | dma_async_issue_pending(host->dmach); | ||
| 534 | return; | 537 | return; |
| 535 | out: | 538 | out: |
| 536 | dev_warn(mmc_dev(host->mmc), | 539 | dev_warn(mmc_dev(host->mmc), |
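The mxs-mmc hunks (and the gpmi-nand one below) add dma_async_issue_pending() after dmaengine_submit(). In the dmaengine slave API, submit only places the descriptor on the channel's pending queue; the transfer is not guaranteed to start until issue_pending is called, so drivers that skip it may only work on engines that happen to start eagerly. The canonical sequence, sketched with the channel and scatterlist assumed to be prepared elsewhere:

	#include <linux/dmaengine.h>
	#include <linux/errno.h>

	static int demo_start_dma(struct dma_chan *chan, struct scatterlist *sgl,
				  unsigned int nents)
	{
		struct dma_async_tx_descriptor *desc;

		desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc)
			return -ENOMEM;

		dmaengine_submit(desc);		/* queue the descriptor */
		dma_async_issue_pending(chan);	/* actually kick the engine */
		return 0;
	}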
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c index 75b1dde16358..9ec51cec2e14 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c | |||
| @@ -266,6 +266,7 @@ int start_dma_without_bch_irq(struct gpmi_nand_data *this, | |||
| 266 | desc->callback = dma_irq_callback; | 266 | desc->callback = dma_irq_callback; |
| 267 | desc->callback_param = this; | 267 | desc->callback_param = this; |
| 268 | dmaengine_submit(desc); | 268 | dmaengine_submit(desc); |
| 269 | dma_async_issue_pending(get_dma_chan(this)); | ||
| 269 | 270 | ||
| 270 | /* Wait for the interrupt from the DMA block. */ | 271 | /* Wait for the interrupt from the DMA block. */ |
| 271 | err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000)); | 272 | err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000)); |
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c index 25197b698dd6..b8b4c7ba884f 100644 --- a/drivers/net/arcnet/arc-rimi.c +++ b/drivers/net/arcnet/arc-rimi.c | |||
| @@ -89,16 +89,16 @@ static int __init arcrimi_probe(struct net_device *dev) | |||
| 89 | BUGLVL(D_NORMAL) printk(VERSION); | 89 | BUGLVL(D_NORMAL) printk(VERSION); |
| 90 | BUGLVL(D_NORMAL) printk("E-mail me if you actually test the RIM I driver, please!\n"); | 90 | BUGLVL(D_NORMAL) printk("E-mail me if you actually test the RIM I driver, please!\n"); |
| 91 | 91 | ||
| 92 | BUGMSG(D_NORMAL, "Given: node %02Xh, shmem %lXh, irq %d\n", | 92 | BUGLVL(D_NORMAL) printk("Given: node %02Xh, shmem %lXh, irq %d\n", |
| 93 | dev->dev_addr[0], dev->mem_start, dev->irq); | 93 | dev->dev_addr[0], dev->mem_start, dev->irq); |
| 94 | 94 | ||
| 95 | if (dev->mem_start <= 0 || dev->irq <= 0) { | 95 | if (dev->mem_start <= 0 || dev->irq <= 0) { |
| 96 | BUGMSG(D_NORMAL, "No autoprobe for RIM I; you " | 96 | BUGLVL(D_NORMAL) printk("No autoprobe for RIM I; you " |
| 97 | "must specify the shmem and irq!\n"); | 97 | "must specify the shmem and irq!\n"); |
| 98 | return -ENODEV; | 98 | return -ENODEV; |
| 99 | } | 99 | } |
| 100 | if (dev->dev_addr[0] == 0) { | 100 | if (dev->dev_addr[0] == 0) { |
| 101 | BUGMSG(D_NORMAL, "You need to specify your card's station " | 101 | BUGLVL(D_NORMAL) printk("You need to specify your card's station " |
| 102 | "ID!\n"); | 102 | "ID!\n"); |
| 103 | return -ENODEV; | 103 | return -ENODEV; |
| 104 | } | 104 | } |
| @@ -109,7 +109,7 @@ static int __init arcrimi_probe(struct net_device *dev) | |||
| 109 | * will be taken. | 109 | * will be taken. |
| 110 | */ | 110 | */ |
| 111 | if (!request_mem_region(dev->mem_start, MIRROR_SIZE, "arcnet (90xx)")) { | 111 | if (!request_mem_region(dev->mem_start, MIRROR_SIZE, "arcnet (90xx)")) { |
| 112 | BUGMSG(D_NORMAL, "Card memory already allocated\n"); | 112 | BUGLVL(D_NORMAL) printk("Card memory already allocated\n"); |
| 113 | return -ENODEV; | 113 | return -ENODEV; |
| 114 | } | 114 | } |
| 115 | return arcrimi_found(dev); | 115 | return arcrimi_found(dev); |
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c index 9a66e2a910ae..9c1c8cd5223f 100644 --- a/drivers/net/caif/caif_hsi.c +++ b/drivers/net/caif/caif_hsi.c | |||
| @@ -744,14 +744,14 @@ static void cfhsi_wake_up(struct work_struct *work) | |||
| 744 | size_t fifo_occupancy = 0; | 744 | size_t fifo_occupancy = 0; |
| 745 | 745 | ||
| 746 | /* Wakeup timeout */ | 746 | /* Wakeup timeout */ |
| 747 | dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", | 747 | dev_dbg(&cfhsi->ndev->dev, "%s: Timeout.\n", |
| 748 | __func__); | 748 | __func__); |
| 749 | 749 | ||
| 750 | /* Check FIFO to check if modem has sent something. */ | 750 | /* Check FIFO to check if modem has sent something. */ |
| 751 | WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev, | 751 | WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev, |
| 752 | &fifo_occupancy)); | 752 | &fifo_occupancy)); |
| 753 | 753 | ||
| 754 | dev_err(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n", | 754 | dev_dbg(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n", |
| 755 | __func__, (unsigned) fifo_occupancy); | 755 | __func__, (unsigned) fifo_occupancy); |
| 756 | 756 | ||
| 757 | /* Check if we misssed the interrupt. */ | 757 | /* Check if we misssed the interrupt. */ |
| @@ -1210,7 +1210,7 @@ int cfhsi_probe(struct platform_device *pdev) | |||
| 1210 | 1210 | ||
| 1211 | static void cfhsi_shutdown(struct cfhsi *cfhsi) | 1211 | static void cfhsi_shutdown(struct cfhsi *cfhsi) |
| 1212 | { | 1212 | { |
| 1213 | u8 *tx_buf, *rx_buf; | 1213 | u8 *tx_buf, *rx_buf, *flip_buf; |
| 1214 | 1214 | ||
| 1215 | /* Stop TXing */ | 1215 | /* Stop TXing */ |
| 1216 | netif_tx_stop_all_queues(cfhsi->ndev); | 1216 | netif_tx_stop_all_queues(cfhsi->ndev); |
| @@ -1234,7 +1234,7 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi) | |||
| 1234 | /* Store bufferes: will be freed later. */ | 1234 | /* Store bufferes: will be freed later. */ |
| 1235 | tx_buf = cfhsi->tx_buf; | 1235 | tx_buf = cfhsi->tx_buf; |
| 1236 | rx_buf = cfhsi->rx_buf; | 1236 | rx_buf = cfhsi->rx_buf; |
| 1237 | | 1237 | flip_buf = cfhsi->rx_flip_buf; | |
| 1238 | /* Flush transmit queues. */ | 1238 | /* Flush transmit queues. */ |
| 1239 | cfhsi_abort_tx(cfhsi); | 1239 | cfhsi_abort_tx(cfhsi); |
| 1240 | 1240 | ||
| @@ -1247,6 +1247,7 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi) | |||
| 1247 | /* Free buffers. */ | 1247 | /* Free buffers. */ |
| 1248 | kfree(tx_buf); | 1248 | kfree(tx_buf); |
| 1249 | kfree(rx_buf); | 1249 | kfree(rx_buf); |
| 1250 | kfree(flip_buf); | ||
| 1250 | } | 1251 | } |
| 1251 | 1252 | ||
| 1252 | int cfhsi_remove(struct platform_device *pdev) | 1253 | int cfhsi_remove(struct platform_device *pdev) |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 5234586dff15..629c4ba5d49d 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c | |||
| @@ -875,6 +875,7 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev) | |||
| 875 | PCAN_USBPRO_INFO_FW, | 875 | PCAN_USBPRO_INFO_FW, |
| 876 | &fi, sizeof(fi)); | 876 | &fi, sizeof(fi)); |
| 877 | if (err) { | 877 | if (err) { |
| 878 | kfree(usb_if); | ||
| 878 | dev_err(dev->netdev->dev.parent, | 879 | dev_err(dev->netdev->dev.parent, |
| 879 | "unable to read %s firmware info (err %d)\n", | 880 | "unable to read %s firmware info (err %d)\n", |
| 880 | pcan_usb_pro.name, err); | 881 | pcan_usb_pro.name, err); |
| @@ -885,6 +886,7 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev) | |||
| 885 | PCAN_USBPRO_INFO_BL, | 886 | PCAN_USBPRO_INFO_BL, |
| 886 | &bi, sizeof(bi)); | 887 | &bi, sizeof(bi)); |
| 887 | if (err) { | 888 | if (err) { |
| 889 | kfree(usb_if); | ||
| 888 | dev_err(dev->netdev->dev.parent, | 890 | dev_err(dev->netdev->dev.parent, |
| 889 | "unable to read %s bootloader info (err %d)\n", | 891 | "unable to read %s bootloader info (err %d)\n", |
| 890 | pcan_usb_pro.name, err); | 892 | pcan_usb_pro.name, err); |
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index d5c6d92f1ee7..442d91a2747b 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c | |||
| @@ -107,14 +107,14 @@ static int dummy_dev_init(struct net_device *dev) | |||
| 107 | return 0; | 107 | return 0; |
| 108 | } | 108 | } |
| 109 | 109 | ||
| 110 | static void dummy_dev_free(struct net_device *dev) | 110 | static void dummy_dev_uninit(struct net_device *dev) |
| 111 | { | 111 | { |
| 112 | free_percpu(dev->dstats); | 112 | free_percpu(dev->dstats); |
| 113 | free_netdev(dev); | ||
| 114 | } | 113 | } |
| 115 | 114 | ||
| 116 | static const struct net_device_ops dummy_netdev_ops = { | 115 | static const struct net_device_ops dummy_netdev_ops = { |
| 117 | .ndo_init = dummy_dev_init, | 116 | .ndo_init = dummy_dev_init, |
| 117 | .ndo_uninit = dummy_dev_uninit, | ||
| 118 | .ndo_start_xmit = dummy_xmit, | 118 | .ndo_start_xmit = dummy_xmit, |
| 119 | .ndo_validate_addr = eth_validate_addr, | 119 | .ndo_validate_addr = eth_validate_addr, |
| 120 | .ndo_set_rx_mode = set_multicast_list, | 120 | .ndo_set_rx_mode = set_multicast_list, |
| @@ -128,7 +128,7 @@ static void dummy_setup(struct net_device *dev) | |||
| 128 | 128 | ||
| 129 | /* Initialize the device structure. */ | 129 | /* Initialize the device structure. */ |
| 130 | dev->netdev_ops = &dummy_netdev_ops; | 130 | dev->netdev_ops = &dummy_netdev_ops; |
| 131 | dev->destructor = dummy_dev_free; | 131 | dev->destructor = free_netdev; |
| 132 | 132 | ||
| 133 | /* Fill in device structure with ethernet-generic values. */ | 133 | /* Fill in device structure with ethernet-generic values. */ |
| 134 | dev->tx_queue_len = 0; | 134 | dev->tx_queue_len = 0; |
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 40ac41436549..c926857e8205 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c | |||
| @@ -2476,7 +2476,7 @@ static irqreturn_t atl1_intr(int irq, void *data) | |||
| 2476 | "pcie phy link down %x\n", status); | 2476 | "pcie phy link down %x\n", status); |
| 2477 | if (netif_running(adapter->netdev)) { /* reset MAC */ | 2477 | if (netif_running(adapter->netdev)) { /* reset MAC */ |
| 2478 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); | 2478 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); |
| 2479 | schedule_work(&adapter->pcie_dma_to_rst_task); | 2479 | schedule_work(&adapter->reset_dev_task); |
| 2480 | return IRQ_HANDLED; | 2480 | return IRQ_HANDLED; |
| 2481 | } | 2481 | } |
| 2482 | } | 2482 | } |
| @@ -2488,7 +2488,7 @@ static irqreturn_t atl1_intr(int irq, void *data) | |||
| 2488 | "pcie DMA r/w error (status = 0x%x)\n", | 2488 | "pcie DMA r/w error (status = 0x%x)\n", |
| 2489 | status); | 2489 | status); |
| 2490 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); | 2490 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); |
| 2491 | schedule_work(&adapter->pcie_dma_to_rst_task); | 2491 | schedule_work(&adapter->reset_dev_task); |
| 2492 | return IRQ_HANDLED; | 2492 | return IRQ_HANDLED; |
| 2493 | } | 2493 | } |
| 2494 | 2494 | ||
| @@ -2633,10 +2633,10 @@ static void atl1_down(struct atl1_adapter *adapter) | |||
| 2633 | atl1_clean_rx_ring(adapter); | 2633 | atl1_clean_rx_ring(adapter); |
| 2634 | } | 2634 | } |
| 2635 | 2635 | ||
| 2636 | static void atl1_tx_timeout_task(struct work_struct *work) | 2636 | static void atl1_reset_dev_task(struct work_struct *work) |
| 2637 | { | 2637 | { |
| 2638 | struct atl1_adapter *adapter = | 2638 | struct atl1_adapter *adapter = |
| 2639 | container_of(work, struct atl1_adapter, tx_timeout_task); | 2639 | container_of(work, struct atl1_adapter, reset_dev_task); |
| 2640 | struct net_device *netdev = adapter->netdev; | 2640 | struct net_device *netdev = adapter->netdev; |
| 2641 | 2641 | ||
| 2642 | netif_device_detach(netdev); | 2642 | netif_device_detach(netdev); |
| @@ -3038,12 +3038,10 @@ static int __devinit atl1_probe(struct pci_dev *pdev, | |||
| 3038 | (unsigned long)adapter); | 3038 | (unsigned long)adapter); |
| 3039 | adapter->phy_timer_pending = false; | 3039 | adapter->phy_timer_pending = false; |
| 3040 | 3040 | ||
| 3041 | INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task); | 3041 | INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task); |
| 3042 | 3042 | ||
| 3043 | INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task); | 3043 | INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task); |
| 3044 | 3044 | ||
| 3045 | INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task); | ||
| 3046 | |||
| 3047 | err = register_netdev(netdev); | 3045 | err = register_netdev(netdev); |
| 3048 | if (err) | 3046 | if (err) |
| 3049 | goto err_common; | 3047 | goto err_common; |
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h index 109d6da8be97..e04bf4d71e46 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.h +++ b/drivers/net/ethernet/atheros/atlx/atl1.h | |||
| @@ -758,9 +758,8 @@ struct atl1_adapter { | |||
| 758 | u16 link_speed; | 758 | u16 link_speed; |
| 759 | u16 link_duplex; | 759 | u16 link_duplex; |
| 760 | spinlock_t lock; | 760 | spinlock_t lock; |
| 761 | struct work_struct tx_timeout_task; | 761 | struct work_struct reset_dev_task; |
| 762 | struct work_struct link_chg_task; | 762 | struct work_struct link_chg_task; |
| 763 | struct work_struct pcie_dma_to_rst_task; | ||
| 764 | 763 | ||
| 765 | struct timer_list phy_config_timer; | 764 | struct timer_list phy_config_timer; |
| 766 | bool phy_timer_pending; | 765 | bool phy_timer_pending; |
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c index 3cd8837236dc..c9e9dc57986c 100644 --- a/drivers/net/ethernet/atheros/atlx/atlx.c +++ b/drivers/net/ethernet/atheros/atlx/atlx.c | |||
| @@ -194,7 +194,7 @@ static void atlx_tx_timeout(struct net_device *netdev) | |||
| 194 | { | 194 | { |
| 195 | struct atlx_adapter *adapter = netdev_priv(netdev); | 195 | struct atlx_adapter *adapter = netdev_priv(netdev); |
| 196 | /* Do the reset outside of interrupt context */ | 196 | /* Do the reset outside of interrupt context */ |
| 197 | schedule_work(&adapter->tx_timeout_task); | 197 | schedule_work(&adapter->reset_dev_task); |
| 198 | } | 198 | } |
| 199 | 199 | ||
| 200 | /* | 200 | /* |
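Note: the atl1/atlx hunks above fold the old tx_timeout_task and pcie_dma_to_rst_task into a single reset_dev_task, and the handler recovers its adapter with container_of() on the embedded work item. A minimal userspace sketch of that pattern (container_of defined locally; the struct and field names are illustrative, not the driver's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {                        /* stand-in for struct work_struct */
	void (*func)(struct work_item *);
};

struct adapter {
	int irq_masked;
	struct work_item reset_dev_task;  /* embedded, like the driver's */
};

static void reset_dev_task(struct work_item *w)
{
	/* recover the enclosing adapter from the embedded member */
	struct adapter *ad = container_of(w, struct adapter, reset_dev_task);
	printf("resetting adapter, irq_masked=%d\n", ad->irq_masked);
}

int main(void)
{
	struct adapter ad = { .irq_masked = 1 };
	ad.reset_dev_task.func = reset_dev_task;
	ad.reset_dev_task.func(&ad.reset_dev_task);   /* "schedule_work" */
	return 0;
}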
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index ad95324dc042..64392ec410a3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | |||
| @@ -942,6 +942,12 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params, | |||
| 942 | const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 : | 942 | const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 : |
| 943 | DCBX_E3B0_MAX_NUM_COS_PORT0; | 943 | DCBX_E3B0_MAX_NUM_COS_PORT0; |
| 944 | 944 | ||
| 945 | if (pri >= max_num_of_cos) { | ||
| 946 | DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " | ||
| 947 | "parameter Illegal strict priority\n"); | ||
| 948 | return -EINVAL; | ||
| 949 | } | ||
| 950 | |||
| 945 | if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) { | 951 | if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) { |
| 946 | DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " | 952 | DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " |
| 947 | "parameter There can't be two COS's with " | 953 | "parameter There can't be two COS's with " |
| @@ -949,12 +955,6 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params, | |||
| 949 | return -EINVAL; | 955 | return -EINVAL; |
| 950 | } | 956 | } |
| 951 | 957 | ||
| 952 | if (pri > max_num_of_cos) { | ||
| 953 | DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " | ||
| 954 | "parameter Illegal strict priority\n"); | ||
| 955 | return -EINVAL; | ||
| 956 | } | ||
| 957 | |||
| 958 | sp_pri_to_cos[pri] = cos_entry; | 958 | sp_pri_to_cos[pri] = cos_entry; |
| 959 | return 0; | 959 | return 0; |
| 960 | 960 | ||
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 64c76443a7aa..b461c24945e3 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c | |||
| @@ -1310,10 +1310,6 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) | |||
| 1310 | 1310 | ||
| 1311 | if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) | 1311 | if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) |
| 1312 | oem_reg |= HV_OEM_BITS_LPLU; | 1312 | oem_reg |= HV_OEM_BITS_LPLU; |
| 1313 | |||
| 1314 | /* Set Restart auto-neg to activate the bits */ | ||
| 1315 | if (!hw->phy.ops.check_reset_block(hw)) | ||
| 1316 | oem_reg |= HV_OEM_BITS_RESTART_AN; | ||
| 1317 | } else { | 1313 | } else { |
| 1318 | if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | | 1314 | if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | |
| 1319 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE)) | 1315 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE)) |
| @@ -1324,6 +1320,11 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) | |||
| 1324 | oem_reg |= HV_OEM_BITS_LPLU; | 1320 | oem_reg |= HV_OEM_BITS_LPLU; |
| 1325 | } | 1321 | } |
| 1326 | 1322 | ||
| 1323 | /* Set Restart auto-neg to activate the bits */ | ||
| 1324 | if ((d0_state || (hw->mac.type != e1000_pchlan)) && | ||
| 1325 | !hw->phy.ops.check_reset_block(hw)) | ||
| 1326 | oem_reg |= HV_OEM_BITS_RESTART_AN; | ||
| 1327 | |||
| 1327 | ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); | 1328 | ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); |
| 1328 | 1329 | ||
| 1329 | release: | 1330 | release: |
| @@ -3682,7 +3683,11 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) | |||
| 3682 | 3683 | ||
| 3683 | if (hw->mac.type >= e1000_pchlan) { | 3684 | if (hw->mac.type >= e1000_pchlan) { |
| 3684 | e1000_oem_bits_config_ich8lan(hw, false); | 3685 | e1000_oem_bits_config_ich8lan(hw, false); |
| 3685 | e1000_phy_hw_reset_ich8lan(hw); | 3686 | |
| 3687 | /* Reset PHY to activate OEM bits on 82577/8 */ | ||
| 3688 | if (hw->mac.type == e1000_pchlan) | ||
| 3689 | e1000e_phy_hw_reset_generic(hw); | ||
| 3690 | |||
| 3686 | ret_val = hw->phy.ops.acquire(hw); | 3691 | ret_val = hw->phy.ops.acquire(hw); |
| 3687 | if (ret_val) | 3692 | if (ret_val) |
| 3688 | return; | 3693 | return; |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 027d7a75be39..ed1b47dc0834 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | |||
| @@ -622,6 +622,16 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx, | |||
| 622 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) | 622 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) |
| 623 | set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); | 623 | set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); |
| 624 | 624 | ||
| 625 | #ifdef IXGBE_FCOE | ||
| 626 | if (adapter->netdev->features & NETIF_F_FCOE_MTU) { | ||
| 627 | struct ixgbe_ring_feature *f; | ||
| 628 | f = &adapter->ring_feature[RING_F_FCOE]; | ||
| 629 | if ((rxr_idx >= f->mask) && | ||
| 630 | (rxr_idx < f->mask + f->indices)) | ||
| 631 | set_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state); | ||
| 632 | } | ||
| 633 | |||
| 634 | #endif /* IXGBE_FCOE */ | ||
| 625 | /* apply Rx specific ring traits */ | 635 | /* apply Rx specific ring traits */ |
| 626 | ring->count = adapter->rx_ring_count; | 636 | ring->count = adapter->rx_ring_count; |
| 627 | ring->queue_index = rxr_idx; | 637 | ring->queue_index = rxr_idx; |
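Note: in the ixgbe_lib.c hunk the FCoE buffer-size flag is set where the Rx ring is allocated, based on whether the ring index falls inside the FCoE feature's [mask, mask + indices) window; the ixgbe_main.c hunk below removes the same test from the old location. A stand-alone sketch of that range test, with invented numbers:

#include <stdbool.h>
#include <stdio.h>

struct ring_feature {
	unsigned int mask;      /* first ring index owned by the feature */
	unsigned int indices;   /* number of rings it owns */
};

static bool ring_is_fcoe(unsigned int rxr_idx, const struct ring_feature *f)
{
	return rxr_idx >= f->mask && rxr_idx < f->mask + f->indices;
}

int main(void)
{
	struct ring_feature fcoe = { .mask = 8, .indices = 4 };   /* made up */

	for (unsigned int i = 6; i < 14; i++)
		printf("ring %u: %s buffer size\n",
		       i, ring_is_fcoe(i, &fcoe) ? "FCoE" : "normal");
	return 0;
}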
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 3e26b1f9ac75..a7f3cd872caf 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
| @@ -3154,14 +3154,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) | |||
| 3154 | set_ring_rsc_enabled(rx_ring); | 3154 | set_ring_rsc_enabled(rx_ring); |
| 3155 | else | 3155 | else |
| 3156 | clear_ring_rsc_enabled(rx_ring); | 3156 | clear_ring_rsc_enabled(rx_ring); |
| 3157 | #ifdef IXGBE_FCOE | ||
| 3158 | if (netdev->features & NETIF_F_FCOE_MTU) { | ||
| 3159 | struct ixgbe_ring_feature *f; | ||
| 3160 | f = &adapter->ring_feature[RING_F_FCOE]; | ||
| 3161 | if ((i >= f->mask) && (i < f->mask + f->indices)) | ||
| 3162 | set_bit(__IXGBE_RX_FCOE_BUFSZ, &rx_ring->state); | ||
| 3163 | } | ||
| 3164 | #endif /* IXGBE_FCOE */ | ||
| 3165 | } | 3157 | } |
| 3166 | } | 3158 | } |
| 3167 | 3159 | ||
| @@ -4836,7 +4828,9 @@ static int ixgbe_resume(struct pci_dev *pdev) | |||
| 4836 | 4828 | ||
| 4837 | pci_wake_from_d3(pdev, false); | 4829 | pci_wake_from_d3(pdev, false); |
| 4838 | 4830 | ||
| 4831 | rtnl_lock(); | ||
| 4839 | err = ixgbe_init_interrupt_scheme(adapter); | 4832 | err = ixgbe_init_interrupt_scheme(adapter); |
| 4833 | rtnl_unlock(); | ||
| 4840 | if (err) { | 4834 | if (err) { |
| 4841 | e_dev_err("Cannot initialize interrupts for device\n"); | 4835 | e_dev_err("Cannot initialize interrupts for device\n"); |
| 4842 | return err; | 4836 | return err; |
| @@ -4893,6 +4887,16 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) | |||
| 4893 | if (wufc) { | 4887 | if (wufc) { |
| 4894 | ixgbe_set_rx_mode(netdev); | 4888 | ixgbe_set_rx_mode(netdev); |
| 4895 | 4889 | ||
| 4890 | /* | ||
| 4891 | * enable the optics for both multi-speed fiber and | ||
| 4892 | * 82599 SFP+ fiber as we can WoL. | ||
| 4893 | */ | ||
| 4894 | if (hw->mac.ops.enable_tx_laser && | ||
| 4895 | (hw->phy.multispeed_fiber || | ||
| 4896 | (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber && | ||
| 4897 | hw->mac.type == ixgbe_mac_82599EB))) | ||
| 4898 | hw->mac.ops.enable_tx_laser(hw); | ||
| 4899 | |||
| 4896 | /* turn on all-multi mode if wake on multicast is enabled */ | 4900 | /* turn on all-multi mode if wake on multicast is enabled */ |
| 4897 | if (wufc & IXGBE_WUFC_MC) { | 4901 | if (wufc & IXGBE_WUFC_MC) { |
| 4898 | fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); | 4902 | fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); |
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index c722aa607d07..f8dda009d3c0 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c | |||
| @@ -889,16 +889,17 @@ static int ks8851_net_stop(struct net_device *dev) | |||
| 889 | netif_stop_queue(dev); | 889 | netif_stop_queue(dev); |
| 890 | 890 | ||
| 891 | mutex_lock(&ks->lock); | 891 | mutex_lock(&ks->lock); |
| 892 | /* turn off the IRQs and ack any outstanding */ | ||
| 893 | ks8851_wrreg16(ks, KS_IER, 0x0000); | ||
| 894 | ks8851_wrreg16(ks, KS_ISR, 0xffff); | ||
| 895 | mutex_unlock(&ks->lock); | ||
| 892 | 896 | ||
| 893 | /* stop any outstanding work */ | 897 | /* stop any outstanding work */ |
| 894 | flush_work(&ks->irq_work); | 898 | flush_work(&ks->irq_work); |
| 895 | flush_work(&ks->tx_work); | 899 | flush_work(&ks->tx_work); |
| 896 | flush_work(&ks->rxctrl_work); | 900 | flush_work(&ks->rxctrl_work); |
| 897 | 901 | ||
| 898 | /* turn off the IRQs and ack any outstanding */ | 902 | mutex_lock(&ks->lock); |
| 899 | ks8851_wrreg16(ks, KS_IER, 0x0000); | ||
| 900 | ks8851_wrreg16(ks, KS_ISR, 0xffff); | ||
| 901 | |||
| 902 | /* shutdown RX process */ | 903 | /* shutdown RX process */ |
| 903 | ks8851_wrreg16(ks, KS_RXCR1, 0x0000); | 904 | ks8851_wrreg16(ks, KS_RXCR1, 0x0000); |
| 904 | 905 | ||
| @@ -907,6 +908,7 @@ static int ks8851_net_stop(struct net_device *dev) | |||
| 907 | 908 | ||
| 908 | /* set powermode to soft power down to save power */ | 909 | /* set powermode to soft power down to save power */ |
| 909 | ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN); | 910 | ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN); |
| 911 | mutex_unlock(&ks->lock); | ||
| 910 | 912 | ||
| 911 | /* ensure any queued tx buffers are dumped */ | 913 | /* ensure any queued tx buffers are dumped */ |
| 912 | while (!skb_queue_empty(&ks->txq)) { | 914 | while (!skb_queue_empty(&ks->txq)) { |
| @@ -918,7 +920,6 @@ static int ks8851_net_stop(struct net_device *dev) | |||
| 918 | dev_kfree_skb(txb); | 920 | dev_kfree_skb(txb); |
| 919 | } | 921 | } |
| 920 | 922 | ||
| 921 | mutex_unlock(&ks->lock); | ||
| 922 | return 0; | 923 | return 0; |
| 923 | } | 924 | } |
| 924 | 925 | ||
| @@ -1418,6 +1419,7 @@ static int __devinit ks8851_probe(struct spi_device *spi) | |||
| 1418 | struct net_device *ndev; | 1419 | struct net_device *ndev; |
| 1419 | struct ks8851_net *ks; | 1420 | struct ks8851_net *ks; |
| 1420 | int ret; | 1421 | int ret; |
| 1422 | unsigned cider; | ||
| 1421 | 1423 | ||
| 1422 | ndev = alloc_etherdev(sizeof(struct ks8851_net)); | 1424 | ndev = alloc_etherdev(sizeof(struct ks8851_net)); |
| 1423 | if (!ndev) | 1425 | if (!ndev) |
| @@ -1484,8 +1486,8 @@ static int __devinit ks8851_probe(struct spi_device *spi) | |||
| 1484 | ks8851_soft_reset(ks, GRR_GSR); | 1486 | ks8851_soft_reset(ks, GRR_GSR); |
| 1485 | 1487 | ||
| 1486 | /* simple check for a valid chip being connected to the bus */ | 1488 | /* simple check for a valid chip being connected to the bus */ |
| 1487 | 1489 | cider = ks8851_rdreg16(ks, KS_CIDER); | |
| 1488 | if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) { | 1490 | if ((cider & ~CIDER_REV_MASK) != CIDER_ID) { |
| 1489 | dev_err(&spi->dev, "failed to read device ID\n"); | 1491 | dev_err(&spi->dev, "failed to read device ID\n"); |
| 1490 | ret = -ENODEV; | 1492 | ret = -ENODEV; |
| 1491 | goto err_id; | 1493 | goto err_id; |
| @@ -1516,15 +1518,14 @@ static int __devinit ks8851_probe(struct spi_device *spi) | |||
| 1516 | } | 1518 | } |
| 1517 | 1519 | ||
| 1518 | netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n", | 1520 | netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n", |
| 1519 | CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)), | 1521 | CIDER_REV_GET(cider), ndev->dev_addr, ndev->irq, |
| 1520 | ndev->dev_addr, ndev->irq, | ||
| 1521 | ks->rc_ccr & CCR_EEPROM ? "has" : "no"); | 1522 | ks->rc_ccr & CCR_EEPROM ? "has" : "no"); |
| 1522 | 1523 | ||
| 1523 | return 0; | 1524 | return 0; |
| 1524 | 1525 | ||
| 1525 | 1526 | ||
| 1526 | err_netdev: | 1527 | err_netdev: |
| 1527 | free_irq(ndev->irq, ndev); | 1528 | free_irq(ndev->irq, ks); |
| 1528 | 1529 | ||
| 1529 | err_id: | 1530 | err_id: |
| 1530 | err_irq: | 1531 | err_irq: |
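Note: the ks8851_net_stop() reordering masks and acks the chip interrupts while ks->lock is held, drops the lock before flushing the work items (which themselves take that lock), and only then retakes it for the RX/TX shutdown and power-down writes. A rough userspace analogue of that ordering, assuming pthreads in place of the kernel workqueue API:

/* build with: cc -std=c99 -pthread demo.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool irq_enabled = true;
static bool stop;

static void *irq_work(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		bool run = irq_enabled, done = stop;
		pthread_mutex_unlock(&lock);   /* never wait or exit with the lock held */
		if (done)
			return NULL;
		if (run)
			puts("worker: handling events");
		usleep(1000);
	}
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, irq_work, NULL);
	usleep(5000);

	pthread_mutex_lock(&lock);
	irq_enabled = false;               /* mask + ack while holding the lock */
	stop = true;
	pthread_mutex_unlock(&lock);       /* drop it before waiting on the worker */

	pthread_join(t, NULL);             /* analogue of flush_work() */

	pthread_mutex_lock(&lock);
	puts("main: shutting down RX/TX, entering power-down");
	pthread_mutex_unlock(&lock);
	return 0;
}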
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index b8104d9f4081..5ffde23ac8fb 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c | |||
| @@ -40,7 +40,7 @@ | |||
| 40 | #define DRV_NAME "ks8851_mll" | 40 | #define DRV_NAME "ks8851_mll" |
| 41 | 41 | ||
| 42 | static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; | 42 | static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; |
| 43 | #define MAX_RECV_FRAMES 32 | 43 | #define MAX_RECV_FRAMES 255 |
| 44 | #define MAX_BUF_SIZE 2048 | 44 | #define MAX_BUF_SIZE 2048 |
| 45 | #define TX_BUF_SIZE 2000 | 45 | #define TX_BUF_SIZE 2000 |
| 46 | #define RX_BUF_SIZE 2000 | 46 | #define RX_BUF_SIZE 2000 |
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index ef723b185d85..eaf9ff0262a9 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c | |||
| @@ -5675,7 +5675,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr) | |||
| 5675 | memcpy(hw->override_addr, mac->sa_data, ETH_ALEN); | 5675 | memcpy(hw->override_addr, mac->sa_data, ETH_ALEN); |
| 5676 | } | 5676 | } |
| 5677 | 5677 | ||
| 5678 | memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN); | 5678 | memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN); |
| 5679 | 5679 | ||
| 5680 | interrupt = hw_block_intr(hw); | 5680 | interrupt = hw_block_intr(hw); |
| 5681 | 5681 | ||
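Note: the ksz884x one-liner shrinks the copy length from MAX_ADDR_LEN (32 in the kernel headers) to ETH_ALEN (6), so only the six octets of the MAC address are copied into the six-byte field. A trivial sketch of copying exactly the address length:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
	unsigned char new_addr[ETH_ALEN] = { 0x00, 0x10, 0xa1, 0x86, 0x95, 0x11 };
	unsigned char dev_addr[ETH_ALEN];

	/* copy exactly the six octets of the address, nothing more */
	memcpy(dev_addr, new_addr, ETH_ALEN);

	for (int i = 0; i < ETH_ALEN; i++)
		printf("%02x%c", dev_addr[i], i == ETH_ALEN - 1 ? '\n' : ':');
	return 0;
}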
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index abc79076f867..b3287c0fe279 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c | |||
| @@ -958,6 +958,11 @@ static inline void cp_start_hw (struct cp_private *cp) | |||
| 958 | cpw8(Cmd, RxOn | TxOn); | 958 | cpw8(Cmd, RxOn | TxOn); |
| 959 | } | 959 | } |
| 960 | 960 | ||
| 961 | static void cp_enable_irq(struct cp_private *cp) | ||
| 962 | { | ||
| 963 | cpw16_f(IntrMask, cp_intr_mask); | ||
| 964 | } | ||
| 965 | |||
| 961 | static void cp_init_hw (struct cp_private *cp) | 966 | static void cp_init_hw (struct cp_private *cp) |
| 962 | { | 967 | { |
| 963 | struct net_device *dev = cp->dev; | 968 | struct net_device *dev = cp->dev; |
| @@ -997,8 +1002,6 @@ static void cp_init_hw (struct cp_private *cp) | |||
| 997 | 1002 | ||
| 998 | cpw16(MultiIntr, 0); | 1003 | cpw16(MultiIntr, 0); |
| 999 | 1004 | ||
| 1000 | cpw16_f(IntrMask, cp_intr_mask); | ||
| 1001 | |||
| 1002 | cpw8_f(Cfg9346, Cfg9346_Lock); | 1005 | cpw8_f(Cfg9346, Cfg9346_Lock); |
| 1003 | } | 1006 | } |
| 1004 | 1007 | ||
| @@ -1130,6 +1133,8 @@ static int cp_open (struct net_device *dev) | |||
| 1130 | if (rc) | 1133 | if (rc) |
| 1131 | goto err_out_hw; | 1134 | goto err_out_hw; |
| 1132 | 1135 | ||
| 1136 | cp_enable_irq(cp); | ||
| 1137 | |||
| 1133 | netif_carrier_off(dev); | 1138 | netif_carrier_off(dev); |
| 1134 | mii_check_media(&cp->mii_if, netif_msg_link(cp), true); | 1139 | mii_check_media(&cp->mii_if, netif_msg_link(cp), true); |
| 1135 | netif_start_queue(dev); | 1140 | netif_start_queue(dev); |
| @@ -2031,6 +2036,7 @@ static int cp_resume (struct pci_dev *pdev) | |||
| 2031 | /* FIXME: sh*t may happen if the Rx ring buffer is depleted */ | 2036 | /* FIXME: sh*t may happen if the Rx ring buffer is depleted */ |
| 2032 | cp_init_rings_index (cp); | 2037 | cp_init_rings_index (cp); |
| 2033 | cp_init_hw (cp); | 2038 | cp_init_hw (cp); |
| 2039 | cp_enable_irq(cp); | ||
| 2034 | netif_start_queue (dev); | 2040 | netif_start_queue (dev); |
| 2035 | 2041 | ||
| 2036 | spin_lock_irqsave (&cp->lock, flags); | 2042 | spin_lock_irqsave (&cp->lock, flags); |
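Note: the 8139cp change pulls interrupt unmasking out of cp_init_hw() into cp_enable_irq(), so cp_open() and cp_resume() enable interrupts only after the rings, handler and hardware state are ready. Roughly the same ordering in userspace terms, using a POSIX signal as a stand-in for the IRQ:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_signal;

static void handler(int sig)
{
	(void)sig;
	got_signal = 1;
}

int main(void)
{
	sigset_t mask;

	/* keep the "interrupt" masked while the device is being set up */
	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);
	sigprocmask(SIG_BLOCK, &mask, NULL);

	struct sigaction sa = { .sa_handler = handler };
	sigemptyset(&sa.sa_mask);
	sigaction(SIGINT, &sa, NULL);      /* handler in place first */

	puts("setup done (rings, MAC, filters)...");

	/* only now unmask it -- the analogue of cp_enable_irq() */
	sigprocmask(SIG_UNBLOCK, &mask, NULL);
	pause();                           /* press Ctrl-C to deliver it */
	printf("got_signal=%d\n", (int)got_signal);
	return 0;
}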
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 4a6971027076..cd3defb11ffb 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c | |||
| @@ -1166,10 +1166,8 @@ smsc911x_rx_counterrors(struct net_device *dev, unsigned int rxstat) | |||
| 1166 | 1166 | ||
| 1167 | /* Quickly dumps bad packets */ | 1167 | /* Quickly dumps bad packets */ |
| 1168 | static void | 1168 | static void |
| 1169 | smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes) | 1169 | smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktwords) |
| 1170 | { | 1170 | { |
| 1171 | unsigned int pktwords = (pktbytes + NET_IP_ALIGN + 3) >> 2; | ||
| 1172 | |||
| 1173 | if (likely(pktwords >= 4)) { | 1171 | if (likely(pktwords >= 4)) { |
| 1174 | unsigned int timeout = 500; | 1172 | unsigned int timeout = 500; |
| 1175 | unsigned int val; | 1173 | unsigned int val; |
| @@ -1233,7 +1231,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget) | |||
| 1233 | continue; | 1231 | continue; |
| 1234 | } | 1232 | } |
| 1235 | 1233 | ||
| 1236 | skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN); | 1234 | skb = netdev_alloc_skb(dev, pktwords << 2); |
| 1237 | if (unlikely(!skb)) { | 1235 | if (unlikely(!skb)) { |
| 1238 | SMSC_WARN(pdata, rx_err, | 1236 | SMSC_WARN(pdata, rx_err, |
| 1239 | "Unable to allocate skb for rx packet"); | 1237 | "Unable to allocate skb for rx packet"); |
| @@ -1243,14 +1241,12 @@ static int smsc911x_poll(struct napi_struct *napi, int budget) | |||
| 1243 | break; | 1241 | break; |
| 1244 | } | 1242 | } |
| 1245 | 1243 | ||
| 1246 | skb->data = skb->head; | 1244 | pdata->ops->rx_readfifo(pdata, |
| 1247 | skb_reset_tail_pointer(skb); | 1245 | (unsigned int *)skb->data, pktwords); |
| 1248 | 1246 | ||
| 1249 | /* Align IP on 16B boundary */ | 1247 | /* Align IP on 16B boundary */ |
| 1250 | skb_reserve(skb, NET_IP_ALIGN); | 1248 | skb_reserve(skb, NET_IP_ALIGN); |
| 1251 | skb_put(skb, pktlength - 4); | 1249 | skb_put(skb, pktlength - 4); |
| 1252 | pdata->ops->rx_readfifo(pdata, | ||
| 1253 | (unsigned int *)skb->head, pktwords); | ||
| 1254 | skb->protocol = eth_type_trans(skb, dev); | 1250 | skb->protocol = eth_type_trans(skb, dev); |
| 1255 | skb_checksum_none_assert(skb); | 1251 | skb_checksum_none_assert(skb); |
| 1256 | netif_receive_skb(skb); | 1252 | netif_receive_skb(skb); |
| @@ -1565,7 +1561,7 @@ static int smsc911x_open(struct net_device *dev) | |||
| 1565 | smsc911x_reg_write(pdata, FIFO_INT, temp); | 1561 | smsc911x_reg_write(pdata, FIFO_INT, temp); |
| 1566 | 1562 | ||
| 1567 | /* set RX Data offset to 2 bytes for alignment */ | 1563 | /* set RX Data offset to 2 bytes for alignment */ |
| 1568 | smsc911x_reg_write(pdata, RX_CFG, (2 << 8)); | 1564 | smsc911x_reg_write(pdata, RX_CFG, (NET_IP_ALIGN << 8)); |
| 1569 | 1565 | ||
| 1570 | /* enable NAPI polling before enabling RX interrupts */ | 1566 | /* enable NAPI polling before enabling RX interrupts */ |
| 1571 | napi_enable(&pdata->napi); | 1567 | napi_enable(&pdata->napi); |
| @@ -2382,7 +2378,6 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) | |||
| 2382 | SET_NETDEV_DEV(dev, &pdev->dev); | 2378 | SET_NETDEV_DEV(dev, &pdev->dev); |
| 2383 | 2379 | ||
| 2384 | pdata = netdev_priv(dev); | 2380 | pdata = netdev_priv(dev); |
| 2385 | |||
| 2386 | dev->irq = irq_res->start; | 2381 | dev->irq = irq_res->start; |
| 2387 | irq_flags = irq_res->flags & IRQF_TRIGGER_MASK; | 2382 | irq_flags = irq_res->flags & IRQF_TRIGGER_MASK; |
| 2388 | pdata->ioaddr = ioremap_nocache(res->start, res_size); | 2383 | pdata->ioaddr = ioremap_nocache(res->start, res_size); |
| @@ -2446,7 +2441,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) | |||
| 2446 | if (retval) { | 2441 | if (retval) { |
| 2447 | SMSC_WARN(pdata, probe, | 2442 | SMSC_WARN(pdata, probe, |
| 2448 | "Unable to claim requested irq: %d", dev->irq); | 2443 | "Unable to claim requested irq: %d", dev->irq); |
| 2449 | goto out_free_irq; | 2444 | goto out_disable_resources; |
| 2450 | } | 2445 | } |
| 2451 | 2446 | ||
| 2452 | retval = register_netdev(dev); | 2447 | retval = register_netdev(dev); |
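Note: the smsc911x RX rework passes the frame size around in 32-bit FIFO words, sizes the skb as pktwords << 2 bytes, and reads the FIFO into skb->data before skb_reserve() applies the 2-byte IP alignment. The word rounding used by the removed helper line in smsc911x_rx_fastforward(), shown as plain arithmetic with sample lengths:

#include <stdio.h>

#define NET_IP_ALIGN 2   /* bytes reserved so the IP header lands 4-byte aligned */

/* round the padded frame length up to whole 32-bit FIFO words */
static unsigned int pkt_words(unsigned int pktbytes)
{
	return (pktbytes + NET_IP_ALIGN + 3) >> 2;
}

int main(void)
{
	unsigned int lengths[] = { 60, 61, 62, 63, 64, 1514 };

	for (unsigned int i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
		unsigned int bytes = lengths[i];
		unsigned int words = pkt_words(bytes);

		/* words << 2 is what the skb must hold for a whole-word FIFO read */
		printf("len %4u -> %3u words -> alloc %4u bytes\n",
		       bytes, words, words << 2);
	}
	return 0;
}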
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 2757c7d6e633..e4e47088e26b 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c | |||
| @@ -181,6 +181,11 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data) | |||
| 181 | __davinci_mdio_reset(data); | 181 | __davinci_mdio_reset(data); |
| 182 | return -EAGAIN; | 182 | return -EAGAIN; |
| 183 | } | 183 | } |
| 184 | |||
| 185 | reg = __raw_readl(®s->user[0].access); | ||
| 186 | if ((reg & USERACCESS_GO) == 0) | ||
| 187 | return 0; | ||
| 188 | |||
| 184 | dev_err(data->dev, "timed out waiting for user access\n"); | 189 | dev_err(data->dev, "timed out waiting for user access\n"); |
| 185 | return -ETIMEDOUT; | 190 | return -ETIMEDOUT; |
| 186 | } | 191 | } |
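Note: the davinci_mdio hunk re-reads the USERACCESS_GO bit once more after the polling loop gives up, so a completion that races with the end of the loop (in the driver, one cut short by scheduling latency) is not misreported as -ETIMEDOUT. The same shape with a fake register, values chosen so only the re-check succeeds:

#include <stdbool.h>
#include <stdio.h>

/* fake "hardware" that needs a few polls before the GO bit clears */
static int polls_left = 5;

static bool go_bit_set(void)
{
	if (polls_left > 0)
		polls_left--;
	return polls_left > 0;
}

static int wait_for_access(int max_polls)
{
	for (int i = 0; i < max_polls; i++) {
		if (!go_bit_set())
			return 0;
	}
	/* the loop may have ended early, so look at the bit one last
	 * time before declaring a timeout */
	if (!go_bit_set())
		return 0;
	fprintf(stderr, "timed out waiting for user access\n");
	return -1;
}

int main(void)
{
	printf("result: %d\n", wait_for_access(4));   /* succeeds only via the re-check */
	return 0;
}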
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index cc83af083fd7..44b8d2bad8c3 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h | |||
| @@ -2,9 +2,7 @@ | |||
| 2 | * Definitions for Xilinx Axi Ethernet device driver. | 2 | * Definitions for Xilinx Axi Ethernet device driver. |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2009 Secret Lab Technologies, Ltd. | 4 | * Copyright (c) 2009 Secret Lab Technologies, Ltd. |
| 5 | * Copyright (c) 2010 Xilinx, Inc. All rights reserved. | 5 | * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. |
| 6 | * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch> | ||
| 7 | * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch> | ||
| 8 | */ | 6 | */ |
| 9 | 7 | ||
| 10 | #ifndef XILINX_AXIENET_H | 8 | #ifndef XILINX_AXIENET_H |
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 2fcbeba6814b..9c365e192a31 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c | |||
| @@ -4,9 +4,9 @@ | |||
| 4 | * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi | 4 | * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi |
| 5 | * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net> | 5 | * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net> |
| 6 | * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. | 6 | * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. |
| 7 | * Copyright (c) 2010 Xilinx, Inc. All rights reserved. | 7 | * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu> |
| 8 | * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch> | 8 | * Copyright (c) 2010 - 2011 PetaLogix |
| 9 | * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch> | 9 | * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. |
| 10 | * | 10 | * |
| 11 | * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6 | 11 | * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6 |
| 12 | * and Spartan6. | 12 | * and Spartan6. |
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c index d70b6e79f6c0..e90e1f46121e 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | |||
| @@ -2,9 +2,9 @@ | |||
| 2 | * MDIO bus driver for the Xilinx Axi Ethernet device | 2 | * MDIO bus driver for the Xilinx Axi Ethernet device |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2009 Secret Lab Technologies, Ltd. | 4 | * Copyright (c) 2009 Secret Lab Technologies, Ltd. |
| 5 | * Copyright (c) 2010 Xilinx, Inc. All rights reserved. | 5 | * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu> |
| 6 | * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch> | 6 | * Copyright (c) 2010 - 2011 PetaLogix |
| 7 | * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch> | 7 | * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. |
| 8 | */ | 8 | */ |
| 9 | 9 | ||
| 10 | #include <linux/of_address.h> | 10 | #include <linux/of_address.h> |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index dd294783b5c5..2d59138db7f3 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
| @@ -44,6 +44,7 @@ struct net_device_context { | |||
| 44 | /* point back to our device context */ | 44 | /* point back to our device context */ |
| 45 | struct hv_device *device_ctx; | 45 | struct hv_device *device_ctx; |
| 46 | struct delayed_work dwork; | 46 | struct delayed_work dwork; |
| 47 | struct work_struct work; | ||
| 47 | }; | 48 | }; |
| 48 | 49 | ||
| 49 | 50 | ||
| @@ -51,30 +52,22 @@ static int ring_size = 128; | |||
| 51 | module_param(ring_size, int, S_IRUGO); | 52 | module_param(ring_size, int, S_IRUGO); |
| 52 | MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); | 53 | MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); |
| 53 | 54 | ||
| 54 | struct set_multicast_work { | ||
| 55 | struct work_struct work; | ||
| 56 | struct net_device *net; | ||
| 57 | }; | ||
| 58 | |||
| 59 | static void do_set_multicast(struct work_struct *w) | 55 | static void do_set_multicast(struct work_struct *w) |
| 60 | { | 56 | { |
| 61 | struct set_multicast_work *swk = | 57 | struct net_device_context *ndevctx = |
| 62 | container_of(w, struct set_multicast_work, work); | 58 | container_of(w, struct net_device_context, work); |
| 63 | struct net_device *net = swk->net; | ||
| 64 | |||
| 65 | struct net_device_context *ndevctx = netdev_priv(net); | ||
| 66 | struct netvsc_device *nvdev; | 59 | struct netvsc_device *nvdev; |
| 67 | struct rndis_device *rdev; | 60 | struct rndis_device *rdev; |
| 68 | 61 | ||
| 69 | nvdev = hv_get_drvdata(ndevctx->device_ctx); | 62 | nvdev = hv_get_drvdata(ndevctx->device_ctx); |
| 70 | if (nvdev == NULL) | 63 | if (nvdev == NULL || nvdev->ndev == NULL) |
| 71 | goto out; | 64 | return; |
| 72 | 65 | ||
| 73 | rdev = nvdev->extension; | 66 | rdev = nvdev->extension; |
| 74 | if (rdev == NULL) | 67 | if (rdev == NULL) |
| 75 | goto out; | 68 | return; |
| 76 | 69 | ||
| 77 | if (net->flags & IFF_PROMISC) | 70 | if (nvdev->ndev->flags & IFF_PROMISC) |
| 78 | rndis_filter_set_packet_filter(rdev, | 71 | rndis_filter_set_packet_filter(rdev, |
| 79 | NDIS_PACKET_TYPE_PROMISCUOUS); | 72 | NDIS_PACKET_TYPE_PROMISCUOUS); |
| 80 | else | 73 | else |
| @@ -82,21 +75,13 @@ static void do_set_multicast(struct work_struct *w) | |||
| 82 | NDIS_PACKET_TYPE_BROADCAST | | 75 | NDIS_PACKET_TYPE_BROADCAST | |
| 83 | NDIS_PACKET_TYPE_ALL_MULTICAST | | 76 | NDIS_PACKET_TYPE_ALL_MULTICAST | |
| 84 | NDIS_PACKET_TYPE_DIRECTED); | 77 | NDIS_PACKET_TYPE_DIRECTED); |
| 85 | |||
| 86 | out: | ||
| 87 | kfree(w); | ||
| 88 | } | 78 | } |
| 89 | 79 | ||
| 90 | static void netvsc_set_multicast_list(struct net_device *net) | 80 | static void netvsc_set_multicast_list(struct net_device *net) |
| 91 | { | 81 | { |
| 92 | struct set_multicast_work *swk = | 82 | struct net_device_context *net_device_ctx = netdev_priv(net); |
| 93 | kmalloc(sizeof(struct set_multicast_work), GFP_ATOMIC); | ||
| 94 | if (swk == NULL) | ||
| 95 | return; | ||
| 96 | 83 | ||
| 97 | swk->net = net; | 84 | schedule_work(&net_device_ctx->work); |
| 98 | INIT_WORK(&swk->work, do_set_multicast); | ||
| 99 | schedule_work(&swk->work); | ||
| 100 | } | 85 | } |
| 101 | 86 | ||
| 102 | static int netvsc_open(struct net_device *net) | 87 | static int netvsc_open(struct net_device *net) |
| @@ -125,6 +110,8 @@ static int netvsc_close(struct net_device *net) | |||
| 125 | 110 | ||
| 126 | netif_tx_disable(net); | 111 | netif_tx_disable(net); |
| 127 | 112 | ||
| 113 | /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */ | ||
| 114 | cancel_work_sync(&net_device_ctx->work); | ||
| 128 | ret = rndis_filter_close(device_obj); | 115 | ret = rndis_filter_close(device_obj); |
| 129 | if (ret != 0) | 116 | if (ret != 0) |
| 130 | netdev_err(net, "unable to close device (ret %d).\n", ret); | 117 | netdev_err(net, "unable to close device (ret %d).\n", ret); |
| @@ -335,6 +322,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) | |||
| 335 | 322 | ||
| 336 | nvdev->start_remove = true; | 323 | nvdev->start_remove = true; |
| 337 | cancel_delayed_work_sync(&ndevctx->dwork); | 324 | cancel_delayed_work_sync(&ndevctx->dwork); |
| 325 | cancel_work_sync(&ndevctx->work); | ||
| 338 | netif_tx_disable(ndev); | 326 | netif_tx_disable(ndev); |
| 339 | rndis_filter_device_remove(hdev); | 327 | rndis_filter_device_remove(hdev); |
| 340 | 328 | ||
| @@ -403,6 +391,7 @@ static int netvsc_probe(struct hv_device *dev, | |||
| 403 | net_device_ctx->device_ctx = dev; | 391 | net_device_ctx->device_ctx = dev; |
| 404 | hv_set_drvdata(dev, net); | 392 | hv_set_drvdata(dev, net); |
| 405 | INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp); | 393 | INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp); |
| 394 | INIT_WORK(&net_device_ctx->work, do_set_multicast); | ||
| 406 | 395 | ||
| 407 | net->netdev_ops = &device_ops; | 396 | net->netdev_ops = &device_ops; |
| 408 | 397 | ||
| @@ -456,6 +445,7 @@ static int netvsc_remove(struct hv_device *dev) | |||
| 456 | 445 | ||
| 457 | ndev_ctx = netdev_priv(net); | 446 | ndev_ctx = netdev_priv(net); |
| 458 | cancel_delayed_work_sync(&ndev_ctx->dwork); | 447 | cancel_delayed_work_sync(&ndev_ctx->dwork); |
| 448 | cancel_work_sync(&ndev_ctx->work); | ||
| 459 | 449 | ||
| 460 | /* Stop outbound asap */ | 450 | /* Stop outbound asap */ |
| 461 | netif_tx_disable(net); | 451 | netif_tx_disable(net); |
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c index f08c85acf761..5ac46f5226f3 100644 --- a/drivers/net/phy/icplus.c +++ b/drivers/net/phy/icplus.c | |||
| @@ -40,6 +40,7 @@ MODULE_LICENSE("GPL"); | |||
| 40 | #define IP1001_PHASE_SEL_MASK 3 /* IP1001 RX/TXPHASE_SEL */ | 40 | #define IP1001_PHASE_SEL_MASK 3 /* IP1001 RX/TXPHASE_SEL */ |
| 41 | #define IP1001_APS_ON 11 /* IP1001 APS Mode bit */ | 41 | #define IP1001_APS_ON 11 /* IP1001 APS Mode bit */ |
| 42 | #define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */ | 42 | #define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */ |
| 43 | #define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */ | ||
| 43 | 44 | ||
| 44 | static int ip175c_config_init(struct phy_device *phydev) | 45 | static int ip175c_config_init(struct phy_device *phydev) |
| 45 | { | 46 | { |
| @@ -185,6 +186,15 @@ static int ip175c_config_aneg(struct phy_device *phydev) | |||
| 185 | return 0; | 186 | return 0; |
| 186 | } | 187 | } |
| 187 | 188 | ||
| 189 | static int ip101a_g_ack_interrupt(struct phy_device *phydev) | ||
| 190 | { | ||
| 191 | int err = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS); | ||
| 192 | if (err < 0) | ||
| 193 | return err; | ||
| 194 | |||
| 195 | return 0; | ||
| 196 | } | ||
| 197 | |||
| 188 | static struct phy_driver ip175c_driver = { | 198 | static struct phy_driver ip175c_driver = { |
| 189 | .phy_id = 0x02430d80, | 199 | .phy_id = 0x02430d80, |
| 190 | .name = "ICPlus IP175C", | 200 | .name = "ICPlus IP175C", |
| @@ -204,7 +214,6 @@ static struct phy_driver ip1001_driver = { | |||
| 204 | .phy_id_mask = 0x0ffffff0, | 214 | .phy_id_mask = 0x0ffffff0, |
| 205 | .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | | 215 | .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | |
| 206 | SUPPORTED_Asym_Pause, | 216 | SUPPORTED_Asym_Pause, |
| 207 | .flags = PHY_HAS_INTERRUPT, | ||
| 208 | .config_init = &ip1001_config_init, | 217 | .config_init = &ip1001_config_init, |
| 209 | .config_aneg = &genphy_config_aneg, | 218 | .config_aneg = &genphy_config_aneg, |
| 210 | .read_status = &genphy_read_status, | 219 | .read_status = &genphy_read_status, |
| @@ -220,6 +229,7 @@ static struct phy_driver ip101a_g_driver = { | |||
| 220 | .features = PHY_BASIC_FEATURES | SUPPORTED_Pause | | 229 | .features = PHY_BASIC_FEATURES | SUPPORTED_Pause | |
| 221 | SUPPORTED_Asym_Pause, | 230 | SUPPORTED_Asym_Pause, |
| 222 | .flags = PHY_HAS_INTERRUPT, | 231 | .flags = PHY_HAS_INTERRUPT, |
| 232 | .ack_interrupt = ip101a_g_ack_interrupt, | ||
| 223 | .config_init = &ip101a_g_config_init, | 233 | .config_init = &ip101a_g_config_init, |
| 224 | .config_aneg = &genphy_config_aneg, | 234 | .config_aneg = &genphy_config_aneg, |
| 225 | .read_status = &genphy_read_status, | 235 | .read_status = &genphy_read_status, |
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 33f8c51968b6..21d7151fb0ab 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c | |||
| @@ -235,7 +235,7 @@ struct ppp_net { | |||
| 235 | /* Prototypes. */ | 235 | /* Prototypes. */ |
| 236 | static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, | 236 | static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, |
| 237 | struct file *file, unsigned int cmd, unsigned long arg); | 237 | struct file *file, unsigned int cmd, unsigned long arg); |
| 238 | static int ppp_xmit_process(struct ppp *ppp); | 238 | static void ppp_xmit_process(struct ppp *ppp); |
| 239 | static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); | 239 | static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); |
| 240 | static void ppp_push(struct ppp *ppp); | 240 | static void ppp_push(struct ppp *ppp); |
| 241 | static void ppp_channel_push(struct channel *pch); | 241 | static void ppp_channel_push(struct channel *pch); |
| @@ -969,8 +969,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 969 | put_unaligned_be16(proto, pp); | 969 | put_unaligned_be16(proto, pp); |
| 970 | 970 | ||
| 971 | skb_queue_tail(&ppp->file.xq, skb); | 971 | skb_queue_tail(&ppp->file.xq, skb); |
| 972 | if (!ppp_xmit_process(ppp)) | 972 | ppp_xmit_process(ppp); |
| 973 | netif_stop_queue(dev); | ||
| 974 | return NETDEV_TX_OK; | 973 | return NETDEV_TX_OK; |
| 975 | 974 | ||
| 976 | outf: | 975 | outf: |
| @@ -1048,11 +1047,10 @@ static void ppp_setup(struct net_device *dev) | |||
| 1048 | * Called to do any work queued up on the transmit side | 1047 | * Called to do any work queued up on the transmit side |
| 1049 | * that can now be done. | 1048 | * that can now be done. |
| 1050 | */ | 1049 | */ |
| 1051 | static int | 1050 | static void |
| 1052 | ppp_xmit_process(struct ppp *ppp) | 1051 | ppp_xmit_process(struct ppp *ppp) |
| 1053 | { | 1052 | { |
| 1054 | struct sk_buff *skb; | 1053 | struct sk_buff *skb; |
| 1055 | int ret = 0; | ||
| 1056 | 1054 | ||
| 1057 | ppp_xmit_lock(ppp); | 1055 | ppp_xmit_lock(ppp); |
| 1058 | if (!ppp->closing) { | 1056 | if (!ppp->closing) { |
| @@ -1062,13 +1060,12 @@ ppp_xmit_process(struct ppp *ppp) | |||
| 1062 | ppp_send_frame(ppp, skb); | 1060 | ppp_send_frame(ppp, skb); |
| 1063 | /* If there's no work left to do, tell the core net | 1061 | /* If there's no work left to do, tell the core net |
| 1064 | code that we can accept some more. */ | 1062 | code that we can accept some more. */ |
| 1065 | if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) { | 1063 | if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) |
| 1066 | netif_wake_queue(ppp->dev); | 1064 | netif_wake_queue(ppp->dev); |
| 1067 | ret = 1; | 1065 | else |
| 1068 | } | 1066 | netif_stop_queue(ppp->dev); |
| 1069 | } | 1067 | } |
| 1070 | ppp_xmit_unlock(ppp); | 1068 | ppp_xmit_unlock(ppp); |
| 1071 | return ret; | ||
| 1072 | } | 1069 | } |
| 1073 | 1070 | ||
| 1074 | static inline struct sk_buff * | 1071 | static inline struct sk_buff * |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 552d24bf862e..d316503b35d4 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
| @@ -365,6 +365,27 @@ static const struct driver_info qmi_wwan_force_int4 = { | |||
| 365 | .data = BIT(4), /* interface whitelist bitmap */ | 365 | .data = BIT(4), /* interface whitelist bitmap */ |
| 366 | }; | 366 | }; |
| 367 | 367 | ||
| 368 | /* Sierra Wireless provide equally useless interface descriptors | ||
| 369 | * Devices in QMI mode can be switched between two different | ||
| 370 | * configurations: | ||
| 371 | * a) USB interface #8 is QMI/wwan | ||
| 372 | * b) USB interfaces #8, #19 and #20 are QMI/wwan | ||
| 373 | * | ||
| 374 | * Both configurations provide a number of other interfaces (serial++), | ||
| 375 | * some of which have the same endpoint configuration as we expect, so | ||
| 376 | * a whitelist or blacklist is necessary. | ||
| 377 | * | ||
| 378 | * FIXME: The below whitelist should include BIT(20). It does not | ||
| 379 | * because I cannot get it to work... | ||
| 380 | */ | ||
| 381 | static const struct driver_info qmi_wwan_sierra = { | ||
| 382 | .description = "Sierra Wireless wwan/QMI device", | ||
| 383 | .flags = FLAG_WWAN, | ||
| 384 | .bind = qmi_wwan_bind_gobi, | ||
| 385 | .unbind = qmi_wwan_unbind_shared, | ||
| 386 | .manage_power = qmi_wwan_manage_power, | ||
| 387 | .data = BIT(8) | BIT(19), /* interface whitelist bitmap */ | ||
| 388 | }; | ||
| 368 | 389 | ||
| 369 | #define HUAWEI_VENDOR_ID 0x12D1 | 390 | #define HUAWEI_VENDOR_ID 0x12D1 |
| 370 | #define QMI_GOBI_DEVICE(vend, prod) \ | 391 | #define QMI_GOBI_DEVICE(vend, prod) \ |
| @@ -445,6 +466,15 @@ static const struct usb_device_id products[] = { | |||
| 445 | .bInterfaceProtocol = 0xff, | 466 | .bInterfaceProtocol = 0xff, |
| 446 | .driver_info = (unsigned long)&qmi_wwan_force_int4, | 467 | .driver_info = (unsigned long)&qmi_wwan_force_int4, |
| 447 | }, | 468 | }, |
| 469 | { /* Sierra Wireless MC77xx in QMI mode */ | ||
| 470 | .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, | ||
| 471 | .idVendor = 0x1199, | ||
| 472 | .idProduct = 0x68a2, | ||
| 473 | .bInterfaceClass = 0xff, | ||
| 474 | .bInterfaceSubClass = 0xff, | ||
| 475 | .bInterfaceProtocol = 0xff, | ||
| 476 | .driver_info = (unsigned long)&qmi_wwan_sierra, | ||
| 477 | }, | ||
| 448 | {QMI_GOBI_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ | 478 | {QMI_GOBI_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ |
| 449 | {QMI_GOBI_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ | 479 | {QMI_GOBI_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ |
| 450 | {QMI_GOBI_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */ | 480 | {QMI_GOBI_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */ |
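Note: the Sierra Wireless entry relies on the driver treating .data as an interface-number bitmap, so only USB interfaces whose bit is set get bound as QMI/wwan. The check reduces to a bit test like this (BIT() defined locally; the whitelist values are the ones from the entry above):

#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	/* interfaces #8 and #19, per the qmi_wwan_sierra whitelist */
	unsigned long whitelist = BIT(8) | BIT(19);

	for (int ifnum = 0; ifnum < 21; ifnum++)
		if (whitelist & BIT(ifnum))
			printf("interface %d: bind as QMI/wwan\n", ifnum);
	return 0;
}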
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 187d01ccb973..a2349483cd2a 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c | |||
| @@ -1051,6 +1051,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) | |||
| 1051 | dev->net->ethtool_ops = &smsc75xx_ethtool_ops; | 1051 | dev->net->ethtool_ops = &smsc75xx_ethtool_ops; |
| 1052 | dev->net->flags |= IFF_MULTICAST; | 1052 | dev->net->flags |= IFF_MULTICAST; |
| 1053 | dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD; | 1053 | dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD; |
| 1054 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; | ||
| 1054 | return 0; | 1055 | return 0; |
| 1055 | } | 1056 | } |
| 1056 | 1057 | ||
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 4de2760c5937..af8acc85f4bb 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
| @@ -626,16 +626,15 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 626 | /* This can happen with OOM and indirect buffers. */ | 626 | /* This can happen with OOM and indirect buffers. */ |
| 627 | if (unlikely(capacity < 0)) { | 627 | if (unlikely(capacity < 0)) { |
| 628 | if (likely(capacity == -ENOMEM)) { | 628 | if (likely(capacity == -ENOMEM)) { |
| 629 | if (net_ratelimit()) { | 629 | if (net_ratelimit()) |
| 630 | dev_warn(&dev->dev, | 630 | dev_warn(&dev->dev, |
| 631 | "TX queue failure: out of memory\n"); | 631 | "TX queue failure: out of memory\n"); |
| 632 | } else { | 632 | } else { |
| 633 | dev->stats.tx_fifo_errors++; | 633 | dev->stats.tx_fifo_errors++; |
| 634 | if (net_ratelimit()) | 634 | if (net_ratelimit()) |
| 635 | dev_warn(&dev->dev, | 635 | dev_warn(&dev->dev, |
| 636 | "Unexpected TX queue failure: %d\n", | 636 | "Unexpected TX queue failure: %d\n", |
| 637 | capacity); | 637 | capacity); |
| 638 | } | ||
| 639 | } | 638 | } |
| 640 | dev->stats.tx_dropped++; | 639 | dev->stats.tx_dropped++; |
| 641 | kfree_skb(skb); | 640 | kfree_skb(skb); |
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index ebb9f24eefb5..1a623183cbe5 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c | |||
| @@ -2483,6 +2483,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 2483 | pr_err("Control memory remap failed\n"); | 2483 | pr_err("Control memory remap failed\n"); |
| 2484 | pci_release_regions(pdev); | 2484 | pci_release_regions(pdev); |
| 2485 | pci_disable_device(pdev); | 2485 | pci_disable_device(pdev); |
| 2486 | iounmap(card->mem); | ||
| 2486 | kfree(card); | 2487 | kfree(card); |
| 2487 | return -ENODEV; | 2488 | return -ENODEV; |
| 2488 | } | 2489 | } |
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c index 8faa129da5a0..8c50d9d19d78 100644 --- a/drivers/net/wireless/ath/ath5k/ahb.c +++ b/drivers/net/wireless/ath/ath5k/ahb.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/nl80211.h> | 19 | #include <linux/nl80211.h> |
| 20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
| 21 | #include <linux/etherdevice.h> | 21 | #include <linux/etherdevice.h> |
| 22 | #include <linux/export.h> | ||
| 22 | #include <ar231x_platform.h> | 23 | #include <ar231x_platform.h> |
| 23 | #include "ath5k.h" | 24 | #include "ath5k.h" |
| 24 | #include "debug.h" | 25 | #include "debug.h" |
| @@ -119,7 +120,7 @@ static int ath_ahb_probe(struct platform_device *pdev) | |||
| 119 | if (res == NULL) { | 120 | if (res == NULL) { |
| 120 | dev_err(&pdev->dev, "no IRQ resource found\n"); | 121 | dev_err(&pdev->dev, "no IRQ resource found\n"); |
| 121 | ret = -ENXIO; | 122 | ret = -ENXIO; |
| 122 | goto err_out; | 123 | goto err_iounmap; |
| 123 | } | 124 | } |
| 124 | 125 | ||
| 125 | irq = res->start; | 126 | irq = res->start; |
| @@ -128,7 +129,7 @@ static int ath_ahb_probe(struct platform_device *pdev) | |||
| 128 | if (hw == NULL) { | 129 | if (hw == NULL) { |
| 129 | dev_err(&pdev->dev, "no memory for ieee80211_hw\n"); | 130 | dev_err(&pdev->dev, "no memory for ieee80211_hw\n"); |
| 130 | ret = -ENOMEM; | 131 | ret = -ENOMEM; |
| 131 | goto err_out; | 132 | goto err_iounmap; |
| 132 | } | 133 | } |
| 133 | 134 | ||
| 134 | ah = hw->priv; | 135 | ah = hw->priv; |
| @@ -185,6 +186,8 @@ static int ath_ahb_probe(struct platform_device *pdev) | |||
| 185 | err_free_hw: | 186 | err_free_hw: |
| 186 | ieee80211_free_hw(hw); | 187 | ieee80211_free_hw(hw); |
| 187 | platform_set_drvdata(pdev, NULL); | 188 | platform_set_drvdata(pdev, NULL); |
| 189 | err_iounmap: | ||
| 190 | iounmap(mem); | ||
| 188 | err_out: | 191 | err_out: |
| 189 | return ret; | 192 | return ret; |
| 190 | } | 193 | } |
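Note: the ath5k/ahb.c hunks add an err_iounmap label so that every failure after the registers were ioremapped unwinds through iounmap() before returning. A compact sketch of that goto-unwind idiom, with malloc/free standing in for ioremap/iounmap:

#include <stdio.h>
#include <stdlib.h>

static int probe(int fail_ring_alloc)
{
	char *regs, *ring;

	regs = malloc(64);                     /* step 1: stands in for ioremap() */
	if (!regs)
		return -1;

	ring = fail_ring_alloc ? NULL : malloc(256);   /* step 2 can fail */
	if (!ring)
		goto err_unmap;

	puts("probe succeeded");
	free(ring);
	free(regs);
	return 0;

err_unmap:
	free(regs);                            /* undo step 1 only -- like err_iounmap */
	return -1;
}

int main(void)
{
	printf("good probe: %d\n", probe(0));
	printf("bad probe : %d\n", probe(1));
	return 0;
}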
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 2504ab005589..798ea57252b4 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
| @@ -1548,6 +1548,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) | |||
| 1548 | struct ath_hw *ah = sc->sc_ah; | 1548 | struct ath_hw *ah = sc->sc_ah; |
| 1549 | struct ath_common *common = ath9k_hw_common(ah); | 1549 | struct ath_common *common = ath9k_hw_common(ah); |
| 1550 | struct ieee80211_conf *conf = &hw->conf; | 1550 | struct ieee80211_conf *conf = &hw->conf; |
| 1551 | bool reset_channel = false; | ||
| 1551 | 1552 | ||
| 1552 | ath9k_ps_wakeup(sc); | 1553 | ath9k_ps_wakeup(sc); |
| 1553 | mutex_lock(&sc->mutex); | 1554 | mutex_lock(&sc->mutex); |
| @@ -1556,6 +1557,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) | |||
| 1556 | sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE); | 1557 | sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE); |
| 1557 | if (sc->ps_idle) | 1558 | if (sc->ps_idle) |
| 1558 | ath_cancel_work(sc); | 1559 | ath_cancel_work(sc); |
| 1560 | else | ||
| 1561 | /* | ||
| 1562 | * The chip needs a reset to properly wake up from | ||
| 1563 | * full sleep | ||
| 1564 | */ | ||
| 1565 | reset_channel = ah->chip_fullsleep; | ||
| 1559 | } | 1566 | } |
| 1560 | 1567 | ||
| 1561 | /* | 1568 | /* |
| @@ -1584,7 +1591,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) | |||
| 1584 | } | 1591 | } |
| 1585 | } | 1592 | } |
| 1586 | 1593 | ||
| 1587 | if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { | 1594 | if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) { |
| 1588 | struct ieee80211_channel *curchan = hw->conf.channel; | 1595 | struct ieee80211_channel *curchan = hw->conf.channel; |
| 1589 | int pos = curchan->hw_value; | 1596 | int pos = curchan->hw_value; |
| 1590 | int old_pos = -1; | 1597 | int old_pos = -1; |
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 834e6bc45e8b..23eaa1b26ebe 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
| @@ -1820,6 +1820,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, | |||
| 1820 | struct ath_frame_info *fi = get_frame_info(skb); | 1820 | struct ath_frame_info *fi = get_frame_info(skb); |
| 1821 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 1821 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
| 1822 | struct ath_buf *bf; | 1822 | struct ath_buf *bf; |
| 1823 | int fragno; | ||
| 1823 | u16 seqno; | 1824 | u16 seqno; |
| 1824 | 1825 | ||
| 1825 | bf = ath_tx_get_buffer(sc); | 1826 | bf = ath_tx_get_buffer(sc); |
| @@ -1831,9 +1832,16 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, | |||
| 1831 | ATH_TXBUF_RESET(bf); | 1832 | ATH_TXBUF_RESET(bf); |
| 1832 | 1833 | ||
| 1833 | if (tid) { | 1834 | if (tid) { |
| 1835 | fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; | ||
| 1834 | seqno = tid->seq_next; | 1836 | seqno = tid->seq_next; |
| 1835 | hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); | 1837 | hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); |
| 1836 | INCR(tid->seq_next, IEEE80211_SEQ_MAX); | 1838 | |
| 1839 | if (fragno) | ||
| 1840 | hdr->seq_ctrl |= cpu_to_le16(fragno); | ||
| 1841 | |||
| 1842 | if (!ieee80211_has_morefrags(hdr->frame_control)) | ||
| 1843 | INCR(tid->seq_next, IEEE80211_SEQ_MAX); | ||
| 1844 | |||
| 1837 | bf->bf_state.seqno = seqno; | 1845 | bf->bf_state.seqno = seqno; |
| 1838 | } | 1846 | } |
| 1839 | 1847 | ||
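Note: the ath9k hunk keeps the fragment number from the incoming header when stamping its own sequence number and only advances tid->seq_next once the last fragment has gone out, so all fragments of one MSDU share a sequence number. The 802.11 sequence-control field packs a 12-bit sequence number above a 4-bit fragment number, which behaves like this:

#include <stdio.h>

#define SCTL_FRAG 0x000F   /* low 4 bits: fragment number */
#define SEQ_SHIFT 4        /* 12-bit sequence number above them */
#define SEQ_MAX   0x1000

int main(void)
{
	unsigned int seq_next = 100;

	/* three fragments of one MSDU, then an unfragmented frame */
	struct { unsigned int fragno; int morefrags; } frames[] = {
		{ 0, 1 }, { 1, 1 }, { 2, 0 }, { 0, 0 },
	};

	for (unsigned int i = 0; i < 4; i++) {
		unsigned int seq_ctrl = (seq_next << SEQ_SHIFT) | frames[i].fragno;

		printf("frame %u: seq=%u frag=%u seq_ctrl=0x%04x\n",
		       i, seq_ctrl >> SEQ_SHIFT, seq_ctrl & SCTL_FRAG, seq_ctrl);

		/* advance only once the last fragment has been sent */
		if (!frames[i].morefrags)
			seq_next = (seq_next + 1) % SEQ_MAX;
	}
	return 0;
}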
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c index 231ddf4a674f..7083db75b00c 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c | |||
| @@ -7614,6 +7614,7 @@ brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh, | |||
| 7614 | { | 7614 | { |
| 7615 | int len_mpdu; | 7615 | int len_mpdu; |
| 7616 | struct ieee80211_rx_status rx_status; | 7616 | struct ieee80211_rx_status rx_status; |
| 7617 | struct ieee80211_hdr *hdr; | ||
| 7617 | 7618 | ||
| 7618 | memset(&rx_status, 0, sizeof(rx_status)); | 7619 | memset(&rx_status, 0, sizeof(rx_status)); |
| 7619 | prep_mac80211_status(wlc, rxh, p, &rx_status); | 7620 | prep_mac80211_status(wlc, rxh, p, &rx_status); |
| @@ -7623,6 +7624,13 @@ brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh, | |||
| 7623 | skb_pull(p, D11_PHY_HDR_LEN); | 7624 | skb_pull(p, D11_PHY_HDR_LEN); |
| 7624 | __skb_trim(p, len_mpdu); | 7625 | __skb_trim(p, len_mpdu); |
| 7625 | 7626 | ||
| 7627 | /* unmute transmit */ | ||
| 7628 | if (wlc->hw->suspended_fifos) { | ||
| 7629 | hdr = (struct ieee80211_hdr *)p->data; | ||
| 7630 | if (ieee80211_is_beacon(hdr->frame_control)) | ||
| 7631 | brcms_b_mute(wlc->hw, false); | ||
| 7632 | } | ||
| 7633 | |||
| 7626 | memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status)); | 7634 | memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status)); |
| 7627 | ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p); | 7635 | ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p); |
| 7628 | } | 7636 | } |
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index 3fa1ecebadfd..2fa879b015b6 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c | |||
| @@ -103,7 +103,7 @@ static const u32 cipher_suites[] = { | |||
| 103 | * Convert NL80211's auth_type to the one from Libertas, see chapter 5.9.1 | 103 | * Convert NL80211's auth_type to the one from Libertas, see chapter 5.9.1 |
| 104 | * in the firmware spec | 104 | * in the firmware spec |
| 105 | */ | 105 | */ |
| 106 | static u8 lbs_auth_to_authtype(enum nl80211_auth_type auth_type) | 106 | static int lbs_auth_to_authtype(enum nl80211_auth_type auth_type) |
| 107 | { | 107 | { |
| 108 | int ret = -ENOTSUPP; | 108 | int ret = -ENOTSUPP; |
| 109 | 109 | ||
| @@ -1411,7 +1411,12 @@ static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev, | |||
| 1411 | goto done; | 1411 | goto done; |
| 1412 | } | 1412 | } |
| 1413 | 1413 | ||
| 1414 | lbs_set_authtype(priv, sme); | 1414 | ret = lbs_set_authtype(priv, sme); |
| 1415 | if (ret == -ENOTSUPP) { | ||
| 1416 | wiphy_err(wiphy, "unsupported authtype 0x%x\n", sme->auth_type); | ||
| 1417 | goto done; | ||
| 1418 | } | ||
| 1419 | |||
| 1415 | lbs_set_radio(priv, preamble, 1); | 1420 | lbs_set_radio(priv, preamble, 1); |
| 1416 | 1421 | ||
| 1417 | /* Do the actual association */ | 1422 | /* Do the actual association */ |
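Note: the libertas change widens lbs_auth_to_authtype()'s return type from u8 to int. A negative errno such as -ENOTSUPP does not fit in eight bits, so the old signature silently turned the error into a small positive number and the new check in lbs_cfg_connect() could never have matched it. A minimal demonstration of the truncation (ENOTSUPP is kernel-internal; its value is defined locally here for the demo):

#include <stdint.h>
#include <stdio.h>

#define ENOTSUPP 524   /* kernel-internal errno value, assumed for the sketch */

static uint8_t auth_to_authtype_u8(int auth_type)
{
	(void)auth_type;
	return (uint8_t)-ENOTSUPP;   /* truncated to 8 bits */
}

static int auth_to_authtype_int(int auth_type)
{
	(void)auth_type;
	return -ENOTSUPP;            /* survives intact as a negative errno */
}

int main(void)
{
	printf("u8 return : %d (== -ENOTSUPP? %s)\n",
	       auth_to_authtype_u8(0),
	       auth_to_authtype_u8(0) == -ENOTSUPP ? "yes" : "no");
	printf("int return: %d (== -ENOTSUPP? %s)\n",
	       auth_to_authtype_int(0),
	       auth_to_authtype_int(0) == -ENOTSUPP ? "yes" : "no");
	return 0;
}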
diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h index 445ff21772e2..2f218f9a3fd3 100644 --- a/drivers/net/wireless/mwifiex/pcie.h +++ b/drivers/net/wireless/mwifiex/pcie.h | |||
| @@ -48,15 +48,15 @@ | |||
| 48 | #define PCIE_HOST_INT_STATUS_MASK 0xC3C | 48 | #define PCIE_HOST_INT_STATUS_MASK 0xC3C |
| 49 | #define PCIE_SCRATCH_2_REG 0xC40 | 49 | #define PCIE_SCRATCH_2_REG 0xC40 |
| 50 | #define PCIE_SCRATCH_3_REG 0xC44 | 50 | #define PCIE_SCRATCH_3_REG 0xC44 |
| 51 | #define PCIE_SCRATCH_4_REG 0xCC0 | 51 | #define PCIE_SCRATCH_4_REG 0xCD0 |
| 52 | #define PCIE_SCRATCH_5_REG 0xCC4 | 52 | #define PCIE_SCRATCH_5_REG 0xCD4 |
| 53 | #define PCIE_SCRATCH_6_REG 0xCC8 | 53 | #define PCIE_SCRATCH_6_REG 0xCD8 |
| 54 | #define PCIE_SCRATCH_7_REG 0xCCC | 54 | #define PCIE_SCRATCH_7_REG 0xCDC |
| 55 | #define PCIE_SCRATCH_8_REG 0xCD0 | 55 | #define PCIE_SCRATCH_8_REG 0xCE0 |
| 56 | #define PCIE_SCRATCH_9_REG 0xCD4 | 56 | #define PCIE_SCRATCH_9_REG 0xCE4 |
| 57 | #define PCIE_SCRATCH_10_REG 0xCD8 | 57 | #define PCIE_SCRATCH_10_REG 0xCE8 |
| 58 | #define PCIE_SCRATCH_11_REG 0xCDC | 58 | #define PCIE_SCRATCH_11_REG 0xCEC |
| 59 | #define PCIE_SCRATCH_12_REG 0xCE0 | 59 | #define PCIE_SCRATCH_12_REG 0xCF0 |
| 60 | 60 | ||
| 61 | #define CPU_INTR_DNLD_RDY BIT(0) | 61 | #define CPU_INTR_DNLD_RDY BIT(0) |
| 62 | #define CPU_INTR_DOOR_BELL BIT(1) | 62 | #define CPU_INTR_DOOR_BELL BIT(1) |
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 083a49fee56a..165274c064bc 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile | |||
| @@ -42,6 +42,7 @@ obj-$(CONFIG_UNICORE32) += setup-bus.o setup-irq.o | |||
| 42 | obj-$(CONFIG_PARISC) += setup-bus.o | 42 | obj-$(CONFIG_PARISC) += setup-bus.o |
| 43 | obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o | 43 | obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o |
| 44 | obj-$(CONFIG_PPC) += setup-bus.o | 44 | obj-$(CONFIG_PPC) += setup-bus.o |
| 45 | obj-$(CONFIG_FRV) += setup-bus.o | ||
| 45 | obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o | 46 | obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o |
| 46 | obj-$(CONFIG_X86_VISWS) += setup-irq.o | 47 | obj-$(CONFIG_X86_VISWS) += setup-irq.o |
| 47 | obj-$(CONFIG_MN10300) += setup-bus.o | 48 | obj-$(CONFIG_MN10300) += setup-bus.o |
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c index bc8384c6f3eb..639db4d0aa76 100644 --- a/drivers/platform/x86/acerhdf.c +++ b/drivers/platform/x86/acerhdf.c | |||
| @@ -50,7 +50,7 @@ | |||
| 50 | */ | 50 | */ |
| 51 | #undef START_IN_KERNEL_MODE | 51 | #undef START_IN_KERNEL_MODE |
| 52 | 52 | ||
| 53 | #define DRV_VER "0.5.24" | 53 | #define DRV_VER "0.5.26" |
| 54 | 54 | ||
| 55 | /* | 55 | /* |
| 56 | * According to the Atom N270 datasheet, | 56 | * According to the Atom N270 datasheet, |
| @@ -83,8 +83,8 @@ static int kernelmode; | |||
| 83 | #endif | 83 | #endif |
| 84 | 84 | ||
| 85 | static unsigned int interval = 10; | 85 | static unsigned int interval = 10; |
| 86 | static unsigned int fanon = 63000; | 86 | static unsigned int fanon = 60000; |
| 87 | static unsigned int fanoff = 58000; | 87 | static unsigned int fanoff = 53000; |
| 88 | static unsigned int verbose; | 88 | static unsigned int verbose; |
| 89 | static unsigned int fanstate = ACERHDF_FAN_AUTO; | 89 | static unsigned int fanstate = ACERHDF_FAN_AUTO; |
| 90 | static char force_bios[16]; | 90 | static char force_bios[16]; |
| @@ -150,6 +150,8 @@ static const struct bios_settings_t bios_tbl[] = { | |||
| 150 | {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x00} }, | 150 | {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x00} }, |
| 151 | {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x00} }, | 151 | {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x00} }, |
| 152 | {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x00} }, | 152 | {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x00} }, |
| 153 | /* LT1005u */ | ||
| 154 | {"Acer", "LT-10Q", "v0.3310", 0x55, 0x58, {0x20, 0x00} }, | ||
| 153 | /* Acer 1410 */ | 155 | /* Acer 1410 */ |
| 154 | {"Acer", "Aspire 1410", "v0.3108", 0x55, 0x58, {0x9e, 0x00} }, | 156 | {"Acer", "Aspire 1410", "v0.3108", 0x55, 0x58, {0x9e, 0x00} }, |
| 155 | {"Acer", "Aspire 1410", "v0.3113", 0x55, 0x58, {0x9e, 0x00} }, | 157 | {"Acer", "Aspire 1410", "v0.3113", 0x55, 0x58, {0x9e, 0x00} }, |
| @@ -161,6 +163,7 @@ static const struct bios_settings_t bios_tbl[] = { | |||
| 161 | {"Acer", "Aspire 1410", "v1.3303", 0x55, 0x58, {0x9e, 0x00} }, | 163 | {"Acer", "Aspire 1410", "v1.3303", 0x55, 0x58, {0x9e, 0x00} }, |
| 162 | {"Acer", "Aspire 1410", "v1.3308", 0x55, 0x58, {0x9e, 0x00} }, | 164 | {"Acer", "Aspire 1410", "v1.3308", 0x55, 0x58, {0x9e, 0x00} }, |
| 163 | {"Acer", "Aspire 1410", "v1.3310", 0x55, 0x58, {0x9e, 0x00} }, | 165 | {"Acer", "Aspire 1410", "v1.3310", 0x55, 0x58, {0x9e, 0x00} }, |
| 166 | {"Acer", "Aspire 1410", "v1.3314", 0x55, 0x58, {0x9e, 0x00} }, | ||
| 164 | /* Acer 1810xx */ | 167 | /* Acer 1810xx */ |
| 165 | {"Acer", "Aspire 1810TZ", "v0.3108", 0x55, 0x58, {0x9e, 0x00} }, | 168 | {"Acer", "Aspire 1810TZ", "v0.3108", 0x55, 0x58, {0x9e, 0x00} }, |
| 166 | {"Acer", "Aspire 1810T", "v0.3108", 0x55, 0x58, {0x9e, 0x00} }, | 169 | {"Acer", "Aspire 1810T", "v0.3108", 0x55, 0x58, {0x9e, 0x00} }, |
| @@ -183,29 +186,44 @@ static const struct bios_settings_t bios_tbl[] = { | |||
| 183 | {"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00} }, | 186 | {"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00} }, |
| 184 | {"Acer", "Aspire 1810T", "v1.3310", 0x55, 0x58, {0x9e, 0x00} }, | 187 | {"Acer", "Aspire 1810T", "v1.3310", 0x55, 0x58, {0x9e, 0x00} }, |
| 185 | {"Acer", "Aspire 1810TZ", "v1.3314", 0x55, 0x58, {0x9e, 0x00} }, | 188 | {"Acer", "Aspire 1810TZ", "v1.3314", 0x55, 0x58, {0x9e, 0x00} }, |
| 189 | {"Acer", "Aspire 1810T", "v1.3314", 0x55, 0x58, {0x9e, 0x00} }, | ||
| 186 | /* Acer 531 */ | 190 | /* Acer 531 */ |
| 191 | {"Acer", "AO531h", "v0.3104", 0x55, 0x58, {0x20, 0x00} }, | ||
| 187 | {"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00} }, | 192 | {"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00} }, |
| 193 | {"Acer", "AO531h", "v0.3304", 0x55, 0x58, {0x20, 0x00} }, | ||
| 194 | /* Acer 751 */ | ||
| 195 | {"Acer", "AO751h", "V0.3212", 0x55, 0x58, {0x21, 0x00} }, | ||
| 196 | /* Acer 1825 */ | ||
| 197 | {"Acer", "Aspire 1825PTZ", "V1.3118", 0x55, 0x58, {0x9e, 0x00} }, | ||
| 198 | {"Acer", "Aspire 1825PTZ", "V1.3127", 0x55, 0x58, {0x9e, 0x00} }, | ||
| 199 | /* Acer TravelMate 7730 */ | ||
| 200 | {"Acer", "TravelMate 7730G", "v0.3509", 0x55, 0x58, {0xaf, 0x00} }, | ||
| 188 | /* Gateway */ | 201 | /* Gateway */ |
| 189 | {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00} }, | 202 | {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00} }, |
| 190 | {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00} }, | 203 | {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00} }, |
| 191 | {"Gateway", "LT31", "v1.3103", 0x55, 0x58, {0x9e, 0x00} }, | 204 | {"Gateway", "LT31", "v1.3103", 0x55, 0x58, {0x9e, 0x00} }, |
| 192 | {"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00} }, | 205 | {"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00} }, |
| 193 | {"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00} }, | 206 | {"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00} }, |
| 207 | {"Gateway", "LT31", "v1.3303t", 0x55, 0x58, {0x9e, 0x00} }, | ||
| 194 | /* Packard Bell */ | 208 | /* Packard Bell */ |
| 195 | {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00} }, | 209 | {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00} }, |
| 196 | {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} }, | 210 | {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} }, |
| 197 | {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00} }, | 211 | {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00} }, |
| 198 | {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} }, | 212 | {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} }, |
| 199 | {"Packard Bell", "DOTMU", "v1.3303", 0x55, 0x58, {0x9e, 0x00} }, | 213 | {"Packard Bell", "ENBFT", "V1.3118", 0x55, 0x58, {0x9e, 0x00} }, |
| 200 | {"Packard Bell", "DOTMU", "v0.3120", 0x55, 0x58, {0x9e, 0x00} }, | 214 | {"Packard Bell", "ENBFT", "V1.3127", 0x55, 0x58, {0x9e, 0x00} }, |
| 201 | {"Packard Bell", "DOTMU", "v0.3108", 0x55, 0x58, {0x9e, 0x00} }, | 215 | {"Packard Bell", "DOTMU", "v1.3303", 0x55, 0x58, {0x9e, 0x00} }, |
| 202 | {"Packard Bell", "DOTMU", "v0.3113", 0x55, 0x58, {0x9e, 0x00} }, | 216 | {"Packard Bell", "DOTMU", "v0.3120", 0x55, 0x58, {0x9e, 0x00} }, |
| 203 | {"Packard Bell", "DOTMU", "v0.3115", 0x55, 0x58, {0x9e, 0x00} }, | 217 | {"Packard Bell", "DOTMU", "v0.3108", 0x55, 0x58, {0x9e, 0x00} }, |
| 204 | {"Packard Bell", "DOTMU", "v0.3117", 0x55, 0x58, {0x9e, 0x00} }, | 218 | {"Packard Bell", "DOTMU", "v0.3113", 0x55, 0x58, {0x9e, 0x00} }, |
| 205 | {"Packard Bell", "DOTMU", "v0.3119", 0x55, 0x58, {0x9e, 0x00} }, | 219 | {"Packard Bell", "DOTMU", "v0.3115", 0x55, 0x58, {0x9e, 0x00} }, |
| 206 | {"Packard Bell", "DOTMU", "v1.3204", 0x55, 0x58, {0x9e, 0x00} }, | 220 | {"Packard Bell", "DOTMU", "v0.3117", 0x55, 0x58, {0x9e, 0x00} }, |
| 207 | {"Packard Bell", "DOTMA", "v1.3201", 0x55, 0x58, {0x9e, 0x00} }, | 221 | {"Packard Bell", "DOTMU", "v0.3119", 0x55, 0x58, {0x9e, 0x00} }, |
| 208 | {"Packard Bell", "DOTMA", "v1.3302", 0x55, 0x58, {0x9e, 0x00} }, | 222 | {"Packard Bell", "DOTMU", "v1.3204", 0x55, 0x58, {0x9e, 0x00} }, |
| 223 | {"Packard Bell", "DOTMA", "v1.3201", 0x55, 0x58, {0x9e, 0x00} }, | ||
| 224 | {"Packard Bell", "DOTMA", "v1.3302", 0x55, 0x58, {0x9e, 0x00} }, | ||
| 225 | {"Packard Bell", "DOTMA", "v1.3303t", 0x55, 0x58, {0x9e, 0x00} }, | ||
| 226 | {"Packard Bell", "DOTVR46", "v1.3308", 0x55, 0x58, {0x9e, 0x00} }, | ||
| 209 | /* pewpew-terminator */ | 227 | /* pewpew-terminator */ |
| 210 | {"", "", "", 0, 0, {0, 0} } | 228 | {"", "", "", 0, 0, {0, 0} } |
| 211 | }; | 229 | }; |
| @@ -701,15 +719,20 @@ MODULE_LICENSE("GPL"); | |||
| 701 | MODULE_AUTHOR("Peter Feuerer"); | 719 | MODULE_AUTHOR("Peter Feuerer"); |
| 702 | MODULE_DESCRIPTION("Aspire One temperature and fan driver"); | 720 | MODULE_DESCRIPTION("Aspire One temperature and fan driver"); |
| 703 | MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:"); | 721 | MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:"); |
| 722 | MODULE_ALIAS("dmi:*:*Acer*:pnAO751h*:"); | ||
| 704 | MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1410*:"); | 723 | MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1410*:"); |
| 705 | MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1810*:"); | 724 | MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1810*:"); |
| 725 | MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1825PTZ:"); | ||
| 706 | MODULE_ALIAS("dmi:*:*Acer*:pnAO531*:"); | 726 | MODULE_ALIAS("dmi:*:*Acer*:pnAO531*:"); |
| 727 | MODULE_ALIAS("dmi:*:*Acer*:TravelMate*7730G:"); | ||
| 707 | MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:"); | 728 | MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:"); |
| 708 | MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:"); | 729 | MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:"); |
| 709 | MODULE_ALIAS("dmi:*:*Packard*Bell*:pnAOA*:"); | 730 | MODULE_ALIAS("dmi:*:*Packard*Bell*:pnAOA*:"); |
| 710 | MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOA*:"); | 731 | MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOA*:"); |
| 711 | MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMU*:"); | 732 | MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMU*:"); |
| 733 | MODULE_ALIAS("dmi:*:*Packard*Bell*:pnENBFT*:"); | ||
| 712 | MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMA*:"); | 734 | MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMA*:"); |
| 735 | MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTVR46*:"); | ||
| 713 | 736 | ||
| 714 | module_init(acerhdf_init); | 737 | module_init(acerhdf_init); |
| 715 | module_exit(acerhdf_exit); | 738 | module_exit(acerhdf_exit); |
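Each bios_tbl[] row added above pairs a DMI vendor/product/BIOS-version triple with the embedded-controller registers and fan commands known to work on that machine. The structure itself is defined earlier in acerhdf.c and is not part of this diff; assuming the usual layout (the field names here are an assumption), a row decodes roughly as:

	/* assumed shape of the entries in bios_tbl[] above */
	struct fancmd {
		unsigned char cmd_off;	/* EC value that switches the fan off (0x20 in many rows above) */
		unsigned char cmd_auto;	/* EC value that returns the fan to BIOS control (0x00) */
	};

	struct bios_settings_t {
		const char *vendor;	/* DMI system vendor, e.g. "Acer" */
		const char *product;	/* DMI product name, e.g. "AOA150" */
		const char *version;	/* DMI BIOS version, e.g. "v0.3310" */
		unsigned char fanreg;	/* EC register for the fan command (0x55 above) */
		unsigned char tempreg;	/* EC register for the temperature (0x58 above) */
		struct fancmd cmd;
	};

So the new LT-10Q, AO751h, Aspire 1825PTZ, TravelMate 7730G, Gateway and Packard Bell rows only extend the lookup table, and the matching MODULE_ALIAS() additions at the end of the file let the module autoload on those machines.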
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index a05fc9c955d8..e6c08ee8d46c 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c | |||
| @@ -212,6 +212,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = { | |||
| 212 | }, | 212 | }, |
| 213 | .driver_data = &quirk_dell_vostro_v130, | 213 | .driver_data = &quirk_dell_vostro_v130, |
| 214 | }, | 214 | }, |
| 215 | { } | ||
| 215 | }; | 216 | }; |
| 216 | 217 | ||
| 217 | static struct calling_interface_buffer *buffer; | 218 | static struct calling_interface_buffer *buffer; |
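The one-line dell-laptop.c change adds the empty sentinel that every dmi_system_id table needs: dmi_check_system() walks entries until it finds one with an empty match list, so a table without a terminator runs past the end of the array. A minimal, hypothetical table with the terminator in place looks like:

	static struct dmi_system_id example_quirks[] = {
		{
			.ident = "Hypothetical Vostro variant",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
				DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
			},
			.driver_data = &quirk_dell_vostro_v130,
		},
		{ }	/* terminator: the all-zero entry stops the walk */
	};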
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index f7ba316e0ed6..0ffdb3cde2bb 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c | |||
| @@ -1565,7 +1565,7 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 1565 | ips->poll_turbo_status = true; | 1565 | ips->poll_turbo_status = true; |
| 1566 | 1566 | ||
| 1567 | if (!ips_get_i915_syms(ips)) { | 1567 | if (!ips_get_i915_syms(ips)) { |
| 1568 | dev_err(&dev->dev, "failed to get i915 symbols, graphics turbo disabled\n"); | 1568 | dev_info(&dev->dev, "failed to get i915 symbols, graphics turbo disabled until i915 loads\n"); |
| 1569 | ips->gpu_turbo_enabled = false; | 1569 | ips->gpu_turbo_enabled = false; |
| 1570 | } else { | 1570 | } else { |
| 1571 | dev_dbg(&dev->dev, "graphics turbo enabled\n"); | 1571 | dev_dbg(&dev->dev, "graphics turbo enabled\n"); |
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index cd188ab72f79..c293d0cdb104 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c | |||
| @@ -902,6 +902,7 @@ read_rtc: | |||
| 902 | } | 902 | } |
| 903 | ds1307->nvram->attr.name = "nvram"; | 903 | ds1307->nvram->attr.name = "nvram"; |
| 904 | ds1307->nvram->attr.mode = S_IRUGO | S_IWUSR; | 904 | ds1307->nvram->attr.mode = S_IRUGO | S_IWUSR; |
| 905 | sysfs_bin_attr_init(ds1307->nvram); | ||
| 905 | ds1307->nvram->read = ds1307_nvram_read, | 906 | ds1307->nvram->read = ds1307_nvram_read, |
| 906 | ds1307->nvram->write = ds1307_nvram_write, | 907 | ds1307->nvram->write = ds1307_nvram_write, |
| 907 | ds1307->nvram->size = chip->nvram_size; | 908 | ds1307->nvram->size = chip->nvram_size; |
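The added sysfs_bin_attr_init() call satisfies a lockdep requirement: attributes allocated at runtime, rather than defined statically, need their lockdep class key set up before registration. The general pattern, sketched with hypothetical names standing in for the driver's callbacks, is:

	/* sketch only; example_nvram_read/example_nvram_write stand in for
	 * the driver's real sysfs callbacks */
	static int example_register_nvram(struct device *dev, size_t nvram_size)
	{
		struct bin_attribute *nvram;
		int err;

		nvram = kzalloc(sizeof(*nvram), GFP_KERNEL);
		if (!nvram)
			return -ENOMEM;

		sysfs_bin_attr_init(nvram);	/* init the lockdep key before registering */
		nvram->attr.name = "nvram";
		nvram->attr.mode = S_IRUGO | S_IWUSR;
		nvram->read = example_nvram_read;
		nvram->write = example_nvram_write;
		nvram->size = nvram_size;

		err = sysfs_create_bin_file(&dev->kobj, nvram);
		if (err)
			kfree(nvram);
		return err;
	}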
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index e002cd466e9a..467dc38246f9 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
| @@ -4549,8 +4549,12 @@ static int ipr_ata_slave_alloc(struct scsi_device *sdev) | |||
| 4549 | ENTER; | 4549 | ENTER; |
| 4550 | if (sdev->sdev_target) | 4550 | if (sdev->sdev_target) |
| 4551 | sata_port = sdev->sdev_target->hostdata; | 4551 | sata_port = sdev->sdev_target->hostdata; |
| 4552 | if (sata_port) | 4552 | if (sata_port) { |
| 4553 | rc = ata_sas_port_init(sata_port->ap); | 4553 | rc = ata_sas_port_init(sata_port->ap); |
| 4554 | if (rc == 0) | ||
| 4555 | rc = ata_sas_sync_probe(sata_port->ap); | ||
| 4556 | } | ||
| 4557 | |||
| 4554 | if (rc) | 4558 | if (rc) |
| 4555 | ipr_slave_destroy(sdev); | 4559 | ipr_slave_destroy(sdev); |
| 4556 | 4560 | ||
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index ef9560dff295..cc83b66d45b7 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c | |||
| @@ -1742,17 +1742,19 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
| 1742 | 1742 | ||
| 1743 | mfs = ntohs(flp->fl_csp.sp_bb_data) & | 1743 | mfs = ntohs(flp->fl_csp.sp_bb_data) & |
| 1744 | FC_SP_BB_DATA_MASK; | 1744 | FC_SP_BB_DATA_MASK; |
| 1745 | if (mfs >= FC_SP_MIN_MAX_PAYLOAD && | 1745 | |
| 1746 | mfs <= lport->mfs) { | 1746 | if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) { |
| 1747 | lport->mfs = mfs; | ||
| 1748 | fc_host_maxframe_size(lport->host) = mfs; | ||
| 1749 | } else { | ||
| 1750 | FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " | 1747 | FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " |
| 1751 | "lport->mfs:%hu\n", mfs, lport->mfs); | 1748 | "lport->mfs:%hu\n", mfs, lport->mfs); |
| 1752 | fc_lport_error(lport, fp); | 1749 | fc_lport_error(lport, fp); |
| 1753 | goto err; | 1750 | goto err; |
| 1754 | } | 1751 | } |
| 1755 | 1752 | ||
| 1753 | if (mfs <= lport->mfs) { | ||
| 1754 | lport->mfs = mfs; | ||
| 1755 | fc_host_maxframe_size(lport->host) = mfs; | ||
| 1756 | } | ||
| 1757 | |||
| 1756 | csp_flags = ntohs(flp->fl_csp.sp_features); | 1758 | csp_flags = ntohs(flp->fl_csp.sp_features); |
| 1757 | r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov); | 1759 | r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov); |
| 1758 | e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov); | 1760 | e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov); |
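The reworked FLOGI handling changes the policy on the fabric-reported maximum frame size: a value outside the range the FC specs allow is now a hard error, while a value larger than the local limit is simply ignored, so lport->mfs is only ever lowered. Distilled into a hypothetical helper (and assuming FC_SP_MIN_MAX_PAYLOAD and FC_SP_MAX_MAX_PAYLOAD are roughly the spec's 256 and 2112 byte bounds), the rule is:

	static int example_negotiate_mfs(struct fc_lport *lport, unsigned int mfs)
	{
		if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD)
			return -EINVAL;		/* out-of-spec response: fail the login */

		if (mfs <= lport->mfs) {	/* peer is more restrictive: adopt its limit */
			lport->mfs = mfs;
			fc_host_maxframe_size(lport->host) = mfs;
		}
		return 0;
	}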
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index bc0cecc6ad62..441d88ad99a7 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c | |||
| @@ -546,11 +546,12 @@ static struct ata_port_info sata_port_info = { | |||
| 546 | .port_ops = &sas_sata_ops | 546 | .port_ops = &sas_sata_ops |
| 547 | }; | 547 | }; |
| 548 | 548 | ||
| 549 | int sas_ata_init_host_and_port(struct domain_device *found_dev) | 549 | int sas_ata_init(struct domain_device *found_dev) |
| 550 | { | 550 | { |
| 551 | struct sas_ha_struct *ha = found_dev->port->ha; | 551 | struct sas_ha_struct *ha = found_dev->port->ha; |
| 552 | struct Scsi_Host *shost = ha->core.shost; | 552 | struct Scsi_Host *shost = ha->core.shost; |
| 553 | struct ata_port *ap; | 553 | struct ata_port *ap; |
| 554 | int rc; | ||
| 554 | 555 | ||
| 555 | ata_host_init(&found_dev->sata_dev.ata_host, | 556 | ata_host_init(&found_dev->sata_dev.ata_host, |
| 556 | ha->dev, | 557 | ha->dev, |
| @@ -567,8 +568,11 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev) | |||
| 567 | ap->private_data = found_dev; | 568 | ap->private_data = found_dev; |
| 568 | ap->cbl = ATA_CBL_SATA; | 569 | ap->cbl = ATA_CBL_SATA; |
| 569 | ap->scsi_host = shost; | 570 | ap->scsi_host = shost; |
| 570 | /* publish initialized ata port */ | 571 | rc = ata_sas_port_init(ap); |
| 571 | smp_wmb(); | 572 | if (rc) { |
| 573 | ata_sas_port_destroy(ap); | ||
| 574 | return rc; | ||
| 575 | } | ||
| 572 | found_dev->sata_dev.ap = ap; | 576 | found_dev->sata_dev.ap = ap; |
| 573 | 577 | ||
| 574 | return 0; | 578 | return 0; |
| @@ -648,18 +652,13 @@ static void sas_get_ata_command_set(struct domain_device *dev) | |||
| 648 | void sas_probe_sata(struct asd_sas_port *port) | 652 | void sas_probe_sata(struct asd_sas_port *port) |
| 649 | { | 653 | { |
| 650 | struct domain_device *dev, *n; | 654 | struct domain_device *dev, *n; |
| 651 | int err; | ||
| 652 | 655 | ||
| 653 | mutex_lock(&port->ha->disco_mutex); | 656 | mutex_lock(&port->ha->disco_mutex); |
| 654 | list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) { | 657 | list_for_each_entry(dev, &port->disco_list, disco_list_node) { |
| 655 | if (!dev_is_sata(dev)) | 658 | if (!dev_is_sata(dev)) |
| 656 | continue; | 659 | continue; |
| 657 | 660 | ||
| 658 | err = sas_ata_init_host_and_port(dev); | 661 | ata_sas_async_probe(dev->sata_dev.ap); |
| 659 | if (err) | ||
| 660 | sas_fail_probe(dev, __func__, err); | ||
| 661 | else | ||
| 662 | ata_sas_async_port_init(dev->sata_dev.ap); | ||
| 663 | } | 662 | } |
| 664 | mutex_unlock(&port->ha->disco_mutex); | 663 | mutex_unlock(&port->ha->disco_mutex); |
| 665 | 664 | ||
| @@ -718,18 +717,6 @@ static void async_sas_ata_eh(void *data, async_cookie_t cookie) | |||
| 718 | sas_put_device(dev); | 717 | sas_put_device(dev); |
| 719 | } | 718 | } |
| 720 | 719 | ||
| 721 | static bool sas_ata_dev_eh_valid(struct domain_device *dev) | ||
| 722 | { | ||
| 723 | struct ata_port *ap; | ||
| 724 | |||
| 725 | if (!dev_is_sata(dev)) | ||
| 726 | return false; | ||
| 727 | ap = dev->sata_dev.ap; | ||
| 728 | /* consume fully initialized ata ports */ | ||
| 729 | smp_rmb(); | ||
| 730 | return !!ap; | ||
| 731 | } | ||
| 732 | |||
| 733 | void sas_ata_strategy_handler(struct Scsi_Host *shost) | 720 | void sas_ata_strategy_handler(struct Scsi_Host *shost) |
| 734 | { | 721 | { |
| 735 | struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); | 722 | struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); |
| @@ -753,7 +740,7 @@ void sas_ata_strategy_handler(struct Scsi_Host *shost) | |||
| 753 | 740 | ||
| 754 | spin_lock(&port->dev_list_lock); | 741 | spin_lock(&port->dev_list_lock); |
| 755 | list_for_each_entry(dev, &port->dev_list, dev_list_node) { | 742 | list_for_each_entry(dev, &port->dev_list, dev_list_node) { |
| 756 | if (!sas_ata_dev_eh_valid(dev)) | 743 | if (!dev_is_sata(dev)) |
| 757 | continue; | 744 | continue; |
| 758 | async_schedule_domain(async_sas_ata_eh, dev, &async); | 745 | async_schedule_domain(async_sas_ata_eh, dev, &async); |
| 759 | } | 746 | } |
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 364679675602..629a0865b130 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c | |||
| @@ -72,6 +72,7 @@ static int sas_get_port_device(struct asd_sas_port *port) | |||
| 72 | struct asd_sas_phy *phy; | 72 | struct asd_sas_phy *phy; |
| 73 | struct sas_rphy *rphy; | 73 | struct sas_rphy *rphy; |
| 74 | struct domain_device *dev; | 74 | struct domain_device *dev; |
| 75 | int rc = -ENODEV; | ||
| 75 | 76 | ||
| 76 | dev = sas_alloc_device(); | 77 | dev = sas_alloc_device(); |
| 77 | if (!dev) | 78 | if (!dev) |
| @@ -110,9 +111,16 @@ static int sas_get_port_device(struct asd_sas_port *port) | |||
| 110 | 111 | ||
| 111 | sas_init_dev(dev); | 112 | sas_init_dev(dev); |
| 112 | 113 | ||
| 114 | dev->port = port; | ||
| 113 | switch (dev->dev_type) { | 115 | switch (dev->dev_type) { |
| 114 | case SAS_END_DEV: | ||
| 115 | case SATA_DEV: | 116 | case SATA_DEV: |
| 117 | rc = sas_ata_init(dev); | ||
| 118 | if (rc) { | ||
| 119 | rphy = NULL; | ||
| 120 | break; | ||
| 121 | } | ||
| 122 | /* fall through */ | ||
| 123 | case SAS_END_DEV: | ||
| 116 | rphy = sas_end_device_alloc(port->port); | 124 | rphy = sas_end_device_alloc(port->port); |
| 117 | break; | 125 | break; |
| 118 | case EDGE_DEV: | 126 | case EDGE_DEV: |
| @@ -131,19 +139,14 @@ static int sas_get_port_device(struct asd_sas_port *port) | |||
| 131 | 139 | ||
| 132 | if (!rphy) { | 140 | if (!rphy) { |
| 133 | sas_put_device(dev); | 141 | sas_put_device(dev); |
| 134 | return -ENODEV; | 142 | return rc; |
| 135 | } | 143 | } |
| 136 | 144 | ||
| 137 | spin_lock_irq(&port->phy_list_lock); | ||
| 138 | list_for_each_entry(phy, &port->phy_list, port_phy_el) | ||
| 139 | sas_phy_set_target(phy, dev); | ||
| 140 | spin_unlock_irq(&port->phy_list_lock); | ||
| 141 | rphy->identify.phy_identifier = phy->phy->identify.phy_identifier; | 145 | rphy->identify.phy_identifier = phy->phy->identify.phy_identifier; |
| 142 | memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE); | 146 | memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE); |
| 143 | sas_fill_in_rphy(dev, rphy); | 147 | sas_fill_in_rphy(dev, rphy); |
| 144 | sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr); | 148 | sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr); |
| 145 | port->port_dev = dev; | 149 | port->port_dev = dev; |
| 146 | dev->port = port; | ||
| 147 | dev->linkrate = port->linkrate; | 150 | dev->linkrate = port->linkrate; |
| 148 | dev->min_linkrate = port->linkrate; | 151 | dev->min_linkrate = port->linkrate; |
| 149 | dev->max_linkrate = port->linkrate; | 152 | dev->max_linkrate = port->linkrate; |
| @@ -155,6 +158,7 @@ static int sas_get_port_device(struct asd_sas_port *port) | |||
| 155 | sas_device_set_phy(dev, port->port); | 158 | sas_device_set_phy(dev, port->port); |
| 156 | 159 | ||
| 157 | dev->rphy = rphy; | 160 | dev->rphy = rphy; |
| 161 | get_device(&dev->rphy->dev); | ||
| 158 | 162 | ||
| 159 | if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV) | 163 | if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV) |
| 160 | list_add_tail(&dev->disco_list_node, &port->disco_list); | 164 | list_add_tail(&dev->disco_list_node, &port->disco_list); |
| @@ -164,6 +168,11 @@ static int sas_get_port_device(struct asd_sas_port *port) | |||
| 164 | spin_unlock_irq(&port->dev_list_lock); | 168 | spin_unlock_irq(&port->dev_list_lock); |
| 165 | } | 169 | } |
| 166 | 170 | ||
| 171 | spin_lock_irq(&port->phy_list_lock); | ||
| 172 | list_for_each_entry(phy, &port->phy_list, port_phy_el) | ||
| 173 | sas_phy_set_target(phy, dev); | ||
| 174 | spin_unlock_irq(&port->phy_list_lock); | ||
| 175 | |||
| 167 | return 0; | 176 | return 0; |
| 168 | } | 177 | } |
| 169 | 178 | ||
| @@ -205,8 +214,7 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev) | |||
| 205 | static void sas_probe_devices(struct work_struct *work) | 214 | static void sas_probe_devices(struct work_struct *work) |
| 206 | { | 215 | { |
| 207 | struct domain_device *dev, *n; | 216 | struct domain_device *dev, *n; |
| 208 | struct sas_discovery_event *ev = | 217 | struct sas_discovery_event *ev = to_sas_discovery_event(work); |
| 209 | container_of(work, struct sas_discovery_event, work); | ||
| 210 | struct asd_sas_port *port = ev->port; | 218 | struct asd_sas_port *port = ev->port; |
| 211 | 219 | ||
| 212 | clear_bit(DISCE_PROBE, &port->disc.pending); | 220 | clear_bit(DISCE_PROBE, &port->disc.pending); |
| @@ -255,6 +263,9 @@ void sas_free_device(struct kref *kref) | |||
| 255 | { | 263 | { |
| 256 | struct domain_device *dev = container_of(kref, typeof(*dev), kref); | 264 | struct domain_device *dev = container_of(kref, typeof(*dev), kref); |
| 257 | 265 | ||
| 266 | put_device(&dev->rphy->dev); | ||
| 267 | dev->rphy = NULL; | ||
| 268 | |||
| 258 | if (dev->parent) | 269 | if (dev->parent) |
| 259 | sas_put_device(dev->parent); | 270 | sas_put_device(dev->parent); |
| 260 | 271 | ||
| @@ -291,8 +302,7 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d | |||
| 291 | static void sas_destruct_devices(struct work_struct *work) | 302 | static void sas_destruct_devices(struct work_struct *work) |
| 292 | { | 303 | { |
| 293 | struct domain_device *dev, *n; | 304 | struct domain_device *dev, *n; |
| 294 | struct sas_discovery_event *ev = | 305 | struct sas_discovery_event *ev = to_sas_discovery_event(work); |
| 295 | container_of(work, struct sas_discovery_event, work); | ||
| 296 | struct asd_sas_port *port = ev->port; | 306 | struct asd_sas_port *port = ev->port; |
| 297 | 307 | ||
| 298 | clear_bit(DISCE_DESTRUCT, &port->disc.pending); | 308 | clear_bit(DISCE_DESTRUCT, &port->disc.pending); |
| @@ -302,7 +312,6 @@ static void sas_destruct_devices(struct work_struct *work) | |||
| 302 | 312 | ||
| 303 | sas_remove_children(&dev->rphy->dev); | 313 | sas_remove_children(&dev->rphy->dev); |
| 304 | sas_rphy_delete(dev->rphy); | 314 | sas_rphy_delete(dev->rphy); |
| 305 | dev->rphy = NULL; | ||
| 306 | sas_unregister_common_dev(port, dev); | 315 | sas_unregister_common_dev(port, dev); |
| 307 | } | 316 | } |
| 308 | } | 317 | } |
| @@ -314,11 +323,11 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev) | |||
| 314 | /* this rphy never saw sas_rphy_add */ | 323 | /* this rphy never saw sas_rphy_add */ |
| 315 | list_del_init(&dev->disco_list_node); | 324 | list_del_init(&dev->disco_list_node); |
| 316 | sas_rphy_free(dev->rphy); | 325 | sas_rphy_free(dev->rphy); |
| 317 | dev->rphy = NULL; | ||
| 318 | sas_unregister_common_dev(port, dev); | 326 | sas_unregister_common_dev(port, dev); |
| 327 | return; | ||
| 319 | } | 328 | } |
| 320 | 329 | ||
| 321 | if (dev->rphy && !test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) { | 330 | if (!test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) { |
| 322 | sas_rphy_unlink(dev->rphy); | 331 | sas_rphy_unlink(dev->rphy); |
| 323 | list_move_tail(&dev->disco_list_node, &port->destroy_list); | 332 | list_move_tail(&dev->disco_list_node, &port->destroy_list); |
| 324 | sas_discover_event(dev->port, DISCE_DESTRUCT); | 333 | sas_discover_event(dev->port, DISCE_DESTRUCT); |
| @@ -377,8 +386,7 @@ static void sas_discover_domain(struct work_struct *work) | |||
| 377 | { | 386 | { |
| 378 | struct domain_device *dev; | 387 | struct domain_device *dev; |
| 379 | int error = 0; | 388 | int error = 0; |
| 380 | struct sas_discovery_event *ev = | 389 | struct sas_discovery_event *ev = to_sas_discovery_event(work); |
| 381 | container_of(work, struct sas_discovery_event, work); | ||
| 382 | struct asd_sas_port *port = ev->port; | 390 | struct asd_sas_port *port = ev->port; |
| 383 | 391 | ||
| 384 | clear_bit(DISCE_DISCOVER_DOMAIN, &port->disc.pending); | 392 | clear_bit(DISCE_DISCOVER_DOMAIN, &port->disc.pending); |
| @@ -419,8 +427,6 @@ static void sas_discover_domain(struct work_struct *work) | |||
| 419 | 427 | ||
| 420 | if (error) { | 428 | if (error) { |
| 421 | sas_rphy_free(dev->rphy); | 429 | sas_rphy_free(dev->rphy); |
| 422 | dev->rphy = NULL; | ||
| 423 | |||
| 424 | list_del_init(&dev->disco_list_node); | 430 | list_del_init(&dev->disco_list_node); |
| 425 | spin_lock_irq(&port->dev_list_lock); | 431 | spin_lock_irq(&port->dev_list_lock); |
| 426 | list_del_init(&dev->dev_list_node); | 432 | list_del_init(&dev->dev_list_node); |
| @@ -437,8 +443,7 @@ static void sas_discover_domain(struct work_struct *work) | |||
| 437 | static void sas_revalidate_domain(struct work_struct *work) | 443 | static void sas_revalidate_domain(struct work_struct *work) |
| 438 | { | 444 | { |
| 439 | int res = 0; | 445 | int res = 0; |
| 440 | struct sas_discovery_event *ev = | 446 | struct sas_discovery_event *ev = to_sas_discovery_event(work); |
| 441 | container_of(work, struct sas_discovery_event, work); | ||
| 442 | struct asd_sas_port *port = ev->port; | 447 | struct asd_sas_port *port = ev->port; |
| 443 | struct sas_ha_struct *ha = port->ha; | 448 | struct sas_ha_struct *ha = port->ha; |
| 444 | 449 | ||
| @@ -466,21 +471,25 @@ static void sas_revalidate_domain(struct work_struct *work) | |||
| 466 | 471 | ||
| 467 | /* ---------- Events ---------- */ | 472 | /* ---------- Events ---------- */ |
| 468 | 473 | ||
| 469 | static void sas_chain_work(struct sas_ha_struct *ha, struct work_struct *work) | 474 | static void sas_chain_work(struct sas_ha_struct *ha, struct sas_work *sw) |
| 470 | { | 475 | { |
| 471 | /* chained work is not subject to SA_HA_DRAINING or SAS_HA_REGISTERED */ | 476 | /* chained work is not subject to SA_HA_DRAINING or |
| 472 | scsi_queue_work(ha->core.shost, work); | 477 | * SAS_HA_REGISTERED, because it is either submitted in the |
| 478 | * workqueue, or known to be submitted from a context that is | ||
| 479 | * not racing against draining | ||
| 480 | */ | ||
| 481 | scsi_queue_work(ha->core.shost, &sw->work); | ||
| 473 | } | 482 | } |
| 474 | 483 | ||
| 475 | static void sas_chain_event(int event, unsigned long *pending, | 484 | static void sas_chain_event(int event, unsigned long *pending, |
| 476 | struct work_struct *work, | 485 | struct sas_work *sw, |
| 477 | struct sas_ha_struct *ha) | 486 | struct sas_ha_struct *ha) |
| 478 | { | 487 | { |
| 479 | if (!test_and_set_bit(event, pending)) { | 488 | if (!test_and_set_bit(event, pending)) { |
| 480 | unsigned long flags; | 489 | unsigned long flags; |
| 481 | 490 | ||
| 482 | spin_lock_irqsave(&ha->state_lock, flags); | 491 | spin_lock_irqsave(&ha->state_lock, flags); |
| 483 | sas_chain_work(ha, work); | 492 | sas_chain_work(ha, sw); |
| 484 | spin_unlock_irqrestore(&ha->state_lock, flags); | 493 | spin_unlock_irqrestore(&ha->state_lock, flags); |
| 485 | } | 494 | } |
| 486 | } | 495 | } |
| @@ -519,7 +528,7 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port) | |||
| 519 | 528 | ||
| 520 | disc->pending = 0; | 529 | disc->pending = 0; |
| 521 | for (i = 0; i < DISC_NUM_EVENTS; i++) { | 530 | for (i = 0; i < DISC_NUM_EVENTS; i++) { |
| 522 | INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]); | 531 | INIT_SAS_WORK(&disc->disc_work[i].work, sas_event_fns[i]); |
| 523 | disc->disc_work[i].port = port; | 532 | disc->disc_work[i].port = port; |
| 524 | } | 533 | } |
| 525 | } | 534 | } |
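Two things in the sas_discover.c hunks are defined outside the files shown here. First, the domain_device now holds its own reference on the rphy (get_device() when the rphy is attached, put_device() in sas_free_device()), so the rphy cannot be freed while a domain_device still points at it. Second, the container_of() boilerplate in the work handlers is folded into to_sas_discovery_event(); given the sas_work conversion below, its definition presumably looks something like:

	/* assumed shape of the helper (the real one lives in the libsas
	 * headers); the to_asd_sas_event() and to_sas_ha_event() helpers
	 * used in the later hunks follow the same pattern */
	static inline struct sas_discovery_event *to_sas_discovery_event(struct work_struct *work)
	{
		struct sas_work *sw = container_of(work, struct sas_work, work);

		return container_of(sw, struct sas_discovery_event, work);
	}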
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c index 16639bbae629..4e4292d210c1 100644 --- a/drivers/scsi/libsas/sas_event.c +++ b/drivers/scsi/libsas/sas_event.c | |||
| @@ -27,19 +27,21 @@ | |||
| 27 | #include "sas_internal.h" | 27 | #include "sas_internal.h" |
| 28 | #include "sas_dump.h" | 28 | #include "sas_dump.h" |
| 29 | 29 | ||
| 30 | void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work) | 30 | void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw) |
| 31 | { | 31 | { |
| 32 | if (!test_bit(SAS_HA_REGISTERED, &ha->state)) | 32 | if (!test_bit(SAS_HA_REGISTERED, &ha->state)) |
| 33 | return; | 33 | return; |
| 34 | 34 | ||
| 35 | if (test_bit(SAS_HA_DRAINING, &ha->state)) | 35 | if (test_bit(SAS_HA_DRAINING, &ha->state)) { |
| 36 | list_add(&work->entry, &ha->defer_q); | 36 | /* add it to the defer list, if not already pending */ |
| 37 | else | 37 | if (list_empty(&sw->drain_node)) |
| 38 | scsi_queue_work(ha->core.shost, work); | 38 | list_add(&sw->drain_node, &ha->defer_q); |
| 39 | } else | ||
| 40 | scsi_queue_work(ha->core.shost, &sw->work); | ||
| 39 | } | 41 | } |
| 40 | 42 | ||
| 41 | static void sas_queue_event(int event, unsigned long *pending, | 43 | static void sas_queue_event(int event, unsigned long *pending, |
| 42 | struct work_struct *work, | 44 | struct sas_work *work, |
| 43 | struct sas_ha_struct *ha) | 45 | struct sas_ha_struct *ha) |
| 44 | { | 46 | { |
| 45 | if (!test_and_set_bit(event, pending)) { | 47 | if (!test_and_set_bit(event, pending)) { |
| @@ -55,7 +57,7 @@ static void sas_queue_event(int event, unsigned long *pending, | |||
| 55 | void __sas_drain_work(struct sas_ha_struct *ha) | 57 | void __sas_drain_work(struct sas_ha_struct *ha) |
| 56 | { | 58 | { |
| 57 | struct workqueue_struct *wq = ha->core.shost->work_q; | 59 | struct workqueue_struct *wq = ha->core.shost->work_q; |
| 58 | struct work_struct *w, *_w; | 60 | struct sas_work *sw, *_sw; |
| 59 | 61 | ||
| 60 | set_bit(SAS_HA_DRAINING, &ha->state); | 62 | set_bit(SAS_HA_DRAINING, &ha->state); |
| 61 | /* flush submitters */ | 63 | /* flush submitters */ |
| @@ -66,9 +68,9 @@ void __sas_drain_work(struct sas_ha_struct *ha) | |||
| 66 | 68 | ||
| 67 | spin_lock_irq(&ha->state_lock); | 69 | spin_lock_irq(&ha->state_lock); |
| 68 | clear_bit(SAS_HA_DRAINING, &ha->state); | 70 | clear_bit(SAS_HA_DRAINING, &ha->state); |
| 69 | list_for_each_entry_safe(w, _w, &ha->defer_q, entry) { | 71 | list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) { |
| 70 | list_del_init(&w->entry); | 72 | list_del_init(&sw->drain_node); |
| 71 | sas_queue_work(ha, w); | 73 | sas_queue_work(ha, sw); |
| 72 | } | 74 | } |
| 73 | spin_unlock_irq(&ha->state_lock); | 75 | spin_unlock_irq(&ha->state_lock); |
| 74 | } | 76 | } |
| @@ -151,7 +153,7 @@ int sas_init_events(struct sas_ha_struct *sas_ha) | |||
| 151 | int i; | 153 | int i; |
| 152 | 154 | ||
| 153 | for (i = 0; i < HA_NUM_EVENTS; i++) { | 155 | for (i = 0; i < HA_NUM_EVENTS; i++) { |
| 154 | INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]); | 156 | INIT_SAS_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]); |
| 155 | sas_ha->ha_events[i].ha = sas_ha; | 157 | sas_ha->ha_events[i].ha = sas_ha; |
| 156 | } | 158 | } |
| 157 | 159 | ||
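The sas_event.c changes depend on the new struct sas_work, also defined outside this diff. The idea is that a deferred event gets a list node of its own: while SAS_HA_DRAINING is set, events are parked on ha->defer_q via drain_node rather than by reusing the work_struct's internal entry field, and the list_empty() test above keeps an event from being added to the defer list twice. A plausible reading of the new type and its initializer:

	/* assumed definitions backing the INIT_SAS_WORK()/drain_node usage
	 * above; the real ones are added to the libsas headers by this series */
	struct sas_work {
		struct list_head drain_node;	/* linkage on ha->defer_q while draining */
		struct work_struct work;
	};

	static inline void INIT_SAS_WORK(struct sas_work *sw,
					 void (*fn)(struct work_struct *))
	{
		INIT_WORK(&sw->work, fn);
		INIT_LIST_HEAD(&sw->drain_node);
	}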
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 05acd9e35fc4..caa0525d2523 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c | |||
| @@ -202,6 +202,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) | |||
| 202 | u8 sas_addr[SAS_ADDR_SIZE]; | 202 | u8 sas_addr[SAS_ADDR_SIZE]; |
| 203 | struct smp_resp *resp = rsp; | 203 | struct smp_resp *resp = rsp; |
| 204 | struct discover_resp *dr = &resp->disc; | 204 | struct discover_resp *dr = &resp->disc; |
| 205 | struct sas_ha_struct *ha = dev->port->ha; | ||
| 205 | struct expander_device *ex = &dev->ex_dev; | 206 | struct expander_device *ex = &dev->ex_dev; |
| 206 | struct ex_phy *phy = &ex->ex_phy[phy_id]; | 207 | struct ex_phy *phy = &ex->ex_phy[phy_id]; |
| 207 | struct sas_rphy *rphy = dev->rphy; | 208 | struct sas_rphy *rphy = dev->rphy; |
| @@ -209,6 +210,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) | |||
| 209 | char *type; | 210 | char *type; |
| 210 | 211 | ||
| 211 | if (new_phy) { | 212 | if (new_phy) { |
| 213 | if (WARN_ON_ONCE(test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))) | ||
| 214 | return; | ||
| 212 | phy->phy = sas_phy_alloc(&rphy->dev, phy_id); | 215 | phy->phy = sas_phy_alloc(&rphy->dev, phy_id); |
| 213 | 216 | ||
| 214 | /* FIXME: error_handling */ | 217 | /* FIXME: error_handling */ |
| @@ -233,6 +236,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) | |||
| 233 | memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); | 236 | memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); |
| 234 | 237 | ||
| 235 | phy->attached_dev_type = to_dev_type(dr); | 238 | phy->attached_dev_type = to_dev_type(dr); |
| 239 | if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) | ||
| 240 | goto out; | ||
| 236 | phy->phy_id = phy_id; | 241 | phy->phy_id = phy_id; |
| 237 | phy->linkrate = dr->linkrate; | 242 | phy->linkrate = dr->linkrate; |
| 238 | phy->attached_sata_host = dr->attached_sata_host; | 243 | phy->attached_sata_host = dr->attached_sata_host; |
| @@ -240,7 +245,14 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) | |||
| 240 | phy->attached_sata_ps = dr->attached_sata_ps; | 245 | phy->attached_sata_ps = dr->attached_sata_ps; |
| 241 | phy->attached_iproto = dr->iproto << 1; | 246 | phy->attached_iproto = dr->iproto << 1; |
| 242 | phy->attached_tproto = dr->tproto << 1; | 247 | phy->attached_tproto = dr->tproto << 1; |
| 243 | memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE); | 248 | /* help some expanders that fail to zero sas_address in the 'no |
| 249 | * device' case | ||
| 250 | */ | ||
| 251 | if (phy->attached_dev_type == NO_DEVICE || | ||
| 252 | phy->linkrate < SAS_LINK_RATE_1_5_GBPS) | ||
| 253 | memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); | ||
| 254 | else | ||
| 255 | memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE); | ||
| 244 | phy->attached_phy_id = dr->attached_phy_id; | 256 | phy->attached_phy_id = dr->attached_phy_id; |
| 245 | phy->phy_change_count = dr->change_count; | 257 | phy->phy_change_count = dr->change_count; |
| 246 | phy->routing_attr = dr->routing_attr; | 258 | phy->routing_attr = dr->routing_attr; |
| @@ -266,6 +278,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) | |||
| 266 | return; | 278 | return; |
| 267 | } | 279 | } |
| 268 | 280 | ||
| 281 | out: | ||
| 269 | switch (phy->attached_dev_type) { | 282 | switch (phy->attached_dev_type) { |
| 270 | case SATA_PENDING: | 283 | case SATA_PENDING: |
| 271 | type = "stp pending"; | 284 | type = "stp pending"; |
| @@ -304,7 +317,15 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) | |||
| 304 | else | 317 | else |
| 305 | return; | 318 | return; |
| 306 | 319 | ||
| 307 | SAS_DPRINTK("ex %016llx phy%02d:%c:%X attached: %016llx (%s)\n", | 320 | /* if the attached device type changed and ata_eh is active, |
| 321 | * make sure we run revalidation when eh completes (see: | ||
| 322 | * sas_enable_revalidation) | ||
| 323 | */ | ||
| 324 | if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) | ||
| 325 | set_bit(DISCE_REVALIDATE_DOMAIN, &dev->port->disc.pending); | ||
| 326 | |||
| 327 | SAS_DPRINTK("%sex %016llx phy%02d:%c:%X attached: %016llx (%s)\n", | ||
| 328 | test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state) ? "ata: " : "", | ||
| 308 | SAS_ADDR(dev->sas_addr), phy->phy_id, | 329 | SAS_ADDR(dev->sas_addr), phy->phy_id, |
| 309 | sas_route_char(dev, phy), phy->linkrate, | 330 | sas_route_char(dev, phy), phy->linkrate, |
| 310 | SAS_ADDR(phy->attached_sas_addr), type); | 331 | SAS_ADDR(phy->attached_sas_addr), type); |
| @@ -776,13 +797,16 @@ static struct domain_device *sas_ex_discover_end_dev( | |||
| 776 | if (res) | 797 | if (res) |
| 777 | goto out_free; | 798 | goto out_free; |
| 778 | 799 | ||
| 800 | sas_init_dev(child); | ||
| 801 | res = sas_ata_init(child); | ||
| 802 | if (res) | ||
| 803 | goto out_free; | ||
| 779 | rphy = sas_end_device_alloc(phy->port); | 804 | rphy = sas_end_device_alloc(phy->port); |
| 780 | if (unlikely(!rphy)) | 805 | if (!rphy) |
| 781 | goto out_free; | 806 | goto out_free; |
| 782 | 807 | ||
| 783 | sas_init_dev(child); | ||
| 784 | |||
| 785 | child->rphy = rphy; | 808 | child->rphy = rphy; |
| 809 | get_device(&rphy->dev); | ||
| 786 | 810 | ||
| 787 | list_add_tail(&child->disco_list_node, &parent->port->disco_list); | 811 | list_add_tail(&child->disco_list_node, &parent->port->disco_list); |
| 788 | 812 | ||
| @@ -806,6 +830,7 @@ static struct domain_device *sas_ex_discover_end_dev( | |||
| 806 | sas_init_dev(child); | 830 | sas_init_dev(child); |
| 807 | 831 | ||
| 808 | child->rphy = rphy; | 832 | child->rphy = rphy; |
| 833 | get_device(&rphy->dev); | ||
| 809 | sas_fill_in_rphy(child, rphy); | 834 | sas_fill_in_rphy(child, rphy); |
| 810 | 835 | ||
| 811 | list_add_tail(&child->disco_list_node, &parent->port->disco_list); | 836 | list_add_tail(&child->disco_list_node, &parent->port->disco_list); |
| @@ -830,8 +855,6 @@ static struct domain_device *sas_ex_discover_end_dev( | |||
| 830 | 855 | ||
| 831 | out_list_del: | 856 | out_list_del: |
| 832 | sas_rphy_free(child->rphy); | 857 | sas_rphy_free(child->rphy); |
| 833 | child->rphy = NULL; | ||
| 834 | |||
| 835 | list_del(&child->disco_list_node); | 858 | list_del(&child->disco_list_node); |
| 836 | spin_lock_irq(&parent->port->dev_list_lock); | 859 | spin_lock_irq(&parent->port->dev_list_lock); |
| 837 | list_del(&child->dev_list_node); | 860 | list_del(&child->dev_list_node); |
| @@ -911,6 +934,7 @@ static struct domain_device *sas_ex_discover_expander( | |||
| 911 | } | 934 | } |
| 912 | port = parent->port; | 935 | port = parent->port; |
| 913 | child->rphy = rphy; | 936 | child->rphy = rphy; |
| 937 | get_device(&rphy->dev); | ||
| 914 | edev = rphy_to_expander_device(rphy); | 938 | edev = rphy_to_expander_device(rphy); |
| 915 | child->dev_type = phy->attached_dev_type; | 939 | child->dev_type = phy->attached_dev_type; |
| 916 | kref_get(&parent->kref); | 940 | kref_get(&parent->kref); |
| @@ -934,6 +958,7 @@ static struct domain_device *sas_ex_discover_expander( | |||
| 934 | 958 | ||
| 935 | res = sas_discover_expander(child); | 959 | res = sas_discover_expander(child); |
| 936 | if (res) { | 960 | if (res) { |
| 961 | sas_rphy_delete(rphy); | ||
| 937 | spin_lock_irq(&parent->port->dev_list_lock); | 962 | spin_lock_irq(&parent->port->dev_list_lock); |
| 938 | list_del(&child->dev_list_node); | 963 | list_del(&child->dev_list_node); |
| 939 | spin_unlock_irq(&parent->port->dev_list_lock); | 964 | spin_unlock_irq(&parent->port->dev_list_lock); |
| @@ -1718,9 +1743,17 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id, | |||
| 1718 | int phy_change_count = 0; | 1743 | int phy_change_count = 0; |
| 1719 | 1744 | ||
| 1720 | res = sas_get_phy_change_count(dev, i, &phy_change_count); | 1745 | res = sas_get_phy_change_count(dev, i, &phy_change_count); |
| 1721 | if (res) | 1746 | switch (res) { |
| 1722 | goto out; | 1747 | case SMP_RESP_PHY_VACANT: |
| 1723 | else if (phy_change_count != ex->ex_phy[i].phy_change_count) { | 1748 | case SMP_RESP_NO_PHY: |
| 1749 | continue; | ||
| 1750 | case SMP_RESP_FUNC_ACC: | ||
| 1751 | break; | ||
| 1752 | default: | ||
| 1753 | return res; | ||
| 1754 | } | ||
| 1755 | |||
| 1756 | if (phy_change_count != ex->ex_phy[i].phy_change_count) { | ||
| 1724 | if (update) | 1757 | if (update) |
| 1725 | ex->ex_phy[i].phy_change_count = | 1758 | ex->ex_phy[i].phy_change_count = |
| 1726 | phy_change_count; | 1759 | phy_change_count; |
| @@ -1728,8 +1761,7 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id, | |||
| 1728 | return 0; | 1761 | return 0; |
| 1729 | } | 1762 | } |
| 1730 | } | 1763 | } |
| 1731 | out: | 1764 | return 0; |
| 1732 | return res; | ||
| 1733 | } | 1765 | } |
| 1734 | 1766 | ||
| 1735 | static int sas_get_ex_change_count(struct domain_device *dev, int *ecc) | 1767 | static int sas_get_ex_change_count(struct domain_device *dev, int *ecc) |
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index 120bff64be30..10cb5ae30977 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c | |||
| @@ -94,8 +94,7 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr) | |||
| 94 | 94 | ||
| 95 | void sas_hae_reset(struct work_struct *work) | 95 | void sas_hae_reset(struct work_struct *work) |
| 96 | { | 96 | { |
| 97 | struct sas_ha_event *ev = | 97 | struct sas_ha_event *ev = to_sas_ha_event(work); |
| 98 | container_of(work, struct sas_ha_event, work); | ||
| 99 | struct sas_ha_struct *ha = ev->ha; | 98 | struct sas_ha_struct *ha = ev->ha; |
| 100 | 99 | ||
| 101 | clear_bit(HAE_RESET, &ha->pending); | 100 | clear_bit(HAE_RESET, &ha->pending); |
| @@ -369,14 +368,14 @@ static void sas_phy_release(struct sas_phy *phy) | |||
| 369 | 368 | ||
| 370 | static void phy_reset_work(struct work_struct *work) | 369 | static void phy_reset_work(struct work_struct *work) |
| 371 | { | 370 | { |
| 372 | struct sas_phy_data *d = container_of(work, typeof(*d), reset_work); | 371 | struct sas_phy_data *d = container_of(work, typeof(*d), reset_work.work); |
| 373 | 372 | ||
| 374 | d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset); | 373 | d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset); |
| 375 | } | 374 | } |
| 376 | 375 | ||
| 377 | static void phy_enable_work(struct work_struct *work) | 376 | static void phy_enable_work(struct work_struct *work) |
| 378 | { | 377 | { |
| 379 | struct sas_phy_data *d = container_of(work, typeof(*d), enable_work); | 378 | struct sas_phy_data *d = container_of(work, typeof(*d), enable_work.work); |
| 380 | 379 | ||
| 381 | d->enable_result = sas_phy_enable(d->phy, d->enable); | 380 | d->enable_result = sas_phy_enable(d->phy, d->enable); |
| 382 | } | 381 | } |
| @@ -389,8 +388,8 @@ static int sas_phy_setup(struct sas_phy *phy) | |||
| 389 | return -ENOMEM; | 388 | return -ENOMEM; |
| 390 | 389 | ||
| 391 | mutex_init(&d->event_lock); | 390 | mutex_init(&d->event_lock); |
| 392 | INIT_WORK(&d->reset_work, phy_reset_work); | 391 | INIT_SAS_WORK(&d->reset_work, phy_reset_work); |
| 393 | INIT_WORK(&d->enable_work, phy_enable_work); | 392 | INIT_SAS_WORK(&d->enable_work, phy_enable_work); |
| 394 | d->phy = phy; | 393 | d->phy = phy; |
| 395 | phy->hostdata = d; | 394 | phy->hostdata = d; |
| 396 | 395 | ||
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index f05c63879949..507e4cf12e56 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h | |||
| @@ -45,10 +45,10 @@ struct sas_phy_data { | |||
| 45 | struct mutex event_lock; | 45 | struct mutex event_lock; |
| 46 | int hard_reset; | 46 | int hard_reset; |
| 47 | int reset_result; | 47 | int reset_result; |
| 48 | struct work_struct reset_work; | 48 | struct sas_work reset_work; |
| 49 | int enable; | 49 | int enable; |
| 50 | int enable_result; | 50 | int enable_result; |
| 51 | struct work_struct enable_work; | 51 | struct sas_work enable_work; |
| 52 | }; | 52 | }; |
| 53 | 53 | ||
| 54 | void sas_scsi_recover_host(struct Scsi_Host *shost); | 54 | void sas_scsi_recover_host(struct Scsi_Host *shost); |
| @@ -80,7 +80,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work); | |||
| 80 | void sas_porte_link_reset_err(struct work_struct *work); | 80 | void sas_porte_link_reset_err(struct work_struct *work); |
| 81 | void sas_porte_timer_event(struct work_struct *work); | 81 | void sas_porte_timer_event(struct work_struct *work); |
| 82 | void sas_porte_hard_reset(struct work_struct *work); | 82 | void sas_porte_hard_reset(struct work_struct *work); |
| 83 | void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work); | 83 | void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw); |
| 84 | 84 | ||
| 85 | int sas_notify_lldd_dev_found(struct domain_device *); | 85 | int sas_notify_lldd_dev_found(struct domain_device *); |
| 86 | void sas_notify_lldd_dev_gone(struct domain_device *); | 86 | void sas_notify_lldd_dev_gone(struct domain_device *); |
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c index dcfd4a9105c5..521422e857ab 100644 --- a/drivers/scsi/libsas/sas_phy.c +++ b/drivers/scsi/libsas/sas_phy.c | |||
| @@ -32,8 +32,7 @@ | |||
| 32 | 32 | ||
| 33 | static void sas_phye_loss_of_signal(struct work_struct *work) | 33 | static void sas_phye_loss_of_signal(struct work_struct *work) |
| 34 | { | 34 | { |
| 35 | struct asd_sas_event *ev = | 35 | struct asd_sas_event *ev = to_asd_sas_event(work); |
| 36 | container_of(work, struct asd_sas_event, work); | ||
| 37 | struct asd_sas_phy *phy = ev->phy; | 36 | struct asd_sas_phy *phy = ev->phy; |
| 38 | 37 | ||
| 39 | clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending); | 38 | clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending); |
| @@ -43,8 +42,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work) | |||
| 43 | 42 | ||
| 44 | static void sas_phye_oob_done(struct work_struct *work) | 43 | static void sas_phye_oob_done(struct work_struct *work) |
| 45 | { | 44 | { |
| 46 | struct asd_sas_event *ev = | 45 | struct asd_sas_event *ev = to_asd_sas_event(work); |
| 47 | container_of(work, struct asd_sas_event, work); | ||
| 48 | struct asd_sas_phy *phy = ev->phy; | 46 | struct asd_sas_phy *phy = ev->phy; |
| 49 | 47 | ||
| 50 | clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending); | 48 | clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending); |
| @@ -53,8 +51,7 @@ static void sas_phye_oob_done(struct work_struct *work) | |||
| 53 | 51 | ||
| 54 | static void sas_phye_oob_error(struct work_struct *work) | 52 | static void sas_phye_oob_error(struct work_struct *work) |
| 55 | { | 53 | { |
| 56 | struct asd_sas_event *ev = | 54 | struct asd_sas_event *ev = to_asd_sas_event(work); |
| 57 | container_of(work, struct asd_sas_event, work); | ||
| 58 | struct asd_sas_phy *phy = ev->phy; | 55 | struct asd_sas_phy *phy = ev->phy; |
| 59 | struct sas_ha_struct *sas_ha = phy->ha; | 56 | struct sas_ha_struct *sas_ha = phy->ha; |
| 60 | struct asd_sas_port *port = phy->port; | 57 | struct asd_sas_port *port = phy->port; |
| @@ -85,8 +82,7 @@ static void sas_phye_oob_error(struct work_struct *work) | |||
| 85 | 82 | ||
| 86 | static void sas_phye_spinup_hold(struct work_struct *work) | 83 | static void sas_phye_spinup_hold(struct work_struct *work) |
| 87 | { | 84 | { |
| 88 | struct asd_sas_event *ev = | 85 | struct asd_sas_event *ev = to_asd_sas_event(work); |
| 89 | container_of(work, struct asd_sas_event, work); | ||
| 90 | struct asd_sas_phy *phy = ev->phy; | 86 | struct asd_sas_phy *phy = ev->phy; |
| 91 | struct sas_ha_struct *sas_ha = phy->ha; | 87 | struct sas_ha_struct *sas_ha = phy->ha; |
| 92 | struct sas_internal *i = | 88 | struct sas_internal *i = |
| @@ -127,14 +123,12 @@ int sas_register_phys(struct sas_ha_struct *sas_ha) | |||
| 127 | phy->error = 0; | 123 | phy->error = 0; |
| 128 | INIT_LIST_HEAD(&phy->port_phy_el); | 124 | INIT_LIST_HEAD(&phy->port_phy_el); |
| 129 | for (k = 0; k < PORT_NUM_EVENTS; k++) { | 125 | for (k = 0; k < PORT_NUM_EVENTS; k++) { |
| 130 | INIT_WORK(&phy->port_events[k].work, | 126 | INIT_SAS_WORK(&phy->port_events[k].work, sas_port_event_fns[k]); |
| 131 | sas_port_event_fns[k]); | ||
| 132 | phy->port_events[k].phy = phy; | 127 | phy->port_events[k].phy = phy; |
| 133 | } | 128 | } |
| 134 | 129 | ||
| 135 | for (k = 0; k < PHY_NUM_EVENTS; k++) { | 130 | for (k = 0; k < PHY_NUM_EVENTS; k++) { |
| 136 | INIT_WORK(&phy->phy_events[k].work, | 131 | INIT_SAS_WORK(&phy->phy_events[k].work, sas_phy_event_fns[k]); |
| 137 | sas_phy_event_fns[k]); | ||
| 138 | phy->phy_events[k].phy = phy; | 132 | phy->phy_events[k].phy = phy; |
| 139 | } | 133 | } |
| 140 | 134 | ||
| @@ -144,8 +138,7 @@ int sas_register_phys(struct sas_ha_struct *sas_ha) | |||
| 144 | spin_lock_init(&phy->sas_prim_lock); | 138 | spin_lock_init(&phy->sas_prim_lock); |
| 145 | phy->frame_rcvd_size = 0; | 139 | phy->frame_rcvd_size = 0; |
| 146 | 140 | ||
| 147 | phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev, | 141 | phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev, i); |
| 148 | i); | ||
| 149 | if (!phy->phy) | 142 | if (!phy->phy) |
| 150 | return -ENOMEM; | 143 | return -ENOMEM; |
| 151 | 144 | ||
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index eb19c016d500..e884a8c58a0c 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c | |||
| @@ -123,7 +123,7 @@ static void sas_form_port(struct asd_sas_phy *phy) | |||
| 123 | spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags); | 123 | spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags); |
| 124 | 124 | ||
| 125 | if (!port->port) { | 125 | if (!port->port) { |
| 126 | port->port = sas_port_alloc(phy->phy->dev.parent, phy->id); | 126 | port->port = sas_port_alloc(phy->phy->dev.parent, port->id); |
| 127 | BUG_ON(!port->port); | 127 | BUG_ON(!port->port); |
| 128 | sas_port_add(port->port); | 128 | sas_port_add(port->port); |
| 129 | } | 129 | } |
| @@ -208,8 +208,7 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone) | |||
| 208 | 208 | ||
| 209 | void sas_porte_bytes_dmaed(struct work_struct *work) | 209 | void sas_porte_bytes_dmaed(struct work_struct *work) |
| 210 | { | 210 | { |
| 211 | struct asd_sas_event *ev = | 211 | struct asd_sas_event *ev = to_asd_sas_event(work); |
| 212 | container_of(work, struct asd_sas_event, work); | ||
| 213 | struct asd_sas_phy *phy = ev->phy; | 212 | struct asd_sas_phy *phy = ev->phy; |
| 214 | 213 | ||
| 215 | clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending); | 214 | clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending); |
| @@ -219,8 +218,7 @@ void sas_porte_bytes_dmaed(struct work_struct *work) | |||
| 219 | 218 | ||
| 220 | void sas_porte_broadcast_rcvd(struct work_struct *work) | 219 | void sas_porte_broadcast_rcvd(struct work_struct *work) |
| 221 | { | 220 | { |
| 222 | struct asd_sas_event *ev = | 221 | struct asd_sas_event *ev = to_asd_sas_event(work); |
| 223 | container_of(work, struct asd_sas_event, work); | ||
| 224 | struct asd_sas_phy *phy = ev->phy; | 222 | struct asd_sas_phy *phy = ev->phy; |
| 225 | unsigned long flags; | 223 | unsigned long flags; |
| 226 | u32 prim; | 224 | u32 prim; |
| @@ -237,8 +235,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work) | |||
| 237 | 235 | ||
| 238 | void sas_porte_link_reset_err(struct work_struct *work) | 236 | void sas_porte_link_reset_err(struct work_struct *work) |
| 239 | { | 237 | { |
| 240 | struct asd_sas_event *ev = | 238 | struct asd_sas_event *ev = to_asd_sas_event(work); |
| 241 | container_of(work, struct asd_sas_event, work); | ||
| 242 | struct asd_sas_phy *phy = ev->phy; | 239 | struct asd_sas_phy *phy = ev->phy; |
| 243 | 240 | ||
| 244 | clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending); | 241 | clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending); |
| @@ -248,8 +245,7 @@ void sas_porte_link_reset_err(struct work_struct *work) | |||
| 248 | 245 | ||
| 249 | void sas_porte_timer_event(struct work_struct *work) | 246 | void sas_porte_timer_event(struct work_struct *work) |
| 250 | { | 247 | { |
| 251 | struct asd_sas_event *ev = | 248 | struct asd_sas_event *ev = to_asd_sas_event(work); |
| 252 | container_of(work, struct asd_sas_event, work); | ||
| 253 | struct asd_sas_phy *phy = ev->phy; | 249 | struct asd_sas_phy *phy = ev->phy; |
| 254 | 250 | ||
| 255 | clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending); | 251 | clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending); |
| @@ -259,8 +255,7 @@ void sas_porte_timer_event(struct work_struct *work) | |||
| 259 | 255 | ||
| 260 | void sas_porte_hard_reset(struct work_struct *work) | 256 | void sas_porte_hard_reset(struct work_struct *work) |
| 261 | { | 257 | { |
| 262 | struct asd_sas_event *ev = | 258 | struct asd_sas_event *ev = to_asd_sas_event(work); |
| 263 | container_of(work, struct asd_sas_event, work); | ||
| 264 | struct asd_sas_phy *phy = ev->phy; | 259 | struct asd_sas_phy *phy = ev->phy; |
| 265 | 260 | ||
| 266 | clear_bit(PORTE_HARD_RESET, &phy->port_events_pending); | 261 | clear_bit(PORTE_HARD_RESET, &phy->port_events_pending); |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index ead6405f3e51..5dfd7495d1a1 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
| @@ -1638,7 +1638,7 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, | |||
| 1638 | request_fn_proc *request_fn) | 1638 | request_fn_proc *request_fn) |
| 1639 | { | 1639 | { |
| 1640 | struct request_queue *q; | 1640 | struct request_queue *q; |
| 1641 | struct device *dev = shost->shost_gendev.parent; | 1641 | struct device *dev = shost->dma_dev; |
| 1642 | 1642 | ||
| 1643 | q = blk_init_queue(request_fn, NULL); | 1643 | q = blk_init_queue(request_fn, NULL); |
| 1644 | if (!q) | 1644 | if (!q) |
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 3ed748355b98..00c024039c97 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
| @@ -74,7 +74,7 @@ config SPI_ATMEL | |||
| 74 | This selects a driver for the Atmel SPI Controller, present on | 74 | This selects a driver for the Atmel SPI Controller, present on |
| 75 | many AT32 (AVR32) and AT91 (ARM) chips. | 75 | many AT32 (AVR32) and AT91 (ARM) chips. |
| 76 | 76 | ||
| 77 | config SPI_BFIN | 77 | config SPI_BFIN5XX |
| 78 | tristate "SPI controller driver for ADI Blackfin5xx" | 78 | tristate "SPI controller driver for ADI Blackfin5xx" |
| 79 | depends on BLACKFIN | 79 | depends on BLACKFIN |
| 80 | help | 80 | help |
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index a1d48e0ba3dc..9d75d2198ff5 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile | |||
| @@ -15,7 +15,7 @@ obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o | |||
| 15 | obj-$(CONFIG_SPI_ATH79) += spi-ath79.o | 15 | obj-$(CONFIG_SPI_ATH79) += spi-ath79.o |
| 16 | obj-$(CONFIG_SPI_AU1550) += spi-au1550.o | 16 | obj-$(CONFIG_SPI_AU1550) += spi-au1550.o |
| 17 | obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o | 17 | obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o |
| 18 | obj-$(CONFIG_SPI_BFIN) += spi-bfin5xx.o | 18 | obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o |
| 19 | obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o | 19 | obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o |
| 20 | obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o | 20 | obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o |
| 21 | obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o | 21 | obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o |
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index f01b2648452e..7491971139a6 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Broadcom BCM63xx SPI controller support | 2 | * Broadcom BCM63xx SPI controller support |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2009-2011 Florian Fainelli <florian@openwrt.org> | 4 | * Copyright (C) 2009-2012 Florian Fainelli <florian@openwrt.org> |
| 5 | * Copyright (C) 2010 Tanguy Bouzeloc <tanguy.bouzeloc@efixo.com> | 5 | * Copyright (C) 2010 Tanguy Bouzeloc <tanguy.bouzeloc@efixo.com> |
| 6 | * | 6 | * |
| 7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
| @@ -30,6 +30,8 @@ | |||
| 30 | #include <linux/spi/spi.h> | 30 | #include <linux/spi/spi.h> |
| 31 | #include <linux/completion.h> | 31 | #include <linux/completion.h> |
| 32 | #include <linux/err.h> | 32 | #include <linux/err.h> |
| 33 | #include <linux/workqueue.h> | ||
| 34 | #include <linux/pm_runtime.h> | ||
| 33 | 35 | ||
| 34 | #include <bcm63xx_dev_spi.h> | 36 | #include <bcm63xx_dev_spi.h> |
| 35 | 37 | ||
| @@ -37,8 +39,6 @@ | |||
| 37 | #define DRV_VER "0.1.2" | 39 | #define DRV_VER "0.1.2" |
| 38 | 40 | ||
| 39 | struct bcm63xx_spi { | 41 | struct bcm63xx_spi { |
| 40 | spinlock_t lock; | ||
| 41 | int stopping; | ||
| 42 | struct completion done; | 42 | struct completion done; |
| 43 | 43 | ||
| 44 | void __iomem *regs; | 44 | void __iomem *regs; |
| @@ -96,17 +96,12 @@ static const unsigned bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = { | |||
| 96 | { 391000, SPI_CLK_0_391MHZ } | 96 | { 391000, SPI_CLK_0_391MHZ } |
| 97 | }; | 97 | }; |
| 98 | 98 | ||
| 99 | static int bcm63xx_spi_setup_transfer(struct spi_device *spi, | 99 | static int bcm63xx_spi_check_transfer(struct spi_device *spi, |
| 100 | struct spi_transfer *t) | 100 | struct spi_transfer *t) |
| 101 | { | 101 | { |
| 102 | struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master); | ||
| 103 | u8 bits_per_word; | 102 | u8 bits_per_word; |
| 104 | u8 clk_cfg, reg; | ||
| 105 | u32 hz; | ||
| 106 | int i; | ||
| 107 | 103 | ||
| 108 | bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word; | 104 | bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word; |
| 109 | hz = (t) ? t->speed_hz : spi->max_speed_hz; | ||
| 110 | if (bits_per_word != 8) { | 105 | if (bits_per_word != 8) { |
| 111 | dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", | 106 | dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", |
| 112 | __func__, bits_per_word); | 107 | __func__, bits_per_word); |
| @@ -119,6 +114,19 @@ static int bcm63xx_spi_setup_transfer(struct spi_device *spi, | |||
| 119 | return -EINVAL; | 114 | return -EINVAL; |
| 120 | } | 115 | } |
| 121 | 116 | ||
| 117 | return 0; | ||
| 118 | } | ||
| 119 | |||
| 120 | static void bcm63xx_spi_setup_transfer(struct spi_device *spi, | ||
| 121 | struct spi_transfer *t) | ||
| 122 | { | ||
| 123 | struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master); | ||
| 124 | u32 hz; | ||
| 125 | u8 clk_cfg, reg; | ||
| 126 | int i; | ||
| 127 | |||
| 128 | hz = (t) ? t->speed_hz : spi->max_speed_hz; | ||
| 129 | |||
| 122 | /* Find the closest clock configuration */ | 130 | /* Find the closest clock configuration */ |
| 123 | for (i = 0; i < SPI_CLK_MASK; i++) { | 131 | for (i = 0; i < SPI_CLK_MASK; i++) { |
| 124 | if (hz <= bcm63xx_spi_freq_table[i][0]) { | 132 | if (hz <= bcm63xx_spi_freq_table[i][0]) { |
| @@ -139,8 +147,6 @@ static int bcm63xx_spi_setup_transfer(struct spi_device *spi, | |||
| 139 | bcm_spi_writeb(bs, reg, SPI_CLK_CFG); | 147 | bcm_spi_writeb(bs, reg, SPI_CLK_CFG); |
| 140 | dev_dbg(&spi->dev, "Setting clock register to %02x (hz %d)\n", | 148 | dev_dbg(&spi->dev, "Setting clock register to %02x (hz %d)\n", |
| 141 | clk_cfg, hz); | 149 | clk_cfg, hz); |
| 142 | |||
| 143 | return 0; | ||
| 144 | } | 150 | } |
| 145 | 151 | ||
| 146 | /* the spi->mode bits understood by this driver: */ | 152 | /* the spi->mode bits understood by this driver: */ |
| @@ -153,9 +159,6 @@ static int bcm63xx_spi_setup(struct spi_device *spi) | |||
| 153 | 159 | ||
| 154 | bs = spi_master_get_devdata(spi->master); | 160 | bs = spi_master_get_devdata(spi->master); |
| 155 | 161 | ||
| 156 | if (bs->stopping) | ||
| 157 | return -ESHUTDOWN; | ||
| 158 | |||
| 159 | if (!spi->bits_per_word) | 162 | if (!spi->bits_per_word) |
| 160 | spi->bits_per_word = 8; | 163 | spi->bits_per_word = 8; |
| 161 | 164 | ||
| @@ -165,7 +168,7 @@ static int bcm63xx_spi_setup(struct spi_device *spi) | |||
| 165 | return -EINVAL; | 168 | return -EINVAL; |
| 166 | } | 169 | } |
| 167 | 170 | ||
| 168 | ret = bcm63xx_spi_setup_transfer(spi, NULL); | 171 | ret = bcm63xx_spi_check_transfer(spi, NULL); |
| 169 | if (ret < 0) { | 172 | if (ret < 0) { |
| 170 | dev_err(&spi->dev, "setup: unsupported mode bits %x\n", | 173 | dev_err(&spi->dev, "setup: unsupported mode bits %x\n", |
| 171 | spi->mode & ~MODEBITS); | 174 | spi->mode & ~MODEBITS); |
| @@ -190,28 +193,29 @@ static void bcm63xx_spi_fill_tx_fifo(struct bcm63xx_spi *bs) | |||
| 190 | bs->remaining_bytes -= size; | 193 | bs->remaining_bytes -= size; |
| 191 | } | 194 | } |
| 192 | 195 | ||
| 193 | static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) | 196 | static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi, |
| 197 | struct spi_transfer *t) | ||
| 194 | { | 198 | { |
| 195 | struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master); | 199 | struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master); |
| 196 | u16 msg_ctl; | 200 | u16 msg_ctl; |
| 197 | u16 cmd; | 201 | u16 cmd; |
| 198 | 202 | ||
| 203 | /* Disable the CMD_DONE interrupt */ | ||
| 204 | bcm_spi_writeb(bs, 0, SPI_INT_MASK); | ||
| 205 | |||
| 199 | dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n", | 206 | dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n", |
| 200 | t->tx_buf, t->rx_buf, t->len); | 207 | t->tx_buf, t->rx_buf, t->len); |
| 201 | 208 | ||
| 202 | /* Transmitter is inhibited */ | 209 | /* Transmitter is inhibited */ |
| 203 | bs->tx_ptr = t->tx_buf; | 210 | bs->tx_ptr = t->tx_buf; |
| 204 | bs->rx_ptr = t->rx_buf; | 211 | bs->rx_ptr = t->rx_buf; |
| 205 | init_completion(&bs->done); | ||
| 206 | 212 | ||
| 207 | if (t->tx_buf) { | 213 | if (t->tx_buf) { |
| 208 | bs->remaining_bytes = t->len; | 214 | bs->remaining_bytes = t->len; |
| 209 | bcm63xx_spi_fill_tx_fifo(bs); | 215 | bcm63xx_spi_fill_tx_fifo(bs); |
| 210 | } | 216 | } |
| 211 | 217 | ||
| 212 | /* Enable the command done interrupt which | 218 | init_completion(&bs->done); |
| 213 | * we use to determine completion of a command */ | ||
| 214 | bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK); | ||
| 215 | 219 | ||
| 216 | /* Fill in the Message control register */ | 220 | /* Fill in the Message control register */ |
| 217 | msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT); | 221 | msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT); |
| @@ -230,33 +234,76 @@ static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) | |||
| 230 | cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT); | 234 | cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT); |
| 231 | cmd |= (spi->chip_select << SPI_CMD_DEVICE_ID_SHIFT); | 235 | cmd |= (spi->chip_select << SPI_CMD_DEVICE_ID_SHIFT); |
| 232 | bcm_spi_writew(bs, cmd, SPI_CMD); | 236 | bcm_spi_writew(bs, cmd, SPI_CMD); |
| 233 | wait_for_completion(&bs->done); | ||
| 234 | 237 | ||
| 235 | /* Disable the CMD_DONE interrupt */ | 238 | /* Enable the CMD_DONE interrupt */ |
| 236 | bcm_spi_writeb(bs, 0, SPI_INT_MASK); | 239 | bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK); |
| 237 | 240 | ||
| 238 | return t->len - bs->remaining_bytes; | 241 | return t->len - bs->remaining_bytes; |
| 239 | } | 242 | } |
| 240 | 243 | ||
| 241 | static int bcm63xx_transfer(struct spi_device *spi, struct spi_message *m) | 244 | static int bcm63xx_spi_prepare_transfer(struct spi_master *master) |
| 242 | { | 245 | { |
| 243 | struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master); | 246 | struct bcm63xx_spi *bs = spi_master_get_devdata(master); |
| 244 | struct spi_transfer *t; | ||
| 245 | int ret = 0; | ||
| 246 | 247 | ||
| 247 | if (unlikely(list_empty(&m->transfers))) | 248 | pm_runtime_get_sync(&bs->pdev->dev); |
| 248 | return -EINVAL; | ||
| 249 | 249 | ||
| 250 | if (bs->stopping) | 250 | return 0; |
| 251 | return -ESHUTDOWN; | 251 | } |
| 252 | |||
| 253 | static int bcm63xx_spi_unprepare_transfer(struct spi_master *master) | ||
| 254 | { | ||
| 255 | struct bcm63xx_spi *bs = spi_master_get_devdata(master); | ||
| 256 | |||
| 257 | pm_runtime_put(&bs->pdev->dev); | ||
| 258 | |||
| 259 | return 0; | ||
| 260 | } | ||
| 261 | |||
| 262 | static int bcm63xx_spi_transfer_one(struct spi_master *master, | ||
| 263 | struct spi_message *m) | ||
| 264 | { | ||
| 265 | struct bcm63xx_spi *bs = spi_master_get_devdata(master); | ||
| 266 | struct spi_transfer *t; | ||
| 267 | struct spi_device *spi = m->spi; | ||
| 268 | int status = 0; | ||
| 269 | unsigned int timeout = 0; | ||
| 252 | 270 | ||
| 253 | list_for_each_entry(t, &m->transfers, transfer_list) { | 271 | list_for_each_entry(t, &m->transfers, transfer_list) { |
| 254 | ret += bcm63xx_txrx_bufs(spi, t); | 272 | unsigned int len = t->len; |
| 255 | } | 273 | u8 rx_tail; |
| 256 | 274 | ||
| 257 | m->complete(m->context); | 275 | status = bcm63xx_spi_check_transfer(spi, t); |
| 276 | if (status < 0) | ||
| 277 | goto exit; | ||
| 258 | 278 | ||
| 259 | return ret; | 279 | /* configure adapter for a new transfer */ |
| 280 | bcm63xx_spi_setup_transfer(spi, t); | ||
| 281 | |||
| 282 | while (len) { | ||
| 283 | /* send the data */ | ||
| 284 | len -= bcm63xx_txrx_bufs(spi, t); | ||
| 285 | |||
| 286 | timeout = wait_for_completion_timeout(&bs->done, HZ); | ||
| 287 | if (!timeout) { | ||
| 288 | status = -ETIMEDOUT; | ||
| 289 | goto exit; | ||
| 290 | } | ||
| 291 | |||
| 292 | /* read out all data */ | ||
| 293 | rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL); | ||
| 294 | |||
| 295 | /* Read out all the data */ | ||
| 296 | if (rx_tail) | ||
| 297 | memcpy_fromio(bs->rx_ptr, bs->rx_io, rx_tail); | ||
| 298 | } | ||
| 299 | |||
| 300 | m->actual_length += t->len; | ||
| 301 | } | ||
| 302 | exit: | ||
| 303 | m->status = status; | ||
| 304 | spi_finalize_current_message(master); | ||
| 305 | |||
| 306 | return 0; | ||
| 260 | } | 307 | } |
| 261 | 308 | ||
| 262 | /* This driver supports single master mode only. Hence | 309 | /* This driver supports single master mode only. Hence |
| @@ -267,39 +314,15 @@ static irqreturn_t bcm63xx_spi_interrupt(int irq, void *dev_id) | |||
| 267 | struct spi_master *master = (struct spi_master *)dev_id; | 314 | struct spi_master *master = (struct spi_master *)dev_id; |
| 268 | struct bcm63xx_spi *bs = spi_master_get_devdata(master); | 315 | struct bcm63xx_spi *bs = spi_master_get_devdata(master); |
| 269 | u8 intr; | 316 | u8 intr; |
| 270 | u16 cmd; | ||
| 271 | 317 | ||
| 272 | /* Read interupts and clear them immediately */ | 318 | /* Read interupts and clear them immediately */ |
| 273 | intr = bcm_spi_readb(bs, SPI_INT_STATUS); | 319 | intr = bcm_spi_readb(bs, SPI_INT_STATUS); |
| 274 | bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS); | 320 | bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS); |
| 275 | bcm_spi_writeb(bs, 0, SPI_INT_MASK); | 321 | bcm_spi_writeb(bs, 0, SPI_INT_MASK); |
| 276 | 322 | ||
| 277 | /* A tansfer completed */ | 323 | /* A transfer completed */ |
| 278 | if (intr & SPI_INTR_CMD_DONE) { | 324 | if (intr & SPI_INTR_CMD_DONE) |
| 279 | u8 rx_tail; | 325 | complete(&bs->done); |
| 280 | |||
| 281 | rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL); | ||
| 282 | |||
| 283 | /* Read out all the data */ | ||
| 284 | if (rx_tail) | ||
| 285 | memcpy_fromio(bs->rx_ptr, bs->rx_io, rx_tail); | ||
| 286 | |||
| 287 | /* See if there is more data to send */ | ||
| 288 | if (bs->remaining_bytes > 0) { | ||
| 289 | bcm63xx_spi_fill_tx_fifo(bs); | ||
| 290 | |||
| 291 | /* Start the transfer */ | ||
| 292 | bcm_spi_writew(bs, SPI_HD_W << SPI_MSG_TYPE_SHIFT, | ||
| 293 | SPI_MSG_CTL); | ||
| 294 | cmd = bcm_spi_readw(bs, SPI_CMD); | ||
| 295 | cmd |= SPI_CMD_START_IMMEDIATE; | ||
| 296 | cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT); | ||
| 297 | bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK); | ||
| 298 | bcm_spi_writew(bs, cmd, SPI_CMD); | ||
| 299 | } else { | ||
| 300 | complete(&bs->done); | ||
| 301 | } | ||
| 302 | } | ||
| 303 | 326 | ||
| 304 | return IRQ_HANDLED; | 327 | return IRQ_HANDLED; |
| 305 | } | 328 | } |
| @@ -345,7 +368,6 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev) | |||
| 345 | } | 368 | } |
| 346 | 369 | ||
| 347 | bs = spi_master_get_devdata(master); | 370 | bs = spi_master_get_devdata(master); |
| 348 | init_completion(&bs->done); | ||
| 349 | 371 | ||
| 350 | platform_set_drvdata(pdev, master); | 372 | platform_set_drvdata(pdev, master); |
| 351 | bs->pdev = pdev; | 373 | bs->pdev = pdev; |
| @@ -379,12 +401,13 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev) | |||
| 379 | master->bus_num = pdata->bus_num; | 401 | master->bus_num = pdata->bus_num; |
| 380 | master->num_chipselect = pdata->num_chipselect; | 402 | master->num_chipselect = pdata->num_chipselect; |
| 381 | master->setup = bcm63xx_spi_setup; | 403 | master->setup = bcm63xx_spi_setup; |
| 382 | master->transfer = bcm63xx_transfer; | 404 | master->prepare_transfer_hardware = bcm63xx_spi_prepare_transfer; |
| 405 | master->unprepare_transfer_hardware = bcm63xx_spi_unprepare_transfer; | ||
| 406 | master->transfer_one_message = bcm63xx_spi_transfer_one; | ||
| 407 | master->mode_bits = MODEBITS; | ||
| 383 | bs->speed_hz = pdata->speed_hz; | 408 | bs->speed_hz = pdata->speed_hz; |
| 384 | bs->stopping = 0; | ||
| 385 | bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA)); | 409 | bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA)); |
| 386 | bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA)); | 410 | bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA)); |
| 387 | spin_lock_init(&bs->lock); | ||
| 388 | 411 | ||
| 389 | /* Initialize hardware */ | 412 | /* Initialize hardware */ |
| 390 | clk_enable(bs->clk); | 413 | clk_enable(bs->clk); |
| @@ -418,18 +441,16 @@ static int __devexit bcm63xx_spi_remove(struct platform_device *pdev) | |||
| 418 | struct spi_master *master = platform_get_drvdata(pdev); | 441 | struct spi_master *master = platform_get_drvdata(pdev); |
| 419 | struct bcm63xx_spi *bs = spi_master_get_devdata(master); | 442 | struct bcm63xx_spi *bs = spi_master_get_devdata(master); |
| 420 | 443 | ||
| 444 | spi_unregister_master(master); | ||
| 445 | |||
| 421 | /* reset spi block */ | 446 | /* reset spi block */ |
| 422 | bcm_spi_writeb(bs, 0, SPI_INT_MASK); | 447 | bcm_spi_writeb(bs, 0, SPI_INT_MASK); |
| 423 | spin_lock(&bs->lock); | ||
| 424 | bs->stopping = 1; | ||
| 425 | 448 | ||
| 426 | /* HW shutdown */ | 449 | /* HW shutdown */ |
| 427 | clk_disable(bs->clk); | 450 | clk_disable(bs->clk); |
| 428 | clk_put(bs->clk); | 451 | clk_put(bs->clk); |
| 429 | 452 | ||
| 430 | spin_unlock(&bs->lock); | ||
| 431 | platform_set_drvdata(pdev, 0); | 453 | platform_set_drvdata(pdev, 0); |
| 432 | spi_unregister_master(master); | ||
| 433 | 454 | ||
| 434 | return 0; | 455 | return 0; |
| 435 | } | 456 | } |
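The spi-bcm63xx conversion above is the standard move to the SPI core's message queue: instead of a private transfer() hook guarded by a "stopping" flag and spinlock, the driver now supplies prepare_transfer_hardware, transfer_one_message and unprepare_transfer_hardware and lets the core serialize messages. The sketch below shows the shape of that callback trio; it is a minimal illustration, not bcm63xx code — "my_hw" and my_do_one_transfer() are hypothetical stand-ins for the driver's private state and FIFO work, while the spi/pm_runtime calls are the same ones the patch relies on.

#include <linux/spi/spi.h>
#include <linux/pm_runtime.h>

struct my_hw {
	struct device *dev;		/* device used for runtime PM */
};

static int my_do_one_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	/* stand-in: would program the clock, fill the FIFO and wait */
	return 0;
}

static int my_prepare_transfer(struct spi_master *master)
{
	struct my_hw *hw = spi_master_get_devdata(master);

	pm_runtime_get_sync(hw->dev);	/* power up before a run of messages */
	return 0;
}

static int my_unprepare_transfer(struct spi_master *master)
{
	struct my_hw *hw = spi_master_get_devdata(master);

	pm_runtime_put(hw->dev);	/* allow runtime suspend once the queue drains */
	return 0;
}

static int my_transfer_one_message(struct spi_master *master,
				   struct spi_message *m)
{
	struct spi_transfer *t;
	int status = 0;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		status = my_do_one_transfer(m->spi, t);
		if (status < 0)
			break;
		m->actual_length += t->len;
	}

	m->status = status;
	spi_finalize_current_message(master);	/* always hand the message back */
	return 0;
}

Probe then only has to assign the three hooks on the spi_master, exactly as the hunk above does for bcm63xx; because the core's worker thread now serializes all messages, the old per-driver stopping flag and lock have nothing left to protect, which is why the patch deletes them.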
diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c index 248a2cc671a9..1fe51198a622 100644 --- a/drivers/spi/spi-bfin-sport.c +++ b/drivers/spi/spi-bfin-sport.c | |||
| @@ -252,19 +252,15 @@ static void | |||
| 252 | bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data) | 252 | bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data) |
| 253 | { | 253 | { |
| 254 | struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip; | 254 | struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip; |
| 255 | unsigned int bits = (drv_data->ops == &bfin_sport_transfer_ops_u8 ? 7 : 15); | ||
| 256 | 255 | ||
| 257 | bfin_sport_spi_disable(drv_data); | 256 | bfin_sport_spi_disable(drv_data); |
| 258 | dev_dbg(drv_data->dev, "restoring spi ctl state\n"); | 257 | dev_dbg(drv_data->dev, "restoring spi ctl state\n"); |
| 259 | 258 | ||
| 260 | bfin_write(&drv_data->regs->tcr1, chip->ctl_reg); | 259 | bfin_write(&drv_data->regs->tcr1, chip->ctl_reg); |
| 261 | bfin_write(&drv_data->regs->tcr2, bits); | ||
| 262 | bfin_write(&drv_data->regs->tclkdiv, chip->baud); | 260 | bfin_write(&drv_data->regs->tclkdiv, chip->baud); |
| 263 | bfin_write(&drv_data->regs->tfsdiv, bits); | ||
| 264 | SSYNC(); | 261 | SSYNC(); |
| 265 | 262 | ||
| 266 | bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS)); | 263 | bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS)); |
| 267 | bfin_write(&drv_data->regs->rcr2, bits); | ||
| 268 | SSYNC(); | 264 | SSYNC(); |
| 269 | 265 | ||
| 270 | bfin_sport_spi_cs_active(chip); | 266 | bfin_sport_spi_cs_active(chip); |
| @@ -420,11 +416,15 @@ bfin_sport_spi_pump_transfers(unsigned long data) | |||
| 420 | drv_data->cs_change = transfer->cs_change; | 416 | drv_data->cs_change = transfer->cs_change; |
| 421 | 417 | ||
| 422 | /* Bits per word setup */ | 418 | /* Bits per word setup */ |
| 423 | bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word; | 419 | bits_per_word = transfer->bits_per_word ? : |
| 424 | if (bits_per_word == 8) | 420 | message->spi->bits_per_word ? : 8; |
| 425 | drv_data->ops = &bfin_sport_transfer_ops_u8; | 421 | if (bits_per_word % 16 == 0) |
| 426 | else | ||
| 427 | drv_data->ops = &bfin_sport_transfer_ops_u16; | 422 | drv_data->ops = &bfin_sport_transfer_ops_u16; |
| 423 | else | ||
| 424 | drv_data->ops = &bfin_sport_transfer_ops_u8; | ||
| 425 | bfin_write(&drv_data->regs->tcr2, bits_per_word - 1); | ||
| 426 | bfin_write(&drv_data->regs->tfsdiv, bits_per_word - 1); | ||
| 427 | bfin_write(&drv_data->regs->rcr2, bits_per_word - 1); | ||
| 428 | 428 | ||
| 429 | drv_data->state = RUNNING_STATE; | 429 | drv_data->state = RUNNING_STATE; |
| 430 | 430 | ||
| @@ -598,11 +598,12 @@ bfin_sport_spi_setup(struct spi_device *spi) | |||
| 598 | } | 598 | } |
| 599 | chip->cs_chg_udelay = chip_info->cs_chg_udelay; | 599 | chip->cs_chg_udelay = chip_info->cs_chg_udelay; |
| 600 | chip->idle_tx_val = chip_info->idle_tx_val; | 600 | chip->idle_tx_val = chip_info->idle_tx_val; |
| 601 | spi->bits_per_word = chip_info->bits_per_word; | ||
| 602 | } | 601 | } |
| 603 | } | 602 | } |
| 604 | 603 | ||
| 605 | if (spi->bits_per_word != 8 && spi->bits_per_word != 16) { | 604 | if (spi->bits_per_word % 8) { |
| 605 | dev_err(&spi->dev, "%d bits_per_word is not supported\n", | ||
| 606 | spi->bits_per_word); | ||
| 606 | ret = -EINVAL; | 607 | ret = -EINVAL; |
| 607 | goto error; | 608 | goto error; |
| 608 | } | 609 | } |
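The spi-bfin-sport change picks the word size with a GNU "?:" fallback chain (transfer value, else per-device value, else 8) and switches between the 8-bit and 16-bit transfer ops on bits_per_word % 16, programming tcr2/tfsdiv/rcr2 with bits_per_word - 1. The small user-space program below only exercises that selection logic as a sanity check; none of it touches SPORT registers.

#include <stdio.h>

/* Mirrors the fallback chain: transfer -> device -> default of 8. */
static int pick_bits(int t_bits, int spi_bits)
{
	return t_bits ? t_bits : (spi_bits ? spi_bits : 8);
}

int main(void)
{
	int cases[][2] = { {0, 0}, {0, 16}, {8, 0}, {16, 8} };
	unsigned int i;

	for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		int bits = pick_bits(cases[i][0], cases[i][1]);

		printf("t=%-2d spi=%-2d -> %2d bits, %s ops\n",
		       cases[i][0], cases[i][1], bits,
		       (bits % 16 == 0) ? "u16" : "u8");
	}
	return 0;
}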
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c index 3b83ff8b1e2b..9bb4d4af8547 100644 --- a/drivers/spi/spi-bfin5xx.c +++ b/drivers/spi/spi-bfin5xx.c | |||
| @@ -396,7 +396,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) | |||
| 396 | /* last read */ | 396 | /* last read */ |
| 397 | if (drv_data->rx) { | 397 | if (drv_data->rx) { |
| 398 | dev_dbg(&drv_data->pdev->dev, "last read\n"); | 398 | dev_dbg(&drv_data->pdev->dev, "last read\n"); |
| 399 | if (n_bytes % 2) { | 399 | if (!(n_bytes % 2)) { |
| 400 | u16 *buf = (u16 *)drv_data->rx; | 400 | u16 *buf = (u16 *)drv_data->rx; |
| 401 | for (loop = 0; loop < n_bytes / 2; loop++) | 401 | for (loop = 0; loop < n_bytes / 2; loop++) |
| 402 | *buf++ = bfin_read(&drv_data->regs->rdbr); | 402 | *buf++ = bfin_read(&drv_data->regs->rdbr); |
| @@ -424,7 +424,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) | |||
| 424 | if (drv_data->rx && drv_data->tx) { | 424 | if (drv_data->rx && drv_data->tx) { |
| 425 | /* duplex */ | 425 | /* duplex */ |
| 426 | dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n"); | 426 | dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n"); |
| 427 | if (n_bytes % 2) { | 427 | if (!(n_bytes % 2)) { |
| 428 | u16 *buf = (u16 *)drv_data->rx; | 428 | u16 *buf = (u16 *)drv_data->rx; |
| 429 | u16 *buf2 = (u16 *)drv_data->tx; | 429 | u16 *buf2 = (u16 *)drv_data->tx; |
| 430 | for (loop = 0; loop < n_bytes / 2; loop++) { | 430 | for (loop = 0; loop < n_bytes / 2; loop++) { |
| @@ -442,7 +442,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) | |||
| 442 | } else if (drv_data->rx) { | 442 | } else if (drv_data->rx) { |
| 443 | /* read */ | 443 | /* read */ |
| 444 | dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n"); | 444 | dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n"); |
| 445 | if (n_bytes % 2) { | 445 | if (!(n_bytes % 2)) { |
| 446 | u16 *buf = (u16 *)drv_data->rx; | 446 | u16 *buf = (u16 *)drv_data->rx; |
| 447 | for (loop = 0; loop < n_bytes / 2; loop++) { | 447 | for (loop = 0; loop < n_bytes / 2; loop++) { |
| 448 | *buf++ = bfin_read(&drv_data->regs->rdbr); | 448 | *buf++ = bfin_read(&drv_data->regs->rdbr); |
| @@ -458,7 +458,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) | |||
| 458 | } else if (drv_data->tx) { | 458 | } else if (drv_data->tx) { |
| 459 | /* write */ | 459 | /* write */ |
| 460 | dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n"); | 460 | dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n"); |
| 461 | if (n_bytes % 2) { | 461 | if (!(n_bytes % 2)) { |
| 462 | u16 *buf = (u16 *)drv_data->tx; | 462 | u16 *buf = (u16 *)drv_data->tx; |
| 463 | for (loop = 0; loop < n_bytes / 2; loop++) { | 463 | for (loop = 0; loop < n_bytes / 2; loop++) { |
| 464 | bfin_read(&drv_data->regs->rdbr); | 464 | bfin_read(&drv_data->regs->rdbr); |
| @@ -587,6 +587,7 @@ static void bfin_spi_pump_transfers(unsigned long data) | |||
| 587 | if (message->state == DONE_STATE) { | 587 | if (message->state == DONE_STATE) { |
| 588 | dev_dbg(&drv_data->pdev->dev, "transfer: all done!\n"); | 588 | dev_dbg(&drv_data->pdev->dev, "transfer: all done!\n"); |
| 589 | message->status = 0; | 589 | message->status = 0; |
| 590 | bfin_spi_flush(drv_data); | ||
| 590 | bfin_spi_giveback(drv_data); | 591 | bfin_spi_giveback(drv_data); |
| 591 | return; | 592 | return; |
| 592 | } | 593 | } |
| @@ -870,8 +871,10 @@ static void bfin_spi_pump_transfers(unsigned long data) | |||
| 870 | message->actual_length += drv_data->len_in_bytes; | 871 | message->actual_length += drv_data->len_in_bytes; |
| 871 | /* Move to next transfer of this msg */ | 872 | /* Move to next transfer of this msg */ |
| 872 | message->state = bfin_spi_next_transfer(drv_data); | 873 | message->state = bfin_spi_next_transfer(drv_data); |
| 873 | if (drv_data->cs_change) | 874 | if (drv_data->cs_change && message->state != DONE_STATE) { |
| 875 | bfin_spi_flush(drv_data); | ||
| 874 | bfin_spi_cs_deactive(drv_data, chip); | 876 | bfin_spi_cs_deactive(drv_data, chip); |
| 877 | } | ||
| 875 | } | 878 | } |
| 876 | 879 | ||
| 877 | /* Schedule next transfer tasklet */ | 880 | /* Schedule next transfer tasklet */ |
| @@ -1026,7 +1029,6 @@ static int bfin_spi_setup(struct spi_device *spi) | |||
| 1026 | chip->cs_chg_udelay = chip_info->cs_chg_udelay; | 1029 | chip->cs_chg_udelay = chip_info->cs_chg_udelay; |
| 1027 | chip->idle_tx_val = chip_info->idle_tx_val; | 1030 | chip->idle_tx_val = chip_info->idle_tx_val; |
| 1028 | chip->pio_interrupt = chip_info->pio_interrupt; | 1031 | chip->pio_interrupt = chip_info->pio_interrupt; |
| 1029 | spi->bits_per_word = chip_info->bits_per_word; | ||
| 1030 | } else { | 1032 | } else { |
| 1031 | /* force a default base state */ | 1033 | /* force a default base state */ |
| 1032 | chip->ctl_reg &= bfin_ctl_reg; | 1034 | chip->ctl_reg &= bfin_ctl_reg; |
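The first four spi-bfin5xx hunks fix the same inverted test: the 16-bit FIFO path must be taken when the per-word byte count is even, so the condition has to be !(n_bytes % 2). A stand-alone check of the corrected predicate:

#include <assert.h>
#include <stddef.h>

/* Nonzero when the 16-bit read/write loop should be used. */
static int use_u16_path(size_t n_bytes)
{
	return !(n_bytes % 2);
}

int main(void)
{
	assert(use_u16_path(2));	/* even word sizes -> u16 loop */
	assert(use_u16_path(4));
	assert(!use_u16_path(1));	/* odd word sizes -> u8 loop */
	assert(!use_u16_path(3));
	return 0;
}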
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c index 6db2887852d6..e8055073e84d 100644 --- a/drivers/spi/spi-ep93xx.c +++ b/drivers/spi/spi-ep93xx.c | |||
| @@ -545,13 +545,12 @@ static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi) | |||
| 545 | * in case of failure. | 545 | * in case of failure. |
| 546 | */ | 546 | */ |
| 547 | static struct dma_async_tx_descriptor * | 547 | static struct dma_async_tx_descriptor * |
| 548 | ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) | 548 | ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir) |
| 549 | { | 549 | { |
| 550 | struct spi_transfer *t = espi->current_msg->state; | 550 | struct spi_transfer *t = espi->current_msg->state; |
| 551 | struct dma_async_tx_descriptor *txd; | 551 | struct dma_async_tx_descriptor *txd; |
| 552 | enum dma_slave_buswidth buswidth; | 552 | enum dma_slave_buswidth buswidth; |
| 553 | struct dma_slave_config conf; | 553 | struct dma_slave_config conf; |
| 554 | enum dma_transfer_direction slave_dirn; | ||
| 555 | struct scatterlist *sg; | 554 | struct scatterlist *sg; |
| 556 | struct sg_table *sgt; | 555 | struct sg_table *sgt; |
| 557 | struct dma_chan *chan; | 556 | struct dma_chan *chan; |
| @@ -567,14 +566,13 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) | |||
| 567 | memset(&conf, 0, sizeof(conf)); | 566 | memset(&conf, 0, sizeof(conf)); |
| 568 | conf.direction = dir; | 567 | conf.direction = dir; |
| 569 | 568 | ||
| 570 | if (dir == DMA_FROM_DEVICE) { | 569 | if (dir == DMA_DEV_TO_MEM) { |
| 571 | chan = espi->dma_rx; | 570 | chan = espi->dma_rx; |
| 572 | buf = t->rx_buf; | 571 | buf = t->rx_buf; |
| 573 | sgt = &espi->rx_sgt; | 572 | sgt = &espi->rx_sgt; |
| 574 | 573 | ||
| 575 | conf.src_addr = espi->sspdr_phys; | 574 | conf.src_addr = espi->sspdr_phys; |
| 576 | conf.src_addr_width = buswidth; | 575 | conf.src_addr_width = buswidth; |
| 577 | slave_dirn = DMA_DEV_TO_MEM; | ||
| 578 | } else { | 576 | } else { |
| 579 | chan = espi->dma_tx; | 577 | chan = espi->dma_tx; |
| 580 | buf = t->tx_buf; | 578 | buf = t->tx_buf; |
| @@ -582,7 +580,6 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) | |||
| 582 | 580 | ||
| 583 | conf.dst_addr = espi->sspdr_phys; | 581 | conf.dst_addr = espi->sspdr_phys; |
| 584 | conf.dst_addr_width = buswidth; | 582 | conf.dst_addr_width = buswidth; |
| 585 | slave_dirn = DMA_MEM_TO_DEV; | ||
| 586 | } | 583 | } |
| 587 | 584 | ||
| 588 | ret = dmaengine_slave_config(chan, &conf); | 585 | ret = dmaengine_slave_config(chan, &conf); |
| @@ -633,8 +630,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) | |||
| 633 | if (!nents) | 630 | if (!nents) |
| 634 | return ERR_PTR(-ENOMEM); | 631 | return ERR_PTR(-ENOMEM); |
| 635 | 632 | ||
| 636 | txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, | 633 | txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK); |
| 637 | slave_dirn, DMA_CTRL_ACK); | ||
| 638 | if (!txd) { | 634 | if (!txd) { |
| 639 | dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); | 635 | dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); |
| 640 | return ERR_PTR(-ENOMEM); | 636 | return ERR_PTR(-ENOMEM); |
| @@ -651,12 +647,12 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) | |||
| 651 | * unmapped. | 647 | * unmapped. |
| 652 | */ | 648 | */ |
| 653 | static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi, | 649 | static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi, |
| 654 | enum dma_data_direction dir) | 650 | enum dma_transfer_direction dir) |
| 655 | { | 651 | { |
| 656 | struct dma_chan *chan; | 652 | struct dma_chan *chan; |
| 657 | struct sg_table *sgt; | 653 | struct sg_table *sgt; |
| 658 | 654 | ||
| 659 | if (dir == DMA_FROM_DEVICE) { | 655 | if (dir == DMA_DEV_TO_MEM) { |
| 660 | chan = espi->dma_rx; | 656 | chan = espi->dma_rx; |
| 661 | sgt = &espi->rx_sgt; | 657 | sgt = &espi->rx_sgt; |
| 662 | } else { | 658 | } else { |
| @@ -677,16 +673,16 @@ static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi) | |||
| 677 | struct spi_message *msg = espi->current_msg; | 673 | struct spi_message *msg = espi->current_msg; |
| 678 | struct dma_async_tx_descriptor *rxd, *txd; | 674 | struct dma_async_tx_descriptor *rxd, *txd; |
| 679 | 675 | ||
| 680 | rxd = ep93xx_spi_dma_prepare(espi, DMA_FROM_DEVICE); | 676 | rxd = ep93xx_spi_dma_prepare(espi, DMA_DEV_TO_MEM); |
| 681 | if (IS_ERR(rxd)) { | 677 | if (IS_ERR(rxd)) { |
| 682 | dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd)); | 678 | dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd)); |
| 683 | msg->status = PTR_ERR(rxd); | 679 | msg->status = PTR_ERR(rxd); |
| 684 | return; | 680 | return; |
| 685 | } | 681 | } |
| 686 | 682 | ||
| 687 | txd = ep93xx_spi_dma_prepare(espi, DMA_TO_DEVICE); | 683 | txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV); |
| 688 | if (IS_ERR(txd)) { | 684 | if (IS_ERR(txd)) { |
| 689 | ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE); | 685 | ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM); |
| 690 | dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd)); | 686 | dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd)); |
| 691 | msg->status = PTR_ERR(txd); | 687 | msg->status = PTR_ERR(txd); |
| 692 | return; | 688 | return; |
| @@ -705,8 +701,8 @@ static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi) | |||
| 705 | 701 | ||
| 706 | wait_for_completion(&espi->wait); | 702 | wait_for_completion(&espi->wait); |
| 707 | 703 | ||
| 708 | ep93xx_spi_dma_finish(espi, DMA_TO_DEVICE); | 704 | ep93xx_spi_dma_finish(espi, DMA_MEM_TO_DEV); |
| 709 | ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE); | 705 | ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM); |
| 710 | } | 706 | } |
| 711 | 707 | ||
| 712 | /** | 708 | /** |
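The spi-ep93xx change collapses the two direction enums into one: the same enum dma_transfer_direction value (DMA_DEV_TO_MEM or DMA_MEM_TO_DEV) now feeds both dmaengine_slave_config() and dmaengine_prep_slave_sg(), instead of translating from dma_data_direction along the way. A hedged sketch of that single-direction flow; everything except the dmaengine calls (the channel, sg table and device FIFO address) is assumed to be set up elsewhere.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static struct dma_async_tx_descriptor *
prep_one_dir(struct dma_chan *chan, struct sg_table *sgt, int nents,
	     dma_addr_t dev_addr, enum dma_transfer_direction dir)
{
	struct dma_slave_config conf = { .direction = dir };

	if (dir == DMA_DEV_TO_MEM) {		/* peripheral -> memory (rx) */
		conf.src_addr = dev_addr;
		conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	} else {				/* memory -> peripheral (tx) */
		conf.dst_addr = dev_addr;
		conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	}

	if (dmaengine_slave_config(chan, &conf))
		return NULL;

	/* the very same "dir" builds the slave descriptor */
	return dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir,
				       DMA_CTRL_ACK);
}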
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index 09c925aaf320..400ae2121a2a 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c | |||
| @@ -1667,9 +1667,15 @@ static int calculate_effective_freq(struct pl022 *pl022, int freq, struct | |||
| 1667 | /* cpsdvsr = 254 & scr = 255 */ | 1667 | /* cpsdvsr = 254 & scr = 255 */ |
| 1668 | min_tclk = spi_rate(rate, CPSDVR_MAX, SCR_MAX); | 1668 | min_tclk = spi_rate(rate, CPSDVR_MAX, SCR_MAX); |
| 1669 | 1669 | ||
| 1670 | if (!((freq <= max_tclk) && (freq >= min_tclk))) { | 1670 | if (freq > max_tclk) |
| 1671 | dev_warn(&pl022->adev->dev, | ||
| 1672 | "Max speed that can be programmed is %d Hz, you requested %d\n", | ||
| 1673 | max_tclk, freq); | ||
| 1674 | |||
| 1675 | if (freq < min_tclk) { | ||
| 1671 | dev_err(&pl022->adev->dev, | 1676 | dev_err(&pl022->adev->dev, |
| 1672 | "controller data is incorrect: out of range frequency"); | 1677 | "Requested frequency: %d Hz is less than minimum possible %d Hz\n", |
| 1678 | freq, min_tclk); | ||
| 1673 | return -EINVAL; | 1679 | return -EINVAL; |
| 1674 | } | 1680 | } |
| 1675 | 1681 | ||
| @@ -1681,26 +1687,37 @@ static int calculate_effective_freq(struct pl022 *pl022, int freq, struct | |||
| 1681 | while (scr <= SCR_MAX) { | 1687 | while (scr <= SCR_MAX) { |
| 1682 | tmp = spi_rate(rate, cpsdvsr, scr); | 1688 | tmp = spi_rate(rate, cpsdvsr, scr); |
| 1683 | 1689 | ||
| 1684 | if (tmp > freq) | 1690 | if (tmp > freq) { |
| 1691 | /* we need lower freq */ | ||
| 1685 | scr++; | 1692 | scr++; |
| 1693 | continue; | ||
| 1694 | } | ||
| 1695 | |||
| 1686 | /* | 1696 | /* |
| 1687 | * If found exact value, update and break. | 1697 | * If found exact value, mark found and break. |
| 1688 | * If found more closer value, update and continue. | 1698 | * If found more closer value, update and break. |
| 1689 | */ | 1699 | */ |
| 1690 | else if ((tmp == freq) || (tmp > best_freq)) { | 1700 | if (tmp > best_freq) { |
| 1691 | best_freq = tmp; | 1701 | best_freq = tmp; |
| 1692 | best_cpsdvsr = cpsdvsr; | 1702 | best_cpsdvsr = cpsdvsr; |
| 1693 | best_scr = scr; | 1703 | best_scr = scr; |
| 1694 | 1704 | ||
| 1695 | if (tmp == freq) | 1705 | if (tmp == freq) |
| 1696 | break; | 1706 | found = 1; |
| 1697 | } | 1707 | } |
| 1698 | scr++; | 1708 | /* |
| 1709 | * increased scr will give lower rates, which are not | ||
| 1710 | * required | ||
| 1711 | */ | ||
| 1712 | break; | ||
| 1699 | } | 1713 | } |
| 1700 | cpsdvsr += 2; | 1714 | cpsdvsr += 2; |
| 1701 | scr = SCR_MIN; | 1715 | scr = SCR_MIN; |
| 1702 | } | 1716 | } |
| 1703 | 1717 | ||
| 1718 | WARN(!best_freq, "pl022: Matching cpsdvsr and scr not found for %d Hz rate \n", | ||
| 1719 | freq); | ||
| 1720 | |||
| 1704 | clk_freq->cpsdvsr = (u8) (best_cpsdvsr & 0xFF); | 1721 | clk_freq->cpsdvsr = (u8) (best_cpsdvsr & 0xFF); |
| 1705 | clk_freq->scr = (u8) (best_scr & 0xFF); | 1722 | clk_freq->scr = (u8) (best_scr & 0xFF); |
| 1706 | dev_dbg(&pl022->adev->dev, | 1723 | dev_dbg(&pl022->adev->dev, |
| @@ -1823,9 +1840,12 @@ static int pl022_setup(struct spi_device *spi) | |||
| 1823 | } else | 1840 | } else |
| 1824 | chip->cs_control = chip_info->cs_control; | 1841 | chip->cs_control = chip_info->cs_control; |
| 1825 | 1842 | ||
| 1826 | if (bits <= 3) { | 1843 | /* Check bits per word with vendor specific range */ |
| 1827 | /* PL022 doesn't support less than 4-bits */ | 1844 | if ((bits <= 3) || (bits > pl022->vendor->max_bpw)) { |
| 1828 | status = -ENOTSUPP; | 1845 | status = -ENOTSUPP; |
| 1846 | dev_err(&spi->dev, "illegal data size for this controller!\n"); | ||
| 1847 | dev_err(&spi->dev, "This controller can only handle 4 <= n <= %d bit words\n", | ||
| 1848 | pl022->vendor->max_bpw); | ||
| 1829 | goto err_config_params; | 1849 | goto err_config_params; |
| 1830 | } else if (bits <= 8) { | 1850 | } else if (bits <= 8) { |
| 1831 | dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n"); | 1851 | dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n"); |
| @@ -1838,20 +1858,10 @@ static int pl022_setup(struct spi_device *spi) | |||
| 1838 | chip->read = READING_U16; | 1858 | chip->read = READING_U16; |
| 1839 | chip->write = WRITING_U16; | 1859 | chip->write = WRITING_U16; |
| 1840 | } else { | 1860 | } else { |
| 1841 | if (pl022->vendor->max_bpw >= 32) { | 1861 | dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n"); |
| 1842 | dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n"); | 1862 | chip->n_bytes = 4; |
| 1843 | chip->n_bytes = 4; | 1863 | chip->read = READING_U32; |
| 1844 | chip->read = READING_U32; | 1864 | chip->write = WRITING_U32; |
| 1845 | chip->write = WRITING_U32; | ||
| 1846 | } else { | ||
| 1847 | dev_err(&spi->dev, | ||
| 1848 | "illegal data size for this controller!\n"); | ||
| 1849 | dev_err(&spi->dev, | ||
| 1850 | "a standard pl022 can only handle " | ||
| 1851 | "1 <= n <= 16 bit words\n"); | ||
| 1852 | status = -ENOTSUPP; | ||
| 1853 | goto err_config_params; | ||
| 1854 | } | ||
| 1855 | } | 1865 | } |
| 1856 | 1866 | ||
| 1857 | /* Now Initialize all register settings required for this chip */ | 1867 | /* Now Initialize all register settings required for this chip */ |
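The reworked pl022 rate search warns and clamps when the request is above what the block can do, errors out only when it is below the minimum, and then walks cpsdvsr (even values 2..254) and scr (0..255) keeping the closest rate that does not overshoot; once a divisor pair undershoots, larger scr values can only be slower, so the inner loop breaks. The search is pure arithmetic (effective rate = rate / (cpsdvsr * (1 + scr))), so it can be tried in isolation; the program below mirrors it with the driver's limits but is otherwise independent of the driver.

#include <stdio.h>

#define CPSDVR_MIN 2
#define CPSDVR_MAX 254
#define SCR_MIN    0
#define SCR_MAX    255

static unsigned long spi_rate(unsigned long rate, unsigned int cpsdvsr,
			      unsigned int scr)
{
	return rate / (cpsdvsr * (1 + scr));
}

static unsigned long best_rate(unsigned long rate, unsigned long freq,
			       unsigned int *best_cpsdvsr,
			       unsigned int *best_scr)
{
	unsigned long best_freq = 0, tmp;
	unsigned int cpsdvsr = CPSDVR_MIN, scr = SCR_MIN;
	int found = 0;

	while ((cpsdvsr <= CPSDVR_MAX) && !found) {
		while (scr <= SCR_MAX) {
			tmp = spi_rate(rate, cpsdvsr, scr);
			if (tmp > freq) {	/* still too fast, slow down */
				scr++;
				continue;
			}
			if (tmp > best_freq) {	/* closest so far, never above freq */
				best_freq = tmp;
				*best_cpsdvsr = cpsdvsr;
				*best_scr = scr;
				if (tmp == freq)
					found = 1;
			}
			break;			/* larger scr only gets slower */
		}
		cpsdvsr += 2;
		scr = SCR_MIN;
	}
	return best_freq;
}

int main(void)
{
	unsigned int cpsdvsr = 0, scr = 0;
	unsigned long got = best_rate(24000000, 1000000, &cpsdvsr, &scr);

	printf("1 MHz from a 24 MHz clock -> %lu Hz (cpsdvsr=%u, scr=%u)\n",
	       got, cpsdvsr, scr);
	return 0;
}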
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c index 400df8cbee53..d91751f9ffe8 100644 --- a/drivers/staging/octeon/ethernet-rx.c +++ b/drivers/staging/octeon/ethernet-rx.c | |||
| @@ -36,6 +36,7 @@ | |||
| 36 | #include <linux/prefetch.h> | 36 | #include <linux/prefetch.h> |
| 37 | #include <linux/ratelimit.h> | 37 | #include <linux/ratelimit.h> |
| 38 | #include <linux/smp.h> | 38 | #include <linux/smp.h> |
| 39 | #include <linux/interrupt.h> | ||
| 39 | #include <net/dst.h> | 40 | #include <net/dst.h> |
| 40 | #ifdef CONFIG_XFRM | 41 | #ifdef CONFIG_XFRM |
| 41 | #include <linux/xfrm.h> | 42 | #include <linux/xfrm.h> |
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index 56d74dc2fbd5..91a97b3e45c6 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include <linux/ip.h> | 32 | #include <linux/ip.h> |
| 33 | #include <linux/ratelimit.h> | 33 | #include <linux/ratelimit.h> |
| 34 | #include <linux/string.h> | 34 | #include <linux/string.h> |
| 35 | #include <linux/interrupt.h> | ||
| 35 | #include <net/dst.h> | 36 | #include <net/dst.h> |
| 36 | #ifdef CONFIG_XFRM | 37 | #ifdef CONFIG_XFRM |
| 37 | #include <linux/xfrm.h> | 38 | #include <linux/xfrm.h> |
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index 9112cd882154..60cba8194de3 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | #include <linux/etherdevice.h> | 31 | #include <linux/etherdevice.h> |
| 32 | #include <linux/phy.h> | 32 | #include <linux/phy.h> |
| 33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
| 34 | #include <linux/interrupt.h> | ||
| 34 | 35 | ||
| 35 | #include <net/dst.h> | 36 | #include <net/dst.h> |
| 36 | 37 | ||
diff --git a/drivers/staging/ozwpan/ozpd.c b/drivers/staging/ozwpan/ozpd.c index 2b45d3d1800c..04cd57f2a6da 100644 --- a/drivers/staging/ozwpan/ozpd.c +++ b/drivers/staging/ozwpan/ozpd.c | |||
| @@ -383,8 +383,6 @@ static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f) | |||
| 383 | pd->tx_pool = &f->link; | 383 | pd->tx_pool = &f->link; |
| 384 | pd->tx_pool_count++; | 384 | pd->tx_pool_count++; |
| 385 | f = 0; | 385 | f = 0; |
| 386 | } else { | ||
| 387 | kfree(f); | ||
| 388 | } | 386 | } |
| 389 | spin_unlock_bh(&pd->tx_frame_lock); | 387 | spin_unlock_bh(&pd->tx_frame_lock); |
| 390 | if (f) | 388 | if (f) |
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c index 7862513cc295..9cf29fcea11e 100644 --- a/drivers/staging/tidspbridge/core/tiomap3430.c +++ b/drivers/staging/tidspbridge/core/tiomap3430.c | |||
| @@ -79,10 +79,6 @@ | |||
| 79 | #define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190) | 79 | #define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190) |
| 80 | #define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194) | 80 | #define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194) |
| 81 | 81 | ||
| 82 | #define OMAP343X_CTRL_REGADDR(reg) \ | ||
| 83 | OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg)) | ||
| 84 | |||
| 85 | |||
| 86 | /* Forward Declarations: */ | 82 | /* Forward Declarations: */ |
| 87 | static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt); | 83 | static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt); |
| 88 | static int bridge_brd_read(struct bridge_dev_context *dev_ctxt, | 84 | static int bridge_brd_read(struct bridge_dev_context *dev_ctxt, |
| @@ -418,19 +414,27 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, | |||
| 418 | 414 | ||
| 419 | /* Assert RST1 i.e only the RST only for DSP megacell */ | 415 | /* Assert RST1 i.e only the RST only for DSP megacell */ |
| 420 | if (!status) { | 416 | if (!status) { |
| 417 | /* | ||
| 418 | * XXX: ioremapping MUST be removed once ctrl | ||
| 419 | * function is made available. | ||
| 420 | */ | ||
| 421 | void __iomem *ctrl = ioremap(OMAP343X_CTRL_BASE, SZ_4K); | ||
| 422 | if (!ctrl) | ||
| 423 | return -ENOMEM; | ||
| 424 | |||
| 421 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, | 425 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, |
| 422 | OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, | 426 | OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, |
| 423 | OMAP2_RM_RSTCTRL); | 427 | OMAP2_RM_RSTCTRL); |
| 424 | /* Mask address with 1K for compatibility */ | 428 | /* Mask address with 1K for compatibility */ |
| 425 | __raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK, | 429 | __raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK, |
| 426 | OMAP343X_CTRL_REGADDR( | 430 | ctrl + OMAP343X_CONTROL_IVA2_BOOTADDR); |
| 427 | OMAP343X_CONTROL_IVA2_BOOTADDR)); | ||
| 428 | /* | 431 | /* |
| 429 | * Set bootmode to self loop if dsp_debug flag is true | 432 | * Set bootmode to self loop if dsp_debug flag is true |
| 430 | */ | 433 | */ |
| 431 | __raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0, | 434 | __raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0, |
| 432 | OMAP343X_CTRL_REGADDR( | 435 | ctrl + OMAP343X_CONTROL_IVA2_BOOTMOD); |
| 433 | OMAP343X_CONTROL_IVA2_BOOTMOD)); | 436 | |
| 437 | iounmap(ctrl); | ||
| 434 | } | 438 | } |
| 435 | } | 439 | } |
| 436 | if (!status) { | 440 | if (!status) { |
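The tiomap3430 hunk swaps the static OMAP343X_CTRL_REGADDR() mapping macro for a short-lived ioremap of the System Control Module: map, write the two IVA2 boot registers, unmap (the in-code XXX note already flags this as a stopgap until a proper control function exists). A minimal hedged sketch of that map/write/unmap pattern; the base and offset parameters are placeholders, not the OMAP values.

#include <linux/io.h>
#include <linux/errno.h>

/* Map a 4 KiB control block, poke one register, tear the mapping down. */
static int write_ctrl_reg(unsigned long phys_base, unsigned long reg_off,
			  u32 value)
{
	void __iomem *ctrl = ioremap(phys_base, SZ_4K);

	if (!ctrl)
		return -ENOMEM;		/* nothing mapped, nothing to undo */

	__raw_writel(value, ctrl + reg_off);
	iounmap(ctrl);
	return 0;
}

The wdt.c hunk a little further down follows the same rule in long-lived form: ioremap in dsp_wdt_init() with an -ENOMEM check, iounmap in dsp_wdt_exit().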
diff --git a/drivers/staging/tidspbridge/core/wdt.c b/drivers/staging/tidspbridge/core/wdt.c index 70055c8111ed..870f934f4f3b 100644 --- a/drivers/staging/tidspbridge/core/wdt.c +++ b/drivers/staging/tidspbridge/core/wdt.c | |||
| @@ -53,7 +53,10 @@ int dsp_wdt_init(void) | |||
| 53 | int ret = 0; | 53 | int ret = 0; |
| 54 | 54 | ||
| 55 | dsp_wdt.sm_wdt = NULL; | 55 | dsp_wdt.sm_wdt = NULL; |
| 56 | dsp_wdt.reg_base = OMAP2_L4_IO_ADDRESS(OMAP34XX_WDT3_BASE); | 56 | dsp_wdt.reg_base = ioremap(OMAP34XX_WDT3_BASE, SZ_4K); |
| 57 | if (!dsp_wdt.reg_base) | ||
| 58 | return -ENOMEM; | ||
| 59 | |||
| 57 | tasklet_init(&dsp_wdt.wdt3_tasklet, dsp_wdt_dpc, 0); | 60 | tasklet_init(&dsp_wdt.wdt3_tasklet, dsp_wdt_dpc, 0); |
| 58 | 61 | ||
| 59 | dsp_wdt.fclk = clk_get(NULL, "wdt3_fck"); | 62 | dsp_wdt.fclk = clk_get(NULL, "wdt3_fck"); |
| @@ -99,6 +102,9 @@ void dsp_wdt_exit(void) | |||
| 99 | dsp_wdt.fclk = NULL; | 102 | dsp_wdt.fclk = NULL; |
| 100 | dsp_wdt.iclk = NULL; | 103 | dsp_wdt.iclk = NULL; |
| 101 | dsp_wdt.sm_wdt = NULL; | 104 | dsp_wdt.sm_wdt = NULL; |
| 105 | |||
| 106 | if (dsp_wdt.reg_base) | ||
| 107 | iounmap(dsp_wdt.reg_base); | ||
| 102 | dsp_wdt.reg_base = NULL; | 108 | dsp_wdt.reg_base = NULL; |
| 103 | } | 109 | } |
| 104 | 110 | ||
diff --git a/drivers/staging/zcache/Kconfig b/drivers/staging/zcache/Kconfig index 3ed2c8f656a5..7048e01f0817 100644 --- a/drivers/staging/zcache/Kconfig +++ b/drivers/staging/zcache/Kconfig | |||
| @@ -2,7 +2,7 @@ config ZCACHE | |||
| 2 | bool "Dynamic compression of swap pages and clean pagecache pages" | 2 | bool "Dynamic compression of swap pages and clean pagecache pages" |
| 3 | # X86 dependency is because zsmalloc uses non-portable pte/tlb | 3 | # X86 dependency is because zsmalloc uses non-portable pte/tlb |
| 4 | # functions | 4 | # functions |
| 5 | depends on (CLEANCACHE || FRONTSWAP) && CRYPTO && X86 | 5 | depends on (CLEANCACHE || FRONTSWAP) && CRYPTO=y && X86 |
| 6 | select ZSMALLOC | 6 | select ZSMALLOC |
| 7 | select CRYPTO_LZO | 7 | select CRYPTO_LZO |
| 8 | default n | 8 | default n |
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c index 08ebe901bb59..654755a990df 100644 --- a/drivers/tty/serial/pmac_zilog.c +++ b/drivers/tty/serial/pmac_zilog.c | |||
| @@ -469,7 +469,7 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id) | |||
| 469 | tty = NULL; | 469 | tty = NULL; |
| 470 | if (r3 & (CHAEXT | CHATxIP | CHARxIP)) { | 470 | if (r3 & (CHAEXT | CHATxIP | CHARxIP)) { |
| 471 | if (!ZS_IS_OPEN(uap_a)) { | 471 | if (!ZS_IS_OPEN(uap_a)) { |
| 472 | pmz_debug("ChanA interrupt while open !\n"); | 472 | pmz_debug("ChanA interrupt while not open !\n"); |
| 473 | goto skip_a; | 473 | goto skip_a; |
| 474 | } | 474 | } |
| 475 | write_zsreg(uap_a, R0, RES_H_IUS); | 475 | write_zsreg(uap_a, R0, RES_H_IUS); |
| @@ -493,8 +493,8 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id) | |||
| 493 | spin_lock(&uap_b->port.lock); | 493 | spin_lock(&uap_b->port.lock); |
| 494 | tty = NULL; | 494 | tty = NULL; |
| 495 | if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) { | 495 | if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) { |
| 496 | if (!ZS_IS_OPEN(uap_a)) { | 496 | if (!ZS_IS_OPEN(uap_b)) { |
| 497 | pmz_debug("ChanB interrupt while open !\n"); | 497 | pmz_debug("ChanB interrupt while not open !\n"); |
| 498 | goto skip_b; | 498 | goto skip_b; |
| 499 | } | 499 | } |
| 500 | write_zsreg(uap_b, R0, RES_H_IUS); | 500 | write_zsreg(uap_b, R0, RES_H_IUS); |
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index c6f6560d436c..0bb2b3248dad 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c | |||
| @@ -157,8 +157,9 @@ static void wdm_out_callback(struct urb *urb) | |||
| 157 | spin_lock(&desc->iuspin); | 157 | spin_lock(&desc->iuspin); |
| 158 | desc->werr = urb->status; | 158 | desc->werr = urb->status; |
| 159 | spin_unlock(&desc->iuspin); | 159 | spin_unlock(&desc->iuspin); |
| 160 | clear_bit(WDM_IN_USE, &desc->flags); | ||
| 161 | kfree(desc->outbuf); | 160 | kfree(desc->outbuf); |
| 161 | desc->outbuf = NULL; | ||
| 162 | clear_bit(WDM_IN_USE, &desc->flags); | ||
| 162 | wake_up(&desc->wait); | 163 | wake_up(&desc->wait); |
| 163 | } | 164 | } |
| 164 | 165 | ||
| @@ -338,7 +339,7 @@ static ssize_t wdm_write | |||
| 338 | if (we < 0) | 339 | if (we < 0) |
| 339 | return -EIO; | 340 | return -EIO; |
| 340 | 341 | ||
| 341 | desc->outbuf = buf = kmalloc(count, GFP_KERNEL); | 342 | buf = kmalloc(count, GFP_KERNEL); |
| 342 | if (!buf) { | 343 | if (!buf) { |
| 343 | rv = -ENOMEM; | 344 | rv = -ENOMEM; |
| 344 | goto outnl; | 345 | goto outnl; |
| @@ -406,10 +407,12 @@ static ssize_t wdm_write | |||
| 406 | req->wIndex = desc->inum; | 407 | req->wIndex = desc->inum; |
| 407 | req->wLength = cpu_to_le16(count); | 408 | req->wLength = cpu_to_le16(count); |
| 408 | set_bit(WDM_IN_USE, &desc->flags); | 409 | set_bit(WDM_IN_USE, &desc->flags); |
| 410 | desc->outbuf = buf; | ||
| 409 | 411 | ||
| 410 | rv = usb_submit_urb(desc->command, GFP_KERNEL); | 412 | rv = usb_submit_urb(desc->command, GFP_KERNEL); |
| 411 | if (rv < 0) { | 413 | if (rv < 0) { |
| 412 | kfree(buf); | 414 | kfree(buf); |
| 415 | desc->outbuf = NULL; | ||
| 413 | clear_bit(WDM_IN_USE, &desc->flags); | 416 | clear_bit(WDM_IN_USE, &desc->flags); |
| 414 | dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv); | 417 | dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv); |
| 415 | } else { | 418 | } else { |
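The cdc-wdm reordering narrows the lifetime of desc->outbuf to the window in which WDM_IN_USE is set: the write path only publishes the buffer right before usb_submit_urb() and takes it back (kfree plus NULL) on submission failure, and the completion handler frees and clears the pointer before clearing the flag and waking waiters, so nobody woken by the flag can observe a stale buffer. Condensed from the two patched paths, with the ordering spelled out in comments; the surrounding locking and error handling of wdm_write()/wdm_out_callback() is elided.

/* submit side: publish outbuf only once the URB really goes out */
set_bit(WDM_IN_USE, &desc->flags);
desc->outbuf = buf;

rv = usb_submit_urb(desc->command, GFP_KERNEL);
if (rv < 0) {
	kfree(buf);
	desc->outbuf = NULL;			/* never leave a dangling pointer */
	clear_bit(WDM_IN_USE, &desc->flags);
}

/* completion side: free and clear before the flag, then wake */
kfree(desc->outbuf);
desc->outbuf = NULL;
clear_bit(WDM_IN_USE, &desc->flags);
wake_up(&desc->wait);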
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 622b4a48e732..57ed9e400c06 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c | |||
| @@ -493,6 +493,15 @@ static int hcd_pci_suspend_noirq(struct device *dev) | |||
| 493 | 493 | ||
| 494 | pci_save_state(pci_dev); | 494 | pci_save_state(pci_dev); |
| 495 | 495 | ||
| 496 | /* | ||
| 497 | * Some systems crash if an EHCI controller is in D3 during | ||
| 498 | * a sleep transition. We have to leave such controllers in D0. | ||
| 499 | */ | ||
| 500 | if (hcd->broken_pci_sleep) { | ||
| 501 | dev_dbg(dev, "Staying in PCI D0\n"); | ||
| 502 | return retval; | ||
| 503 | } | ||
| 504 | |||
| 496 | /* If the root hub is dead rather than suspended, disallow remote | 505 | /* If the root hub is dead rather than suspended, disallow remote |
| 497 | * wakeup. usb_hc_died() should ensure that both hosts are marked as | 506 | * wakeup. usb_hc_died() should ensure that both hosts are marked as |
| 498 | * dying, so we only need to check the primary roothub. | 507 | * dying, so we only need to check the primary roothub. |
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c index a6dfd2164166..170cbe89d9f8 100644 --- a/drivers/usb/gadget/dummy_hcd.c +++ b/drivers/usb/gadget/dummy_hcd.c | |||
| @@ -927,7 +927,6 @@ static int dummy_udc_stop(struct usb_gadget *g, | |||
| 927 | 927 | ||
| 928 | dum->driver = NULL; | 928 | dum->driver = NULL; |
| 929 | 929 | ||
| 930 | dummy_pullup(&dum->gadget, 0); | ||
| 931 | return 0; | 930 | return 0; |
| 932 | } | 931 | } |
| 933 | 932 | ||
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c index a371e966425f..cb8c162cae5a 100644 --- a/drivers/usb/gadget/f_mass_storage.c +++ b/drivers/usb/gadget/f_mass_storage.c | |||
| @@ -2189,7 +2189,7 @@ unknown_cmnd: | |||
| 2189 | common->data_size_from_cmnd = 0; | 2189 | common->data_size_from_cmnd = 0; |
| 2190 | sprintf(unknown, "Unknown x%02x", common->cmnd[0]); | 2190 | sprintf(unknown, "Unknown x%02x", common->cmnd[0]); |
| 2191 | reply = check_command(common, common->cmnd_size, | 2191 | reply = check_command(common, common->cmnd_size, |
| 2192 | DATA_DIR_UNKNOWN, 0xff, 0, unknown); | 2192 | DATA_DIR_UNKNOWN, ~0, 0, unknown); |
| 2193 | if (reply == 0) { | 2193 | if (reply == 0) { |
| 2194 | common->curlun->sense_data = SS_INVALID_COMMAND; | 2194 | common->curlun->sense_data = SS_INVALID_COMMAND; |
| 2195 | reply = -EINVAL; | 2195 | reply = -EINVAL; |
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c index 4fac56927741..a896d73f7a93 100644 --- a/drivers/usb/gadget/file_storage.c +++ b/drivers/usb/gadget/file_storage.c | |||
| @@ -2579,7 +2579,7 @@ static int do_scsi_command(struct fsg_dev *fsg) | |||
| 2579 | fsg->data_size_from_cmnd = 0; | 2579 | fsg->data_size_from_cmnd = 0; |
| 2580 | sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]); | 2580 | sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]); |
| 2581 | if ((reply = check_command(fsg, fsg->cmnd_size, | 2581 | if ((reply = check_command(fsg, fsg->cmnd_size, |
| 2582 | DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) { | 2582 | DATA_DIR_UNKNOWN, ~0, 0, unknown)) == 0) { |
| 2583 | fsg->curlun->sense_data = SS_INVALID_COMMAND; | 2583 | fsg->curlun->sense_data = SS_INVALID_COMMAND; |
| 2584 | reply = -EINVAL; | 2584 | reply = -EINVAL; |
| 2585 | } | 2585 | } |
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c index 2fa9865babed..e5e44f8cde9a 100644 --- a/drivers/usb/gadget/udc-core.c +++ b/drivers/usb/gadget/udc-core.c | |||
| @@ -263,8 +263,8 @@ static void usb_gadget_remove_driver(struct usb_udc *udc) | |||
| 263 | 263 | ||
| 264 | if (udc_is_newstyle(udc)) { | 264 | if (udc_is_newstyle(udc)) { |
| 265 | udc->driver->disconnect(udc->gadget); | 265 | udc->driver->disconnect(udc->gadget); |
| 266 | udc->driver->unbind(udc->gadget); | ||
| 267 | usb_gadget_disconnect(udc->gadget); | 266 | usb_gadget_disconnect(udc->gadget); |
| 267 | udc->driver->unbind(udc->gadget); | ||
| 268 | usb_gadget_udc_stop(udc->gadget, udc->driver); | 268 | usb_gadget_udc_stop(udc->gadget, udc->driver); |
| 269 | } else { | 269 | } else { |
| 270 | usb_gadget_stop(udc->gadget, udc->driver); | 270 | usb_gadget_stop(udc->gadget, udc->driver); |
| @@ -415,9 +415,9 @@ static ssize_t usb_udc_softconn_store(struct device *dev, | |||
| 415 | usb_gadget_udc_start(udc->gadget, udc->driver); | 415 | usb_gadget_udc_start(udc->gadget, udc->driver); |
| 416 | usb_gadget_connect(udc->gadget); | 416 | usb_gadget_connect(udc->gadget); |
| 417 | } else if (sysfs_streq(buf, "disconnect")) { | 417 | } else if (sysfs_streq(buf, "disconnect")) { |
| 418 | usb_gadget_disconnect(udc->gadget); | ||
| 418 | if (udc_is_newstyle(udc)) | 419 | if (udc_is_newstyle(udc)) |
| 419 | usb_gadget_udc_stop(udc->gadget, udc->driver); | 420 | usb_gadget_udc_stop(udc->gadget, udc->driver); |
| 420 | usb_gadget_disconnect(udc->gadget); | ||
| 421 | } else { | 421 | } else { |
| 422 | dev_err(dev, "unsupported command '%s'\n", buf); | 422 | dev_err(dev, "unsupported command '%s'\n", buf); |
| 423 | return -EINVAL; | 423 | return -EINVAL; |
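Both udc-core hunks are about ordering: the D+ pullup is dropped (usb_gadget_disconnect) while the function driver is still bound and the controller still running, so the host sees a clean unplug before unbind/stop tear things down; the sysfs "disconnect" path gets the same treatment. A condensed sketch of the corrected removal order for a new-style UDC, using the same helpers the file itself calls (usb_gadget_udc_stop is local to udc-core.c):

#include <linux/usb/gadget.h>

static void my_remove_driver(struct usb_gadget *gadget,
			     struct usb_gadget_driver *driver)
{
	driver->disconnect(gadget);	/* function driver notices first */
	usb_gadget_disconnect(gadget);	/* D+ dropped while callbacks are valid */
	driver->unbind(gadget);		/* now safe to tear the function down */
	usb_gadget_udc_stop(gadget, driver);	/* finally stop the controller */
}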
diff --git a/drivers/usb/gadget/uvc.h b/drivers/usb/gadget/uvc.h index bc78c606c12b..ca4e03a1c73a 100644 --- a/drivers/usb/gadget/uvc.h +++ b/drivers/usb/gadget/uvc.h | |||
| @@ -28,7 +28,7 @@ | |||
| 28 | 28 | ||
| 29 | struct uvc_request_data | 29 | struct uvc_request_data |
| 30 | { | 30 | { |
| 31 | unsigned int length; | 31 | __s32 length; |
| 32 | __u8 data[60]; | 32 | __u8 data[60]; |
| 33 | }; | 33 | }; |
| 34 | 34 | ||
diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c index f6e083b50191..54d7ca559cb2 100644 --- a/drivers/usb/gadget/uvc_v4l2.c +++ b/drivers/usb/gadget/uvc_v4l2.c | |||
| @@ -39,7 +39,7 @@ uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data) | |||
| 39 | if (data->length < 0) | 39 | if (data->length < 0) |
| 40 | return usb_ep_set_halt(cdev->gadget->ep0); | 40 | return usb_ep_set_halt(cdev->gadget->ep0); |
| 41 | 41 | ||
| 42 | req->length = min(uvc->event_length, data->length); | 42 | req->length = min_t(unsigned int, uvc->event_length, data->length); |
| 43 | req->zero = data->length < uvc->event_length; | 43 | req->zero = data->length < uvc->event_length; |
| 44 | req->dma = DMA_ADDR_INVALID; | 44 | req->dma = DMA_ADDR_INVALID; |
| 45 | 45 | ||
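The uvc pair of changes makes uvc_request_data.length signed (__s32) so userspace can return a negative length to stall ep0, and that in turn forces the clamp against the unsigned event_length to go through min_t(unsigned int, ...): a plain mixed-type min would either be rejected or promote the negative value. The negative case is therefore tested first, and only non-negative lengths reach the clamp. The small user-space demo below shows why that order matters; its min_t is a simplified stand-in for the kernel macro.

#include <stdio.h>

/* simplified stand-in for the kernel's min_t() */
#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	unsigned int event_length = 16;
	int length = -42;		/* negative means "stall ep0" */

	/* clamping first would quietly turn the error into a 16-byte reply */
	printf("min_t(unsigned int, ...) = %u\n",
	       min_t(unsigned int, event_length, length));

	/* the driver checks the sign before ever clamping */
	if (length < 0)
		printf("negative length -> usb_ep_set_halt(ep0), no data\n");
	return 0;
}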
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index 01bb7241d6ef..fe8dc069164e 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c | |||
| @@ -144,6 +144,14 @@ static int ehci_pci_setup(struct usb_hcd *hcd) | |||
| 144 | hcd->has_tt = 1; | 144 | hcd->has_tt = 1; |
| 145 | tdi_reset(ehci); | 145 | tdi_reset(ehci); |
| 146 | } | 146 | } |
| 147 | if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) { | ||
| 148 | /* EHCI #1 or #2 on 6 Series/C200 Series chipset */ | ||
| 149 | if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) { | ||
| 150 | ehci_info(ehci, "broken D3 during system sleep on ASUS\n"); | ||
| 151 | hcd->broken_pci_sleep = 1; | ||
| 152 | device_set_wakeup_capable(&pdev->dev, false); | ||
| 153 | } | ||
| 154 | } | ||
| 147 | break; | 155 | break; |
| 148 | case PCI_VENDOR_ID_TDI: | 156 | case PCI_VENDOR_ID_TDI: |
| 149 | if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { | 157 | if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { |
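The ehci-pci and hcd-pci hunks work together: ehci-pci flags the two 6 Series/C200 EHCI functions (0x1c26/0x1c2d) on ASUS-subsystem boards with hcd->broken_pci_sleep and drops their wakeup capability, and hcd_pci_suspend_noirq() then returns early so those controllers stay in D0 across a sleep transition instead of crashing in D3. A condensed sketch of how the quirk short-circuits the suspend path (the real function goes on to handle the dead-root-hub and wakeup cases):

#include <linux/pci.h>
#include <linux/usb/hcd.h>

static int my_suspend_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct usb_hcd *hcd = pci_get_drvdata(pci_dev);

	pci_save_state(pci_dev);

	if (hcd->broken_pci_sleep) {
		dev_dbg(dev, "Staying in PCI D0\n");
		return 0;		/* never program D3 for this controller */
	}

	/* ... normal path: check root hub, arm wakeup, enter D3hot ... */
	return 0;
}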
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c index 97ab975fa442..768b4b55c816 100644 --- a/drivers/usb/musb/davinci.c +++ b/drivers/usb/musb/davinci.c | |||
| @@ -386,7 +386,7 @@ static int davinci_musb_init(struct musb *musb) | |||
| 386 | usb_nop_xceiv_register(); | 386 | usb_nop_xceiv_register(); |
| 387 | musb->xceiv = usb_get_transceiver(); | 387 | musb->xceiv = usb_get_transceiver(); |
| 388 | if (!musb->xceiv) | 388 | if (!musb->xceiv) |
| 389 | return -ENODEV; | 389 | goto unregister; |
| 390 | 390 | ||
| 391 | musb->mregs += DAVINCI_BASE_OFFSET; | 391 | musb->mregs += DAVINCI_BASE_OFFSET; |
| 392 | 392 | ||
| @@ -444,6 +444,7 @@ static int davinci_musb_init(struct musb *musb) | |||
| 444 | 444 | ||
| 445 | fail: | 445 | fail: |
| 446 | usb_put_transceiver(musb->xceiv); | 446 | usb_put_transceiver(musb->xceiv); |
| 447 | unregister: | ||
| 447 | usb_nop_xceiv_unregister(); | 448 | usb_nop_xceiv_unregister(); |
| 448 | return -ENODEV; | 449 | return -ENODEV; |
| 449 | } | 450 | } |
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index 93de517a32a0..f4a40f001c88 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h | |||
| @@ -449,7 +449,7 @@ struct musb { | |||
| 449 | * We added this flag to forcefully disable double | 449 | * We added this flag to forcefully disable double |
| 450 | * buffering until we get it working. | 450 | * buffering until we get it working. |
| 451 | */ | 451 | */ |
| 452 | unsigned double_buffer_not_ok:1 __deprecated; | 452 | unsigned double_buffer_not_ok:1; |
| 453 | 453 | ||
| 454 | struct musb_hdrc_config *config; | 454 | struct musb_hdrc_config *config; |
| 455 | 455 | ||
diff --git a/drivers/usb/otg/gpio_vbus.c b/drivers/usb/otg/gpio_vbus.c index 3ece43a2e4c1..a0a2178974fe 100644 --- a/drivers/usb/otg/gpio_vbus.c +++ b/drivers/usb/otg/gpio_vbus.c | |||
| @@ -96,7 +96,7 @@ static void gpio_vbus_work(struct work_struct *work) | |||
| 96 | struct gpio_vbus_data *gpio_vbus = | 96 | struct gpio_vbus_data *gpio_vbus = |
| 97 | container_of(work, struct gpio_vbus_data, work); | 97 | container_of(work, struct gpio_vbus_data, work); |
| 98 | struct gpio_vbus_mach_info *pdata = gpio_vbus->dev->platform_data; | 98 | struct gpio_vbus_mach_info *pdata = gpio_vbus->dev->platform_data; |
| 99 | int gpio; | 99 | int gpio, status; |
| 100 | 100 | ||
| 101 | if (!gpio_vbus->phy.otg->gadget) | 101 | if (!gpio_vbus->phy.otg->gadget) |
| 102 | return; | 102 | return; |
| @@ -108,7 +108,9 @@ static void gpio_vbus_work(struct work_struct *work) | |||
| 108 | */ | 108 | */ |
| 109 | gpio = pdata->gpio_pullup; | 109 | gpio = pdata->gpio_pullup; |
| 110 | if (is_vbus_powered(pdata)) { | 110 | if (is_vbus_powered(pdata)) { |
| 111 | status = USB_EVENT_VBUS; | ||
| 111 | gpio_vbus->phy.state = OTG_STATE_B_PERIPHERAL; | 112 | gpio_vbus->phy.state = OTG_STATE_B_PERIPHERAL; |
| 113 | gpio_vbus->phy.last_event = status; | ||
| 112 | usb_gadget_vbus_connect(gpio_vbus->phy.otg->gadget); | 114 | usb_gadget_vbus_connect(gpio_vbus->phy.otg->gadget); |
| 113 | 115 | ||
| 114 | /* drawing a "unit load" is *always* OK, except for OTG */ | 116 | /* drawing a "unit load" is *always* OK, except for OTG */ |
| @@ -117,6 +119,9 @@ static void gpio_vbus_work(struct work_struct *work) | |||
| 117 | /* optionally enable D+ pullup */ | 119 | /* optionally enable D+ pullup */ |
| 118 | if (gpio_is_valid(gpio)) | 120 | if (gpio_is_valid(gpio)) |
| 119 | gpio_set_value(gpio, !pdata->gpio_pullup_inverted); | 121 | gpio_set_value(gpio, !pdata->gpio_pullup_inverted); |
| 122 | |||
| 123 | atomic_notifier_call_chain(&gpio_vbus->phy.notifier, | ||
| 124 | status, gpio_vbus->phy.otg->gadget); | ||
| 120 | } else { | 125 | } else { |
| 121 | /* optionally disable D+ pullup */ | 126 | /* optionally disable D+ pullup */ |
| 122 | if (gpio_is_valid(gpio)) | 127 | if (gpio_is_valid(gpio)) |
| @@ -125,7 +130,12 @@ static void gpio_vbus_work(struct work_struct *work) | |||
| 125 | set_vbus_draw(gpio_vbus, 0); | 130 | set_vbus_draw(gpio_vbus, 0); |
| 126 | 131 | ||
| 127 | usb_gadget_vbus_disconnect(gpio_vbus->phy.otg->gadget); | 132 | usb_gadget_vbus_disconnect(gpio_vbus->phy.otg->gadget); |
| 133 | status = USB_EVENT_NONE; | ||
| 128 | gpio_vbus->phy.state = OTG_STATE_B_IDLE; | 134 | gpio_vbus->phy.state = OTG_STATE_B_IDLE; |
| 135 | gpio_vbus->phy.last_event = status; | ||
| 136 | |||
| 137 | atomic_notifier_call_chain(&gpio_vbus->phy.notifier, | ||
| 138 | status, gpio_vbus->phy.otg->gadget); | ||
| 129 | } | 139 | } |
| 130 | } | 140 | } |
| 131 | 141 | ||
| @@ -287,6 +297,9 @@ static int __init gpio_vbus_probe(struct platform_device *pdev) | |||
| 287 | irq, err); | 297 | irq, err); |
| 288 | goto err_irq; | 298 | goto err_irq; |
| 289 | } | 299 | } |
| 300 | |||
| 301 | ATOMIC_INIT_NOTIFIER_HEAD(&gpio_vbus->phy.notifier); | ||
| 302 | |||
| 290 | INIT_WORK(&gpio_vbus->work, gpio_vbus_work); | 303 | INIT_WORK(&gpio_vbus->work, gpio_vbus_work); |
| 291 | 304 | ||
| 292 | gpio_vbus->vbus_draw = regulator_get(&pdev->dev, "vbus_draw"); | 305 | gpio_vbus->vbus_draw = regulator_get(&pdev->dev, "vbus_draw"); |
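The gpio_vbus additions turn the VBUS worker into an event source: the phy's atomic notifier head is initialised in probe, and each plug/unplug now records phy.last_event and broadcasts USB_EVENT_VBUS or USB_EVENT_NONE with the gadget as payload, so a charger driver or other interested code can react. A hedged sketch of a listener on that chain; the listener itself and how it obtains the phy are hypothetical, only the notifier API and event values come from the patch.

#include <linux/notifier.h>
#include <linux/usb/otg.h>

static int my_vbus_notify(struct notifier_block *nb, unsigned long event,
			  void *gadget)
{
	switch (event) {
	case USB_EVENT_VBUS:		/* cable plugged, VBUS present */
		/* e.g. start charging at the negotiated current */
		break;
	case USB_EVENT_NONE:		/* cable gone */
		/* e.g. stop charging */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_vbus_nb = {
	.notifier_call = my_vbus_notify,
};

/* given a struct usb_phy *phy (e.g. from usb_get_transceiver()): */
/*	atomic_notifier_chain_register(&phy->notifier, &my_vbus_nb);	*/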
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index f0da2c32fbde..1f21d2a1e528 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
| @@ -238,7 +238,7 @@ static void handle_tx(struct vhost_net *net) | |||
| 238 | 238 | ||
| 239 | vq->heads[vq->upend_idx].len = len; | 239 | vq->heads[vq->upend_idx].len = len; |
| 240 | ubuf->callback = vhost_zerocopy_callback; | 240 | ubuf->callback = vhost_zerocopy_callback; |
| 241 | ubuf->arg = vq->ubufs; | 241 | ubuf->ctx = vq->ubufs; |
| 242 | ubuf->desc = vq->upend_idx; | 242 | ubuf->desc = vq->upend_idx; |
| 243 | msg.msg_control = ubuf; | 243 | msg.msg_control = ubuf; |
| 244 | msg.msg_controllen = sizeof(ubuf); | 244 | msg.msg_controllen = sizeof(ubuf); |
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 947f00d8e091..51e4c1eeec4f 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
| @@ -1598,10 +1598,9 @@ void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs) | |||
| 1598 | kfree(ubufs); | 1598 | kfree(ubufs); |
| 1599 | } | 1599 | } |
| 1600 | 1600 | ||
| 1601 | void vhost_zerocopy_callback(void *arg) | 1601 | void vhost_zerocopy_callback(struct ubuf_info *ubuf) |
| 1602 | { | 1602 | { |
| 1603 | struct ubuf_info *ubuf = arg; | 1603 | struct vhost_ubuf_ref *ubufs = ubuf->ctx; |
| 1604 | struct vhost_ubuf_ref *ubufs = ubuf->arg; | ||
| 1605 | struct vhost_virtqueue *vq = ubufs->vq; | 1604 | struct vhost_virtqueue *vq = ubufs->vq; |
| 1606 | 1605 | ||
| 1607 | /* set len = 1 to mark this desc buffers done DMA */ | 1606 | /* set len = 1 to mark this desc buffers done DMA */ |
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 8dcf4cca6bf2..8de1fd5b8efb 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h | |||
| @@ -188,7 +188,7 @@ bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *); | |||
| 188 | 188 | ||
| 189 | int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, | 189 | int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, |
| 190 | unsigned int log_num, u64 len); | 190 | unsigned int log_num, u64 len); |
| 191 | void vhost_zerocopy_callback(void *arg); | 191 | void vhost_zerocopy_callback(struct ubuf_info *); |
| 192 | int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq); | 192 | int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq); |
| 193 | 193 | ||
| 194 | #define vq_err(vq, fmt, ...) do { \ | 194 | #define vq_err(vq, fmt, ...) do { \ |
diff --git a/drivers/video/bfin-lq035q1-fb.c b/drivers/video/bfin-lq035q1-fb.c index 86922ac84412..353c02fe8a95 100644 --- a/drivers/video/bfin-lq035q1-fb.c +++ b/drivers/video/bfin-lq035q1-fb.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
| 14 | #include <linux/string.h> | 14 | #include <linux/string.h> |
| 15 | #include <linux/fb.h> | 15 | #include <linux/fb.h> |
| 16 | #include <linux/gpio.h> | ||
| 16 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
| 17 | #include <linux/init.h> | 18 | #include <linux/init.h> |
| 18 | #include <linux/types.h> | 19 | #include <linux/types.h> |
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index cbc7ceef2786..9f13b897fd64 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c | |||
| @@ -435,16 +435,16 @@ static void hpwdt_start(void) | |||
| 435 | { | 435 | { |
| 436 | reload = SECS_TO_TICKS(soft_margin); | 436 | reload = SECS_TO_TICKS(soft_margin); |
| 437 | iowrite16(reload, hpwdt_timer_reg); | 437 | iowrite16(reload, hpwdt_timer_reg); |
| 438 | iowrite16(0x85, hpwdt_timer_con); | 438 | iowrite8(0x85, hpwdt_timer_con); |
| 439 | } | 439 | } |
| 440 | 440 | ||
| 441 | static void hpwdt_stop(void) | 441 | static void hpwdt_stop(void) |
| 442 | { | 442 | { |
| 443 | unsigned long data; | 443 | unsigned long data; |
| 444 | 444 | ||
| 445 | data = ioread16(hpwdt_timer_con); | 445 | data = ioread8(hpwdt_timer_con); |
| 446 | data &= 0xFE; | 446 | data &= 0xFE; |
| 447 | iowrite16(data, hpwdt_timer_con); | 447 | iowrite8(data, hpwdt_timer_con); |
| 448 | } | 448 | } |
| 449 | 449 | ||
| 450 | static void hpwdt_ping(void) | 450 | static void hpwdt_ping(void) |
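The hpwdt fix narrows the timer-control accesses to the register's real width: the reload count stays a 16-bit write, but the control register is a single byte, and reading or writing it as 16 bits also touches the neighbouring byte. A short sketch of the corrected start/stop pair, with the register pointers assumed already ioremapped as in the driver:

#include <linux/io.h>

static void my_wdt_start(void __iomem *timer_reg, void __iomem *timer_con,
			 u16 reload)
{
	iowrite16(reload, timer_reg);	/* reload count is a 16-bit register */
	iowrite8(0x85, timer_con);	/* control register is 8 bits wide */
}

static void my_wdt_stop(void __iomem *timer_con)
{
	u8 data = ioread8(timer_con);

	iowrite8(data & 0xFE, timer_con);	/* clear only the enable bit */
}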
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 4b33acd8ed4e..0a8a17cd80be 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
| @@ -274,7 +274,7 @@ static unsigned int cpu_from_evtchn(unsigned int evtchn) | |||
| 274 | 274 | ||
| 275 | static bool pirq_check_eoi_map(unsigned irq) | 275 | static bool pirq_check_eoi_map(unsigned irq) |
| 276 | { | 276 | { |
| 277 | return test_bit(irq, pirq_eoi_map); | 277 | return test_bit(pirq_from_irq(irq), pirq_eoi_map); |
| 278 | } | 278 | } |
| 279 | 279 | ||
| 280 | static bool pirq_needs_eoi_flag(unsigned irq) | 280 | static bool pirq_needs_eoi_flag(unsigned irq) |
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c index 174b5653cd8a..0b48579a9cd6 100644 --- a/drivers/xen/xen-acpi-processor.c +++ b/drivers/xen/xen-acpi-processor.c | |||
| @@ -128,7 +128,10 @@ static int push_cxx_to_hypervisor(struct acpi_processor *_pr) | |||
| 128 | pr_debug(" C%d: %s %d uS\n", | 128 | pr_debug(" C%d: %s %d uS\n", |
| 129 | cx->type, cx->desc, (u32)cx->latency); | 129 | cx->type, cx->desc, (u32)cx->latency); |
| 130 | } | 130 | } |
| 131 | } else | 131 | } else if (ret != -EINVAL) |
| 132 | /* EINVAL means the ACPI ID is incorrect - meaning the ACPI | ||
| 133 | * table is referencing a non-existing CPU - which can happen | ||
| 134 | * with broken ACPI tables. */ | ||
| 132 | pr_err(DRV_NAME "(CX): Hypervisor error (%d) for ACPI CPU%u\n", | 135 | pr_err(DRV_NAME "(CX): Hypervisor error (%d) for ACPI CPU%u\n", |
| 133 | ret, _pr->acpi_id); | 136 | ret, _pr->acpi_id); |
| 134 | 137 | ||
