Diffstat (limited to 'drivers')
91 files changed, 726 insertions(+), 574 deletions(-)
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 0599854e2217..118ec12d2d5f 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -34,8 +34,8 @@ struct gpio_bank {
         u16 irq;
         u16 virtual_irq_start;
         int method;
-#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
         u32 suspend_wakeup;
+#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
         u32 saved_wakeup;
 #endif
         u32 non_wakeup_gpios;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index c43b8ff626a7..0550dcb85814 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -577,6 +577,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
 void
 pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
 {
+        *gpio_base = -1;
 }
 #endif
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ce045a8cf82c..f07e4252b708 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -67,11 +67,11 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
 MODULE_PARM_DESC(i915_enable_rc6,
                 "Enable power-saving render C-state 6 (default: true)");
 
-unsigned int i915_enable_fbc __read_mostly = 1;
+unsigned int i915_enable_fbc __read_mostly = -1;
 module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
 MODULE_PARM_DESC(i915_enable_fbc,
                 "Enable frame buffer compression for power savings "
-                "(default: false)");
+                "(default: -1 (use per-chip default))");
 
 unsigned int i915_lvds_downclock __read_mostly = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 56a8554d9039..04411ad2e779 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1799,6 +1799,7 @@ static void intel_update_fbc(struct drm_device *dev)
         struct drm_framebuffer *fb;
         struct intel_framebuffer *intel_fb;
         struct drm_i915_gem_object *obj;
+        int enable_fbc;
 
         DRM_DEBUG_KMS("\n");
 
@@ -1839,8 +1840,15 @@ static void intel_update_fbc(struct drm_device *dev)
         intel_fb = to_intel_framebuffer(fb);
         obj = intel_fb->obj;
 
-        if (!i915_enable_fbc) {
-                DRM_DEBUG_KMS("fbc disabled per module param (default off)\n");
+        enable_fbc = i915_enable_fbc;
+        if (enable_fbc < 0) {
+                DRM_DEBUG_KMS("fbc set to per-chip default\n");
+                enable_fbc = 1;
+                if (INTEL_INFO(dev)->gen <= 5)
+                        enable_fbc = 0;
+        }
+        if (!enable_fbc) {
+                DRM_DEBUG_KMS("fbc disabled per module param\n");
                 dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
                 goto out_disable;
         }
@@ -4687,13 +4695,13 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
                 bpc = 6; /* min is 18bpp */
                 break;
         case 24:
-                bpc = min((unsigned int)8, display_bpc);
+                bpc = 8;
                 break;
         case 30:
-                bpc = min((unsigned int)10, display_bpc);
+                bpc = 10;
                 break;
         case 48:
-                bpc = min((unsigned int)12, display_bpc);
+                bpc = 12;
                 break;
         default:
                 DRM_DEBUG("unsupported depth, assuming 24 bits\n");
@@ -4701,10 +4709,12 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
                 break;
         }
 
+        display_bpc = min(display_bpc, bpc);
+
         DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
                          bpc, display_bpc);
 
-        *pipe_bpp = bpc * 3;
+        *pipe_bpp = display_bpc * 3;
 
         return display_bpc != bpc;
 }
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0b2ee9d39980..fe1099d8817e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -337,9 +337,6 @@ extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
                                            struct drm_connector *connector,
                                            struct intel_load_detect_pipe *old);
 
-extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
-extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
-extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
 extern void intelfb_restore(void);
 extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
                                     u16 blue, int regno);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 30fe554d8936..6348c499616f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -92,6 +92,11 @@ struct intel_sdvo {
          */
         uint16_t attached_output;
 
+        /*
+         * Hotplug activation bits for this device
+         */
+        uint8_t hotplug_active[2];
+
         /**
          * This is used to select the color range of RBG outputs in HDMI mode.
          * It is only valid when using TMDS encoding and 8 bit per color mode.
@@ -1208,74 +1213,20 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
         return true;
 }
 
-/* No use! */
-#if 0
-struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
-{
-        struct drm_connector *connector = NULL;
-        struct intel_sdvo *iout = NULL;
-        struct intel_sdvo *sdvo;
-
-        /* find the sdvo connector */
-        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-                iout = to_intel_sdvo(connector);
-
-                if (iout->type != INTEL_OUTPUT_SDVO)
-                        continue;
-
-                sdvo = iout->dev_priv;
-
-                if (sdvo->sdvo_reg == SDVOB && sdvoB)
-                        return connector;
-
-                if (sdvo->sdvo_reg == SDVOC && !sdvoB)
-                        return connector;
-
-        }
-
-        return NULL;
-}
-
-int intel_sdvo_supports_hotplug(struct drm_connector *connector)
+static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
 {
         u8 response[2];
-        u8 status;
-        struct intel_sdvo *intel_sdvo;
-        DRM_DEBUG_KMS("\n");
-
-        if (!connector)
-                return 0;
-
-        intel_sdvo = to_intel_sdvo(connector);
 
         return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
                                     &response, 2) && response[0];
 }
 
-void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
+static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
 {
-        u8 response[2];
-        u8 status;
-        struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector);
-
-        intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
-        intel_sdvo_read_response(intel_sdvo, &response, 2);
-
-        if (on) {
-                intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
-                status = intel_sdvo_read_response(intel_sdvo, &response, 2);
-
-                intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
-        } else {
-                response[0] = 0;
-                response[1] = 0;
-                intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
-        }
+        struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
 
-        intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
-        intel_sdvo_read_response(intel_sdvo, &response, 2);
+        intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2);
 }
-#endif
 
 static bool
 intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
@@ -2045,6 +1996,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 {
         struct drm_encoder *encoder = &intel_sdvo->base.base;
         struct drm_connector *connector;
+        struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
         struct intel_connector *intel_connector;
         struct intel_sdvo_connector *intel_sdvo_connector;
 
@@ -2062,7 +2014,17 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 
         intel_connector = &intel_sdvo_connector->base;
         connector = &intel_connector->base;
-        connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+        if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) {
+                connector->polled = DRM_CONNECTOR_POLL_HPD;
+                intel_sdvo->hotplug_active[0] |= 1 << device;
+                /* Some SDVO devices have one-shot hotplug interrupts.
+                 * Ensure that they get re-enabled when an interrupt happens.
+                 */
+                intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
+                intel_sdvo_enable_hotplug(intel_encoder);
+        }
+        else
+                connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
         encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
         connector->connector_type = DRM_MODE_CONNECTOR_DVID;
 
@@ -2569,6 +2531,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
         if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
                 goto err;
 
+        /* Set up hotplug command - note paranoia about contents of reply.
+         * We assume that the hardware is in a sane state, and only touch
+         * the bits we think we understand.
+         */
+        intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
+                             &intel_sdvo->hotplug_active, 2);
+        intel_sdvo->hotplug_active[0] &= ~0x3;
+
         if (intel_sdvo_output_setup(intel_sdvo,
                                     intel_sdvo->caps.output_flags) != true) {
                 DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index e88c64417a8a..14cc88aaf3a7 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -277,7 +277,12 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
         case ATOM_ARG_FB:
                 idx = U8(*ptr);
                 (*ptr)++;
-                val = gctx->scratch[((gctx->fb_base + idx) / 4)];
+                if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
+                        DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
+                                  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
+                        val = 0;
+                } else
+                        val = gctx->scratch[(gctx->fb_base / 4) + idx];
                 if (print)
                         DEBUG("FB[0x%02X]", idx);
                 break;
@@ -531,7 +536,11 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
         case ATOM_ARG_FB:
                 idx = U8(*ptr);
                 (*ptr)++;
-                gctx->scratch[((gctx->fb_base + idx) / 4)] = val;
+                if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
+                        DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
+                                  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
+                } else
+                        gctx->scratch[(gctx->fb_base / 4) + idx] = val;
                 DEBUG("FB[0x%02X]", idx);
                 break;
         case ATOM_ARG_PLL:
@@ -1370,11 +1379,13 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
 
                 usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
         }
+        ctx->scratch_size_bytes = 0;
         if (usage_bytes == 0)
                 usage_bytes = 20 * 1024;
         /* allocate some scratch memory */
         ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
         if (!ctx->scratch)
                 return -ENOMEM;
+        ctx->scratch_size_bytes = usage_bytes;
         return 0;
 }
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index a589a55b223e..93cfe2086ba0 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -137,6 +137,7 @@ struct atom_context {
         int cs_equal, cs_above;
         int io_mode;
         uint32_t *scratch;
+        int scratch_size_bytes;
 };
 
 extern int atom_debug;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index c742944d3805..a515b2a09d85 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -466,7 +466,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
                         return;
                 }
                 args.v2.ucEnable = enable;
-                if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK))
+                if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev))
                         args.v2.ucEnable = ATOM_DISABLE;
         } else if (ASIC_IS_DCE3(rdev)) {
                 args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 7ad43c6b1db7..79e8ebc05307 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -115,6 +115,7 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
         u8 msg[20];
         int msg_bytes = send_bytes + 4;
         u8 ack;
+        unsigned retry;
 
         if (send_bytes > 16)
                 return -1;
@@ -125,20 +126,22 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
         msg[3] = (msg_bytes << 4) | (send_bytes - 1);
         memcpy(&msg[4], send, send_bytes);
 
-        while (1) {
+        for (retry = 0; retry < 4; retry++) {
                 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
                                             msg, msg_bytes, NULL, 0, delay, &ack);
-                if (ret < 0)
+                if (ret == -EBUSY)
+                        continue;
+                else if (ret < 0)
                         return ret;
                 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
-                        break;
+                        return send_bytes;
                 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
                         udelay(400);
                 else
                         return -EIO;
         }
 
-        return send_bytes;
+        return -EIO;
 }
 
 static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
@@ -149,26 +152,31 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
         int msg_bytes = 4;
         u8 ack;
         int ret;
+        unsigned retry;
 
         msg[0] = address;
         msg[1] = address >> 8;
         msg[2] = AUX_NATIVE_READ << 4;
         msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
 
-        while (1) {
+        for (retry = 0; retry < 4; retry++) {
                 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
                                             msg, msg_bytes, recv, recv_bytes, delay, &ack);
-                if (ret == 0)
-                        return -EPROTO;
-                if (ret < 0)
+                if (ret == -EBUSY)
+                        continue;
+                else if (ret < 0)
                         return ret;
                 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
                         return ret;
                 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
                         udelay(400);
+                else if (ret == 0)
+                        return -EPROTO;
                 else
                         return -EIO;
         }
+
+        return -EIO;
 }
 
 static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
@@ -232,7 +240,9 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
         for (retry = 0; retry < 4; retry++) {
                 ret = radeon_process_aux_ch(auxch,
                                             msg, msg_bytes, reply, reply_bytes, 0, &ack);
-                if (ret < 0) {
+                if (ret == -EBUSY)
+                        continue;
+                else if (ret < 0) {
                         DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
                         return ret;
                 }
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e8a746712b5b..c4ffa14fb2f4 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1590,48 +1590,6 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
         return backend_map;
 }
 
-static void evergreen_program_channel_remap(struct radeon_device *rdev)
-{
-        u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
-
-        tmp = RREG32(MC_SHARED_CHMAP);
-        switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
-        case 0:
-        case 1:
-        case 2:
-        case 3:
-        default:
-                /* default mapping */
-                mc_shared_chremap = 0x00fac688;
-                break;
-        }
-
-        switch (rdev->family) {
-        case CHIP_HEMLOCK:
-        case CHIP_CYPRESS:
-        case CHIP_BARTS:
-                tcp_chan_steer_lo = 0x54763210;
-                tcp_chan_steer_hi = 0x0000ba98;
-                break;
-        case CHIP_JUNIPER:
-        case CHIP_REDWOOD:
-        case CHIP_CEDAR:
-        case CHIP_PALM:
-        case CHIP_SUMO:
-        case CHIP_SUMO2:
-        case CHIP_TURKS:
-        case CHIP_CAICOS:
-        default:
-                tcp_chan_steer_lo = 0x76543210;
-                tcp_chan_steer_hi = 0x0000ba98;
-                break;
-        }
-
-        WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
-        WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
-        WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
-}
-
 static void evergreen_gpu_init(struct radeon_device *rdev)
 {
         u32 cc_rb_backend_disable = 0;
@@ -2078,8 +2036,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
         WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
         WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
-        evergreen_program_channel_remap(rdev);
-
         num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
         grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
 
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 99fbd793c08c..8c79ca97753d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -569,36 +569,6 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
         return backend_map;
 }
 
-static void cayman_program_channel_remap(struct radeon_device *rdev)
-{
-        u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
-
-        tmp = RREG32(MC_SHARED_CHMAP);
-        switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
-        case 0:
-        case 1:
-        case 2:
-        case 3:
-        default:
-                /* default mapping */
-                mc_shared_chremap = 0x00fac688;
-                break;
-        }
-
-        switch (rdev->family) {
-        case CHIP_CAYMAN:
-        default:
-                //tcp_chan_steer_lo = 0x54763210
-                tcp_chan_steer_lo = 0x76543210;
-                tcp_chan_steer_hi = 0x0000ba98;
-                break;
-        }
-
-        WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
-        WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
-        WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
-}
-
 static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
                                             u32 disable_mask_per_se,
                                             u32 max_disable_mask_per_se,
@@ -842,8 +812,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
         WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
         WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
-        cayman_program_channel_remap(rdev);
-
         /* primary versions */
         WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
         WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index c4b8741dbf58..449c3d8c6836 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -68,11 +68,11 @@ void radeon_connector_hotplug(struct drm_connector *connector)
         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
                 int saved_dpms = connector->dpms;
 
-                if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
-                    radeon_dp_needs_link_train(radeon_connector))
-                        drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-                else
+                /* Only turn off the display it it's physically disconnected */
+                if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
                         drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+                else if (radeon_dp_needs_link_train(radeon_connector))
+                        drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
                 connector->dpms = saved_dpms;
         }
 }
@@ -1303,23 +1303,14 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
                 /* get the DPCD from the bridge */
                 radeon_dp_getdpcd(radeon_connector);
 
-                if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
-                        ret = connector_status_connected;
-                else {
-                        /* need to setup ddc on the bridge */
-                        if (encoder)
-                                radeon_atom_ext_encoder_setup_ddc(encoder);
+                if (encoder) {
+                        /* setup ddc on the bridge */
+                        radeon_atom_ext_encoder_setup_ddc(encoder);
                         if (radeon_ddc_probe(radeon_connector,
-                                             radeon_connector->requires_extended_probe))
+                                             radeon_connector->requires_extended_probe)) /* try DDC */
                                 ret = connector_status_connected;
-                }
-
-                if ((ret == connector_status_disconnected) &&
-                    radeon_connector->dac_load_detect) {
-                        struct drm_encoder *encoder = radeon_best_single_encoder(connector);
-                        struct drm_encoder_helper_funcs *encoder_funcs;
-                        if (encoder) {
-                                encoder_funcs = encoder->helper_private;
+                        else if (radeon_connector->dac_load_detect) { /* try load detection */
+                                struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
                                 ret = encoder_funcs->detect(encoder, connector);
                         }
                 }
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 3189a7efb2e9..fde25c0d65a0 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -208,23 +208,25 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
         int xorigin = 0, yorigin = 0;
         int w = radeon_crtc->cursor_width;
 
-        if (x < 0)
-                xorigin = -x + 1;
-        if (y < 0)
-                yorigin = -y + 1;
-        if (xorigin >= CURSOR_WIDTH)
-                xorigin = CURSOR_WIDTH - 1;
-        if (yorigin >= CURSOR_HEIGHT)
-                yorigin = CURSOR_HEIGHT - 1;
-
         if (ASIC_IS_AVIVO(rdev)) {
-                int i = 0;
-                struct drm_crtc *crtc_p;
-
                 /* avivo cursor are offset into the total surface */
                 x += crtc->x;
                 y += crtc->y;
-                DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+        }
+        DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+        if (x < 0) {
+                xorigin = min(-x, CURSOR_WIDTH - 1);
+                x = 0;
+        }
+        if (y < 0) {
+                yorigin = min(-y, CURSOR_HEIGHT - 1);
+                y = 0;
+        }
+
+        if (ASIC_IS_AVIVO(rdev)) {
+                int i = 0;
+                struct drm_crtc *crtc_p;
 
                 /* avivo cursor image can't end on 128 pixel boundary or
                  * go past the end of the frame if both crtcs are enabled
@@ -253,16 +255,12 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
 
         radeon_lock_cursor(crtc, true);
         if (ASIC_IS_DCE4(rdev)) {
-                WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
-                       ((xorigin ? 0 : x) << 16) |
-                       (yorigin ? 0 : y));
+                WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
                 WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
                 WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
                        ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
         } else if (ASIC_IS_AVIVO(rdev)) {
-                WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
-                       ((xorigin ? 0 : x) << 16) |
-                       (yorigin ? 0 : y));
+                WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
                 WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
                 WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
                        ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
@@ -276,8 +274,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
                          | yorigin));
                 WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
                        (RADEON_CUR_LOCK
-                        | ((xorigin ? 0 : x) << 16)
-                        | (yorigin ? 0 : y)));
+                        | (x << 16)
+                        | y));
                 /* offset is from DISP(2)_BASE_ADDRESS */
                 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
                                                                       (yorigin * 256)));
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 13690f3eb4a4..eb3f6dc6df83 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1638,7 +1638,17 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
                         break;
                 case 2:
                         args.v2.ucCRTC = radeon_crtc->crtc_id;
-                        args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+                        if (radeon_encoder_is_dp_bridge(encoder)) {
+                                struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+                                if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+                                        args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
+                                else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
+                                        args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
+                                else
+                                        args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+                        } else
+                                args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
                         switch (radeon_encoder->encoder_id) {
                         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
                         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
@@ -1755,9 +1765,17 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
         /* DCE4/5 */
         if (ASIC_IS_DCE4(rdev)) {
                 dig = radeon_encoder->enc_priv;
-                if (ASIC_IS_DCE41(rdev))
-                        return radeon_crtc->crtc_id;
-                else {
+                if (ASIC_IS_DCE41(rdev)) {
+                        /* ontario follows DCE4 */
+                        if (rdev->family == CHIP_PALM) {
+                                if (dig->linkb)
+                                        return 1;
+                                else
+                                        return 0;
+                        } else
+                                /* llano follows DCE3.2 */
+                                return radeon_crtc->crtc_id;
+                } else {
                         switch (radeon_encoder->encoder_id) {
                         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
                                 if (dig->linkb)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4720d000d440..b13c2eedc321 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -536,55 +536,6 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
         return backend_map;
 }
 
-static void rv770_program_channel_remap(struct radeon_device *rdev)
-{
-        u32 tcp_chan_steer, mc_shared_chremap, tmp;
-        bool force_no_swizzle;
-
-        switch (rdev->family) {
-        case CHIP_RV770:
-        case CHIP_RV730:
-                force_no_swizzle = false;
-                break;
-        case CHIP_RV710:
-        case CHIP_RV740:
-        default:
-                force_no_swizzle = true;
-                break;
-        }
-
-        tmp = RREG32(MC_SHARED_CHMAP);
-        switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
-        case 0:
-        case 1:
-        default:
-                /* default mapping */
-                mc_shared_chremap = 0x00fac688;
-                break;
-        case 2:
-        case 3:
-                if (force_no_swizzle)
-                        mc_shared_chremap = 0x00fac688;
-                else
-                        mc_shared_chremap = 0x00bbc298;
-                break;
-        }
-
-        if (rdev->family == CHIP_RV740)
-                tcp_chan_steer = 0x00ef2a60;
-        else
-                tcp_chan_steer = 0x00fac688;
-
-        /* RV770 CE has special chremap setup */
-        if (rdev->pdev->device == 0x944e) {
-                tcp_chan_steer = 0x00b08b08;
-                mc_shared_chremap = 0x00b08b08;
-        }
-
-        WREG32(TCP_CHAN_STEER, tcp_chan_steer);
-        WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
-}
-
 static void rv770_gpu_init(struct radeon_device *rdev)
 {
         int i, j, num_qd_pipes;
@@ -785,8 +736,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
         WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
         WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
 
-        rv770_program_channel_remap(rdev);
-
         WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
         WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
         WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ae3c6f5dd2b7..082fcaea583f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -321,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
         struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
         struct ttm_tt *ttm = bo->ttm;
         struct ttm_mem_reg *old_mem = &bo->mem;
-        struct ttm_mem_reg old_copy;
+        struct ttm_mem_reg old_copy = *old_mem;
         void *old_iomap;
         void *new_iomap;
         int ret;
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 44b23917d4cc..932383786642 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -377,9 +377,9 @@ exit_free:
 }
 
 
-static int __devinit chk_ucode_version(struct platform_device *pdev)
+static int __cpuinit chk_ucode_version(unsigned int cpu)
 {
-        struct cpuinfo_x86 *c = &cpu_data(pdev->id);
+        struct cpuinfo_x86 *c = &cpu_data(cpu);
         int err;
         u32 edx;
 
@@ -390,17 +390,15 @@ static int __devinit chk_ucode_version(struct platform_device *pdev)
          */
         if (c->x86_model == 0xe && c->x86_mask < 0xc) {
                 /* check for microcode update */
-                err = smp_call_function_single(pdev->id, get_ucode_rev_on_cpu,
+                err = smp_call_function_single(cpu, get_ucode_rev_on_cpu,
                                                &edx, 1);
                 if (err) {
-                        dev_err(&pdev->dev,
-                                "Cannot determine microcode revision of "
-                                "CPU#%u (%d)!\n", pdev->id, err);
+                        pr_err("Cannot determine microcode revision of "
+                               "CPU#%u (%d)!\n", cpu, err);
                         return -ENODEV;
                 } else if (edx < 0x39) {
-                        dev_err(&pdev->dev,
-                                "Errata AE18 not fixed, update BIOS or "
-                                "microcode of the CPU!\n");
+                        pr_err("Errata AE18 not fixed, update BIOS or "
+                               "microcode of the CPU!\n");
                         return -ENODEV;
                 }
         }
@@ -508,6 +506,7 @@ static int create_core_data(struct platform_device *pdev,
 
         return 0;
 exit_free:
+        pdata->core_data[attr_no] = NULL;
         kfree(tdata);
         return err;
 }
@@ -544,11 +543,6 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
         struct platform_data *pdata;
         int err;
 
-        /* Check the microcode version of the CPU */
-        err = chk_ucode_version(pdev);
-        if (err)
-                return err;
-
         /* Initialize the per-package data structures */
         pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL);
         if (!pdata)
@@ -630,7 +624,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
         }
 
         pdev_entry->pdev = pdev;
-        pdev_entry->phys_proc_id = TO_PHYS_ID(cpu);
+        pdev_entry->phys_proc_id = pdev->id;
 
         list_add_tail(&pdev_entry->list, &pdev_list);
         mutex_unlock(&pdev_list_mutex);
@@ -691,6 +685,10 @@ static void __cpuinit get_core_online(unsigned int cpu)
                 return;
 
         if (!pdev) {
+                /* Check the microcode version of the CPU */
+                if (chk_ucode_version(cpu))
+                        return;
+
                 /*
                  * Alright, we have DTS support.
                  * We are bringing the _first_ core in this pkg
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index f2b377c56a3a..36d7f270b14d 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -390,7 +390,7 @@ temp_from_reg(u16 reg, s16 regval)
 {
         if (is_word_sized(reg))
                 return LM75_TEMP_FROM_REG(regval);
-        return regval * 1000;
+        return ((s8)regval) * 1000;
 }
 
 static inline u16
@@ -398,7 +398,8 @@ temp_to_reg(u16 reg, long temp)
 {
         if (is_word_sized(reg))
                 return LM75_TEMP_TO_REG(temp);
-        return DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000), 1000);
+        return (s8)DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000),
+                                     1000);
 }
 
 /* Some of analog inputs have internal scaling (2x), 8mV is ADC LSB */
@@ -1715,7 +1716,8 @@ static void w83627ehf_device_remove_files(struct device *dev)
 }
 
 /* Get the monitoring functions started */
-static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
+static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data,
+                                                   enum kinds kind)
 {
         int i;
         u8 tmp, diode;
@@ -1746,10 +1748,16 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
                 w83627ehf_write_value(data, W83627EHF_REG_VBAT, tmp | 0x01);
 
         /* Get thermal sensor types */
-        diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE);
+        switch (kind) {
+        case w83627ehf:
+                diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE);
+                break;
+        default:
+                diode = 0x70;
+        }
         for (i = 0; i < 3; i++) {
                 if ((tmp & (0x02 << i)))
-                        data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 2;
+                        data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 3;
                 else
                         data->temp_type[i] = 4; /* thermistor */
         }
@@ -2016,7 +2024,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
         }
 
         /* Initialize the chip */
-        w83627ehf_init_device(data);
+        w83627ehf_init_device(data, sio_data->kind);
 
         data->vrm = vid_which_vrm();
         superio_enter(sio_data->sioreg);
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 9827c5e686cb..811dbbd9306c 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -327,7 +327,7 @@ config BLK_DEV_OPTI621
         select BLK_DEV_IDEPCI
         help
           This is a driver for the OPTi 82C621 EIDE controller.
-          Please read the comments at the top of <file:drivers/ide/pci/opti621.c>.
+          Please read the comments at the top of <file:drivers/ide/opti621.c>.
 
 config BLK_DEV_RZ1000
         tristate "RZ1000 chipset bugfix/support"
@@ -365,7 +365,7 @@ config BLK_DEV_ALI15X3
           normal dual channel support.
 
           Please read the comments at the top of
-          <file:drivers/ide/pci/alim15x3.c>.
+          <file:drivers/ide/alim15x3.c>.
 
           If unsure, say N.
 
@@ -528,7 +528,7 @@ config BLK_DEV_NS87415
           This driver adds detection and support for the NS87415 chip
           (used mainly on SPARC64 and PA-RISC machines).
 
-          Please read the comments at the top of <file:drivers/ide/pci/ns87415.c>.
+          Please read the comments at the top of <file:drivers/ide/ns87415.c>.
 
 config BLK_DEV_PDC202XX_OLD
         tristate "PROMISE PDC202{46|62|65|67} support"
@@ -547,7 +547,7 @@ config BLK_DEV_PDC202XX_OLD
           for more than one card.
 
           Please read the comments at the top of
-          <file:drivers/ide/pci/pdc202xx_old.c>.
+          <file:drivers/ide/pdc202xx_old.c>.
 
           If unsure, say N.
 
@@ -593,7 +593,7 @@ config BLK_DEV_SIS5513
           ATA100: SiS635, SiS645, SiS650, SiS730, SiS735, SiS740,
           SiS745, SiS750
 
-          Please read the comments at the top of <file:drivers/ide/pci/sis5513.c>.
+          Please read the comments at the top of <file:drivers/ide/sis5513.c>.
 
 config BLK_DEV_SL82C105
         tristate "Winbond SL82c105 support"
@@ -616,7 +616,7 @@ config BLK_DEV_SLC90E66
           look-a-like to the PIIX4 it should be a nice addition.
 
           Please read the comments at the top of
-          <file:drivers/ide/pci/slc90e66.c>.
+          <file:drivers/ide/slc90e66.c>.
 
 config BLK_DEV_TRM290
         tristate "Tekram TRM290 chipset support"
@@ -625,7 +625,7 @@ config BLK_DEV_TRM290
           This driver adds support for bus master DMA transfers
           using the Tekram TRM290 PCI IDE chip. Volunteers are
           needed for further tweaking and development.
-          Please read the comments at the top of <file:drivers/ide/pci/trm290.c>.
+          Please read the comments at the top of <file:drivers/ide/trm290.c>.
 
 config BLK_DEV_VIA82CXXX
         tristate "VIA82CXXX chipset support"
@@ -836,7 +836,7 @@ config BLK_DEV_ALI14XX
           of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster
           I/O speeds to be set as well.
           See the files <file:Documentation/ide/ide.txt> and
-          <file:drivers/ide/legacy/ali14xx.c> for more info.
+          <file:drivers/ide/ali14xx.c> for more info.
 
 config BLK_DEV_DTC2278
         tristate "DTC-2278 support"
@@ -847,7 +847,7 @@ config BLK_DEV_DTC2278
           boot parameter. It enables support for the secondary IDE interface
           of the DTC-2278 card, and permits faster I/O speeds to be set as
           well. See the <file:Documentation/ide/ide.txt> and
-          <file:drivers/ide/legacy/dtc2278.c> files for more info.
+          <file:drivers/ide/dtc2278.c> files for more info.
 
 config BLK_DEV_HT6560B
         tristate "Holtek HT6560B support"
@@ -858,7 +858,7 @@ config BLK_DEV_HT6560B
           boot parameter. It enables support for the secondary IDE interface
           of the Holtek card, and permits faster I/O speeds to be set as well.
           See the <file:Documentation/ide/ide.txt> and
-          <file:drivers/ide/legacy/ht6560b.c> files for more info.
+          <file:drivers/ide/ht6560b.c> files for more info.
 
 config BLK_DEV_QD65XX
         tristate "QDI QD65xx support"
@@ -867,7 +867,7 @@ config BLK_DEV_QD65XX
         help
           This driver is enabled at runtime using the "qd65xx.probe" kernel
           boot parameter. It permits faster I/O speeds to be set. See the
-          <file:Documentation/ide/ide.txt> and <file:drivers/ide/legacy/qd65xx.c>
+          <file:Documentation/ide/ide.txt> and <file:drivers/ide/qd65xx.c>
           for more info.
 
 config BLK_DEV_UMC8672
@@ -879,7 +879,7 @@ config BLK_DEV_UMC8672
           boot parameter. It enables support for the secondary IDE interface
           of the UMC-8672, and permits faster I/O speeds to be set as well.
           See the files <file:Documentation/ide/ide.txt> and
-          <file:drivers/ide/legacy/umc8672.c> for more info.
+          <file:drivers/ide/umc8672.c> for more info.
 
 endif
 
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 274798068a54..16f69be820c7 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -435,7 +435,12 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
         if (!(rq->cmd_flags & REQ_FLUSH))
                 return BLKPREP_OK;
 
-        cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+        if (rq->special) {
+                cmd = rq->special;
+                memset(cmd, 0, sizeof(*cmd));
+        } else {
+                cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+        }
 
         /* FIXME: map struct ide_taskfile on rq->cmd[] */
         BUG_ON(cmd == NULL);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 17bf9d95463c..6cd642aaa4de 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -287,7 +287,7 @@ void __free_ep(struct kref *kref)
         if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
                 cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
                 dst_release(ep->dst);
-                l2t_release(L2DATA(ep->com.tdev), ep->l2t);
+                l2t_release(ep->com.tdev, ep->l2t);
         }
         kfree(ep);
 }
@@ -1178,7 +1178,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
         release_tid(ep->com.tdev, GET_TID(rpl), NULL);
         cxgb3_free_atid(ep->com.tdev, ep->atid);
         dst_release(ep->dst);
-        l2t_release(L2DATA(ep->com.tdev), ep->l2t);
+        l2t_release(ep->com.tdev, ep->l2t);
         put_ep(&ep->com);
         return CPL_RET_BUF_DONE;
 }
@@ -1377,7 +1377,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
         if (!child_ep) {
                 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
                        __func__);
-                l2t_release(L2DATA(tdev), l2t);
+                l2t_release(tdev, l2t);
                 dst_release(dst);
                 goto reject;
         }
@@ -1956,7 +1956,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
         if (!err)
                 goto out;
 
-        l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
+        l2t_release(h->rdev.t3cdev_p, ep->l2t);
 fail4:
         dst_release(ep->dst);
 fail3:
@@ -2127,7 +2127,7 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
         PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
              l2t);
         dst_hold(new);
-        l2t_release(L2DATA(ep->com.tdev), ep->l2t);
+        l2t_release(ep->com.tdev, ep->l2t);
         ep->l2t = l2t;
         dst_release(old);
         ep->dst = new;
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 0dc97ec15c28..9dea71849f40 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c | |||
@@ -1124,11 +1124,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, | |||
1124 | for (i = 0; i < 8; i++) | 1124 | for (i = 0; i < 8; i++) |
1125 | __set_bit(BTN_0 + i, input_dev->keybit); | 1125 | __set_bit(BTN_0 + i, input_dev->keybit); |
1126 | 1126 | ||
1127 | if (wacom_wac->features.type != WACOM_21UX2) { | 1127 | input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); |
1128 | input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); | 1128 | input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); |
1129 | input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); | ||
1130 | } | ||
1131 | |||
1132 | input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); | 1129 | input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); |
1133 | 1130 | ||
1134 | __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); | 1131 | __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index c621c98c99da..a88f3cbb100b 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -306,6 +306,11 @@ static inline bool dma_pte_present(struct dma_pte *pte) | |||
306 | return (pte->val & 3) != 0; | 306 | return (pte->val & 3) != 0; |
307 | } | 307 | } |
308 | 308 | ||
309 | static inline bool dma_pte_superpage(struct dma_pte *pte) | ||
310 | { | ||
311 | return (pte->val & (1 << 7)); | ||
312 | } | ||
313 | |||
309 | static inline int first_pte_in_page(struct dma_pte *pte) | 314 | static inline int first_pte_in_page(struct dma_pte *pte) |
310 | { | 315 | { |
311 | return !((unsigned long)pte & ~VTD_PAGE_MASK); | 316 | return !((unsigned long)pte & ~VTD_PAGE_MASK); |
@@ -404,6 +409,9 @@ static int dmar_forcedac; | |||
404 | static int intel_iommu_strict; | 409 | static int intel_iommu_strict; |
405 | static int intel_iommu_superpage = 1; | 410 | static int intel_iommu_superpage = 1; |
406 | 411 | ||
412 | int intel_iommu_gfx_mapped; | ||
413 | EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); | ||
414 | |||
407 | #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1)) | 415 | #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1)) |
408 | static DEFINE_SPINLOCK(device_domain_lock); | 416 | static DEFINE_SPINLOCK(device_domain_lock); |
409 | static LIST_HEAD(device_domain_list); | 417 | static LIST_HEAD(device_domain_list); |
@@ -577,17 +585,18 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain) | |||
577 | 585 | ||
578 | static void domain_update_iommu_superpage(struct dmar_domain *domain) | 586 | static void domain_update_iommu_superpage(struct dmar_domain *domain) |
579 | { | 587 | { |
580 | int i, mask = 0xf; | 588 | struct dmar_drhd_unit *drhd; |
589 | struct intel_iommu *iommu = NULL; | ||
590 | int mask = 0xf; | ||
581 | 591 | ||
582 | if (!intel_iommu_superpage) { | 592 | if (!intel_iommu_superpage) { |
583 | domain->iommu_superpage = 0; | 593 | domain->iommu_superpage = 0; |
584 | return; | 594 | return; |
585 | } | 595 | } |
586 | 596 | ||
587 | domain->iommu_superpage = 4; /* 1TiB */ | 597 | /* set iommu_superpage to the smallest common denominator */ |
588 | 598 | for_each_active_iommu(iommu, drhd) { | |
589 | for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) { | 599 | mask &= cap_super_page_val(iommu->cap); |
590 | mask |= cap_super_page_val(g_iommus[i]->cap); | ||
591 | if (!mask) { | 600 | if (!mask) { |
592 | break; | 601 | break; |
593 | } | 602 | } |
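In rough outline, the new loop ANDs each active IOMMU's super-page capability bits into one mask, so the domain only uses page sizes that every unit supports. A toy standalone sketch of that intersection, with invented capability values:

#include <stdio.h>

int main(void)
{
	/* Per-IOMMU cap_super_page_val() results; bit values here are
	 * made up purely for illustration. */
	unsigned int caps[] = { 0x3, 0x1, 0x3 };
	unsigned int mask = 0xf;
	unsigned int i;

	for (i = 0; i < sizeof(caps) / sizeof(caps[0]); i++) {
		mask &= caps[i];
		if (!mask)
			break;
	}
	printf("common super-page mask: 0x%x\n", mask);	/* 0x1 */
	return 0;
}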
@@ -730,29 +739,23 @@ out: | |||
730 | } | 739 | } |
731 | 740 | ||
732 | static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, | 741 | static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, |
733 | unsigned long pfn, int large_level) | 742 | unsigned long pfn, int target_level) |
734 | { | 743 | { |
735 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; | 744 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; |
736 | struct dma_pte *parent, *pte = NULL; | 745 | struct dma_pte *parent, *pte = NULL; |
737 | int level = agaw_to_level(domain->agaw); | 746 | int level = agaw_to_level(domain->agaw); |
738 | int offset, target_level; | 747 | int offset; |
739 | 748 | ||
740 | BUG_ON(!domain->pgd); | 749 | BUG_ON(!domain->pgd); |
741 | BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width); | 750 | BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width); |
742 | parent = domain->pgd; | 751 | parent = domain->pgd; |
743 | 752 | ||
744 | /* Search pte */ | ||
745 | if (!large_level) | ||
746 | target_level = 1; | ||
747 | else | ||
748 | target_level = large_level; | ||
749 | |||
750 | while (level > 0) { | 753 | while (level > 0) { |
751 | void *tmp_page; | 754 | void *tmp_page; |
752 | 755 | ||
753 | offset = pfn_level_offset(pfn, level); | 756 | offset = pfn_level_offset(pfn, level); |
754 | pte = &parent[offset]; | 757 | pte = &parent[offset]; |
755 | if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE)) | 758 | if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte))) |
756 | break; | 759 | break; |
757 | if (level == target_level) | 760 | if (level == target_level) |
758 | break; | 761 | break; |
@@ -816,13 +819,14 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, | |||
816 | } | 819 | } |
817 | 820 | ||
818 | /* clear last level pte, a tlb flush should be followed */ | 821 | /* clear last level pte, a tlb flush should be followed */ |
819 | static void dma_pte_clear_range(struct dmar_domain *domain, | 822 | static int dma_pte_clear_range(struct dmar_domain *domain, |
820 | unsigned long start_pfn, | 823 | unsigned long start_pfn, |
821 | unsigned long last_pfn) | 824 | unsigned long last_pfn) |
822 | { | 825 | { |
823 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; | 826 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; |
824 | unsigned int large_page = 1; | 827 | unsigned int large_page = 1; |
825 | struct dma_pte *first_pte, *pte; | 828 | struct dma_pte *first_pte, *pte; |
829 | int order; | ||
826 | 830 | ||
827 | BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); | 831 | BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); |
828 | BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); | 832 | BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); |
@@ -846,6 +850,9 @@ static void dma_pte_clear_range(struct dmar_domain *domain, | |||
846 | (void *)pte - (void *)first_pte); | 850 | (void *)pte - (void *)first_pte); |
847 | 851 | ||
848 | } while (start_pfn && start_pfn <= last_pfn); | 852 | } while (start_pfn && start_pfn <= last_pfn); |
853 | |||
854 | order = (large_page - 1) * 9; | ||
855 | return order; | ||
849 | } | 856 | } |
850 | 857 | ||
851 | /* free page table pages. last level pte should already be cleared */ | 858 | /* free page table pages. last level pte should already be cleared */ |
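dma_pte_clear_range() now reports how much it actually cleared: each VT-d page-table level spans 9 address bits, so a superpage at level N corresponds to page order (N - 1) * 9. A small standalone check of that mapping:

#include <stdio.h>

int main(void)
{
	int large_page;

	for (large_page = 1; large_page <= 3; large_page++) {
		int order = (large_page - 1) * 9;
		unsigned long long kib = (4096ULL << order) >> 10;
		printf("level %d -> order %2d -> %llu KiB\n",
		       large_page, order, kib);
	}
	return 0;	/* 4 KiB, 2048 KiB (2 MiB), 1048576 KiB (1 GiB) */
}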
@@ -3226,9 +3233,6 @@ static void __init init_no_remapping_devices(void) | |||
3226 | } | 3233 | } |
3227 | } | 3234 | } |
3228 | 3235 | ||
3229 | if (dmar_map_gfx) | ||
3230 | return; | ||
3231 | |||
3232 | for_each_drhd_unit(drhd) { | 3236 | for_each_drhd_unit(drhd) { |
3233 | int i; | 3237 | int i; |
3234 | if (drhd->ignored || drhd->include_all) | 3238 | if (drhd->ignored || drhd->include_all) |
@@ -3236,18 +3240,23 @@ static void __init init_no_remapping_devices(void) | |||
3236 | 3240 | ||
3237 | for (i = 0; i < drhd->devices_cnt; i++) | 3241 | for (i = 0; i < drhd->devices_cnt; i++) |
3238 | if (drhd->devices[i] && | 3242 | if (drhd->devices[i] && |
3239 | !IS_GFX_DEVICE(drhd->devices[i])) | 3243 | !IS_GFX_DEVICE(drhd->devices[i])) |
3240 | break; | 3244 | break; |
3241 | 3245 | ||
3242 | if (i < drhd->devices_cnt) | 3246 | if (i < drhd->devices_cnt) |
3243 | continue; | 3247 | continue; |
3244 | 3248 | ||
3245 | /* bypass IOMMU if it is just for gfx devices */ | 3249 | /* This IOMMU has *only* gfx devices. Either bypass it or |
3246 | drhd->ignored = 1; | 3250 | set the gfx_mapped flag, as appropriate */ |
3247 | for (i = 0; i < drhd->devices_cnt; i++) { | 3251 | if (dmar_map_gfx) { |
3248 | if (!drhd->devices[i]) | 3252 | intel_iommu_gfx_mapped = 1; |
3249 | continue; | 3253 | } else { |
3250 | drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; | 3254 | drhd->ignored = 1; |
3255 | for (i = 0; i < drhd->devices_cnt; i++) { | ||
3256 | if (!drhd->devices[i]) | ||
3257 | continue; | ||
3258 | drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; | ||
3259 | } | ||
3251 | } | 3260 | } |
3252 | } | 3261 | } |
3253 | } | 3262 | } |
@@ -3568,6 +3577,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, | |||
3568 | found = 1; | 3577 | found = 1; |
3569 | } | 3578 | } |
3570 | 3579 | ||
3580 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
3581 | |||
3571 | if (found == 0) { | 3582 | if (found == 0) { |
3572 | unsigned long tmp_flags; | 3583 | unsigned long tmp_flags; |
3573 | spin_lock_irqsave(&domain->iommu_lock, tmp_flags); | 3584 | spin_lock_irqsave(&domain->iommu_lock, tmp_flags); |
@@ -3584,8 +3595,6 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, | |||
3584 | spin_unlock_irqrestore(&iommu->lock, tmp_flags); | 3595 | spin_unlock_irqrestore(&iommu->lock, tmp_flags); |
3585 | } | 3596 | } |
3586 | } | 3597 | } |
3587 | |||
3588 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
3589 | } | 3598 | } |
3590 | 3599 | ||
3591 | static void vm_domain_remove_all_dev_info(struct dmar_domain *domain) | 3600 | static void vm_domain_remove_all_dev_info(struct dmar_domain *domain) |
@@ -3739,6 +3748,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain) | |||
3739 | vm_domain_exit(dmar_domain); | 3748 | vm_domain_exit(dmar_domain); |
3740 | return -ENOMEM; | 3749 | return -ENOMEM; |
3741 | } | 3750 | } |
3751 | domain_update_iommu_cap(dmar_domain); | ||
3742 | domain->priv = dmar_domain; | 3752 | domain->priv = dmar_domain; |
3743 | 3753 | ||
3744 | return 0; | 3754 | return 0; |
@@ -3864,14 +3874,15 @@ static int intel_iommu_unmap(struct iommu_domain *domain, | |||
3864 | { | 3874 | { |
3865 | struct dmar_domain *dmar_domain = domain->priv; | 3875 | struct dmar_domain *dmar_domain = domain->priv; |
3866 | size_t size = PAGE_SIZE << gfp_order; | 3876 | size_t size = PAGE_SIZE << gfp_order; |
3877 | int order; | ||
3867 | 3878 | ||
3868 | dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, | 3879 | order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, |
3869 | (iova + size - 1) >> VTD_PAGE_SHIFT); | 3880 | (iova + size - 1) >> VTD_PAGE_SHIFT); |
3870 | 3881 | ||
3871 | if (dmar_domain->max_addr == iova + size) | 3882 | if (dmar_domain->max_addr == iova + size) |
3872 | dmar_domain->max_addr = iova; | 3883 | dmar_domain->max_addr = iova; |
3873 | 3884 | ||
3874 | return gfp_order; | 3885 | return order; |
3875 | } | 3886 | } |
3876 | 3887 | ||
3877 | static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, | 3888 | static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, |
@@ -3950,7 +3961,11 @@ static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev) | |||
3950 | if (!(ggc & GGC_MEMORY_VT_ENABLED)) { | 3961 | if (!(ggc & GGC_MEMORY_VT_ENABLED)) { |
3951 | printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); | 3962 | printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); |
3952 | dmar_map_gfx = 0; | 3963 | dmar_map_gfx = 0; |
3953 | } | 3964 | } else if (dmar_map_gfx) { |
3965 | /* we have to ensure the gfx device is idle before we flush */ | ||
3966 | printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n"); | ||
3967 | intel_iommu_strict = 1; | ||
3968 | } | ||
3954 | } | 3969 | } |
3955 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); | 3970 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); |
3956 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); | 3971 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 49da55c1528a..8c2a000cf3f5 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -1698,6 +1698,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
1698 | } | 1698 | } |
1699 | 1699 | ||
1700 | ti->num_flush_requests = 1; | 1700 | ti->num_flush_requests = 1; |
1701 | ti->discard_zeroes_data_unsupported = 1; | ||
1702 | |||
1701 | return 0; | 1703 | return 0; |
1702 | 1704 | ||
1703 | bad: | 1705 | bad: |
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 89f73ca22cfa..f84c08029b21 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c | |||
@@ -81,8 +81,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, | |||
81 | * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags> | 81 | * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags> |
82 | */ | 82 | */ |
83 | if (!strcasecmp(arg_name, "corrupt_bio_byte")) { | 83 | if (!strcasecmp(arg_name, "corrupt_bio_byte")) { |
84 | if (!argc) | 84 | if (!argc) { |
85 | ti->error = "Feature corrupt_bio_byte requires parameters"; | 85 | ti->error = "Feature corrupt_bio_byte requires parameters"; |
86 | return -EINVAL; | ||
87 | } | ||
86 | 88 | ||
87 | r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error); | 89 | r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error); |
88 | if (r) | 90 | if (r) |
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index f82147029636..32ac70861d66 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c | |||
@@ -628,6 +628,7 @@ void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc, | |||
628 | job->kc = kc; | 628 | job->kc = kc; |
629 | job->fn = fn; | 629 | job->fn = fn; |
630 | job->context = context; | 630 | job->context = context; |
631 | job->master_job = job; | ||
631 | 632 | ||
632 | atomic_inc(&kc->nr_jobs); | 633 | atomic_inc(&kc->nr_jobs); |
633 | 634 | ||
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index a002dd85db1e..86df8b2cf927 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
@@ -449,7 +449,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv, | |||
449 | rs->ti->error = "write_mostly option is only valid for RAID1"; | 449 | rs->ti->error = "write_mostly option is only valid for RAID1"; |
450 | return -EINVAL; | 450 | return -EINVAL; |
451 | } | 451 | } |
452 | if (value > rs->md.raid_disks) { | 452 | if (value >= rs->md.raid_disks) { |
453 | rs->ti->error = "Invalid write_mostly drive index given"; | 453 | rs->ti->error = "Invalid write_mostly drive index given"; |
454 | return -EINVAL; | 454 | return -EINVAL; |
455 | } | 455 | } |
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 986b8754bb08..bc04518e9d8b 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
@@ -1238,14 +1238,15 @@ static void dm_table_set_integrity(struct dm_table *t) | |||
1238 | return; | 1238 | return; |
1239 | 1239 | ||
1240 | template_disk = dm_table_get_integrity_disk(t, true); | 1240 | template_disk = dm_table_get_integrity_disk(t, true); |
1241 | if (!template_disk && | 1241 | if (template_disk) |
1242 | blk_integrity_is_initialized(dm_disk(t->md))) { | 1242 | blk_integrity_register(dm_disk(t->md), |
1243 | blk_get_integrity(template_disk)); | ||
1244 | else if (blk_integrity_is_initialized(dm_disk(t->md))) | ||
1243 | DMWARN("%s: device no longer has a valid integrity profile", | 1245 | DMWARN("%s: device no longer has a valid integrity profile", |
1244 | dm_device_name(t->md)); | 1246 | dm_device_name(t->md)); |
1245 | return; | 1247 | else |
1246 | } | 1248 | DMWARN("%s: unable to establish an integrity profile", |
1247 | blk_integrity_register(dm_disk(t->md), | 1249 | dm_device_name(t->md)); |
1248 | blk_get_integrity(template_disk)); | ||
1249 | } | 1250 | } |
1250 | 1251 | ||
1251 | static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, | 1252 | static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, |
@@ -1282,6 +1283,22 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush) | |||
1282 | return 0; | 1283 | return 0; |
1283 | } | 1284 | } |
1284 | 1285 | ||
1286 | static bool dm_table_discard_zeroes_data(struct dm_table *t) | ||
1287 | { | ||
1288 | struct dm_target *ti; | ||
1289 | unsigned i = 0; | ||
1290 | |||
1291 | /* Ensure that all targets support discard_zeroes_data. */ | ||
1292 | while (i < dm_table_get_num_targets(t)) { | ||
1293 | ti = dm_table_get_target(t, i++); | ||
1294 | |||
1295 | if (ti->discard_zeroes_data_unsupported) | ||
1296 | return 0; | ||
1297 | } | ||
1298 | |||
1299 | return 1; | ||
1300 | } | ||
1301 | |||
1285 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, | 1302 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, |
1286 | struct queue_limits *limits) | 1303 | struct queue_limits *limits) |
1287 | { | 1304 | { |
@@ -1304,6 +1321,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, | |||
1304 | } | 1321 | } |
1305 | blk_queue_flush(q, flush); | 1322 | blk_queue_flush(q, flush); |
1306 | 1323 | ||
1324 | if (!dm_table_discard_zeroes_data(t)) | ||
1325 | q->limits.discard_zeroes_data = 0; | ||
1326 | |||
1307 | dm_table_set_integrity(t); | 1327 | dm_table_set_integrity(t); |
1308 | 1328 | ||
1309 | /* | 1329 | /* |
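A compact model of the rule these hunks add: the table keeps discard_zeroes_data only if no target (such as dm-crypt above) has opted out. The types and names below are stand-ins, not the device-mapper API.

#include <stdbool.h>
#include <stdio.h>

struct target_model { bool discard_zeroes_data_unsupported; };

static bool table_discard_zeroes_data(const struct target_model *t, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (t[i].discard_zeroes_data_unsupported)
			return false;	/* one opt-out clears the queue limit */
	return true;
}

int main(void)
{
	struct target_model tgts[] = { { false }, { true } };

	printf("discard_zeroes_data: %d\n",
	       (int)table_discard_zeroes_data(tgts, 2));	/* prints 0 */
	return 0;
}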
diff --git a/drivers/md/md.c b/drivers/md/md.c index 5404b2295820..5c95ccb59500 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -61,6 +61,11 @@ | |||
61 | static void autostart_arrays(int part); | 61 | static void autostart_arrays(int part); |
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | /* pers_list is a list of registered personalities protected | ||
65 | * by pers_lock. | ||
66 | * pers_lock additionally protects accesses to | ||
67 | * mddev->thread when the mutex cannot be held. | ||
68 | */ | ||
64 | static LIST_HEAD(pers_list); | 69 | static LIST_HEAD(pers_list); |
65 | static DEFINE_SPINLOCK(pers_lock); | 70 | static DEFINE_SPINLOCK(pers_lock); |
66 | 71 | ||
@@ -739,7 +744,12 @@ static void mddev_unlock(mddev_t * mddev) | |||
739 | } else | 744 | } else |
740 | mutex_unlock(&mddev->reconfig_mutex); | 745 | mutex_unlock(&mddev->reconfig_mutex); |
741 | 746 | ||
747 | /* As we've dropped the mutex we need a spinlock to | ||
748 | * make sure the thread doesn't disappear | ||
749 | */ | ||
750 | spin_lock(&pers_lock); | ||
742 | md_wakeup_thread(mddev->thread); | 751 | md_wakeup_thread(mddev->thread); |
752 | spin_unlock(&pers_lock); | ||
743 | } | 753 | } |
744 | 754 | ||
745 | static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) | 755 | static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) |
@@ -6429,11 +6439,18 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, | |||
6429 | return thread; | 6439 | return thread; |
6430 | } | 6440 | } |
6431 | 6441 | ||
6432 | void md_unregister_thread(mdk_thread_t *thread) | 6442 | void md_unregister_thread(mdk_thread_t **threadp) |
6433 | { | 6443 | { |
6444 | mdk_thread_t *thread = *threadp; | ||
6434 | if (!thread) | 6445 | if (!thread) |
6435 | return; | 6446 | return; |
6436 | dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); | 6447 | dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); |
6448 | /* Locking ensures that mddev_unlock does not wake_up a | ||
6449 | * non-existent thread | ||
6450 | */ | ||
6451 | spin_lock(&pers_lock); | ||
6452 | *threadp = NULL; | ||
6453 | spin_unlock(&pers_lock); | ||
6437 | 6454 | ||
6438 | kthread_stop(thread->tsk); | 6455 | kthread_stop(thread->tsk); |
6439 | kfree(thread); | 6456 | kfree(thread); |
@@ -7340,8 +7357,7 @@ static void reap_sync_thread(mddev_t *mddev) | |||
7340 | mdk_rdev_t *rdev; | 7357 | mdk_rdev_t *rdev; |
7341 | 7358 | ||
7342 | /* resync has finished, collect result */ | 7359 | /* resync has finished, collect result */ |
7343 | md_unregister_thread(mddev->sync_thread); | 7360 | md_unregister_thread(&mddev->sync_thread); |
7344 | mddev->sync_thread = NULL; | ||
7345 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && | 7361 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && |
7346 | !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { | 7362 | !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { |
7347 | /* success...*/ | 7363 | /* success...*/ |
diff --git a/drivers/md/md.h b/drivers/md/md.h index 1e586bb4452e..0a309dc29b45 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -560,7 +560,7 @@ extern int register_md_personality(struct mdk_personality *p); | |||
560 | extern int unregister_md_personality(struct mdk_personality *p); | 560 | extern int unregister_md_personality(struct mdk_personality *p); |
561 | extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev), | 561 | extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev), |
562 | mddev_t *mddev, const char *name); | 562 | mddev_t *mddev, const char *name); |
563 | extern void md_unregister_thread(mdk_thread_t *thread); | 563 | extern void md_unregister_thread(mdk_thread_t **threadp); |
564 | extern void md_wakeup_thread(mdk_thread_t *thread); | 564 | extern void md_wakeup_thread(mdk_thread_t *thread); |
565 | extern void md_check_recovery(mddev_t *mddev); | 565 | extern void md_check_recovery(mddev_t *mddev); |
566 | extern void md_write_start(mddev_t *mddev, struct bio *bi); | 566 | extern void md_write_start(mddev_t *mddev, struct bio *bi); |
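With the new signature, callers hand over the address of their thread pointer and md_unregister_thread() clears it under pers_lock before stopping the kthread, so the md_wakeup_thread() call in mddev_unlock() can never chase a stale pointer. A userspace model of that ordering, with pthreads standing in for the kernel thread API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct worker { pthread_t tid; };

static pthread_mutex_t pers_lock = PTHREAD_MUTEX_INITIALIZER;

static void *worker_run(void *arg)
{
	(void)arg;
	return NULL;
}

/* Clear the shared pointer under the lock first, then tear the thread
 * down; wakers that take pers_lock only ever see NULL. */
static void unregister_worker(struct worker **wp)
{
	struct worker *w;

	pthread_mutex_lock(&pers_lock);
	w = *wp;
	*wp = NULL;
	pthread_mutex_unlock(&pers_lock);

	if (!w)
		return;
	pthread_join(w->tid, NULL);	/* stands in for kthread_stop() */
	free(w);
}

int main(void)
{
	struct worker *w = calloc(1, sizeof(*w));

	pthread_create(&w->tid, NULL, worker_run, NULL);
	unregister_worker(&w);
	printf("thread pointer after unregister: %p\n", (void *)w);
	return 0;
}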
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 3535c23af288..d5b5fb300171 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -514,8 +514,7 @@ static int multipath_stop (mddev_t *mddev) | |||
514 | { | 514 | { |
515 | multipath_conf_t *conf = mddev->private; | 515 | multipath_conf_t *conf = mddev->private; |
516 | 516 | ||
517 | md_unregister_thread(mddev->thread); | 517 | md_unregister_thread(&mddev->thread); |
518 | mddev->thread = NULL; | ||
519 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ | 518 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ |
520 | mempool_destroy(conf->pool); | 519 | mempool_destroy(conf->pool); |
521 | kfree(conf->multipaths); | 520 | kfree(conf->multipaths); |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f4622dd8fc59..d9587dffe533 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -2562,8 +2562,7 @@ static int stop(mddev_t *mddev) | |||
2562 | raise_barrier(conf); | 2562 | raise_barrier(conf); |
2563 | lower_barrier(conf); | 2563 | lower_barrier(conf); |
2564 | 2564 | ||
2565 | md_unregister_thread(mddev->thread); | 2565 | md_unregister_thread(&mddev->thread); |
2566 | mddev->thread = NULL; | ||
2567 | if (conf->r1bio_pool) | 2566 | if (conf->r1bio_pool) |
2568 | mempool_destroy(conf->r1bio_pool); | 2567 | mempool_destroy(conf->r1bio_pool); |
2569 | kfree(conf->mirrors); | 2568 | kfree(conf->mirrors); |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index d7a8468ddeab..0cd9672cf9cb 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -2955,7 +2955,7 @@ static int run(mddev_t *mddev) | |||
2955 | return 0; | 2955 | return 0; |
2956 | 2956 | ||
2957 | out_free_conf: | 2957 | out_free_conf: |
2958 | md_unregister_thread(mddev->thread); | 2958 | md_unregister_thread(&mddev->thread); |
2959 | if (conf->r10bio_pool) | 2959 | if (conf->r10bio_pool) |
2960 | mempool_destroy(conf->r10bio_pool); | 2960 | mempool_destroy(conf->r10bio_pool); |
2961 | safe_put_page(conf->tmppage); | 2961 | safe_put_page(conf->tmppage); |
@@ -2973,8 +2973,7 @@ static int stop(mddev_t *mddev) | |||
2973 | raise_barrier(conf, 0); | 2973 | raise_barrier(conf, 0); |
2974 | lower_barrier(conf); | 2974 | lower_barrier(conf); |
2975 | 2975 | ||
2976 | md_unregister_thread(mddev->thread); | 2976 | md_unregister_thread(&mddev->thread); |
2977 | mddev->thread = NULL; | ||
2978 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ | 2977 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ |
2979 | if (conf->r10bio_pool) | 2978 | if (conf->r10bio_pool) |
2980 | mempool_destroy(conf->r10bio_pool); | 2979 | mempool_destroy(conf->r10bio_pool); |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 43709fa6b6df..ac5e8b57e50f 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -4941,8 +4941,7 @@ static int run(mddev_t *mddev) | |||
4941 | 4941 | ||
4942 | return 0; | 4942 | return 0; |
4943 | abort: | 4943 | abort: |
4944 | md_unregister_thread(mddev->thread); | 4944 | md_unregister_thread(&mddev->thread); |
4945 | mddev->thread = NULL; | ||
4946 | if (conf) { | 4945 | if (conf) { |
4947 | print_raid5_conf(conf); | 4946 | print_raid5_conf(conf); |
4948 | free_conf(conf); | 4947 | free_conf(conf); |
@@ -4956,8 +4955,7 @@ static int stop(mddev_t *mddev) | |||
4956 | { | 4955 | { |
4957 | raid5_conf_t *conf = mddev->private; | 4956 | raid5_conf_t *conf = mddev->private; |
4958 | 4957 | ||
4959 | md_unregister_thread(mddev->thread); | 4958 | md_unregister_thread(&mddev->thread); |
4960 | mddev->thread = NULL; | ||
4961 | if (mddev->queue) | 4959 | if (mddev->queue) |
4962 | mddev->queue->backing_dev_info.congested_fn = NULL; | 4960 | mddev->queue->backing_dev_info.congested_fn = NULL; |
4963 | free_conf(conf); | 4961 | free_conf(conf); |
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c index b5ef36222440..b3a5ecdb33ac 100644 --- a/drivers/media/video/omap/omap_vout.c +++ b/drivers/media/video/omap/omap_vout.c | |||
@@ -2194,19 +2194,6 @@ static int __init omap_vout_probe(struct platform_device *pdev) | |||
2194 | "'%s' Display already enabled\n", | 2194 | "'%s' Display already enabled\n", |
2195 | def_display->name); | 2195 | def_display->name); |
2196 | } | 2196 | } |
2197 | /* set the update mode */ | ||
2198 | if (def_display->caps & | ||
2199 | OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { | ||
2200 | if (dssdrv->enable_te) | ||
2201 | dssdrv->enable_te(def_display, 0); | ||
2202 | if (dssdrv->set_update_mode) | ||
2203 | dssdrv->set_update_mode(def_display, | ||
2204 | OMAP_DSS_UPDATE_MANUAL); | ||
2205 | } else { | ||
2206 | if (dssdrv->set_update_mode) | ||
2207 | dssdrv->set_update_mode(def_display, | ||
2208 | OMAP_DSS_UPDATE_AUTO); | ||
2209 | } | ||
2210 | } | 2197 | } |
2211 | } | 2198 | } |
2212 | 2199 | ||
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c index 9d3459de04b2..80796eb0c53e 100644 --- a/drivers/media/video/omap3isp/ispccdc.c +++ b/drivers/media/video/omap3isp/ispccdc.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/dma-mapping.h> | 31 | #include <linux/dma-mapping.h> |
32 | #include <linux/mm.h> | 32 | #include <linux/mm.h> |
33 | #include <linux/sched.h> | 33 | #include <linux/sched.h> |
34 | #include <linux/slab.h> | ||
34 | #include <media/v4l2-event.h> | 35 | #include <media/v4l2-event.h> |
35 | 36 | ||
36 | #include "isp.h" | 37 | #include "isp.h" |
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c index d29f9c2d0854..e4100b1f68df 100644 --- a/drivers/media/video/uvc/uvc_driver.c +++ b/drivers/media/video/uvc/uvc_driver.c | |||
@@ -1961,7 +1961,7 @@ static int __uvc_resume(struct usb_interface *intf, int reset) | |||
1961 | 1961 | ||
1962 | list_for_each_entry(stream, &dev->streams, list) { | 1962 | list_for_each_entry(stream, &dev->streams, list) { |
1963 | if (stream->intf == intf) | 1963 | if (stream->intf == intf) |
1964 | return uvc_video_resume(stream); | 1964 | return uvc_video_resume(stream, reset); |
1965 | } | 1965 | } |
1966 | 1966 | ||
1967 | uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface " | 1967 | uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface " |
diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c index 48fea373c25a..29e239911d0e 100644 --- a/drivers/media/video/uvc/uvc_entity.c +++ b/drivers/media/video/uvc/uvc_entity.c | |||
@@ -49,7 +49,7 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain, | |||
49 | if (remote == NULL) | 49 | if (remote == NULL) |
50 | return -EINVAL; | 50 | return -EINVAL; |
51 | 51 | ||
52 | source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING) | 52 | source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING) |
53 | ? (remote->vdev ? &remote->vdev->entity : NULL) | 53 | ? (remote->vdev ? &remote->vdev->entity : NULL) |
54 | : &remote->subdev.entity; | 54 | : &remote->subdev.entity; |
55 | if (source == NULL) | 55 | if (source == NULL) |
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c index 8244167c8915..ffd1158628b6 100644 --- a/drivers/media/video/uvc/uvc_video.c +++ b/drivers/media/video/uvc/uvc_video.c | |||
@@ -1104,10 +1104,18 @@ int uvc_video_suspend(struct uvc_streaming *stream) | |||
1104 | * buffers, making sure userspace applications are notified of the problem | 1104 | * buffers, making sure userspace applications are notified of the problem |
1105 | * instead of waiting forever. | 1105 | * instead of waiting forever. |
1106 | */ | 1106 | */ |
1107 | int uvc_video_resume(struct uvc_streaming *stream) | 1107 | int uvc_video_resume(struct uvc_streaming *stream, int reset) |
1108 | { | 1108 | { |
1109 | int ret; | 1109 | int ret; |
1110 | 1110 | ||
1111 | /* If the bus has been reset on resume, set the alternate setting to 0. | ||
1112 | * This should be the default value, but some devices crash or otherwise | ||
1113 | * misbehave if they don't receive a SET_INTERFACE request before any | ||
1114 | * other video control request. | ||
1115 | */ | ||
1116 | if (reset) | ||
1117 | usb_set_interface(stream->dev->udev, stream->intfnum, 0); | ||
1118 | |||
1111 | stream->frozen = 0; | 1119 | stream->frozen = 0; |
1112 | 1120 | ||
1113 | ret = uvc_commit_video(stream, &stream->ctrl); | 1121 | ret = uvc_commit_video(stream, &stream->ctrl); |
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h index df32a43ca86a..cbdd49bf8b67 100644 --- a/drivers/media/video/uvc/uvcvideo.h +++ b/drivers/media/video/uvc/uvcvideo.h | |||
@@ -638,7 +638,7 @@ extern void uvc_mc_cleanup_entity(struct uvc_entity *entity); | |||
638 | /* Video */ | 638 | /* Video */ |
639 | extern int uvc_video_init(struct uvc_streaming *stream); | 639 | extern int uvc_video_init(struct uvc_streaming *stream); |
640 | extern int uvc_video_suspend(struct uvc_streaming *stream); | 640 | extern int uvc_video_suspend(struct uvc_streaming *stream); |
641 | extern int uvc_video_resume(struct uvc_streaming *stream); | 641 | extern int uvc_video_resume(struct uvc_streaming *stream, int reset); |
642 | extern int uvc_video_enable(struct uvc_streaming *stream, int enable); | 642 | extern int uvc_video_enable(struct uvc_streaming *stream, int enable); |
643 | extern int uvc_probe_video(struct uvc_streaming *stream, | 643 | extern int uvc_probe_video(struct uvc_streaming *stream, |
644 | struct uvc_streaming_control *probe); | 644 | struct uvc_streaming_control *probe); |
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c index 06f14008b346..a5c9ed128b97 100644 --- a/drivers/media/video/v4l2-dev.c +++ b/drivers/media/video/v4l2-dev.c | |||
@@ -173,6 +173,17 @@ static void v4l2_device_release(struct device *cd) | |||
173 | media_device_unregister_entity(&vdev->entity); | 173 | media_device_unregister_entity(&vdev->entity); |
174 | #endif | 174 | #endif |
175 | 175 | ||
176 | /* Do not call v4l2_device_put if there is no release callback set. | ||
177 | * Drivers that have no v4l2_device release callback might free the | ||
178 | * v4l2_dev instance in the video_device release callback below, so we | ||
179 | * must perform this check here. | ||
180 | * | ||
181 | * TODO: In the long run all drivers that use v4l2_device should use the | ||
182 | * v4l2_device release callback. This check will then be unnecessary. | ||
183 | */ | ||
184 | if (v4l2_dev && v4l2_dev->release == NULL) | ||
185 | v4l2_dev = NULL; | ||
186 | |||
176 | /* Release video_device and perform other | 187 | /* Release video_device and perform other |
177 | cleanups as needed. */ | 188 | cleanups as needed. */ |
178 | vdev->release(vdev); | 189 | vdev->release(vdev); |
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c index c72856c41434..e6a2c3b302d4 100644 --- a/drivers/media/video/v4l2-device.c +++ b/drivers/media/video/v4l2-device.c | |||
@@ -38,6 +38,7 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev) | |||
38 | mutex_init(&v4l2_dev->ioctl_lock); | 38 | mutex_init(&v4l2_dev->ioctl_lock); |
39 | v4l2_prio_init(&v4l2_dev->prio); | 39 | v4l2_prio_init(&v4l2_dev->prio); |
40 | kref_init(&v4l2_dev->ref); | 40 | kref_init(&v4l2_dev->ref); |
41 | get_device(dev); | ||
41 | v4l2_dev->dev = dev; | 42 | v4l2_dev->dev = dev; |
42 | if (dev == NULL) { | 43 | if (dev == NULL) { |
43 | /* If dev == NULL, then name must be filled in by the caller */ | 44 | /* If dev == NULL, then name must be filled in by the caller */ |
@@ -93,6 +94,7 @@ void v4l2_device_disconnect(struct v4l2_device *v4l2_dev) | |||
93 | 94 | ||
94 | if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev) | 95 | if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev) |
95 | dev_set_drvdata(v4l2_dev->dev, NULL); | 96 | dev_set_drvdata(v4l2_dev->dev, NULL); |
97 | put_device(v4l2_dev->dev); | ||
96 | v4l2_dev->dev = NULL; | 98 | v4l2_dev->dev = NULL; |
97 | } | 99 | } |
98 | EXPORT_SYMBOL_GPL(v4l2_device_disconnect); | 100 | EXPORT_SYMBOL_GPL(v4l2_device_disconnect); |
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c index 21131c7b0f1e..563654c9b19e 100644 --- a/drivers/mfd/jz4740-adc.c +++ b/drivers/mfd/jz4740-adc.c | |||
@@ -273,7 +273,7 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev) | |||
273 | ct->regs.ack = JZ_REG_ADC_STATUS; | 273 | ct->regs.ack = JZ_REG_ADC_STATUS; |
274 | ct->chip.irq_mask = irq_gc_mask_set_bit; | 274 | ct->chip.irq_mask = irq_gc_mask_set_bit; |
275 | ct->chip.irq_unmask = irq_gc_mask_clr_bit; | 275 | ct->chip.irq_unmask = irq_gc_mask_clr_bit; |
276 | ct->chip.irq_ack = irq_gc_ack; | 276 | ct->chip.irq_ack = irq_gc_ack_set_bit; |
277 | 277 | ||
278 | irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL); | 278 | irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL); |
279 | 279 | ||
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c index b928bc14e97b..8b51cd62d067 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d.c +++ b/drivers/misc/lis3lv02d/lis3lv02d.c | |||
@@ -375,12 +375,14 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3) | |||
375 | * both have been read. So the value read will always be correct. | 375 | * both have been read. So the value read will always be correct. |
376 | * Set BOOT bit to refresh factory tuning values. | 376 | * Set BOOT bit to refresh factory tuning values. |
377 | */ | 377 | */ |
378 | lis3->read(lis3, CTRL_REG2, ®); | 378 | if (lis3->pdata) { |
379 | if (lis3->whoami == WAI_12B) | 379 | lis3->read(lis3, CTRL_REG2, ®); |
380 | reg |= CTRL2_BDU | CTRL2_BOOT; | 380 | if (lis3->whoami == WAI_12B) |
381 | else | 381 | reg |= CTRL2_BDU | CTRL2_BOOT; |
382 | reg |= CTRL2_BOOT_8B; | 382 | else |
383 | lis3->write(lis3, CTRL_REG2, reg); | 383 | reg |= CTRL2_BOOT_8B; |
384 | lis3->write(lis3, CTRL_REG2, reg); | ||
385 | } | ||
384 | 386 | ||
385 | /* LIS3 power on delay is quite long */ | 387 | /* LIS3 power on delay is quite long */ |
386 | msleep(lis3->pwron_delay / lis3lv02d_get_odr()); | 388 | msleep(lis3->pwron_delay / lis3lv02d_get_odr()); |
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index e46df5331c55..9a7eb3b36cf3 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h | |||
@@ -239,13 +239,19 @@ void bnx2x_int_disable(struct bnx2x *bp); | |||
239 | * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X | 239 | * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X |
240 | * | 240 | * |
241 | */ | 241 | */ |
242 | /* iSCSI L2 */ | 242 | enum { |
243 | #define BNX2X_ISCSI_ETH_CL_ID_IDX 1 | 243 | BNX2X_ISCSI_ETH_CL_ID_IDX, |
244 | #define BNX2X_ISCSI_ETH_CID 49 | 244 | BNX2X_FCOE_ETH_CL_ID_IDX, |
245 | BNX2X_MAX_CNIC_ETH_CL_ID_IDX, | ||
246 | }; | ||
245 | 247 | ||
246 | /* FCoE L2 */ | 248 | #define BNX2X_CNIC_START_ETH_CID 48 |
247 | #define BNX2X_FCOE_ETH_CL_ID_IDX 2 | 249 | enum { |
248 | #define BNX2X_FCOE_ETH_CID 50 | 250 | /* iSCSI L2 */ |
251 | BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID, | ||
252 | /* FCoE L2 */ | ||
253 | BNX2X_FCOE_ETH_CID, | ||
254 | }; | ||
249 | 255 | ||
250 | /** Additional rings budgeting */ | 256 | /** Additional rings budgeting */ |
251 | #ifdef BCM_CNIC | 257 | #ifdef BCM_CNIC |
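The move from #defines to enums lets the index count fall out automatically (BNX2X_MAX_CNIC_ETH_CL_ID_IDX) and keeps the CIDs contiguous from BNX2X_CNIC_START_ETH_CID. A trimmed-down illustration of the same idiom, with shortened names:

#include <stdio.h>

enum {
	ISCSI_ETH_CL_ID_IDX,
	FCOE_ETH_CL_ID_IDX,
	MAX_CNIC_ETH_CL_ID_IDX,		/* doubles as the element count */
};

#define CNIC_START_ETH_CID 48
enum {
	ISCSI_ETH_CID = CNIC_START_ETH_CID,
	FCOE_ETH_CID,			/* 49, assigned contiguously */
};

int main(void)
{
	printf("indices: %d, iSCSI CID: %d, FCoE CID: %d\n",
	       MAX_CNIC_ETH_CL_ID_IDX, ISCSI_ETH_CID, FCOE_ETH_CID);
	return 0;
}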
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h index 223bfeebc597..2dc1199239d0 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.h +++ b/drivers/net/bnx2x/bnx2x_cmn.h | |||
@@ -1297,7 +1297,7 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp, | |||
1297 | static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) | 1297 | static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) |
1298 | { | 1298 | { |
1299 | return bp->cnic_base_cl_id + cl_idx + | 1299 | return bp->cnic_base_cl_id + cl_idx + |
1300 | (bp->pf_num >> 1) * NON_ETH_CONTEXT_USE; | 1300 | (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX; |
1301 | } | 1301 | } |
1302 | 1302 | ||
1303 | static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) | 1303 | static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) |
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c index a1e004a82f7a..0b4acf67e0c6 100644 --- a/drivers/net/bnx2x/bnx2x_dcb.c +++ b/drivers/net/bnx2x/bnx2x_dcb.c | |||
@@ -2120,6 +2120,7 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap) | |||
2120 | break; | 2120 | break; |
2121 | case DCB_CAP_ATTR_DCBX: | 2121 | case DCB_CAP_ATTR_DCBX: |
2122 | *cap = BNX2X_DCBX_CAPS; | 2122 | *cap = BNX2X_DCBX_CAPS; |
2123 | break; | ||
2123 | default: | 2124 | default: |
2124 | rval = -EINVAL; | 2125 | rval = -EINVAL; |
2125 | break; | 2126 | break; |
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index c027e9341a1a..15f800085bb2 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c | |||
@@ -4943,7 +4943,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4943 | int igu_seg_id; | 4943 | int igu_seg_id; |
4944 | int port = BP_PORT(bp); | 4944 | int port = BP_PORT(bp); |
4945 | int func = BP_FUNC(bp); | 4945 | int func = BP_FUNC(bp); |
4946 | int reg_offset; | 4946 | int reg_offset, reg_offset_en5; |
4947 | u64 section; | 4947 | u64 section; |
4948 | int index; | 4948 | int index; |
4949 | struct hc_sp_status_block_data sp_sb_data; | 4949 | struct hc_sp_status_block_data sp_sb_data; |
@@ -4966,6 +4966,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4966 | 4966 | ||
4967 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : | 4967 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : |
4968 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); | 4968 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); |
4969 | reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : | ||
4970 | MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0); | ||
4969 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { | 4971 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { |
4970 | int sindex; | 4972 | int sindex; |
4971 | /* take care of sig[0]..sig[4] */ | 4973 | /* take care of sig[0]..sig[4] */ |
@@ -4980,7 +4982,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4980 | * and not 16 between the different groups | 4982 | * and not 16 between the different groups |
4981 | */ | 4983 | */ |
4982 | bp->attn_group[index].sig[4] = REG_RD(bp, | 4984 | bp->attn_group[index].sig[4] = REG_RD(bp, |
4983 | reg_offset + 0x10 + 0x4*index); | 4985 | reg_offset_en5 + 0x4*index); |
4984 | else | 4986 | else |
4985 | bp->attn_group[index].sig[4] = 0; | 4987 | bp->attn_group[index].sig[4] = 0; |
4986 | } | 4988 | } |
@@ -7625,8 +7627,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) | |||
7625 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; | 7627 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; |
7626 | u8 *mac_addr = bp->dev->dev_addr; | 7628 | u8 *mac_addr = bp->dev->dev_addr; |
7627 | u32 val; | 7629 | u32 val; |
7630 | u16 pmc; | ||
7631 | |||
7628 | /* The mac address is written to entries 1-4 to | 7632 | /* The mac address is written to entries 1-4 to |
7629 | preserve entry 0 which is used by the PMF */ | 7633 | * preserve entry 0 which is used by the PMF |
7634 | */ | ||
7630 | u8 entry = (BP_VN(bp) + 1)*8; | 7635 | u8 entry = (BP_VN(bp) + 1)*8; |
7631 | 7636 | ||
7632 | val = (mac_addr[0] << 8) | mac_addr[1]; | 7637 | val = (mac_addr[0] << 8) | mac_addr[1]; |
@@ -7636,6 +7641,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) | |||
7636 | (mac_addr[4] << 8) | mac_addr[5]; | 7641 | (mac_addr[4] << 8) | mac_addr[5]; |
7637 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); | 7642 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); |
7638 | 7643 | ||
7644 | /* Enable the PME and clear the status */ | ||
7645 | pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc); | ||
7646 | pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS; | ||
7647 | pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc); | ||
7648 | |||
7639 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; | 7649 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; |
7640 | 7650 | ||
7641 | } else | 7651 | } else |
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index 750e8445dac4..fc7bd0f23c0b 100644 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ b/drivers/net/bnx2x/bnx2x_reg.h | |||
@@ -1384,6 +1384,18 @@ | |||
1384 | Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ | 1384 | Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ |
1385 | #define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 | 1385 | #define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 |
1386 | #define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 | 1386 | #define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 |
1387 | /* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped | ||
1388 | * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC | ||
1389 | * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] | ||
1390 | * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 | ||
1391 | * parity; [31-10] Reserved; */ | ||
1392 | #define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 0xa688 | ||
1393 | /* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped | ||
1394 | * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC | ||
1395 | * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] | ||
1396 | * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 | ||
1397 | * parity; [31-10] Reserved; */ | ||
1398 | #define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 0xa6b0 | ||
1387 | /* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu | 1399 | /* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu |
1388 | 128 bit vector */ | 1400 | 128 bit vector */ |
1389 | #define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 | 1401 | #define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 |
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index a047eb973e3b..47b928ed08f8 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
@@ -2168,7 +2168,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work) | |||
2168 | } | 2168 | } |
2169 | 2169 | ||
2170 | re_arm: | 2170 | re_arm: |
2171 | queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); | 2171 | if (!bond->kill_timers) |
2172 | queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); | ||
2172 | out: | 2173 | out: |
2173 | read_unlock(&bond->lock); | 2174 | read_unlock(&bond->lock); |
2174 | } | 2175 | } |
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 7f8b20a34ee3..d4fbd2e62616 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -1440,7 +1440,8 @@ void bond_alb_monitor(struct work_struct *work) | |||
1440 | } | 1440 | } |
1441 | 1441 | ||
1442 | re_arm: | 1442 | re_arm: |
1443 | queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks); | 1443 | if (!bond->kill_timers) |
1444 | queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks); | ||
1444 | out: | 1445 | out: |
1445 | read_unlock(&bond->lock); | 1446 | read_unlock(&bond->lock); |
1446 | } | 1447 | } |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 43f2ea541088..de3d351ccb6b 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -777,6 +777,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) | |||
777 | 777 | ||
778 | read_lock(&bond->lock); | 778 | read_lock(&bond->lock); |
779 | 779 | ||
780 | if (bond->kill_timers) | ||
781 | goto out; | ||
782 | |||
780 | /* rejoin all groups on bond device */ | 783 | /* rejoin all groups on bond device */ |
781 | __bond_resend_igmp_join_requests(bond->dev); | 784 | __bond_resend_igmp_join_requests(bond->dev); |
782 | 785 | ||
@@ -790,9 +793,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) | |||
790 | __bond_resend_igmp_join_requests(vlan_dev); | 793 | __bond_resend_igmp_join_requests(vlan_dev); |
791 | } | 794 | } |
792 | 795 | ||
793 | if (--bond->igmp_retrans > 0) | 796 | if ((--bond->igmp_retrans > 0) && !bond->kill_timers) |
794 | queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); | 797 | queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); |
795 | 798 | out: | |
796 | read_unlock(&bond->lock); | 799 | read_unlock(&bond->lock); |
797 | } | 800 | } |
798 | 801 | ||
@@ -1432,6 +1435,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) | |||
1432 | struct sk_buff *skb = *pskb; | 1435 | struct sk_buff *skb = *pskb; |
1433 | struct slave *slave; | 1436 | struct slave *slave; |
1434 | struct bonding *bond; | 1437 | struct bonding *bond; |
1438 | void (*recv_probe)(struct sk_buff *, struct bonding *, | ||
1439 | struct slave *); | ||
1435 | 1440 | ||
1436 | skb = skb_share_check(skb, GFP_ATOMIC); | 1441 | skb = skb_share_check(skb, GFP_ATOMIC); |
1437 | if (unlikely(!skb)) | 1442 | if (unlikely(!skb)) |
@@ -1445,11 +1450,12 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) | |||
1445 | if (bond->params.arp_interval) | 1450 | if (bond->params.arp_interval) |
1446 | slave->dev->last_rx = jiffies; | 1451 | slave->dev->last_rx = jiffies; |
1447 | 1452 | ||
1448 | if (bond->recv_probe) { | 1453 | recv_probe = ACCESS_ONCE(bond->recv_probe); |
1454 | if (recv_probe) { | ||
1449 | struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); | 1455 | struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); |
1450 | 1456 | ||
1451 | if (likely(nskb)) { | 1457 | if (likely(nskb)) { |
1452 | bond->recv_probe(nskb, bond, slave); | 1458 | recv_probe(nskb, bond, slave); |
1453 | dev_kfree_skb(nskb); | 1459 | dev_kfree_skb(nskb); |
1454 | } | 1460 | } |
1455 | } | 1461 | } |
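The recv_probe change is the classic read-once idiom: load the shared function pointer a single time so the NULL check and the call cannot see different values if another CPU clears it in between. A standalone sketch, with a volatile cast playing the role the kernel's ACCESS_ONCE() plays above:

#include <stdio.h>

typedef void (*probe_fn)(int);

static probe_fn recv_probe_hook;	/* may be cleared concurrently */

static void handle_frame(int frame)
{
	/* Single load; the test and the call use the same snapshot. */
	probe_fn recv_probe = *(volatile probe_fn *)&recv_probe_hook;

	if (recv_probe)
		recv_probe(frame);
}

static void print_probe(int frame)
{
	printf("probe saw frame %d\n", frame);
}

int main(void)
{
	recv_probe_hook = print_probe;
	handle_frame(42);
	return 0;
}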
@@ -2538,7 +2544,7 @@ void bond_mii_monitor(struct work_struct *work) | |||
2538 | } | 2544 | } |
2539 | 2545 | ||
2540 | re_arm: | 2546 | re_arm: |
2541 | if (bond->params.miimon) | 2547 | if (bond->params.miimon && !bond->kill_timers) |
2542 | queue_delayed_work(bond->wq, &bond->mii_work, | 2548 | queue_delayed_work(bond->wq, &bond->mii_work, |
2543 | msecs_to_jiffies(bond->params.miimon)); | 2549 | msecs_to_jiffies(bond->params.miimon)); |
2544 | out: | 2550 | out: |
@@ -2886,7 +2892,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work) | |||
2886 | } | 2892 | } |
2887 | 2893 | ||
2888 | re_arm: | 2894 | re_arm: |
2889 | if (bond->params.arp_interval) | 2895 | if (bond->params.arp_interval && !bond->kill_timers) |
2890 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); | 2896 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); |
2891 | out: | 2897 | out: |
2892 | read_unlock(&bond->lock); | 2898 | read_unlock(&bond->lock); |
@@ -3154,7 +3160,7 @@ void bond_activebackup_arp_mon(struct work_struct *work) | |||
3154 | bond_ab_arp_probe(bond); | 3160 | bond_ab_arp_probe(bond); |
3155 | 3161 | ||
3156 | re_arm: | 3162 | re_arm: |
3157 | if (bond->params.arp_interval) | 3163 | if (bond->params.arp_interval && !bond->kill_timers) |
3158 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); | 3164 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); |
3159 | out: | 3165 | out: |
3160 | read_unlock(&bond->lock); | 3166 | read_unlock(&bond->lock); |
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index 92feac68b66e..4cc6f44c2ba2 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c | |||
@@ -261,11 +261,13 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
261 | void __iomem *data = &regs->tx.dsr1_0; | 261 | void __iomem *data = &regs->tx.dsr1_0; |
262 | u16 *payload = (u16 *)frame->data; | 262 | u16 *payload = (u16 *)frame->data; |
263 | 263 | ||
264 | /* It is safe to write into dsr[dlc+1] */ | 264 | for (i = 0; i < frame->can_dlc / 2; i++) { |
265 | for (i = 0; i < (frame->can_dlc + 1) / 2; i++) { | ||
266 | out_be16(data, *payload++); | 265 | out_be16(data, *payload++); |
267 | data += 2 + _MSCAN_RESERVED_DSR_SIZE; | 266 | data += 2 + _MSCAN_RESERVED_DSR_SIZE; |
268 | } | 267 | } |
268 | /* write remaining byte if necessary */ | ||
269 | if (frame->can_dlc & 1) | ||
270 | out_8(data, frame->data[frame->can_dlc - 1]); | ||
269 | } | 271 | } |
270 | 272 | ||
271 | out_8(&regs->tx.dlr, frame->can_dlc); | 273 | out_8(&regs->tx.dlr, frame->can_dlc); |
@@ -330,10 +332,13 @@ static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame) | |||
330 | void __iomem *data = &regs->rx.dsr1_0; | 332 | void __iomem *data = &regs->rx.dsr1_0; |
331 | u16 *payload = (u16 *)frame->data; | 333 | u16 *payload = (u16 *)frame->data; |
332 | 334 | ||
333 | for (i = 0; i < (frame->can_dlc + 1) / 2; i++) { | 335 | for (i = 0; i < frame->can_dlc / 2; i++) { |
334 | *payload++ = in_be16(data); | 336 | *payload++ = in_be16(data); |
335 | data += 2 + _MSCAN_RESERVED_DSR_SIZE; | 337 | data += 2 + _MSCAN_RESERVED_DSR_SIZE; |
336 | } | 338 | } |
339 | /* read remaining byte if necessary */ | ||
340 | if (frame->can_dlc & 1) | ||
341 | frame->data[frame->can_dlc - 1] = in_8(data); | ||
337 | } | 342 | } |
338 | 343 | ||
339 | out_8(&regs->canrflg, MSCAN_RXF); | 344 | out_8(&regs->canrflg, MSCAN_RXF); |
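The corrected loops copy can_dlc / 2 half-words and then, only for odd lengths, the final byte, so the access never runs one byte past the payload the way the old (can_dlc + 1) / 2 round-up could. A flat-buffer model of the same copy (the real MSCAN registers have reserved gaps between data words):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void copy_payload(uint8_t *regs, const uint8_t *data, int dlc)
{
	int i;

	for (i = 0; i < dlc / 2; i++)
		memcpy(regs + 2 * i, data + 2 * i, 2);	/* 16-bit accesses */
	if (dlc & 1)
		regs[dlc - 1] = data[dlc - 1];		/* trailing odd byte */
}

int main(void)
{
	uint8_t data[8] = { 1, 2, 3, 4, 5 };
	uint8_t regs[8] = { 0 };
	int i;

	copy_payload(regs, data, 5);	/* two half-words plus one byte */
	for (i = 0; i < 8; i++)
		printf("%d ", regs[i]);
	printf("\n");
	return 0;
}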
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index 805076c54f1b..da5a5d9b8aff 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c | |||
@@ -1146,12 +1146,14 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) | |||
1146 | if (te && te->ctx && te->client && te->client->redirect) { | 1146 | if (te && te->ctx && te->client && te->client->redirect) { |
1147 | update_tcb = te->client->redirect(te->ctx, old, new, e); | 1147 | update_tcb = te->client->redirect(te->ctx, old, new, e); |
1148 | if (update_tcb) { | 1148 | if (update_tcb) { |
1149 | rcu_read_lock(); | ||
1149 | l2t_hold(L2DATA(tdev), e); | 1150 | l2t_hold(L2DATA(tdev), e); |
1151 | rcu_read_unlock(); | ||
1150 | set_l2t_ix(tdev, tid, e); | 1152 | set_l2t_ix(tdev, tid, e); |
1151 | } | 1153 | } |
1152 | } | 1154 | } |
1153 | } | 1155 | } |
1154 | l2t_release(L2DATA(tdev), e); | 1156 | l2t_release(tdev, e); |
1155 | } | 1157 | } |
1156 | 1158 | ||
1157 | /* | 1159 | /* |
@@ -1264,7 +1266,7 @@ int cxgb3_offload_activate(struct adapter *adapter) | |||
1264 | goto out_free; | 1266 | goto out_free; |
1265 | 1267 | ||
1266 | err = -ENOMEM; | 1268 | err = -ENOMEM; |
1267 | L2DATA(dev) = t3_init_l2t(l2t_capacity); | 1269 | RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity)); |
1268 | if (!L2DATA(dev)) | 1270 | if (!L2DATA(dev)) |
1269 | goto out_free; | 1271 | goto out_free; |
1270 | 1272 | ||
@@ -1298,16 +1300,24 @@ int cxgb3_offload_activate(struct adapter *adapter) | |||
1298 | 1300 | ||
1299 | out_free_l2t: | 1301 | out_free_l2t: |
1300 | t3_free_l2t(L2DATA(dev)); | 1302 | t3_free_l2t(L2DATA(dev)); |
1301 | L2DATA(dev) = NULL; | 1303 | rcu_assign_pointer(dev->l2opt, NULL); |
1302 | out_free: | 1304 | out_free: |
1303 | kfree(t); | 1305 | kfree(t); |
1304 | return err; | 1306 | return err; |
1305 | } | 1307 | } |
1306 | 1308 | ||
1309 | static void clean_l2_data(struct rcu_head *head) | ||
1310 | { | ||
1311 | struct l2t_data *d = container_of(head, struct l2t_data, rcu_head); | ||
1312 | t3_free_l2t(d); | ||
1313 | } | ||
1314 | |||
1315 | |||
1307 | void cxgb3_offload_deactivate(struct adapter *adapter) | 1316 | void cxgb3_offload_deactivate(struct adapter *adapter) |
1308 | { | 1317 | { |
1309 | struct t3cdev *tdev = &adapter->tdev; | 1318 | struct t3cdev *tdev = &adapter->tdev; |
1310 | struct t3c_data *t = T3C_DATA(tdev); | 1319 | struct t3c_data *t = T3C_DATA(tdev); |
1320 | struct l2t_data *d; | ||
1311 | 1321 | ||
1312 | remove_adapter(adapter); | 1322 | remove_adapter(adapter); |
1313 | if (list_empty(&adapter_list)) | 1323 | if (list_empty(&adapter_list)) |
@@ -1315,8 +1325,11 @@ void cxgb3_offload_deactivate(struct adapter *adapter) | |||
1315 | 1325 | ||
1316 | free_tid_maps(&t->tid_maps); | 1326 | free_tid_maps(&t->tid_maps); |
1317 | T3C_DATA(tdev) = NULL; | 1327 | T3C_DATA(tdev) = NULL; |
1318 | t3_free_l2t(L2DATA(tdev)); | 1328 | rcu_read_lock(); |
1319 | L2DATA(tdev) = NULL; | 1329 | d = L2DATA(tdev); |
1330 | rcu_read_unlock(); | ||
1331 | rcu_assign_pointer(tdev->l2opt, NULL); | ||
1332 | call_rcu(&d->rcu_head, clean_l2_data); | ||
1320 | if (t->nofail_skb) | 1333 | if (t->nofail_skb) |
1321 | kfree_skb(t->nofail_skb); | 1334 | kfree_skb(t->nofail_skb); |
1322 | kfree(t); | 1335 | kfree(t); |
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c index f452c4003253..41540978a173 100644 --- a/drivers/net/cxgb3/l2t.c +++ b/drivers/net/cxgb3/l2t.c | |||
@@ -300,14 +300,21 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) | |||
300 | struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, | 300 | struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, |
301 | struct net_device *dev) | 301 | struct net_device *dev) |
302 | { | 302 | { |
303 | struct l2t_entry *e; | 303 | struct l2t_entry *e = NULL; |
304 | struct l2t_data *d = L2DATA(cdev); | 304 | struct l2t_data *d; |
305 | int hash; | ||
305 | u32 addr = *(u32 *) neigh->primary_key; | 306 | u32 addr = *(u32 *) neigh->primary_key; |
306 | int ifidx = neigh->dev->ifindex; | 307 | int ifidx = neigh->dev->ifindex; |
307 | int hash = arp_hash(addr, ifidx, d); | ||
308 | struct port_info *p = netdev_priv(dev); | 308 | struct port_info *p = netdev_priv(dev); |
309 | int smt_idx = p->port_id; | 309 | int smt_idx = p->port_id; |
310 | 310 | ||
311 | rcu_read_lock(); | ||
312 | d = L2DATA(cdev); | ||
313 | if (!d) | ||
314 | goto done_rcu; | ||
315 | |||
316 | hash = arp_hash(addr, ifidx, d); | ||
317 | |||
311 | write_lock_bh(&d->lock); | 318 | write_lock_bh(&d->lock); |
312 | for (e = d->l2tab[hash].first; e; e = e->next) | 319 | for (e = d->l2tab[hash].first; e; e = e->next) |
313 | if (e->addr == addr && e->ifindex == ifidx && | 320 | if (e->addr == addr && e->ifindex == ifidx && |
@@ -338,6 +345,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, | |||
338 | } | 345 | } |
339 | done: | 346 | done: |
340 | write_unlock_bh(&d->lock); | 347 | write_unlock_bh(&d->lock); |
348 | done_rcu: | ||
349 | rcu_read_unlock(); | ||
341 | return e; | 350 | return e; |
342 | } | 351 | } |
343 | 352 | ||
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h index 7a12d52ed4fc..c5f54796e2cb 100644 --- a/drivers/net/cxgb3/l2t.h +++ b/drivers/net/cxgb3/l2t.h | |||
@@ -76,6 +76,7 @@ struct l2t_data { | |||
76 | atomic_t nfree; /* number of free entries */ | 76 | atomic_t nfree; /* number of free entries */ |
77 | rwlock_t lock; | 77 | rwlock_t lock; |
78 | struct l2t_entry l2tab[0]; | 78 | struct l2t_entry l2tab[0]; |
79 | struct rcu_head rcu_head; /* to handle rcu cleanup */ | ||
79 | }; | 80 | }; |
80 | 81 | ||
81 | typedef void (*arp_failure_handler_func)(struct t3cdev * dev, | 82 | typedef void (*arp_failure_handler_func)(struct t3cdev * dev, |
@@ -99,7 +100,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb, | |||
99 | /* | 100 | /* |
100 | * Getting to the L2 data from an offload device. | 101 | * Getting to the L2 data from an offload device. |
101 | */ | 102 | */ |
102 | #define L2DATA(dev) ((dev)->l2opt) | 103 | #define L2DATA(cdev) (rcu_dereference((cdev)->l2opt)) |
103 | 104 | ||
104 | #define W_TCB_L2T_IX 0 | 105 | #define W_TCB_L2T_IX 0 |
105 | #define S_TCB_L2T_IX 7 | 106 | #define S_TCB_L2T_IX 7 |
@@ -126,15 +127,22 @@ static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb, | |||
126 | return t3_l2t_send_slow(dev, skb, e); | 127 | return t3_l2t_send_slow(dev, skb, e); |
127 | } | 128 | } |
128 | 129 | ||
129 | static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e) | 130 | static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e) |
130 | { | 131 | { |
131 | if (atomic_dec_and_test(&e->refcnt)) | 132 | struct l2t_data *d; |
133 | |||
134 | rcu_read_lock(); | ||
135 | d = L2DATA(t); | ||
136 | |||
137 | if (atomic_dec_and_test(&e->refcnt) && d) | ||
132 | t3_l2e_free(d, e); | 138 | t3_l2e_free(d, e); |
139 | |||
140 | rcu_read_unlock(); | ||
133 | } | 141 | } |
134 | 142 | ||
135 | static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) | 143 | static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) |
136 | { | 144 | { |
137 | if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ | 145 | if (d && atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ |
138 | atomic_dec(&d->nfree); | 146 | atomic_dec(&d->nfree); |
139 | } | 147 | } |
140 | 148 | ||
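
The cxgb3 hunks above convert the adapter's L2 table pointer (l2opt) into an RCU-protected pointer: readers wrap every L2DATA() dereference in rcu_read_lock()/rcu_read_unlock(), activation publishes the table with RCU_INIT_POINTER(), and teardown unpublishes with rcu_assign_pointer(..., NULL) and defers the free through call_rcu(). A minimal sketch of that pattern in isolation, with invented my_l2_table/my_dev types standing in for the driver's structures:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/kernel.h>

struct my_l2_table {
	int nentries;
	struct rcu_head rcu_head;	/* defers the free past in-flight readers */
};

struct my_dev {
	struct my_l2_table __rcu *l2tab;	/* RCU-protected pointer */
};

/* Reader side: every dereference sits inside rcu_read_lock()/unlock(). */
static int my_l2_nentries(struct my_dev *dev)
{
	struct my_l2_table *t;
	int n = 0;

	rcu_read_lock();
	t = rcu_dereference(dev->l2tab);
	if (t)
		n = t->nentries;
	rcu_read_unlock();
	return n;
}

/* RCU callback: runs once all pre-existing readers are done. */
static void my_l2_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct my_l2_table, rcu_head));
}

/* Teardown, mirroring the patch: unpublish first, free after a grace
 * period.  Safe to use 't' after rcu_read_unlock() here only because
 * teardown is assumed to be the sole path that frees the table. */
static void my_l2_teardown(struct my_dev *dev)
{
	struct my_l2_table *t;

	rcu_read_lock();
	t = rcu_dereference(dev->l2tab);
	rcu_read_unlock();

	rcu_assign_pointer(dev->l2tab, NULL);
	if (t)
		call_rcu(&t->rcu_head, my_l2_free_rcu);
}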
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c index c9957b7f17b5..b4efa292fd6f 100644 --- a/drivers/net/cxgb4/cxgb4_main.c +++ b/drivers/net/cxgb4/cxgb4_main.c | |||
@@ -3712,6 +3712,9 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
3712 | setup_debugfs(adapter); | 3712 | setup_debugfs(adapter); |
3713 | } | 3713 | } |
3714 | 3714 | ||
3715 | /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ | ||
3716 | pdev->needs_freset = 1; | ||
3717 | |||
3715 | if (is_offload(adapter)) | 3718 | if (is_offload(adapter)) |
3716 | attach_ulds(adapter); | 3719 | attach_ulds(adapter); |
3717 | 3720 | ||
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index 8dd5fccef725..d393f1e764ed 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c | |||
@@ -636,8 +636,8 @@ static int ibmveth_open(struct net_device *netdev) | |||
636 | netdev_err(netdev, "unable to request irq 0x%x, rc %d\n", | 636 | netdev_err(netdev, "unable to request irq 0x%x, rc %d\n", |
637 | netdev->irq, rc); | 637 | netdev->irq, rc); |
638 | do { | 638 | do { |
639 | rc = h_free_logical_lan(adapter->vdev->unit_address); | 639 | lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); |
640 | } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); | 640 | } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY)); |
641 | 641 | ||
642 | goto err_out; | 642 | goto err_out; |
643 | } | 643 | } |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 05172c39a0ce..376e3e94bae0 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -239,7 +239,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) | |||
239 | dest = macvlan_hash_lookup(port, eth->h_dest); | 239 | dest = macvlan_hash_lookup(port, eth->h_dest); |
240 | if (dest && dest->mode == MACVLAN_MODE_BRIDGE) { | 240 | if (dest && dest->mode == MACVLAN_MODE_BRIDGE) { |
241 | /* send to lowerdev first for its network taps */ | 241 | /* send to lowerdev first for its network taps */ |
242 | vlan->forward(vlan->lowerdev, skb); | 242 | dev_forward_skb(vlan->lowerdev, skb); |
243 | 243 | ||
244 | return NET_XMIT_SUCCESS; | 244 | return NET_XMIT_SUCCESS; |
245 | } | 245 | } |
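
The macvlan change swaps an indirect vlan->forward() call for dev_forward_skb(), which injects a frame into another local net_device's receive path and consumes the skb it is handed. A sketch of that call pattern; the clone-first helper below is illustrative only, not the driver's code:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>

/* Hand a copy of a frame to another local device's receive path.
 * dev_forward_skb() always consumes the skb it receives, so the
 * caller must not touch that skb afterwards. */
static int demo_forward_copy(struct net_device *dst, struct sk_buff *skb)
{
	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

	if (!nskb)
		return NET_XMIT_DROP;

	return dev_forward_skb(dst, nskb) == NET_RX_SUCCESS ?
	       NET_XMIT_SUCCESS : NET_XMIT_DROP;
}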
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c index 6e03de034ac7..f76ab6bf3096 100644 --- a/drivers/net/mlx4/en_tx.c +++ b/drivers/net/mlx4/en_tx.c | |||
@@ -172,7 +172,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, | |||
172 | memset(ring->buf, 0, ring->buf_size); | 172 | memset(ring->buf, 0, ring->buf_size); |
173 | 173 | ||
174 | ring->qp_state = MLX4_QP_STATE_RST; | 174 | ring->qp_state = MLX4_QP_STATE_RST; |
175 | ring->doorbell_qpn = swab32(ring->qp.qpn << 8); | 175 | ring->doorbell_qpn = ring->qp.qpn << 8; |
176 | 176 | ||
177 | mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, | 177 | mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, |
178 | ring->cqn, &ring->context); | 178 | ring->cqn, &ring->context); |
@@ -791,7 +791,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
791 | skb_orphan(skb); | 791 | skb_orphan(skb); |
792 | 792 | ||
793 | if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) { | 793 | if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) { |
794 | *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn; | 794 | *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn); |
795 | op_own |= htonl((bf_index & 0xffff) << 8); | 795 | op_own |= htonl((bf_index & 0xffff) << 8); |
796 | /* Ensure new descirptor hits memory | 796 | /* Ensure new descirptor hits memory |
797 | * before setting ownership of this descriptor to HW */ | 797 | * before setting ownership of this descriptor to HW */ |
@@ -812,7 +812,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
812 | wmb(); | 812 | wmb(); |
813 | tx_desc->ctrl.owner_opcode = op_own; | 813 | tx_desc->ctrl.owner_opcode = op_own; |
814 | wmb(); | 814 | wmb(); |
815 | writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); | 815 | iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); |
816 | } | 816 | } |
817 | 817 | ||
818 | /* Poll CQ here */ | 818 | /* Poll CQ here */ |
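
The mlx4_en hunks fix a byte-order bug by keeping doorbell_qpn in CPU order and converting only at the hardware boundary: cpu_to_be32() when OR-ing it into the host-memory descriptor and iowrite32be() for the MMIO doorbell, instead of pre-swapping with swab32(). A minimal sketch of that convention, with an invented demo_desc layout:

#include <linux/types.h>
#include <linux/io.h>
#include <asm/byteorder.h>

struct demo_desc {
	__be32 ctrl;			/* device expects big-endian here */
};

/* Keep the doorbell value in CPU byte order internally. */
static u32 demo_make_doorbell(u32 qpn)
{
	return qpn << 8;
}

/* Convert only at the boundaries: descriptor in host memory, MMIO write. */
static void demo_ring_doorbell(void __iomem *db_reg, struct demo_desc *desc,
			       u32 doorbell)
{
	desc->ctrl |= cpu_to_be32(doorbell);	/* big-endian in the descriptor */
	iowrite32be(doorbell, db_reg);		/* big-endian MMIO register */
}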
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index ed2a3977c6e7..e8882023576b 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c | |||
@@ -307,6 +307,11 @@ static ssize_t store_enabled(struct netconsole_target *nt, | |||
307 | return err; | 307 | return err; |
308 | if (enabled < 0 || enabled > 1) | 308 | if (enabled < 0 || enabled > 1) |
309 | return -EINVAL; | 309 | return -EINVAL; |
310 | if (enabled == nt->enabled) { | ||
311 | printk(KERN_INFO "netconsole: network logging has already %s\n", | ||
312 | nt->enabled ? "started" : "stopped"); | ||
313 | return -EINVAL; | ||
314 | } | ||
310 | 315 | ||
311 | if (enabled) { /* 1 */ | 316 | if (enabled) { /* 1 */ |
312 | 317 | ||
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index 567ff10889be..b8b4ba27b0e7 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c | |||
@@ -1199,6 +1199,8 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) | |||
1199 | iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), | 1199 | iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), |
1200 | &hw->reg->INT_EN); | 1200 | &hw->reg->INT_EN); |
1201 | pch_gbe_stop_receive(adapter); | 1201 | pch_gbe_stop_receive(adapter); |
1202 | int_st |= ioread32(&hw->reg->INT_ST); | ||
1203 | int_st = int_st & ioread32(&hw->reg->INT_EN); | ||
1202 | } | 1204 | } |
1203 | if (int_st & PCH_GBE_INT_RX_DMA_ERR) | 1205 | if (int_st & PCH_GBE_INT_RX_DMA_ERR) |
1204 | adapter->stats.intr_rx_dma_err_count++; | 1206 | adapter->stats.intr_rx_dma_err_count++; |
@@ -1218,14 +1220,11 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) | |||
1218 | /* Set Pause packet */ | 1220 | /* Set Pause packet */ |
1219 | pch_gbe_mac_set_pause_packet(hw); | 1221 | pch_gbe_mac_set_pause_packet(hw); |
1220 | } | 1222 | } |
1221 | if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) | ||
1222 | == 0) { | ||
1223 | return IRQ_HANDLED; | ||
1224 | } | ||
1225 | } | 1223 | } |
1226 | 1224 | ||
1227 | /* When request status is Receive interruption */ | 1225 | /* When request status is Receive interruption */ |
1228 | if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) { | 1226 | if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) || |
1227 | (adapter->rx_stop_flag == true)) { | ||
1229 | if (likely(napi_schedule_prep(&adapter->napi))) { | 1228 | if (likely(napi_schedule_prep(&adapter->napi))) { |
1230 | /* Enable only Rx Descriptor empty */ | 1229 | /* Enable only Rx Descriptor empty */ |
1231 | atomic_inc(&adapter->irq_sem); | 1230 | atomic_inc(&adapter->irq_sem); |
@@ -1385,7 +1384,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, | |||
1385 | struct sk_buff *skb; | 1384 | struct sk_buff *skb; |
1386 | unsigned int i; | 1385 | unsigned int i; |
1387 | unsigned int cleaned_count = 0; | 1386 | unsigned int cleaned_count = 0; |
1388 | bool cleaned = false; | 1387 | bool cleaned = true; |
1389 | 1388 | ||
1390 | pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); | 1389 | pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); |
1391 | 1390 | ||
@@ -1396,7 +1395,6 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, | |||
1396 | 1395 | ||
1397 | while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) { | 1396 | while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) { |
1398 | pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status); | 1397 | pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status); |
1399 | cleaned = true; | ||
1400 | buffer_info = &tx_ring->buffer_info[i]; | 1398 | buffer_info = &tx_ring->buffer_info[i]; |
1401 | skb = buffer_info->skb; | 1399 | skb = buffer_info->skb; |
1402 | 1400 | ||
@@ -1439,8 +1437,10 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, | |||
1439 | tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); | 1437 | tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); |
1440 | 1438 | ||
1441 | /* weight of a sort for tx, to avoid endless transmit cleanup */ | 1439 | /* weight of a sort for tx, to avoid endless transmit cleanup */ |
1442 | if (cleaned_count++ == PCH_GBE_TX_WEIGHT) | 1440 | if (cleaned_count++ == PCH_GBE_TX_WEIGHT) { |
1441 | cleaned = false; | ||
1443 | break; | 1442 | break; |
1443 | } | ||
1444 | } | 1444 | } |
1445 | pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n", | 1445 | pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n", |
1446 | cleaned_count); | 1446 | cleaned_count); |
@@ -2168,7 +2168,6 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) | |||
2168 | { | 2168 | { |
2169 | struct pch_gbe_adapter *adapter = | 2169 | struct pch_gbe_adapter *adapter = |
2170 | container_of(napi, struct pch_gbe_adapter, napi); | 2170 | container_of(napi, struct pch_gbe_adapter, napi); |
2171 | struct net_device *netdev = adapter->netdev; | ||
2172 | int work_done = 0; | 2171 | int work_done = 0; |
2173 | bool poll_end_flag = false; | 2172 | bool poll_end_flag = false; |
2174 | bool cleaned = false; | 2173 | bool cleaned = false; |
@@ -2176,33 +2175,32 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) | |||
2176 | 2175 | ||
2177 | pr_debug("budget : %d\n", budget); | 2176 | pr_debug("budget : %d\n", budget); |
2178 | 2177 | ||
2179 | /* Keep link state information with original netdev */ | 2178 | pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); |
2180 | if (!netif_carrier_ok(netdev)) { | 2179 | cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); |
2180 | |||
2181 | if (!cleaned) | ||
2182 | work_done = budget; | ||
2183 | /* If no Tx and not enough Rx work done, | ||
2184 | * exit the polling mode | ||
2185 | */ | ||
2186 | if (work_done < budget) | ||
2181 | poll_end_flag = true; | 2187 | poll_end_flag = true; |
2182 | } else { | 2188 | |
2183 | pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); | 2189 | if (poll_end_flag) { |
2190 | napi_complete(napi); | ||
2191 | if (adapter->rx_stop_flag) { | ||
2192 | adapter->rx_stop_flag = false; | ||
2193 | pch_gbe_start_receive(&adapter->hw); | ||
2194 | } | ||
2195 | pch_gbe_irq_enable(adapter); | ||
2196 | } else | ||
2184 | if (adapter->rx_stop_flag) { | 2197 | if (adapter->rx_stop_flag) { |
2185 | adapter->rx_stop_flag = false; | 2198 | adapter->rx_stop_flag = false; |
2186 | pch_gbe_start_receive(&adapter->hw); | 2199 | pch_gbe_start_receive(&adapter->hw); |
2187 | int_en = ioread32(&adapter->hw.reg->INT_EN); | 2200 | int_en = ioread32(&adapter->hw.reg->INT_EN); |
2188 | iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), | 2201 | iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), |
2189 | &adapter->hw.reg->INT_EN); | 2202 | &adapter->hw.reg->INT_EN); |
2190 | } | 2203 | } |
2191 | cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); | ||
2192 | |||
2193 | if (cleaned) | ||
2194 | work_done = budget; | ||
2195 | /* If no Tx and not enough Rx work done, | ||
2196 | * exit the polling mode | ||
2197 | */ | ||
2198 | if ((work_done < budget) || !netif_running(netdev)) | ||
2199 | poll_end_flag = true; | ||
2200 | } | ||
2201 | |||
2202 | if (poll_end_flag) { | ||
2203 | napi_complete(napi); | ||
2204 | pch_gbe_irq_enable(adapter); | ||
2205 | } | ||
2206 | 2204 | ||
2207 | pr_debug("poll_end_flag : %d work_done : %d budget : %d\n", | 2205 | pr_debug("poll_end_flag : %d work_done : %d budget : %d\n", |
2208 | poll_end_flag, work_done, budget); | 2206 | poll_end_flag, work_done, budget); |
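
The reshaped pch_gbe_napi_poll() now follows the common NAPI pattern: clean RX and TX unconditionally, treat an unfinished TX clean as "budget consumed" so polling continues, and only complete NAPI and re-enable interrupts when less than the budget was used. A generic skeleton of that shape; the demo_* types and stub helpers are placeholders, not the driver's functions:

#include <linux/netdevice.h>
#include <linux/kernel.h>

struct demo_adapter {
	struct napi_struct napi;
	/* rings, registers, ... */
};

/* Stubs standing in for the driver's own cleanup and IRQ helpers. */
static void demo_clean_rx(struct demo_adapter *ad, int *work_done, int budget) { }
static bool demo_clean_tx(struct demo_adapter *ad) { return true; }
static void demo_irq_enable(struct demo_adapter *ad) { }

static int demo_poll(struct napi_struct *napi, int budget)
{
	struct demo_adapter *ad = container_of(napi, struct demo_adapter, napi);
	int work_done = 0;
	bool tx_done;

	demo_clean_rx(ad, &work_done, budget);
	tx_done = demo_clean_tx(ad);

	/* TX cleanup hit its weight limit: claim the whole budget so the
	 * core keeps polling instead of re-enabling interrupts. */
	if (!tx_done)
		work_done = budget;

	if (work_done < budget) {
		napi_complete(napi);
		demo_irq_enable(ad);
	}

	return work_done;
}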
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index cb6e0b486b1e..edd7304773eb 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c | |||
@@ -589,7 +589,7 @@ static void decode_rxts(struct dp83640_private *dp83640, | |||
589 | prune_rx_ts(dp83640); | 589 | prune_rx_ts(dp83640); |
590 | 590 | ||
591 | if (list_empty(&dp83640->rxpool)) { | 591 | if (list_empty(&dp83640->rxpool)) { |
592 | pr_warning("dp83640: rx timestamp pool is empty\n"); | 592 | pr_debug("dp83640: rx timestamp pool is empty\n"); |
593 | goto out; | 593 | goto out; |
594 | } | 594 | } |
595 | rxts = list_first_entry(&dp83640->rxpool, struct rxts, list); | 595 | rxts = list_first_entry(&dp83640->rxpool, struct rxts, list); |
@@ -612,7 +612,7 @@ static void decode_txts(struct dp83640_private *dp83640, | |||
612 | skb = skb_dequeue(&dp83640->tx_queue); | 612 | skb = skb_dequeue(&dp83640->tx_queue); |
613 | 613 | ||
614 | if (!skb) { | 614 | if (!skb) { |
615 | pr_warning("dp83640: have timestamp but tx_queue empty\n"); | 615 | pr_debug("dp83640: have timestamp but tx_queue empty\n"); |
616 | return; | 616 | return; |
617 | } | 617 | } |
618 | ns = phy2txts(phy_txts); | 618 | ns = phy2txts(phy_txts); |
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c index eae542a7e987..89f829f5f725 100644 --- a/drivers/net/pptp.c +++ b/drivers/net/pptp.c | |||
@@ -285,8 +285,10 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) | |||
285 | ip_send_check(iph); | 285 | ip_send_check(iph); |
286 | 286 | ||
287 | ip_local_out(skb); | 287 | ip_local_out(skb); |
288 | return 1; | ||
288 | 289 | ||
289 | tx_error: | 290 | tx_error: |
291 | kfree_skb(skb); | ||
290 | return 1; | 292 | return 1; |
291 | } | 293 | } |
292 | 294 | ||
@@ -305,11 +307,18 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb) | |||
305 | } | 307 | } |
306 | 308 | ||
307 | header = (struct pptp_gre_header *)(skb->data); | 309 | header = (struct pptp_gre_header *)(skb->data); |
310 | headersize = sizeof(*header); | ||
308 | 311 | ||
309 | /* test if acknowledgement present */ | 312 | /* test if acknowledgement present */ |
310 | if (PPTP_GRE_IS_A(header->ver)) { | 313 | if (PPTP_GRE_IS_A(header->ver)) { |
311 | __u32 ack = (PPTP_GRE_IS_S(header->flags)) ? | 314 | __u32 ack; |
312 | header->ack : header->seq; /* ack in different place if S = 0 */ | 315 | |
316 | if (!pskb_may_pull(skb, headersize)) | ||
317 | goto drop; | ||
318 | header = (struct pptp_gre_header *)(skb->data); | ||
319 | |||
320 | /* ack in different place if S = 0 */ | ||
321 | ack = PPTP_GRE_IS_S(header->flags) ? header->ack : header->seq; | ||
313 | 322 | ||
314 | ack = ntohl(ack); | 323 | ack = ntohl(ack); |
315 | 324 | ||
@@ -318,21 +327,18 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb) | |||
318 | /* also handle sequence number wrap-around */ | 327 | /* also handle sequence number wrap-around */ |
319 | if (WRAPPED(ack, opt->ack_recv)) | 328 | if (WRAPPED(ack, opt->ack_recv)) |
320 | opt->ack_recv = ack; | 329 | opt->ack_recv = ack; |
330 | } else { | ||
331 | headersize -= sizeof(header->ack); | ||
321 | } | 332 | } |
322 | |||
323 | /* test if payload present */ | 333 | /* test if payload present */ |
324 | if (!PPTP_GRE_IS_S(header->flags)) | 334 | if (!PPTP_GRE_IS_S(header->flags)) |
325 | goto drop; | 335 | goto drop; |
326 | 336 | ||
327 | headersize = sizeof(*header); | ||
328 | payload_len = ntohs(header->payload_len); | 337 | payload_len = ntohs(header->payload_len); |
329 | seq = ntohl(header->seq); | 338 | seq = ntohl(header->seq); |
330 | 339 | ||
331 | /* no ack present? */ | ||
332 | if (!PPTP_GRE_IS_A(header->ver)) | ||
333 | headersize -= sizeof(header->ack); | ||
334 | /* check for incomplete packet (length smaller than expected) */ | 340 | /* check for incomplete packet (length smaller than expected) */ |
335 | if (skb->len - headersize < payload_len) | 341 | if (!pskb_may_pull(skb, headersize + payload_len)) |
336 | goto drop; | 342 | goto drop; |
337 | 343 | ||
338 | payload = skb->data + headersize; | 344 | payload = skb->data + headersize; |
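
The pptp receive fix validates the variable-sized GRE header with pskb_may_pull() before reading its optional fields, and re-reads skb->data after each pull because pskb_may_pull() may reallocate the linear head. A condensed sketch of that pattern with an invented demo_hdr layout (the flag bit and return values are illustrative):

#include <linux/skbuff.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* Invented header: a fixed part plus an optional trailing ack word. */
struct demo_hdr {
	__be16 flags;
	__be16 payload_len;
	__be32 seq;
	__be32 ack;		/* present only when DEMO_F_ACK is set */
};

#define DEMO_F_ACK	0x0001

static int demo_rcv(struct sk_buff *skb)
{
	const struct demo_hdr *h;
	unsigned int hlen = sizeof(*h) - sizeof(__be32);	/* without ack */
	unsigned int payload_len;

	if (!pskb_may_pull(skb, hlen))
		goto drop;
	h = (const struct demo_hdr *)skb->data;

	if (ntohs(h->flags) & DEMO_F_ACK) {
		hlen += sizeof(__be32);
		if (!pskb_may_pull(skb, hlen))
			goto drop;
		/* pskb_may_pull() may reallocate the head: re-read the pointer. */
		h = (const struct demo_hdr *)skb->data;
	}

	payload_len = ntohs(h->payload_len);
	if (!pskb_may_pull(skb, hlen + payload_len))
		goto drop;

	/* payload is now safely readable at skb->data + hlen */
	return 0;
drop:
	kfree_skb(skb);
	return -1;
}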
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index c23667017922..6d657cabb951 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -2859,7 +2859,7 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) | |||
2859 | rtl_writephy(tp, 0x1f, 0x0004); | 2859 | rtl_writephy(tp, 0x1f, 0x0004); |
2860 | rtl_writephy(tp, 0x1f, 0x0007); | 2860 | rtl_writephy(tp, 0x1f, 0x0007); |
2861 | rtl_writephy(tp, 0x1e, 0x0020); | 2861 | rtl_writephy(tp, 0x1e, 0x0020); |
2862 | rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); | 2862 | rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100); |
2863 | rtl_writephy(tp, 0x1f, 0x0002); | 2863 | rtl_writephy(tp, 0x1f, 0x0002); |
2864 | rtl_writephy(tp, 0x1f, 0x0000); | 2864 | rtl_writephy(tp, 0x1f, 0x0000); |
2865 | rtl_writephy(tp, 0x0d, 0x0007); | 2865 | rtl_writephy(tp, 0x0d, 0x0007); |
@@ -3316,6 +3316,37 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp) | |||
3316 | } | 3316 | } |
3317 | } | 3317 | } |
3318 | 3318 | ||
3319 | static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) | ||
3320 | { | ||
3321 | void __iomem *ioaddr = tp->mmio_addr; | ||
3322 | |||
3323 | switch (tp->mac_version) { | ||
3324 | case RTL_GIGA_MAC_VER_29: | ||
3325 | case RTL_GIGA_MAC_VER_30: | ||
3326 | case RTL_GIGA_MAC_VER_32: | ||
3327 | case RTL_GIGA_MAC_VER_33: | ||
3328 | case RTL_GIGA_MAC_VER_34: | ||
3329 | RTL_W32(RxConfig, RTL_R32(RxConfig) | | ||
3330 | AcceptBroadcast | AcceptMulticast | AcceptMyPhys); | ||
3331 | break; | ||
3332 | default: | ||
3333 | break; | ||
3334 | } | ||
3335 | } | ||
3336 | |||
3337 | static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) | ||
3338 | { | ||
3339 | if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) | ||
3340 | return false; | ||
3341 | |||
3342 | rtl_writephy(tp, 0x1f, 0x0000); | ||
3343 | rtl_writephy(tp, MII_BMCR, 0x0000); | ||
3344 | |||
3345 | rtl_wol_suspend_quirk(tp); | ||
3346 | |||
3347 | return true; | ||
3348 | } | ||
3349 | |||
3319 | static void r810x_phy_power_down(struct rtl8169_private *tp) | 3350 | static void r810x_phy_power_down(struct rtl8169_private *tp) |
3320 | { | 3351 | { |
3321 | rtl_writephy(tp, 0x1f, 0x0000); | 3352 | rtl_writephy(tp, 0x1f, 0x0000); |
@@ -3330,18 +3361,8 @@ static void r810x_phy_power_up(struct rtl8169_private *tp) | |||
3330 | 3361 | ||
3331 | static void r810x_pll_power_down(struct rtl8169_private *tp) | 3362 | static void r810x_pll_power_down(struct rtl8169_private *tp) |
3332 | { | 3363 | { |
3333 | void __iomem *ioaddr = tp->mmio_addr; | 3364 | if (rtl_wol_pll_power_down(tp)) |
3334 | |||
3335 | if (__rtl8169_get_wol(tp) & WAKE_ANY) { | ||
3336 | rtl_writephy(tp, 0x1f, 0x0000); | ||
3337 | rtl_writephy(tp, MII_BMCR, 0x0000); | ||
3338 | |||
3339 | if (tp->mac_version == RTL_GIGA_MAC_VER_29 || | ||
3340 | tp->mac_version == RTL_GIGA_MAC_VER_30) | ||
3341 | RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | | ||
3342 | AcceptMulticast | AcceptMyPhys); | ||
3343 | return; | 3365 | return; |
3344 | } | ||
3345 | 3366 | ||
3346 | r810x_phy_power_down(tp); | 3367 | r810x_phy_power_down(tp); |
3347 | } | 3368 | } |
@@ -3430,17 +3451,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) | |||
3430 | tp->mac_version == RTL_GIGA_MAC_VER_33) | 3451 | tp->mac_version == RTL_GIGA_MAC_VER_33) |
3431 | rtl_ephy_write(ioaddr, 0x19, 0xff64); | 3452 | rtl_ephy_write(ioaddr, 0x19, 0xff64); |
3432 | 3453 | ||
3433 | if (__rtl8169_get_wol(tp) & WAKE_ANY) { | 3454 | if (rtl_wol_pll_power_down(tp)) |
3434 | rtl_writephy(tp, 0x1f, 0x0000); | ||
3435 | rtl_writephy(tp, MII_BMCR, 0x0000); | ||
3436 | |||
3437 | if (tp->mac_version == RTL_GIGA_MAC_VER_32 || | ||
3438 | tp->mac_version == RTL_GIGA_MAC_VER_33 || | ||
3439 | tp->mac_version == RTL_GIGA_MAC_VER_34) | ||
3440 | RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | | ||
3441 | AcceptMulticast | AcceptMyPhys); | ||
3442 | return; | 3455 | return; |
3443 | } | ||
3444 | 3456 | ||
3445 | r8168_phy_power_down(tp); | 3457 | r8168_phy_power_down(tp); |
3446 | 3458 | ||
@@ -5788,11 +5800,30 @@ static const struct dev_pm_ops rtl8169_pm_ops = { | |||
5788 | 5800 | ||
5789 | #endif /* !CONFIG_PM */ | 5801 | #endif /* !CONFIG_PM */ |
5790 | 5802 | ||
5803 | static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) | ||
5804 | { | ||
5805 | void __iomem *ioaddr = tp->mmio_addr; | ||
5806 | |||
5807 | /* WoL fails with 8168b when the receiver is disabled. */ | ||
5808 | switch (tp->mac_version) { | ||
5809 | case RTL_GIGA_MAC_VER_11: | ||
5810 | case RTL_GIGA_MAC_VER_12: | ||
5811 | case RTL_GIGA_MAC_VER_17: | ||
5812 | pci_clear_master(tp->pci_dev); | ||
5813 | |||
5814 | RTL_W8(ChipCmd, CmdRxEnb); | ||
5815 | /* PCI commit */ | ||
5816 | RTL_R8(ChipCmd); | ||
5817 | break; | ||
5818 | default: | ||
5819 | break; | ||
5820 | } | ||
5821 | } | ||
5822 | |||
5791 | static void rtl_shutdown(struct pci_dev *pdev) | 5823 | static void rtl_shutdown(struct pci_dev *pdev) |
5792 | { | 5824 | { |
5793 | struct net_device *dev = pci_get_drvdata(pdev); | 5825 | struct net_device *dev = pci_get_drvdata(pdev); |
5794 | struct rtl8169_private *tp = netdev_priv(dev); | 5826 | struct rtl8169_private *tp = netdev_priv(dev); |
5795 | void __iomem *ioaddr = tp->mmio_addr; | ||
5796 | 5827 | ||
5797 | rtl8169_net_suspend(dev); | 5828 | rtl8169_net_suspend(dev); |
5798 | 5829 | ||
@@ -5806,16 +5837,9 @@ static void rtl_shutdown(struct pci_dev *pdev) | |||
5806 | spin_unlock_irq(&tp->lock); | 5837 | spin_unlock_irq(&tp->lock); |
5807 | 5838 | ||
5808 | if (system_state == SYSTEM_POWER_OFF) { | 5839 | if (system_state == SYSTEM_POWER_OFF) { |
5809 | /* WoL fails with 8168b when the receiver is disabled. */ | 5840 | if (__rtl8169_get_wol(tp) & WAKE_ANY) { |
5810 | if ((tp->mac_version == RTL_GIGA_MAC_VER_11 || | 5841 | rtl_wol_suspend_quirk(tp); |
5811 | tp->mac_version == RTL_GIGA_MAC_VER_12 || | 5842 | rtl_wol_shutdown_quirk(tp); |
5812 | tp->mac_version == RTL_GIGA_MAC_VER_17) && | ||
5813 | (tp->features & RTL_FEATURE_WOL)) { | ||
5814 | pci_clear_master(pdev); | ||
5815 | |||
5816 | RTL_W8(ChipCmd, CmdRxEnb); | ||
5817 | /* PCI commit */ | ||
5818 | RTL_R8(ChipCmd); | ||
5819 | } | 5843 | } |
5820 | 5844 | ||
5821 | pci_wake_from_d3(pdev, true); | 5845 | pci_wake_from_d3(pdev, true); |
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index b9016a30cdc5..c90ddb61cc56 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c | |||
@@ -26,6 +26,7 @@ | |||
26 | * LAN9215, LAN9216, LAN9217, LAN9218 | 26 | * LAN9215, LAN9216, LAN9217, LAN9218 |
27 | * LAN9210, LAN9211 | 27 | * LAN9210, LAN9211 |
28 | * LAN9220, LAN9221 | 28 | * LAN9220, LAN9221 |
29 | * LAN89218 | ||
29 | * | 30 | * |
30 | */ | 31 | */ |
31 | 32 | ||
@@ -1983,6 +1984,7 @@ static int __devinit smsc911x_init(struct net_device *dev) | |||
1983 | case 0x01170000: | 1984 | case 0x01170000: |
1984 | case 0x01160000: | 1985 | case 0x01160000: |
1985 | case 0x01150000: | 1986 | case 0x01150000: |
1987 | case 0x218A0000: | ||
1986 | /* LAN911[5678] family */ | 1988 | /* LAN911[5678] family */ |
1987 | pdata->generation = pdata->idrev & 0x0000FFFF; | 1989 | pdata->generation = pdata->idrev & 0x0000FFFF; |
1988 | break; | 1990 | break; |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 4a1374df6084..c11a2b8327f3 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -15577,7 +15577,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev) | |||
15577 | 15577 | ||
15578 | cancel_work_sync(&tp->reset_task); | 15578 | cancel_work_sync(&tp->reset_task); |
15579 | 15579 | ||
15580 | if (!tg3_flag(tp, USE_PHYLIB)) { | 15580 | if (tg3_flag(tp, USE_PHYLIB)) { |
15581 | tg3_phy_fini(tp); | 15581 | tg3_phy_fini(tp); |
15582 | tg3_mdio_fini(tp); | 15582 | tg3_mdio_fini(tp); |
15583 | } | 15583 | } |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h index 2339728a7306..3e69c631ebb4 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h | |||
@@ -1514,7 +1514,7 @@ static const u32 ar9300_2p2_mac_core[][2] = { | |||
1514 | {0x00008258, 0x00000000}, | 1514 | {0x00008258, 0x00000000}, |
1515 | {0x0000825c, 0x40000000}, | 1515 | {0x0000825c, 0x40000000}, |
1516 | {0x00008260, 0x00080922}, | 1516 | {0x00008260, 0x00080922}, |
1517 | {0x00008264, 0x9bc00010}, | 1517 | {0x00008264, 0x9d400010}, |
1518 | {0x00008268, 0xffffffff}, | 1518 | {0x00008268, 0xffffffff}, |
1519 | {0x0000826c, 0x0000ffff}, | 1519 | {0x0000826c, 0x0000ffff}, |
1520 | {0x00008270, 0x00000000}, | 1520 | {0x00008270, 0x00000000}, |
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 9a4850154fb2..4c21f8cbdeb5 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c | |||
@@ -205,14 +205,22 @@ static void ath_rx_remove_buffer(struct ath_softc *sc, | |||
205 | 205 | ||
206 | static void ath_rx_edma_cleanup(struct ath_softc *sc) | 206 | static void ath_rx_edma_cleanup(struct ath_softc *sc) |
207 | { | 207 | { |
208 | struct ath_hw *ah = sc->sc_ah; | ||
209 | struct ath_common *common = ath9k_hw_common(ah); | ||
208 | struct ath_buf *bf; | 210 | struct ath_buf *bf; |
209 | 211 | ||
210 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); | 212 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); |
211 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); | 213 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); |
212 | 214 | ||
213 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { | 215 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { |
214 | if (bf->bf_mpdu) | 216 | if (bf->bf_mpdu) { |
217 | dma_unmap_single(sc->dev, bf->bf_buf_addr, | ||
218 | common->rx_bufsize, | ||
219 | DMA_BIDIRECTIONAL); | ||
215 | dev_kfree_skb_any(bf->bf_mpdu); | 220 | dev_kfree_skb_any(bf->bf_mpdu); |
221 | bf->bf_buf_addr = 0; | ||
222 | bf->bf_mpdu = NULL; | ||
223 | } | ||
216 | } | 224 | } |
217 | 225 | ||
218 | INIT_LIST_HEAD(&sc->rx.rxbuf); | 226 | INIT_LIST_HEAD(&sc->rx.rxbuf); |
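
The ath9k EDMA cleanup fix unmaps each RX buffer's DMA mapping before freeing the skb and then clears bf_mpdu/bf_buf_addr so nothing can touch the stale state again. The same unmap, free, then poison ordering is sketched below with an invented demo_rx_buf structure:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct demo_rx_buf {
	struct sk_buff *skb;
	dma_addr_t addr;
};

static void demo_free_rx_buf(struct device *dev, struct demo_rx_buf *buf,
			     size_t bufsize)
{
	if (!buf->skb)
		return;

	/* Drop the device mapping before the backing memory goes away. */
	dma_unmap_single(dev, buf->addr, bufsize, DMA_BIDIRECTIONAL);
	dev_kfree_skb_any(buf->skb);

	/* Clear the bookkeeping so a later cleanup pass cannot reuse it. */
	buf->skb = NULL;
	buf->addr = 0;
}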
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c index 35cd2537e7fd..e5971fe9d169 100644 --- a/drivers/net/wireless/iwlegacy/iwl-core.c +++ b/drivers/net/wireless/iwlegacy/iwl-core.c | |||
@@ -937,7 +937,7 @@ void iwl_legacy_irq_handle_error(struct iwl_priv *priv) | |||
937 | &priv->contexts[IWL_RXON_CTX_BSS]); | 937 | &priv->contexts[IWL_RXON_CTX_BSS]); |
938 | #endif | 938 | #endif |
939 | 939 | ||
940 | wake_up_interruptible(&priv->wait_command_queue); | 940 | wake_up(&priv->wait_command_queue); |
941 | 941 | ||
942 | /* Keep the restart process from trying to send host | 942 | /* Keep the restart process from trying to send host |
943 | * commands by clearing the INIT status bit */ | 943 | * commands by clearing the INIT status bit */ |
@@ -1746,7 +1746,7 @@ int iwl_legacy_force_reset(struct iwl_priv *priv, bool external) | |||
1746 | 1746 | ||
1747 | /* Set the FW error flag -- cleared on iwl_down */ | 1747 | /* Set the FW error flag -- cleared on iwl_down */ |
1748 | set_bit(STATUS_FW_ERROR, &priv->status); | 1748 | set_bit(STATUS_FW_ERROR, &priv->status); |
1749 | wake_up_interruptible(&priv->wait_command_queue); | 1749 | wake_up(&priv->wait_command_queue); |
1750 | /* | 1750 | /* |
1751 | * Keep the restart process from trying to send host | 1751 | * Keep the restart process from trying to send host |
1752 | * commands by clearing the INIT status bit | 1752 | * commands by clearing the INIT status bit |
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c index 62b4b09122cb..ce1fc9feb61f 100644 --- a/drivers/net/wireless/iwlegacy/iwl-hcmd.c +++ b/drivers/net/wireless/iwlegacy/iwl-hcmd.c | |||
@@ -167,7 +167,7 @@ int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | |||
167 | goto out; | 167 | goto out; |
168 | } | 168 | } |
169 | 169 | ||
170 | ret = wait_event_interruptible_timeout(priv->wait_command_queue, | 170 | ret = wait_event_timeout(priv->wait_command_queue, |
171 | !test_bit(STATUS_HCMD_ACTIVE, &priv->status), | 171 | !test_bit(STATUS_HCMD_ACTIVE, &priv->status), |
172 | HOST_COMPLETE_TIMEOUT); | 172 | HOST_COMPLETE_TIMEOUT); |
173 | if (!ret) { | 173 | if (!ret) { |
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c index 4fff995c6f3e..ef9e268bf8a0 100644 --- a/drivers/net/wireless/iwlegacy/iwl-tx.c +++ b/drivers/net/wireless/iwlegacy/iwl-tx.c | |||
@@ -625,6 +625,8 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |||
625 | cmd = txq->cmd[cmd_index]; | 625 | cmd = txq->cmd[cmd_index]; |
626 | meta = &txq->meta[cmd_index]; | 626 | meta = &txq->meta[cmd_index]; |
627 | 627 | ||
628 | txq->time_stamp = jiffies; | ||
629 | |||
628 | pci_unmap_single(priv->pci_dev, | 630 | pci_unmap_single(priv->pci_dev, |
629 | dma_unmap_addr(meta, mapping), | 631 | dma_unmap_addr(meta, mapping), |
630 | dma_unmap_len(meta, len), | 632 | dma_unmap_len(meta, len), |
@@ -645,7 +647,7 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |||
645 | clear_bit(STATUS_HCMD_ACTIVE, &priv->status); | 647 | clear_bit(STATUS_HCMD_ACTIVE, &priv->status); |
646 | IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n", | 648 | IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n", |
647 | iwl_legacy_get_cmd_string(cmd->hdr.cmd)); | 649 | iwl_legacy_get_cmd_string(cmd->hdr.cmd)); |
648 | wake_up_interruptible(&priv->wait_command_queue); | 650 | wake_up(&priv->wait_command_queue); |
649 | } | 651 | } |
650 | 652 | ||
651 | /* Mark as unmapped */ | 653 | /* Mark as unmapped */ |
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c index 795826a014ed..66ee15629a76 100644 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c | |||
@@ -841,7 +841,7 @@ static void iwl3945_rx_card_state_notif(struct iwl_priv *priv, | |||
841 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, | 841 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, |
842 | test_bit(STATUS_RF_KILL_HW, &priv->status)); | 842 | test_bit(STATUS_RF_KILL_HW, &priv->status)); |
843 | else | 843 | else |
844 | wake_up_interruptible(&priv->wait_command_queue); | 844 | wake_up(&priv->wait_command_queue); |
845 | } | 845 | } |
846 | 846 | ||
847 | /** | 847 | /** |
@@ -2269,7 +2269,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv) | |||
2269 | iwl3945_reg_txpower_periodic(priv); | 2269 | iwl3945_reg_txpower_periodic(priv); |
2270 | 2270 | ||
2271 | IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); | 2271 | IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); |
2272 | wake_up_interruptible(&priv->wait_command_queue); | 2272 | wake_up(&priv->wait_command_queue); |
2273 | 2273 | ||
2274 | return; | 2274 | return; |
2275 | 2275 | ||
@@ -2300,7 +2300,7 @@ static void __iwl3945_down(struct iwl_priv *priv) | |||
2300 | iwl_legacy_clear_driver_stations(priv); | 2300 | iwl_legacy_clear_driver_stations(priv); |
2301 | 2301 | ||
2302 | /* Unblock any waiting calls */ | 2302 | /* Unblock any waiting calls */ |
2303 | wake_up_interruptible_all(&priv->wait_command_queue); | 2303 | wake_up_all(&priv->wait_command_queue); |
2304 | 2304 | ||
2305 | /* Wipe out the EXIT_PENDING status bit if we are not actually | 2305 | /* Wipe out the EXIT_PENDING status bit if we are not actually |
2306 | * exiting the module */ | 2306 | * exiting the module */ |
@@ -2853,7 +2853,7 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw) | |||
2853 | 2853 | ||
2854 | /* Wait for START_ALIVE from ucode. Otherwise callbacks from | 2854 | /* Wait for START_ALIVE from ucode. Otherwise callbacks from |
2855 | * mac80211 will not be run successfully. */ | 2855 | * mac80211 will not be run successfully. */ |
2856 | ret = wait_event_interruptible_timeout(priv->wait_command_queue, | 2856 | ret = wait_event_timeout(priv->wait_command_queue, |
2857 | test_bit(STATUS_READY, &priv->status), | 2857 | test_bit(STATUS_READY, &priv->status), |
2858 | UCODE_READY_TIMEOUT); | 2858 | UCODE_READY_TIMEOUT); |
2859 | if (!ret) { | 2859 | if (!ret) { |
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c index 14334668034e..aa0c2539761e 100644 --- a/drivers/net/wireless/iwlegacy/iwl4965-base.c +++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c | |||
@@ -576,7 +576,7 @@ static void iwl4965_rx_card_state_notif(struct iwl_priv *priv, | |||
576 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, | 576 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, |
577 | test_bit(STATUS_RF_KILL_HW, &priv->status)); | 577 | test_bit(STATUS_RF_KILL_HW, &priv->status)); |
578 | else | 578 | else |
579 | wake_up_interruptible(&priv->wait_command_queue); | 579 | wake_up(&priv->wait_command_queue); |
580 | } | 580 | } |
581 | 581 | ||
582 | /** | 582 | /** |
@@ -926,7 +926,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv) | |||
926 | handled |= CSR_INT_BIT_FH_TX; | 926 | handled |= CSR_INT_BIT_FH_TX; |
927 | /* Wake up uCode load routine, now that load is complete */ | 927 | /* Wake up uCode load routine, now that load is complete */ |
928 | priv->ucode_write_complete = 1; | 928 | priv->ucode_write_complete = 1; |
929 | wake_up_interruptible(&priv->wait_command_queue); | 929 | wake_up(&priv->wait_command_queue); |
930 | } | 930 | } |
931 | 931 | ||
932 | if (inta & ~handled) { | 932 | if (inta & ~handled) { |
@@ -1795,7 +1795,7 @@ static void iwl4965_alive_start(struct iwl_priv *priv) | |||
1795 | iwl4965_rf_kill_ct_config(priv); | 1795 | iwl4965_rf_kill_ct_config(priv); |
1796 | 1796 | ||
1797 | IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); | 1797 | IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); |
1798 | wake_up_interruptible(&priv->wait_command_queue); | 1798 | wake_up(&priv->wait_command_queue); |
1799 | 1799 | ||
1800 | iwl_legacy_power_update_mode(priv, true); | 1800 | iwl_legacy_power_update_mode(priv, true); |
1801 | IWL_DEBUG_INFO(priv, "Updated power mode\n"); | 1801 | IWL_DEBUG_INFO(priv, "Updated power mode\n"); |
@@ -1828,7 +1828,7 @@ static void __iwl4965_down(struct iwl_priv *priv) | |||
1828 | iwl_legacy_clear_driver_stations(priv); | 1828 | iwl_legacy_clear_driver_stations(priv); |
1829 | 1829 | ||
1830 | /* Unblock any waiting calls */ | 1830 | /* Unblock any waiting calls */ |
1831 | wake_up_interruptible_all(&priv->wait_command_queue); | 1831 | wake_up_all(&priv->wait_command_queue); |
1832 | 1832 | ||
1833 | /* Wipe out the EXIT_PENDING status bit if we are not actually | 1833 | /* Wipe out the EXIT_PENDING status bit if we are not actually |
1834 | * exiting the module */ | 1834 | * exiting the module */ |
@@ -2266,7 +2266,7 @@ int iwl4965_mac_start(struct ieee80211_hw *hw) | |||
2266 | 2266 | ||
2267 | /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from | 2267 | /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from |
2268 | * mac80211 will not be run successfully. */ | 2268 | * mac80211 will not be run successfully. */ |
2269 | ret = wait_event_interruptible_timeout(priv->wait_command_queue, | 2269 | ret = wait_event_timeout(priv->wait_command_queue, |
2270 | test_bit(STATUS_READY, &priv->status), | 2270 | test_bit(STATUS_READY, &priv->status), |
2271 | UCODE_READY_TIMEOUT); | 2271 | UCODE_READY_TIMEOUT); |
2272 | if (!ret) { | 2272 | if (!ret) { |
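
The iwlegacy hunks replace every wait_event_interruptible*/wake_up_interruptible* pair on wait_command_queue with the non-interruptible variants, so a pending signal can no longer abort a wait for firmware or host-command completion; only the wake_up() from the completion path or the timeout ends the sleep. The waiter/waker pairing looks roughly like this sketch (demo_* names are placeholders):

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
static bool demo_done;

/* Completion side, e.g. an interrupt handler or command-done path. */
static void demo_complete(void)
{
	demo_done = true;
	wake_up(&demo_waitq);
}

/* Waiting side: not woken by signals, bounded by a timeout instead. */
static int demo_wait(unsigned long timeout_ms)
{
	long left = wait_event_timeout(demo_waitq, demo_done,
				       msecs_to_jiffies(timeout_ms));

	return left ? 0 : -ETIMEDOUT;
}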
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index dd6937e97055..77e528f5db88 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c | |||
@@ -405,31 +405,33 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw, | |||
405 | 405 | ||
406 | mutex_lock(&priv->mutex); | 406 | mutex_lock(&priv->mutex); |
407 | 407 | ||
408 | if (test_bit(STATUS_SCANNING, &priv->status) && | ||
409 | priv->scan_type != IWL_SCAN_NORMAL) { | ||
410 | IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); | ||
411 | ret = -EAGAIN; | ||
412 | goto out_unlock; | ||
413 | } | ||
414 | |||
415 | /* mac80211 will only ask for one band at a time */ | ||
416 | priv->scan_request = req; | ||
417 | priv->scan_vif = vif; | ||
418 | |||
419 | /* | 408 | /* |
420 | * If an internal scan is in progress, just set | 409 | * If an internal scan is in progress, just set |
421 | * up the scan_request as per above. | 410 | * up the scan_request as per above. |
422 | */ | 411 | */ |
423 | if (priv->scan_type != IWL_SCAN_NORMAL) { | 412 | if (priv->scan_type != IWL_SCAN_NORMAL) { |
424 | IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n"); | 413 | IWL_DEBUG_SCAN(priv, |
414 | "SCAN request during internal scan - defer\n"); | ||
415 | priv->scan_request = req; | ||
416 | priv->scan_vif = vif; | ||
425 | ret = 0; | 417 | ret = 0; |
426 | } else | 418 | } else { |
419 | priv->scan_request = req; | ||
420 | priv->scan_vif = vif; | ||
421 | /* | ||
422 | * mac80211 will only ask for one band at a time | ||
423 | * so using channels[0] here is ok | ||
424 | */ | ||
427 | ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL, | 425 | ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL, |
428 | req->channels[0]->band); | 426 | req->channels[0]->band); |
427 | if (ret) { | ||
428 | priv->scan_request = NULL; | ||
429 | priv->scan_vif = NULL; | ||
430 | } | ||
431 | } | ||
429 | 432 | ||
430 | IWL_DEBUG_MAC80211(priv, "leave\n"); | 433 | IWL_DEBUG_MAC80211(priv, "leave\n"); |
431 | 434 | ||
432 | out_unlock: | ||
433 | mutex_unlock(&priv->mutex); | 435 | mutex_unlock(&priv->mutex); |
434 | 436 | ||
435 | return ret; | 437 | return ret; |
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index 8b1cef0ffde6..4bf3cf457ef0 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c | |||
@@ -863,6 +863,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
863 | u8 tid = 0; | 863 | u8 tid = 0; |
864 | u16 seq_number = 0; | 864 | u16 seq_number = 0; |
865 | 865 | ||
866 | memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); | ||
866 | if (ieee80211_is_auth(fc)) { | 867 | if (ieee80211_is_auth(fc)) { |
867 | RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n")); | 868 | RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n")); |
868 | rtl_ips_nic_on(hw); | 869 | rtl_ips_nic_on(hw); |
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 0ca86f9ec4ed..182562952c79 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
@@ -327,12 +327,12 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, | |||
327 | xenvif_get(vif); | 327 | xenvif_get(vif); |
328 | 328 | ||
329 | rtnl_lock(); | 329 | rtnl_lock(); |
330 | if (netif_running(vif->dev)) | ||
331 | xenvif_up(vif); | ||
332 | if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) | 330 | if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) |
333 | dev_set_mtu(vif->dev, ETH_DATA_LEN); | 331 | dev_set_mtu(vif->dev, ETH_DATA_LEN); |
334 | netdev_update_features(vif->dev); | 332 | netdev_update_features(vif->dev); |
335 | netif_carrier_on(vif->dev); | 333 | netif_carrier_on(vif->dev); |
334 | if (netif_running(vif->dev)) | ||
335 | xenvif_up(vif); | ||
336 | rtnl_unlock(); | 336 | rtnl_unlock(); |
337 | 337 | ||
338 | return 0; | 338 | return 0; |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 4e84fd4a4312..e9651f0a8817 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -77,7 +77,7 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; | |||
77 | unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; | 77 | unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; |
78 | unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; | 78 | unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; |
79 | 79 | ||
80 | enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE; | 80 | enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF; |
81 | 81 | ||
82 | /* | 82 | /* |
83 | * The default CLS is used if arch didn't set CLS explicitly and not | 83 | * The default CLS is used if arch didn't set CLS explicitly and not |
@@ -3568,10 +3568,14 @@ static int __init pci_setup(char *str) | |||
3568 | pci_hotplug_io_size = memparse(str + 9, &str); | 3568 | pci_hotplug_io_size = memparse(str + 9, &str); |
3569 | } else if (!strncmp(str, "hpmemsize=", 10)) { | 3569 | } else if (!strncmp(str, "hpmemsize=", 10)) { |
3570 | pci_hotplug_mem_size = memparse(str + 10, &str); | 3570 | pci_hotplug_mem_size = memparse(str + 10, &str); |
3571 | } else if (!strncmp(str, "pcie_bus_tune_off", 17)) { | ||
3572 | pcie_bus_config = PCIE_BUS_TUNE_OFF; | ||
3571 | } else if (!strncmp(str, "pcie_bus_safe", 13)) { | 3573 | } else if (!strncmp(str, "pcie_bus_safe", 13)) { |
3572 | pcie_bus_config = PCIE_BUS_SAFE; | 3574 | pcie_bus_config = PCIE_BUS_SAFE; |
3573 | } else if (!strncmp(str, "pcie_bus_perf", 13)) { | 3575 | } else if (!strncmp(str, "pcie_bus_perf", 13)) { |
3574 | pcie_bus_config = PCIE_BUS_PERFORMANCE; | 3576 | pcie_bus_config = PCIE_BUS_PERFORMANCE; |
3577 | } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) { | ||
3578 | pcie_bus_config = PCIE_BUS_PEER2PEER; | ||
3575 | } else { | 3579 | } else { |
3576 | printk(KERN_ERR "PCI: Unknown option `%s'\n", | 3580 | printk(KERN_ERR "PCI: Unknown option `%s'\n", |
3577 | str); | 3581 | str); |
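
The pci.c hunk switches the built-in default to PCIE_BUS_TUNE_OFF and adds "pcie_bus_tune_off" and "pcie_bus_peer2peer" to the strncmp() chain that parses the pci= boot option. The same early-parameter mechanism is sketched below for a hypothetical "demo_bus=" option (the option name and modes are invented):

#include <linux/init.h>
#include <linux/string.h>

static int demo_bus_mode;	/* 0 = tune off (default), 1 = safe, 2 = perf */

/* Early parser for a hypothetical "demo_bus=" boot parameter. */
static int __init demo_bus_setup(char *str)
{
	while (str) {
		char *next = strchr(str, ',');

		if (next)
			*next++ = '\0';

		if (!strncmp(str, "safe", 4))
			demo_bus_mode = 1;
		else if (!strncmp(str, "perf", 4))
			demo_bus_mode = 2;
		else if (!strncmp(str, "off", 3))
			demo_bus_mode = 0;

		str = next;
	}
	return 1;	/* option consumed */
}
__setup("demo_bus=", demo_bus_setup);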
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index f3f94a5c068f..6ab6bd3df4b2 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -1458,12 +1458,24 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data) | |||
1458 | */ | 1458 | */ |
1459 | void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) | 1459 | void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) |
1460 | { | 1460 | { |
1461 | u8 smpss = mpss; | 1461 | u8 smpss; |
1462 | 1462 | ||
1463 | if (!pci_is_pcie(bus->self)) | 1463 | if (!pci_is_pcie(bus->self)) |
1464 | return; | 1464 | return; |
1465 | 1465 | ||
1466 | if (pcie_bus_config == PCIE_BUS_TUNE_OFF) | ||
1467 | return; | ||
1468 | |||
1469 | /* FIXME - Peer to peer DMA is possible, though the endpoint would need | ||
1470 | * to be aware to the MPS of the destination. To work around this, | ||
1471 | * simply force the MPS of the entire system to the smallest possible. | ||
1472 | */ | ||
1473 | if (pcie_bus_config == PCIE_BUS_PEER2PEER) | ||
1474 | smpss = 0; | ||
1475 | |||
1466 | if (pcie_bus_config == PCIE_BUS_SAFE) { | 1476 | if (pcie_bus_config == PCIE_BUS_SAFE) { |
1477 | smpss = mpss; | ||
1478 | |||
1467 | pcie_find_smpss(bus->self, &smpss); | 1479 | pcie_find_smpss(bus->self, &smpss); |
1468 | pci_walk_bus(bus, pcie_find_smpss, &smpss); | 1480 | pci_walk_bus(bus, pcie_find_smpss, &smpss); |
1469 | } | 1481 | } |
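
In probe.c, pcie_bus_configure_settings() now returns immediately in TUNE_OFF mode and forces smpss to 0 for PEER2PEER, clamping the whole fabric to the smallest payload size every peer can accept. The PCIe Max_Payload_Size field encodes bytes as 128 << code, so code 0 corresponds to 128-byte payloads; a one-line helper to make the mapping explicit (illustrative, not kernel API):

#include <linux/types.h>

/* PCIe Max_Payload_Size encoding: bytes = 128 << code, code 0..5. */
static inline int demo_mps_code_to_bytes(u8 code)
{
	return 128 << code;
}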
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index cbde448f9947..eb3140ee821e 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -654,8 +654,8 @@ static struct io_subchannel_private console_priv; | |||
654 | static int console_subchannel_in_use; | 654 | static int console_subchannel_in_use; |
655 | 655 | ||
656 | /* | 656 | /* |
657 | * Use tpi to get a pending interrupt, call the interrupt handler and | 657 | * Use cio_tpi to get a pending interrupt and call the interrupt handler. |
658 | * return a pointer to the subchannel structure. | 658 | * Return non-zero if an interrupt was processed, zero otherwise. |
659 | */ | 659 | */ |
660 | static int cio_tpi(void) | 660 | static int cio_tpi(void) |
661 | { | 661 | { |
@@ -667,6 +667,10 @@ static int cio_tpi(void) | |||
667 | tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; | 667 | tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; |
668 | if (tpi(NULL) != 1) | 668 | if (tpi(NULL) != 1) |
669 | return 0; | 669 | return 0; |
670 | if (tpi_info->adapter_IO) { | ||
671 | do_adapter_IO(tpi_info->isc); | ||
672 | return 1; | ||
673 | } | ||
670 | irb = (struct irb *)&S390_lowcore.irb; | 674 | irb = (struct irb *)&S390_lowcore.irb; |
671 | /* Store interrupt response block to lowcore. */ | 675 | /* Store interrupt response block to lowcore. */ |
672 | if (tsch(tpi_info->schid, irb) != 0) | 676 | if (tsch(tpi_info->schid, irb) != 0) |
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index b7bd5b0cc7aa..3868ab2397c6 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c | |||
@@ -1800,10 +1800,12 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ | |||
1800 | switch (retval) { | 1800 | switch (retval) { |
1801 | case SCSI_MLQUEUE_HOST_BUSY: | 1801 | case SCSI_MLQUEUE_HOST_BUSY: |
1802 | twa_free_request_id(tw_dev, request_id); | 1802 | twa_free_request_id(tw_dev, request_id); |
1803 | twa_unmap_scsi_data(tw_dev, request_id); | ||
1803 | break; | 1804 | break; |
1804 | case 1: | 1805 | case 1: |
1805 | tw_dev->state[request_id] = TW_S_COMPLETED; | 1806 | tw_dev->state[request_id] = TW_S_COMPLETED; |
1806 | twa_free_request_id(tw_dev, request_id); | 1807 | twa_free_request_id(tw_dev, request_id); |
1808 | twa_unmap_scsi_data(tw_dev, request_id); | ||
1807 | SCpnt->result = (DID_ERROR << 16); | 1809 | SCpnt->result = (DID_ERROR << 16); |
1808 | done(SCpnt); | 1810 | done(SCpnt); |
1809 | retval = 0; | 1811 | retval = 0; |
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 3c08f5352b2d..6153a66a8a31 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile | |||
@@ -88,7 +88,7 @@ obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o | |||
88 | obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o | 88 | obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o |
89 | obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o | 89 | obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o |
90 | obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/ | 90 | obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/ |
91 | obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/ | 91 | obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/ |
92 | obj-$(CONFIG_SCSI_LPFC) += lpfc/ | 92 | obj-$(CONFIG_SCSI_LPFC) += lpfc/ |
93 | obj-$(CONFIG_SCSI_BFA_FC) += bfa/ | 93 | obj-$(CONFIG_SCSI_BFA_FC) += bfa/ |
94 | obj-$(CONFIG_SCSI_PAS16) += pas16.o | 94 | obj-$(CONFIG_SCSI_PAS16) += pas16.o |
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index e7d0d47b9185..e5f2d7d9002e 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c | |||
@@ -1283,6 +1283,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced) | |||
1283 | kfree(aac->queues); | 1283 | kfree(aac->queues); |
1284 | aac->queues = NULL; | 1284 | aac->queues = NULL; |
1285 | free_irq(aac->pdev->irq, aac); | 1285 | free_irq(aac->pdev->irq, aac); |
1286 | if (aac->msi) | ||
1287 | pci_disable_msi(aac->pdev); | ||
1286 | kfree(aac->fsa_dev); | 1288 | kfree(aac->fsa_dev); |
1287 | aac->fsa_dev = NULL; | 1289 | aac->fsa_dev = NULL; |
1288 | quirks = aac_get_driver_ident(index)->quirks; | 1290 | quirks = aac_get_driver_ident(index)->quirks; |
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index bd22041e2789..f58644850333 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | |||
@@ -913,7 +913,7 @@ static void l2t_put(struct cxgbi_sock *csk) | |||
913 | struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; | 913 | struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; |
914 | 914 | ||
915 | if (csk->l2t) { | 915 | if (csk->l2t) { |
916 | l2t_release(L2DATA(t3dev), csk->l2t); | 916 | l2t_release(t3dev, csk->l2t); |
917 | csk->l2t = NULL; | 917 | csk->l2t = NULL; |
918 | cxgbi_sock_put(csk); | 918 | cxgbi_sock_put(csk); |
919 | } | 919 | } |
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index f84084bba2f0..16ad97df5ba6 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c | |||
@@ -1721,7 +1721,7 @@ static int sas_find_bcast_dev(struct domain_device *dev, | |||
1721 | list_for_each_entry(ch, &ex->children, siblings) { | 1721 | list_for_each_entry(ch, &ex->children, siblings) { |
1722 | if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { | 1722 | if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { |
1723 | res = sas_find_bcast_dev(ch, src_dev); | 1723 | res = sas_find_bcast_dev(ch, src_dev); |
1724 | if (src_dev) | 1724 | if (*src_dev) |
1725 | return res; | 1725 | return res; |
1726 | } | 1726 | } |
1727 | } | 1727 | } |
@@ -1769,10 +1769,12 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent, | |||
1769 | sas_disable_routing(parent, phy->attached_sas_addr); | 1769 | sas_disable_routing(parent, phy->attached_sas_addr); |
1770 | } | 1770 | } |
1771 | memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); | 1771 | memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); |
1772 | sas_port_delete_phy(phy->port, phy->phy); | 1772 | if (phy->port) { |
1773 | if (phy->port->num_phys == 0) | 1773 | sas_port_delete_phy(phy->port, phy->phy); |
1774 | sas_port_delete(phy->port); | 1774 | if (phy->port->num_phys == 0) |
1775 | phy->port = NULL; | 1775 | sas_port_delete(phy->port); |
1776 | phy->port = NULL; | ||
1777 | } | ||
1776 | } | 1778 | } |
1777 | 1779 | ||
1778 | static int sas_discover_bfs_by_root_level(struct domain_device *root, | 1780 | static int sas_discover_bfs_by_root_level(struct domain_device *root, |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 4cace3f20c04..1e69527f1e4e 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -1328,10 +1328,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) | |||
1328 | qla2x00_sp_compl(ha, sp); | 1328 | qla2x00_sp_compl(ha, sp); |
1329 | } else { | 1329 | } else { |
1330 | ctx = sp->ctx; | 1330 | ctx = sp->ctx; |
1331 | if (ctx->type == SRB_LOGIN_CMD || | 1331 | if (ctx->type == SRB_ELS_CMD_RPT || |
1332 | ctx->type == SRB_LOGOUT_CMD) { | 1332 | ctx->type == SRB_ELS_CMD_HST || |
1333 | ctx->u.iocb_cmd->free(sp); | 1333 | ctx->type == SRB_CT_CMD) { |
1334 | } else { | ||
1335 | struct fc_bsg_job *bsg_job = | 1334 | struct fc_bsg_job *bsg_job = |
1336 | ctx->u.bsg_job; | 1335 | ctx->u.bsg_job; |
1337 | if (bsg_job->request->msgcode | 1336 | if (bsg_job->request->msgcode |
@@ -1343,6 +1342,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) | |||
1343 | kfree(sp->ctx); | 1342 | kfree(sp->ctx); |
1344 | mempool_free(sp, | 1343 | mempool_free(sp, |
1345 | ha->srb_mempool); | 1344 | ha->srb_mempool); |
1345 | } else { | ||
1346 | ctx->u.iocb_cmd->free(sp); | ||
1346 | } | 1347 | } |
1347 | } | 1348 | } |
1348 | } | 1349 | } |
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 1d23f3831866..6a80749391db 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c | |||
@@ -50,6 +50,8 @@ | |||
50 | #define PCH_RX_THOLD 7 | 50 | #define PCH_RX_THOLD 7 |
51 | #define PCH_RX_THOLD_MAX 15 | 51 | #define PCH_RX_THOLD_MAX 15 |
52 | 52 | ||
53 | #define PCH_TX_THOLD 2 | ||
54 | |||
53 | #define PCH_MAX_BAUDRATE 5000000 | 55 | #define PCH_MAX_BAUDRATE 5000000 |
54 | #define PCH_MAX_FIFO_DEPTH 16 | 56 | #define PCH_MAX_FIFO_DEPTH 16 |
55 | 57 | ||
@@ -58,6 +60,7 @@ | |||
58 | #define PCH_SLEEP_TIME 10 | 60 | #define PCH_SLEEP_TIME 10 |
59 | 61 | ||
60 | #define SSN_LOW 0x02U | 62 | #define SSN_LOW 0x02U |
63 | #define SSN_HIGH 0x03U | ||
61 | #define SSN_NO_CONTROL 0x00U | 64 | #define SSN_NO_CONTROL 0x00U |
62 | #define PCH_MAX_CS 0xFF | 65 | #define PCH_MAX_CS 0xFF |
63 | #define PCI_DEVICE_ID_GE_SPI 0x8816 | 66 | #define PCI_DEVICE_ID_GE_SPI 0x8816 |
@@ -316,16 +319,19 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val, | |||
316 | 319 | ||
317 | /* if transfer complete interrupt */ | 320 | /* if transfer complete interrupt */ |
318 | if (reg_spsr_val & SPSR_FI_BIT) { | 321 | if (reg_spsr_val & SPSR_FI_BIT) { |
319 | if (tx_index < bpw_len) | 322 | if ((tx_index == bpw_len) && (rx_index == tx_index)) { |
323 | /* disable interrupts */ | ||
324 | pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); | ||
325 | |||
326 | /* transfer is completed; | ||
327 | inform pch_spi_process_messages */ | ||
328 | data->transfer_complete = true; | ||
329 | data->transfer_active = false; | ||
330 | wake_up(&data->wait); | ||
331 | } else { | ||
320 | dev_err(&data->master->dev, | 332 | dev_err(&data->master->dev, |
321 | "%s : Transfer is not completed", __func__); | 333 | "%s : Transfer is not completed", __func__); |
322 | /* disable interrupts */ | 334 | } |
323 | pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); | ||
324 | |||
325 | /* transfer is completed;inform pch_spi_process_messages */ | ||
326 | data->transfer_complete = true; | ||
327 | data->transfer_active = false; | ||
328 | wake_up(&data->wait); | ||
329 | } | 335 | } |
330 | } | 336 | } |
331 | 337 | ||
@@ -348,16 +354,26 @@ static irqreturn_t pch_spi_handler(int irq, void *dev_id) | |||
348 | "%s returning due to suspend\n", __func__); | 354 | "%s returning due to suspend\n", __func__); |
349 | return IRQ_NONE; | 355 | return IRQ_NONE; |
350 | } | 356 | } |
351 | if (data->use_dma) | ||
352 | return IRQ_NONE; | ||
353 | 357 | ||
354 | io_remap_addr = data->io_remap_addr; | 358 | io_remap_addr = data->io_remap_addr; |
355 | spsr = io_remap_addr + PCH_SPSR; | 359 | spsr = io_remap_addr + PCH_SPSR; |
356 | 360 | ||
357 | reg_spsr_val = ioread32(spsr); | 361 | reg_spsr_val = ioread32(spsr); |
358 | 362 | ||
359 | if (reg_spsr_val & SPSR_ORF_BIT) | 363 | if (reg_spsr_val & SPSR_ORF_BIT) { |
360 | dev_err(&board_dat->pdev->dev, "%s Over run error", __func__); | 364 | dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__); |
365 | if (data->current_msg->complete != 0) { | ||
366 | data->transfer_complete = true; | ||
367 | data->current_msg->status = -EIO; | ||
368 | data->current_msg->complete(data->current_msg->context); | ||
369 | data->bcurrent_msg_processing = false; | ||
370 | data->current_msg = NULL; | ||
371 | data->cur_trans = NULL; | ||
372 | } | ||
373 | } | ||
374 | |||
375 | if (data->use_dma) | ||
376 | return IRQ_NONE; | ||
361 | 377 | ||
362 | /* Check if the interrupt is for SPI device */ | 378 | /* Check if the interrupt is for SPI device */ |
363 | if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { | 379 | if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { |
@@ -756,10 +772,6 @@ static void pch_spi_set_ir(struct pch_spi_data *data) | |||
756 | 772 | ||
757 | wait_event_interruptible(data->wait, data->transfer_complete); | 773 | wait_event_interruptible(data->wait, data->transfer_complete); |
758 | 774 | ||
759 | pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); | ||
760 | dev_dbg(&data->master->dev, | ||
761 | "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); | ||
762 | |||
763 | /* clear all interrupts */ | 775 | /* clear all interrupts */ |
764 | pch_spi_writereg(data->master, PCH_SPSR, | 776 | pch_spi_writereg(data->master, PCH_SPSR, |
765 | pch_spi_readreg(data->master, PCH_SPSR)); | 777 | pch_spi_readreg(data->master, PCH_SPSR)); |
@@ -815,10 +827,11 @@ static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw) | |||
815 | } | 827 | } |
816 | } | 828 | } |
817 | 829 | ||
818 | static void pch_spi_start_transfer(struct pch_spi_data *data) | 830 | static int pch_spi_start_transfer(struct pch_spi_data *data) |
819 | { | 831 | { |
820 | struct pch_spi_dma_ctrl *dma; | 832 | struct pch_spi_dma_ctrl *dma; |
821 | unsigned long flags; | 833 | unsigned long flags; |
834 | int rtn; | ||
822 | 835 | ||
823 | dma = &data->dma; | 836 | dma = &data->dma; |
824 | 837 | ||
@@ -833,19 +846,23 @@ static void pch_spi_start_transfer(struct pch_spi_data *data) | |||
833 | initiating the transfer. */ | 846 | initiating the transfer. */ |
834 | dev_dbg(&data->master->dev, | 847 | dev_dbg(&data->master->dev, |
835 | "%s:waiting for transfer to get over\n", __func__); | 848 | "%s:waiting for transfer to get over\n", __func__); |
836 | wait_event_interruptible(data->wait, data->transfer_complete); | 849 | rtn = wait_event_interruptible_timeout(data->wait, |
850 | data->transfer_complete, | ||
851 | msecs_to_jiffies(2 * HZ)); | ||
837 | 852 | ||
838 | dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent, | 853 | dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent, |
839 | DMA_FROM_DEVICE); | 854 | DMA_FROM_DEVICE); |
855 | |||
856 | dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent, | ||
857 | DMA_FROM_DEVICE); | ||
858 | memset(data->dma.tx_buf_virt, 0, PAGE_SIZE); | ||
859 | |||
840 | async_tx_ack(dma->desc_rx); | 860 | async_tx_ack(dma->desc_rx); |
841 | async_tx_ack(dma->desc_tx); | 861 | async_tx_ack(dma->desc_tx); |
842 | kfree(dma->sg_tx_p); | 862 | kfree(dma->sg_tx_p); |
843 | kfree(dma->sg_rx_p); | 863 | kfree(dma->sg_rx_p); |
844 | 864 | ||
845 | spin_lock_irqsave(&data->lock, flags); | 865 | spin_lock_irqsave(&data->lock, flags); |
846 | pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); | ||
847 | dev_dbg(&data->master->dev, | ||
848 | "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); | ||
849 | 866 | ||
850 | /* clear fifo threshold, disable interrupts, disable SPI transfer */ | 867 | /* clear fifo threshold, disable interrupts, disable SPI transfer */ |
851 | pch_spi_setclr_reg(data->master, PCH_SPCR, 0, | 868 | pch_spi_setclr_reg(data->master, PCH_SPCR, 0, |
@@ -858,6 +875,8 @@ static void pch_spi_start_transfer(struct pch_spi_data *data) | |||
858 | pch_spi_clear_fifo(data->master); | 875 | pch_spi_clear_fifo(data->master); |
859 | 876 | ||
860 | spin_unlock_irqrestore(&data->lock, flags); | 877 | spin_unlock_irqrestore(&data->lock, flags); |
878 | |||
879 | return rtn; | ||
861 | } | 880 | } |
862 | 881 | ||
863 | static void pch_dma_rx_complete(void *arg) | 882 | static void pch_dma_rx_complete(void *arg) |
@@ -1023,8 +1042,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) | |||
1023 | /* set receive fifo threshold and transmit fifo threshold */ | 1042 | /* set receive fifo threshold and transmit fifo threshold */ |
1024 | pch_spi_setclr_reg(data->master, PCH_SPCR, | 1043 | pch_spi_setclr_reg(data->master, PCH_SPCR, |
1025 | ((size - 1) << SPCR_RFIC_FIELD) | | 1044 | ((size - 1) << SPCR_RFIC_FIELD) | |
1026 | ((PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE) << | 1045 | (PCH_TX_THOLD << SPCR_TFIC_FIELD), |
1027 | SPCR_TFIC_FIELD), | ||
1028 | MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS); | 1046 | MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS); |
1029 | 1047 | ||
1030 | spin_unlock_irqrestore(&data->lock, flags); | 1048 | spin_unlock_irqrestore(&data->lock, flags); |
@@ -1035,13 +1053,20 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) | |||
1035 | /* offset, length setting */ | 1053 | /* offset, length setting */ |
1036 | sg = dma->sg_rx_p; | 1054 | sg = dma->sg_rx_p; |
1037 | for (i = 0; i < num; i++, sg++) { | 1055 | for (i = 0; i < num; i++, sg++) { |
1038 | if (i == 0) { | 1056 | if (i == (num - 2)) { |
1039 | sg->offset = 0; | 1057 | sg->offset = size * i; |
1058 | sg->offset = sg->offset * (*bpw / 8); | ||
1040 | sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem, | 1059 | sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem, |
1041 | sg->offset); | 1060 | sg->offset); |
1042 | sg_dma_len(sg) = rem; | 1061 | sg_dma_len(sg) = rem; |
1062 | } else if (i == (num - 1)) { | ||
1063 | sg->offset = size * (i - 1) + rem; | ||
1064 | sg->offset = sg->offset * (*bpw / 8); | ||
1065 | sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, | ||
1066 | sg->offset); | ||
1067 | sg_dma_len(sg) = size; | ||
1043 | } else { | 1068 | } else { |
1044 | sg->offset = rem + size * (i - 1); | 1069 | sg->offset = size * i; |
1045 | sg->offset = sg->offset * (*bpw / 8); | 1070 | sg->offset = sg->offset * (*bpw / 8); |
1046 | sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, | 1071 | sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, |
1047 | sg->offset); | 1072 | sg->offset); |
@@ -1065,6 +1090,16 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) | |||
1065 | dma->desc_rx = desc_rx; | 1090 | dma->desc_rx = desc_rx; |
1066 | 1091 | ||
1067 | /* TX */ | 1092 | /* TX */ |
1093 | if (data->bpw_len > PCH_DMA_TRANS_SIZE) { | ||
1094 | num = data->bpw_len / PCH_DMA_TRANS_SIZE; | ||
1095 | size = PCH_DMA_TRANS_SIZE; | ||
1096 | rem = 16; | ||
1097 | } else { | ||
1098 | num = 1; | ||
1099 | size = data->bpw_len; | ||
1100 | rem = data->bpw_len; | ||
1101 | } | ||
1102 | |||
1068 | dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC); | 1103 | dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC); |
1069 | sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */ | 1104 | sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */ |
1070 | /* offset, length setting */ | 1105 | /* offset, length setting */ |
@@ -1162,6 +1197,7 @@ static void pch_spi_process_messages(struct work_struct *pwork) | |||
1162 | if (data->use_dma) | 1197 | if (data->use_dma) |
1163 | pch_spi_request_dma(data, | 1198 | pch_spi_request_dma(data, |
1164 | data->current_msg->spi->bits_per_word); | 1199 | data->current_msg->spi->bits_per_word); |
1200 | pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); | ||
1165 | do { | 1201 | do { |
1166 | /* If we are already processing a message get the next | 1202 | /* If we are already processing a message get the next |
1167 | transfer structure from the message otherwise retrieve | 1203 | transfer structure from the message otherwise retrieve |
@@ -1184,7 +1220,8 @@ static void pch_spi_process_messages(struct work_struct *pwork) | |||
1184 | 1220 | ||
1185 | if (data->use_dma) { | 1221 | if (data->use_dma) { |
1186 | pch_spi_handle_dma(data, &bpw); | 1222 | pch_spi_handle_dma(data, &bpw); |
1187 | pch_spi_start_transfer(data); | 1223 | if (!pch_spi_start_transfer(data)) |
1224 | goto out; | ||
1188 | pch_spi_copy_rx_data_for_dma(data, bpw); | 1225 | pch_spi_copy_rx_data_for_dma(data, bpw); |
1189 | } else { | 1226 | } else { |
1190 | pch_spi_set_tx(data, &bpw); | 1227 | pch_spi_set_tx(data, &bpw); |
@@ -1222,6 +1259,8 @@ static void pch_spi_process_messages(struct work_struct *pwork) | |||
1222 | 1259 | ||
1223 | } while (data->cur_trans != NULL); | 1260 | } while (data->cur_trans != NULL); |
1224 | 1261 | ||
1262 | out: | ||
1263 | pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH); | ||
1225 | if (data->use_dma) | 1264 | if (data->use_dma) |
1226 | pch_spi_release_dma(data); | 1265 | pch_spi_release_dma(data); |
1227 | } | 1266 | } |
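
The reworked receive scatter-gather setup in the spi-topcliff-pch hunk above is easiest to follow as plain arithmetic: every segment is size entries long except one remainder-sized segment, which now sits second from last, and each offset is scaled to bytes by the transfer's bits-per-word. A minimal standalone sketch of that layout follows; num, size, rem and bpw are made-up example values, not driver state, and this is not the driver code itself.

    #include <stdio.h>

    /* Sketch of the rx scatter-gather layout computed in the hunk above.
     * Example values only; in the driver these come from bpw_len and
     * PCH_DMA_TRANS_SIZE. */
    int main(void)
    {
        int num  = 4;   /* number of sg entries (assumed >= 2 here) */
        int size = 12;  /* full chunk length, in FIFO entries */
        int rem  = 5;   /* remainder chunk length */
        int bpw  = 16;  /* bits per word; offsets are scaled to bytes */
        int i;

        for (i = 0; i < num; i++) {
            int off, len;

            if (i == num - 2) {          /* remainder chunk, second from last */
                off = size * i;
                len = rem;
            } else if (i == num - 1) {   /* final full chunk follows the remainder */
                off = size * (i - 1) + rem;
                len = size;
            } else {                     /* leading full-size chunks */
                off = size * i;
                len = size;
            }
            off *= bpw / 8;              /* entries -> bytes, as in the driver */
            printf("sg[%d]: offset=%d bytes, len=%d entries\n", i, off, len);
        }
        return 0;
    }

Running the sketch shows the segments covering size*(num-1)+rem entries with no gaps, which is the property the rewritten loop is arranging for.
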
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c index 1a7c19ae766f..8b307b428791 100644 --- a/drivers/staging/octeon/ethernet-rx.c +++ b/drivers/staging/octeon/ethernet-rx.c | |||
@@ -411,7 +411,8 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) | |||
411 | skb->protocol = eth_type_trans(skb, dev); | 411 | skb->protocol = eth_type_trans(skb, dev); |
412 | skb->dev = dev; | 412 | skb->dev = dev; |
413 | 413 | ||
414 | if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error)) | 414 | if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || |
415 | work->word2.s.L4_error || !work->word2.s.tcp_or_udp)) | ||
415 | skb->ip_summed = CHECKSUM_NONE; | 416 | skb->ip_summed = CHECKSUM_NONE; |
416 | else | 417 | else |
417 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 418 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c index 58cf279ed879..bc95f52cad8b 100644 --- a/drivers/tty/serial/lantiq.c +++ b/drivers/tty/serial/lantiq.c | |||
@@ -478,8 +478,10 @@ lqasc_set_termios(struct uart_port *port, | |||
478 | spin_unlock_irqrestore(&ltq_asc_lock, flags); | 478 | spin_unlock_irqrestore(&ltq_asc_lock, flags); |
479 | 479 | ||
480 | /* Don't rewrite B0 */ | 480 | /* Don't rewrite B0 */ |
481 | if (tty_termios_baud_rate(new)) | 481 | if (tty_termios_baud_rate(new)) |
482 | tty_termios_encode_baud_rate(new, baud, baud); | 482 | tty_termios_encode_baud_rate(new, baud, baud); |
483 | |||
484 | uart_update_timeout(port, cflag, baud); | ||
483 | } | 485 | } |
484 | 486 | ||
485 | static const char* | 487 | static const char* |
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index e6ba83876508..776790272454 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c | |||
@@ -19,6 +19,7 @@ | |||
19 | # define SUPPORT_SYSRQ | 19 | # define SUPPORT_SYSRQ |
20 | #endif | 20 | #endif |
21 | 21 | ||
22 | #include <linux/atomic.h> | ||
22 | #include <linux/hrtimer.h> | 23 | #include <linux/hrtimer.h> |
23 | #include <linux/module.h> | 24 | #include <linux/module.h> |
24 | #include <linux/io.h> | 25 | #include <linux/io.h> |
@@ -33,6 +34,8 @@ | |||
33 | #include <linux/clk.h> | 34 | #include <linux/clk.h> |
34 | #include <linux/platform_device.h> | 35 | #include <linux/platform_device.h> |
35 | #include <linux/delay.h> | 36 | #include <linux/delay.h> |
37 | #include <linux/of.h> | ||
38 | #include <linux/of_device.h> | ||
36 | 39 | ||
37 | #include "msm_serial.h" | 40 | #include "msm_serial.h" |
38 | 41 | ||
@@ -589,9 +592,8 @@ static void msm_release_port(struct uart_port *port) | |||
589 | iowrite32(GSBI_PROTOCOL_IDLE, msm_port->gsbi_base + | 592 | iowrite32(GSBI_PROTOCOL_IDLE, msm_port->gsbi_base + |
590 | GSBI_CONTROL); | 593 | GSBI_CONTROL); |
591 | 594 | ||
592 | gsbi_resource = platform_get_resource_byname(pdev, | 595 | gsbi_resource = platform_get_resource(pdev, |
593 | IORESOURCE_MEM, | 596 | IORESOURCE_MEM, 1); |
594 | "gsbi_resource"); | ||
595 | 597 | ||
596 | if (unlikely(!gsbi_resource)) | 598 | if (unlikely(!gsbi_resource)) |
597 | return; | 599 | return; |
@@ -612,8 +614,7 @@ static int msm_request_port(struct uart_port *port) | |||
612 | resource_size_t size; | 614 | resource_size_t size; |
613 | int ret; | 615 | int ret; |
614 | 616 | ||
615 | uart_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, | 617 | uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
616 | "uart_resource"); | ||
617 | if (unlikely(!uart_resource)) | 618 | if (unlikely(!uart_resource)) |
618 | return -ENXIO; | 619 | return -ENXIO; |
619 | 620 | ||
@@ -628,8 +629,7 @@ static int msm_request_port(struct uart_port *port) | |||
628 | goto fail_release_port; | 629 | goto fail_release_port; |
629 | } | 630 | } |
630 | 631 | ||
631 | gsbi_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, | 632 | gsbi_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
632 | "gsbi_resource"); | ||
633 | /* Is this a GSBI-based port? */ | 633 | /* Is this a GSBI-based port? */ |
634 | if (gsbi_resource) { | 634 | if (gsbi_resource) { |
635 | size = resource_size(gsbi_resource); | 635 | size = resource_size(gsbi_resource); |
@@ -859,6 +859,8 @@ static struct uart_driver msm_uart_driver = { | |||
859 | .cons = MSM_CONSOLE, | 859 | .cons = MSM_CONSOLE, |
860 | }; | 860 | }; |
861 | 861 | ||
862 | static atomic_t msm_uart_next_id = ATOMIC_INIT(0); | ||
863 | |||
862 | static int __init msm_serial_probe(struct platform_device *pdev) | 864 | static int __init msm_serial_probe(struct platform_device *pdev) |
863 | { | 865 | { |
864 | struct msm_port *msm_port; | 866 | struct msm_port *msm_port; |
@@ -866,6 +868,9 @@ static int __init msm_serial_probe(struct platform_device *pdev) | |||
866 | struct uart_port *port; | 868 | struct uart_port *port; |
867 | int irq; | 869 | int irq; |
868 | 870 | ||
871 | if (pdev->id == -1) | ||
872 | pdev->id = atomic_inc_return(&msm_uart_next_id) - 1; | ||
873 | |||
869 | if (unlikely(pdev->id < 0 || pdev->id >= UART_NR)) | 874 | if (unlikely(pdev->id < 0 || pdev->id >= UART_NR)) |
870 | return -ENXIO; | 875 | return -ENXIO; |
871 | 876 | ||
@@ -875,7 +880,7 @@ static int __init msm_serial_probe(struct platform_device *pdev) | |||
875 | port->dev = &pdev->dev; | 880 | port->dev = &pdev->dev; |
876 | msm_port = UART_TO_MSM(port); | 881 | msm_port = UART_TO_MSM(port); |
877 | 882 | ||
878 | if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsbi_resource")) | 883 | if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) |
879 | msm_port->is_uartdm = 1; | 884 | msm_port->is_uartdm = 1; |
880 | else | 885 | else |
881 | msm_port->is_uartdm = 0; | 886 | msm_port->is_uartdm = 0; |
@@ -899,8 +904,7 @@ static int __init msm_serial_probe(struct platform_device *pdev) | |||
899 | printk(KERN_INFO "uartclk = %d\n", port->uartclk); | 904 | printk(KERN_INFO "uartclk = %d\n", port->uartclk); |
900 | 905 | ||
901 | 906 | ||
902 | resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, | 907 | resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
903 | "uart_resource"); | ||
904 | if (unlikely(!resource)) | 908 | if (unlikely(!resource)) |
905 | return -ENXIO; | 909 | return -ENXIO; |
906 | port->mapbase = resource->start; | 910 | port->mapbase = resource->start; |
@@ -924,11 +928,17 @@ static int __devexit msm_serial_remove(struct platform_device *pdev) | |||
924 | return 0; | 928 | return 0; |
925 | } | 929 | } |
926 | 930 | ||
931 | static struct of_device_id msm_match_table[] = { | ||
932 | { .compatible = "qcom,msm-uart" }, | ||
933 | {} | ||
934 | }; | ||
935 | |||
927 | static struct platform_driver msm_platform_driver = { | 936 | static struct platform_driver msm_platform_driver = { |
928 | .remove = msm_serial_remove, | 937 | .remove = msm_serial_remove, |
929 | .driver = { | 938 | .driver = { |
930 | .name = "msm_serial", | 939 | .name = "msm_serial", |
931 | .owner = THIS_MODULE, | 940 | .owner = THIS_MODULE, |
941 | .of_match_table = msm_match_table, | ||
932 | }, | 942 | }, |
933 | }; | 943 | }; |
934 | 944 | ||