author    David S. Miller <davem@davemloft.net>    2011-10-07 13:38:43 -0400
committer David S. Miller <davem@davemloft.net>    2011-10-07 13:38:43 -0400
commit    88c5100c28b02c4b2b2c6f6fafbbd76d90f698b9 (patch)
tree      08c4399e0341f7eb0ccb24e15f2cab687275c2a4 /drivers
parent    8083f0fc969d9b5353061a7a6f963405057e26b1 (diff)
parent    3ee72ca99288f1de95ec9c570e43f531c8799f06 (diff)

Merge branch 'master' of github.com:davem330/net
Conflicts:
	net/batman-adv/soft-interface.c
Diffstat (limited to 'drivers')
72 files changed, 539 insertions, 604 deletions
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 2c18d584066d..b97294e2d95b 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -42,6 +42,22 @@ static struct pm_clk_data *__to_pcd(struct device *dev) | |||
42 | } | 42 | } |
43 | 43 | ||
44 | /** | 44 | /** |
45 | * pm_clk_acquire - Acquire a device clock. | ||
46 | * @dev: Device whose clock is to be acquired. | ||
47 | * @ce: PM clock entry corresponding to the clock. | ||
48 | */ | ||
49 | static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce) | ||
50 | { | ||
51 | ce->clk = clk_get(dev, ce->con_id); | ||
52 | if (IS_ERR(ce->clk)) { | ||
53 | ce->status = PCE_STATUS_ERROR; | ||
54 | } else { | ||
55 | ce->status = PCE_STATUS_ACQUIRED; | ||
56 | dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id); | ||
57 | } | ||
58 | } | ||
59 | |||
60 | /** | ||
45 | * pm_clk_add - Start using a device clock for power management. | 61 | * pm_clk_add - Start using a device clock for power management. |
46 | * @dev: Device whose clock is going to be used for power management. | 62 | * @dev: Device whose clock is going to be used for power management. |
47 | * @con_id: Connection ID of the clock. | 63 | * @con_id: Connection ID of the clock. |
@@ -73,6 +89,8 @@ int pm_clk_add(struct device *dev, const char *con_id) | |||
73 | } | 89 | } |
74 | } | 90 | } |
75 | 91 | ||
92 | pm_clk_acquire(dev, ce); | ||
93 | |||
76 | spin_lock_irq(&pcd->lock); | 94 | spin_lock_irq(&pcd->lock); |
77 | list_add_tail(&ce->node, &pcd->clock_list); | 95 | list_add_tail(&ce->node, &pcd->clock_list); |
78 | spin_unlock_irq(&pcd->lock); | 96 | spin_unlock_irq(&pcd->lock); |
@@ -82,17 +100,12 @@ int pm_clk_add(struct device *dev, const char *con_id) | |||
82 | /** | 100 | /** |
83 | * __pm_clk_remove - Destroy PM clock entry. | 101 | * __pm_clk_remove - Destroy PM clock entry. |
84 | * @ce: PM clock entry to destroy. | 102 | * @ce: PM clock entry to destroy. |
85 | * | ||
86 | * This routine must be called under the spinlock protecting the PM list of | ||
87 | * clocks corresponding the the @ce's device. | ||
88 | */ | 103 | */ |
89 | static void __pm_clk_remove(struct pm_clock_entry *ce) | 104 | static void __pm_clk_remove(struct pm_clock_entry *ce) |
90 | { | 105 | { |
91 | if (!ce) | 106 | if (!ce) |
92 | return; | 107 | return; |
93 | 108 | ||
94 | list_del(&ce->node); | ||
95 | |||
96 | if (ce->status < PCE_STATUS_ERROR) { | 109 | if (ce->status < PCE_STATUS_ERROR) { |
97 | if (ce->status == PCE_STATUS_ENABLED) | 110 | if (ce->status == PCE_STATUS_ENABLED) |
98 | clk_disable(ce->clk); | 111 | clk_disable(ce->clk); |
@@ -126,18 +139,22 @@ void pm_clk_remove(struct device *dev, const char *con_id) | |||
126 | spin_lock_irq(&pcd->lock); | 139 | spin_lock_irq(&pcd->lock); |
127 | 140 | ||
128 | list_for_each_entry(ce, &pcd->clock_list, node) { | 141 | list_for_each_entry(ce, &pcd->clock_list, node) { |
129 | if (!con_id && !ce->con_id) { | 142 | if (!con_id && !ce->con_id) |
130 | __pm_clk_remove(ce); | 143 | goto remove; |
131 | break; | 144 | else if (!con_id || !ce->con_id) |
132 | } else if (!con_id || !ce->con_id) { | ||
133 | continue; | 145 | continue; |
134 | } else if (!strcmp(con_id, ce->con_id)) { | 146 | else if (!strcmp(con_id, ce->con_id)) |
135 | __pm_clk_remove(ce); | 147 | goto remove; |
136 | break; | ||
137 | } | ||
138 | } | 148 | } |
139 | 149 | ||
140 | spin_unlock_irq(&pcd->lock); | 150 | spin_unlock_irq(&pcd->lock); |
151 | return; | ||
152 | |||
153 | remove: | ||
154 | list_del(&ce->node); | ||
155 | spin_unlock_irq(&pcd->lock); | ||
156 | |||
157 | __pm_clk_remove(ce); | ||
141 | } | 158 | } |
142 | 159 | ||
143 | /** | 160 | /** |
@@ -175,20 +192,27 @@ void pm_clk_destroy(struct device *dev) | |||
175 | { | 192 | { |
176 | struct pm_clk_data *pcd = __to_pcd(dev); | 193 | struct pm_clk_data *pcd = __to_pcd(dev); |
177 | struct pm_clock_entry *ce, *c; | 194 | struct pm_clock_entry *ce, *c; |
195 | struct list_head list; | ||
178 | 196 | ||
179 | if (!pcd) | 197 | if (!pcd) |
180 | return; | 198 | return; |
181 | 199 | ||
182 | dev->power.subsys_data = NULL; | 200 | dev->power.subsys_data = NULL; |
201 | INIT_LIST_HEAD(&list); | ||
183 | 202 | ||
184 | spin_lock_irq(&pcd->lock); | 203 | spin_lock_irq(&pcd->lock); |
185 | 204 | ||
186 | list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node) | 205 | list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node) |
187 | __pm_clk_remove(ce); | 206 | list_move(&ce->node, &list); |
188 | 207 | ||
189 | spin_unlock_irq(&pcd->lock); | 208 | spin_unlock_irq(&pcd->lock); |
190 | 209 | ||
191 | kfree(pcd); | 210 | kfree(pcd); |
211 | |||
212 | list_for_each_entry_safe_reverse(ce, c, &list, node) { | ||
213 | list_del(&ce->node); | ||
214 | __pm_clk_remove(ce); | ||
215 | } | ||
192 | } | 216 | } |
193 | 217 | ||
194 | #endif /* CONFIG_PM */ | 218 | #endif /* CONFIG_PM */ |
@@ -196,23 +220,6 @@ void pm_clk_destroy(struct device *dev) | |||
196 | #ifdef CONFIG_PM_RUNTIME | 220 | #ifdef CONFIG_PM_RUNTIME |
197 | 221 | ||
198 | /** | 222 | /** |
199 | * pm_clk_acquire - Acquire a device clock. | ||
200 | * @dev: Device whose clock is to be acquired. | ||
201 | * @con_id: Connection ID of the clock. | ||
202 | */ | ||
203 | static void pm_clk_acquire(struct device *dev, | ||
204 | struct pm_clock_entry *ce) | ||
205 | { | ||
206 | ce->clk = clk_get(dev, ce->con_id); | ||
207 | if (IS_ERR(ce->clk)) { | ||
208 | ce->status = PCE_STATUS_ERROR; | ||
209 | } else { | ||
210 | ce->status = PCE_STATUS_ACQUIRED; | ||
211 | dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id); | ||
212 | } | ||
213 | } | ||
214 | |||
215 | /** | ||
216 | * pm_clk_suspend - Disable clocks in a device's PM clock list. | 223 | * pm_clk_suspend - Disable clocks in a device's PM clock list. |
217 | * @dev: Device to disable the clocks for. | 224 | * @dev: Device to disable the clocks for. |
218 | */ | 225 | */ |
@@ -230,9 +237,6 @@ int pm_clk_suspend(struct device *dev) | |||
230 | spin_lock_irqsave(&pcd->lock, flags); | 237 | spin_lock_irqsave(&pcd->lock, flags); |
231 | 238 | ||
232 | list_for_each_entry_reverse(ce, &pcd->clock_list, node) { | 239 | list_for_each_entry_reverse(ce, &pcd->clock_list, node) { |
233 | if (ce->status == PCE_STATUS_NONE) | ||
234 | pm_clk_acquire(dev, ce); | ||
235 | |||
236 | if (ce->status < PCE_STATUS_ERROR) { | 240 | if (ce->status < PCE_STATUS_ERROR) { |
237 | clk_disable(ce->clk); | 241 | clk_disable(ce->clk); |
238 | ce->status = PCE_STATUS_ACQUIRED; | 242 | ce->status = PCE_STATUS_ACQUIRED; |
@@ -262,9 +266,6 @@ int pm_clk_resume(struct device *dev) | |||
262 | spin_lock_irqsave(&pcd->lock, flags); | 266 | spin_lock_irqsave(&pcd->lock, flags); |
263 | 267 | ||
264 | list_for_each_entry(ce, &pcd->clock_list, node) { | 268 | list_for_each_entry(ce, &pcd->clock_list, node) { |
265 | if (ce->status == PCE_STATUS_NONE) | ||
266 | pm_clk_acquire(dev, ce); | ||
267 | |||
268 | if (ce->status < PCE_STATUS_ERROR) { | 269 | if (ce->status < PCE_STATUS_ERROR) { |
269 | clk_enable(ce->clk); | 270 | clk_enable(ce->clk); |
270 | ce->status = PCE_STATUS_ENABLED; | 271 | ce->status = PCE_STATUS_ENABLED; |
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index f6595aba4f0f..fa567f1158c2 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -43,6 +43,7 @@ config TCG_NSC | |||
43 | 43 | ||
44 | config TCG_ATMEL | 44 | config TCG_ATMEL |
45 | tristate "Atmel TPM Interface" | 45 | tristate "Atmel TPM Interface" |
46 | depends on PPC64 || HAS_IOPORT | ||
46 | ---help--- | 47 | ---help--- |
47 | If you have a TPM security chip from Atmel say Yes and it | 48 | If you have a TPM security chip from Atmel say Yes and it |
48 | will be accessible from within Linux. To compile this driver | 49 | will be accessible from within Linux. To compile this driver |
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index caf8012ef47c..9ca5c021d0b6 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -383,6 +383,9 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, | |||
383 | u32 count, ordinal; | 383 | u32 count, ordinal; |
384 | unsigned long stop; | 384 | unsigned long stop; |
385 | 385 | ||
386 | if (bufsiz > TPM_BUFSIZE) | ||
387 | bufsiz = TPM_BUFSIZE; | ||
388 | |||
386 | count = be32_to_cpu(*((__be32 *) (buf + 2))); | 389 | count = be32_to_cpu(*((__be32 *) (buf + 2))); |
387 | ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); | 390 | ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); |
388 | if (count == 0) | 391 | if (count == 0) |
@@ -1102,6 +1105,7 @@ ssize_t tpm_read(struct file *file, char __user *buf, | |||
1102 | { | 1105 | { |
1103 | struct tpm_chip *chip = file->private_data; | 1106 | struct tpm_chip *chip = file->private_data; |
1104 | ssize_t ret_size; | 1107 | ssize_t ret_size; |
1108 | int rc; | ||
1105 | 1109 | ||
1106 | del_singleshot_timer_sync(&chip->user_read_timer); | 1110 | del_singleshot_timer_sync(&chip->user_read_timer); |
1107 | flush_work_sync(&chip->work); | 1111 | flush_work_sync(&chip->work); |
@@ -1112,8 +1116,11 @@ ssize_t tpm_read(struct file *file, char __user *buf, | |||
1112 | ret_size = size; | 1116 | ret_size = size; |
1113 | 1117 | ||
1114 | mutex_lock(&chip->buffer_mutex); | 1118 | mutex_lock(&chip->buffer_mutex); |
1115 | if (copy_to_user(buf, chip->data_buffer, ret_size)) | 1119 | rc = copy_to_user(buf, chip->data_buffer, ret_size); |
1120 | memset(chip->data_buffer, 0, ret_size); | ||
1121 | if (rc) | ||
1116 | ret_size = -EFAULT; | 1122 | ret_size = -EFAULT; |
1123 | |||
1117 | mutex_unlock(&chip->buffer_mutex); | 1124 | mutex_unlock(&chip->buffer_mutex); |
1118 | } | 1125 | } |
1119 | 1126 | ||
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 82facc9104c7..4d2464871ada 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -396,8 +396,6 @@ static void __exit cleanup_nsc(void) | |||
396 | if (pdev) { | 396 | if (pdev) { |
397 | tpm_nsc_remove(&pdev->dev); | 397 | tpm_nsc_remove(&pdev->dev); |
398 | platform_device_unregister(pdev); | 398 | platform_device_unregister(pdev); |
399 | kfree(pdev); | ||
400 | pdev = NULL; | ||
401 | } | 399 | } |
402 | 400 | ||
403 | platform_driver_unregister(&nsc_drv); | 401 | platform_driver_unregister(&nsc_drv); |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ce045a8cf82c..f07e4252b708 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -67,11 +67,11 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); | |||
67 | MODULE_PARM_DESC(i915_enable_rc6, | 67 | MODULE_PARM_DESC(i915_enable_rc6, |
68 | "Enable power-saving render C-state 6 (default: true)"); | 68 | "Enable power-saving render C-state 6 (default: true)"); |
69 | 69 | ||
70 | unsigned int i915_enable_fbc __read_mostly = 1; | 70 | unsigned int i915_enable_fbc __read_mostly = -1; |
71 | module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); | 71 | module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); |
72 | MODULE_PARM_DESC(i915_enable_fbc, | 72 | MODULE_PARM_DESC(i915_enable_fbc, |
73 | "Enable frame buffer compression for power savings " | 73 | "Enable frame buffer compression for power savings " |
74 | "(default: false)"); | 74 | "(default: -1 (use per-chip default))"); |
75 | 75 | ||
76 | unsigned int i915_lvds_downclock __read_mostly = 0; | 76 | unsigned int i915_lvds_downclock __read_mostly = 0; |
77 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); | 77 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 56a8554d9039..04411ad2e779 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1799,6 +1799,7 @@ static void intel_update_fbc(struct drm_device *dev) | |||
1799 | struct drm_framebuffer *fb; | 1799 | struct drm_framebuffer *fb; |
1800 | struct intel_framebuffer *intel_fb; | 1800 | struct intel_framebuffer *intel_fb; |
1801 | struct drm_i915_gem_object *obj; | 1801 | struct drm_i915_gem_object *obj; |
1802 | int enable_fbc; | ||
1802 | 1803 | ||
1803 | DRM_DEBUG_KMS("\n"); | 1804 | DRM_DEBUG_KMS("\n"); |
1804 | 1805 | ||
@@ -1839,8 +1840,15 @@ static void intel_update_fbc(struct drm_device *dev) | |||
1839 | intel_fb = to_intel_framebuffer(fb); | 1840 | intel_fb = to_intel_framebuffer(fb); |
1840 | obj = intel_fb->obj; | 1841 | obj = intel_fb->obj; |
1841 | 1842 | ||
1842 | if (!i915_enable_fbc) { | 1843 | enable_fbc = i915_enable_fbc; |
1843 | DRM_DEBUG_KMS("fbc disabled per module param (default off)\n"); | 1844 | if (enable_fbc < 0) { |
1845 | DRM_DEBUG_KMS("fbc set to per-chip default\n"); | ||
1846 | enable_fbc = 1; | ||
1847 | if (INTEL_INFO(dev)->gen <= 5) | ||
1848 | enable_fbc = 0; | ||
1849 | } | ||
1850 | if (!enable_fbc) { | ||
1851 | DRM_DEBUG_KMS("fbc disabled per module param\n"); | ||
1844 | dev_priv->no_fbc_reason = FBC_MODULE_PARAM; | 1852 | dev_priv->no_fbc_reason = FBC_MODULE_PARAM; |
1845 | goto out_disable; | 1853 | goto out_disable; |
1846 | } | 1854 | } |
@@ -4687,13 +4695,13 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, | |||
4687 | bpc = 6; /* min is 18bpp */ | 4695 | bpc = 6; /* min is 18bpp */ |
4688 | break; | 4696 | break; |
4689 | case 24: | 4697 | case 24: |
4690 | bpc = min((unsigned int)8, display_bpc); | 4698 | bpc = 8; |
4691 | break; | 4699 | break; |
4692 | case 30: | 4700 | case 30: |
4693 | bpc = min((unsigned int)10, display_bpc); | 4701 | bpc = 10; |
4694 | break; | 4702 | break; |
4695 | case 48: | 4703 | case 48: |
4696 | bpc = min((unsigned int)12, display_bpc); | 4704 | bpc = 12; |
4697 | break; | 4705 | break; |
4698 | default: | 4706 | default: |
4699 | DRM_DEBUG("unsupported depth, assuming 24 bits\n"); | 4707 | DRM_DEBUG("unsupported depth, assuming 24 bits\n"); |
@@ -4701,10 +4709,12 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, | |||
4701 | break; | 4709 | break; |
4702 | } | 4710 | } |
4703 | 4711 | ||
4712 | display_bpc = min(display_bpc, bpc); | ||
4713 | |||
4704 | DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n", | 4714 | DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n", |
4705 | bpc, display_bpc); | 4715 | bpc, display_bpc); |
4706 | 4716 | ||
4707 | *pipe_bpp = bpc * 3; | 4717 | *pipe_bpp = display_bpc * 3; |
4708 | 4718 | ||
4709 | return display_bpc != bpc; | 4719 | return display_bpc != bpc; |
4710 | } | 4720 | } |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0b2ee9d39980..fe1099d8817e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -337,9 +337,6 @@ extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, | |||
337 | struct drm_connector *connector, | 337 | struct drm_connector *connector, |
338 | struct intel_load_detect_pipe *old); | 338 | struct intel_load_detect_pipe *old); |
339 | 339 | ||
340 | extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); | ||
341 | extern int intel_sdvo_supports_hotplug(struct drm_connector *connector); | ||
342 | extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable); | ||
343 | extern void intelfb_restore(void); | 340 | extern void intelfb_restore(void); |
344 | extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | 341 | extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
345 | u16 blue, int regno); | 342 | u16 blue, int regno); |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 30fe554d8936..6348c499616f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -92,6 +92,11 @@ struct intel_sdvo { | |||
92 | */ | 92 | */ |
93 | uint16_t attached_output; | 93 | uint16_t attached_output; |
94 | 94 | ||
95 | /* | ||
96 | * Hotplug activation bits for this device | ||
97 | */ | ||
98 | uint8_t hotplug_active[2]; | ||
99 | |||
95 | /** | 100 | /** |
96 | * This is used to select the color range of RBG outputs in HDMI mode. | 101 | * This is used to select the color range of RBG outputs in HDMI mode. |
97 | * It is only valid when using TMDS encoding and 8 bit per color mode. | 102 | * It is only valid when using TMDS encoding and 8 bit per color mode. |
@@ -1208,74 +1213,20 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in | |||
1208 | return true; | 1213 | return true; |
1209 | } | 1214 | } |
1210 | 1215 | ||
1211 | /* No use! */ | 1216 | static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo) |
1212 | #if 0 | ||
1213 | struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) | ||
1214 | { | ||
1215 | struct drm_connector *connector = NULL; | ||
1216 | struct intel_sdvo *iout = NULL; | ||
1217 | struct intel_sdvo *sdvo; | ||
1218 | |||
1219 | /* find the sdvo connector */ | ||
1220 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
1221 | iout = to_intel_sdvo(connector); | ||
1222 | |||
1223 | if (iout->type != INTEL_OUTPUT_SDVO) | ||
1224 | continue; | ||
1225 | |||
1226 | sdvo = iout->dev_priv; | ||
1227 | |||
1228 | if (sdvo->sdvo_reg == SDVOB && sdvoB) | ||
1229 | return connector; | ||
1230 | |||
1231 | if (sdvo->sdvo_reg == SDVOC && !sdvoB) | ||
1232 | return connector; | ||
1233 | |||
1234 | } | ||
1235 | |||
1236 | return NULL; | ||
1237 | } | ||
1238 | |||
1239 | int intel_sdvo_supports_hotplug(struct drm_connector *connector) | ||
1240 | { | 1217 | { |
1241 | u8 response[2]; | 1218 | u8 response[2]; |
1242 | u8 status; | ||
1243 | struct intel_sdvo *intel_sdvo; | ||
1244 | DRM_DEBUG_KMS("\n"); | ||
1245 | |||
1246 | if (!connector) | ||
1247 | return 0; | ||
1248 | |||
1249 | intel_sdvo = to_intel_sdvo(connector); | ||
1250 | 1219 | ||
1251 | return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, | 1220 | return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, |
1252 | &response, 2) && response[0]; | 1221 | &response, 2) && response[0]; |
1253 | } | 1222 | } |
1254 | 1223 | ||
1255 | void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) | 1224 | static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) |
1256 | { | 1225 | { |
1257 | u8 response[2]; | 1226 | struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); |
1258 | u8 status; | ||
1259 | struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector); | ||
1260 | |||
1261 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); | ||
1262 | intel_sdvo_read_response(intel_sdvo, &response, 2); | ||
1263 | |||
1264 | if (on) { | ||
1265 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); | ||
1266 | status = intel_sdvo_read_response(intel_sdvo, &response, 2); | ||
1267 | |||
1268 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); | ||
1269 | } else { | ||
1270 | response[0] = 0; | ||
1271 | response[1] = 0; | ||
1272 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); | ||
1273 | } | ||
1274 | 1227 | ||
1275 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); | 1228 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2); |
1276 | intel_sdvo_read_response(intel_sdvo, &response, 2); | ||
1277 | } | 1229 | } |
1278 | #endif | ||
1279 | 1230 | ||
1280 | static bool | 1231 | static bool |
1281 | intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) | 1232 | intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) |
@@ -2045,6 +1996,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) | |||
2045 | { | 1996 | { |
2046 | struct drm_encoder *encoder = &intel_sdvo->base.base; | 1997 | struct drm_encoder *encoder = &intel_sdvo->base.base; |
2047 | struct drm_connector *connector; | 1998 | struct drm_connector *connector; |
1999 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); | ||
2048 | struct intel_connector *intel_connector; | 2000 | struct intel_connector *intel_connector; |
2049 | struct intel_sdvo_connector *intel_sdvo_connector; | 2001 | struct intel_sdvo_connector *intel_sdvo_connector; |
2050 | 2002 | ||
@@ -2062,7 +2014,17 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) | |||
2062 | 2014 | ||
2063 | intel_connector = &intel_sdvo_connector->base; | 2015 | intel_connector = &intel_sdvo_connector->base; |
2064 | connector = &intel_connector->base; | 2016 | connector = &intel_connector->base; |
2065 | connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; | 2017 | if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) { |
2018 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
2019 | intel_sdvo->hotplug_active[0] |= 1 << device; | ||
2020 | /* Some SDVO devices have one-shot hotplug interrupts. | ||
2021 | * Ensure that they get re-enabled when an interrupt happens. | ||
2022 | */ | ||
2023 | intel_encoder->hot_plug = intel_sdvo_enable_hotplug; | ||
2024 | intel_sdvo_enable_hotplug(intel_encoder); | ||
2025 | } | ||
2026 | else | ||
2027 | connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; | ||
2066 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; | 2028 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; |
2067 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; | 2029 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; |
2068 | 2030 | ||
@@ -2569,6 +2531,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) | |||
2569 | if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) | 2531 | if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) |
2570 | goto err; | 2532 | goto err; |
2571 | 2533 | ||
2534 | /* Set up hotplug command - note paranoia about contents of reply. | ||
2535 | * We assume that the hardware is in a sane state, and only touch | ||
2536 | * the bits we think we understand. | ||
2537 | */ | ||
2538 | intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, | ||
2539 | &intel_sdvo->hotplug_active, 2); | ||
2540 | intel_sdvo->hotplug_active[0] &= ~0x3; | ||
2541 | |||
2572 | if (intel_sdvo_output_setup(intel_sdvo, | 2542 | if (intel_sdvo_output_setup(intel_sdvo, |
2573 | intel_sdvo->caps.output_flags) != true) { | 2543 | intel_sdvo->caps.output_flags) != true) { |
2574 | DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", | 2544 | DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 7ad43c6b1db7..4da23889fea6 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -115,6 +115,7 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, | |||
115 | u8 msg[20]; | 115 | u8 msg[20]; |
116 | int msg_bytes = send_bytes + 4; | 116 | int msg_bytes = send_bytes + 4; |
117 | u8 ack; | 117 | u8 ack; |
118 | unsigned retry; | ||
118 | 119 | ||
119 | if (send_bytes > 16) | 120 | if (send_bytes > 16) |
120 | return -1; | 121 | return -1; |
@@ -125,20 +126,20 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, | |||
125 | msg[3] = (msg_bytes << 4) | (send_bytes - 1); | 126 | msg[3] = (msg_bytes << 4) | (send_bytes - 1); |
126 | memcpy(&msg[4], send, send_bytes); | 127 | memcpy(&msg[4], send, send_bytes); |
127 | 128 | ||
128 | while (1) { | 129 | for (retry = 0; retry < 4; retry++) { |
129 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, | 130 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, |
130 | msg, msg_bytes, NULL, 0, delay, &ack); | 131 | msg, msg_bytes, NULL, 0, delay, &ack); |
131 | if (ret < 0) | 132 | if (ret < 0) |
132 | return ret; | 133 | return ret; |
133 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) | 134 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) |
134 | break; | 135 | return send_bytes; |
135 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) | 136 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) |
136 | udelay(400); | 137 | udelay(400); |
137 | else | 138 | else |
138 | return -EIO; | 139 | return -EIO; |
139 | } | 140 | } |
140 | 141 | ||
141 | return send_bytes; | 142 | return -EIO; |
142 | } | 143 | } |
143 | 144 | ||
144 | static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, | 145 | static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, |
@@ -149,26 +150,29 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, | |||
149 | int msg_bytes = 4; | 150 | int msg_bytes = 4; |
150 | u8 ack; | 151 | u8 ack; |
151 | int ret; | 152 | int ret; |
153 | unsigned retry; | ||
152 | 154 | ||
153 | msg[0] = address; | 155 | msg[0] = address; |
154 | msg[1] = address >> 8; | 156 | msg[1] = address >> 8; |
155 | msg[2] = AUX_NATIVE_READ << 4; | 157 | msg[2] = AUX_NATIVE_READ << 4; |
156 | msg[3] = (msg_bytes << 4) | (recv_bytes - 1); | 158 | msg[3] = (msg_bytes << 4) | (recv_bytes - 1); |
157 | 159 | ||
158 | while (1) { | 160 | for (retry = 0; retry < 4; retry++) { |
159 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, | 161 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, |
160 | msg, msg_bytes, recv, recv_bytes, delay, &ack); | 162 | msg, msg_bytes, recv, recv_bytes, delay, &ack); |
161 | if (ret == 0) | ||
162 | return -EPROTO; | ||
163 | if (ret < 0) | 163 | if (ret < 0) |
164 | return ret; | 164 | return ret; |
165 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) | 165 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) |
166 | return ret; | 166 | return ret; |
167 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) | 167 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) |
168 | udelay(400); | 168 | udelay(400); |
169 | else if (ret == 0) | ||
170 | return -EPROTO; | ||
169 | else | 171 | else |
170 | return -EIO; | 172 | return -EIO; |
171 | } | 173 | } |
174 | |||
175 | return -EIO; | ||
172 | } | 176 | } |
173 | 177 | ||
174 | static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector, | 178 | static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector, |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e8a746712b5b..c4ffa14fb2f4 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1590,48 +1590,6 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | |||
1590 | return backend_map; | 1590 | return backend_map; |
1591 | } | 1591 | } |
1592 | 1592 | ||
1593 | static void evergreen_program_channel_remap(struct radeon_device *rdev) | ||
1594 | { | ||
1595 | u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp; | ||
1596 | |||
1597 | tmp = RREG32(MC_SHARED_CHMAP); | ||
1598 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | ||
1599 | case 0: | ||
1600 | case 1: | ||
1601 | case 2: | ||
1602 | case 3: | ||
1603 | default: | ||
1604 | /* default mapping */ | ||
1605 | mc_shared_chremap = 0x00fac688; | ||
1606 | break; | ||
1607 | } | ||
1608 | |||
1609 | switch (rdev->family) { | ||
1610 | case CHIP_HEMLOCK: | ||
1611 | case CHIP_CYPRESS: | ||
1612 | case CHIP_BARTS: | ||
1613 | tcp_chan_steer_lo = 0x54763210; | ||
1614 | tcp_chan_steer_hi = 0x0000ba98; | ||
1615 | break; | ||
1616 | case CHIP_JUNIPER: | ||
1617 | case CHIP_REDWOOD: | ||
1618 | case CHIP_CEDAR: | ||
1619 | case CHIP_PALM: | ||
1620 | case CHIP_SUMO: | ||
1621 | case CHIP_SUMO2: | ||
1622 | case CHIP_TURKS: | ||
1623 | case CHIP_CAICOS: | ||
1624 | default: | ||
1625 | tcp_chan_steer_lo = 0x76543210; | ||
1626 | tcp_chan_steer_hi = 0x0000ba98; | ||
1627 | break; | ||
1628 | } | ||
1629 | |||
1630 | WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo); | ||
1631 | WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi); | ||
1632 | WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); | ||
1633 | } | ||
1634 | |||
1635 | static void evergreen_gpu_init(struct radeon_device *rdev) | 1593 | static void evergreen_gpu_init(struct radeon_device *rdev) |
1636 | { | 1594 | { |
1637 | u32 cc_rb_backend_disable = 0; | 1595 | u32 cc_rb_backend_disable = 0; |
@@ -2078,8 +2036,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
2078 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); | 2036 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
2079 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); | 2037 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); |
2080 | 2038 | ||
2081 | evergreen_program_channel_remap(rdev); | ||
2082 | |||
2083 | num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; | 2039 | num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; |
2084 | grbm_gfx_index = INSTANCE_BROADCAST_WRITES; | 2040 | grbm_gfx_index = INSTANCE_BROADCAST_WRITES; |
2085 | 2041 | ||
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 99fbd793c08c..8c79ca97753d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -569,36 +569,6 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | |||
569 | return backend_map; | 569 | return backend_map; |
570 | } | 570 | } |
571 | 571 | ||
572 | static void cayman_program_channel_remap(struct radeon_device *rdev) | ||
573 | { | ||
574 | u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp; | ||
575 | |||
576 | tmp = RREG32(MC_SHARED_CHMAP); | ||
577 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | ||
578 | case 0: | ||
579 | case 1: | ||
580 | case 2: | ||
581 | case 3: | ||
582 | default: | ||
583 | /* default mapping */ | ||
584 | mc_shared_chremap = 0x00fac688; | ||
585 | break; | ||
586 | } | ||
587 | |||
588 | switch (rdev->family) { | ||
589 | case CHIP_CAYMAN: | ||
590 | default: | ||
591 | //tcp_chan_steer_lo = 0x54763210 | ||
592 | tcp_chan_steer_lo = 0x76543210; | ||
593 | tcp_chan_steer_hi = 0x0000ba98; | ||
594 | break; | ||
595 | } | ||
596 | |||
597 | WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo); | ||
598 | WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi); | ||
599 | WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); | ||
600 | } | ||
601 | |||
602 | static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev, | 572 | static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev, |
603 | u32 disable_mask_per_se, | 573 | u32 disable_mask_per_se, |
604 | u32 max_disable_mask_per_se, | 574 | u32 max_disable_mask_per_se, |
@@ -842,8 +812,6 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
842 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); | 812 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
843 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); | 813 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); |
844 | 814 | ||
845 | cayman_program_channel_remap(rdev); | ||
846 | |||
847 | /* primary versions */ | 815 | /* primary versions */ |
848 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 816 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
849 | WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 817 | WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 5b1837b4aacf..7fcdbbbf2979 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -773,8 +773,8 @@ int r100_copy_blit(struct radeon_device *rdev, | |||
773 | radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); | 773 | radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); |
774 | radeon_ring_write(rdev, 0); | 774 | radeon_ring_write(rdev, 0); |
775 | radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); | 775 | radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); |
776 | radeon_ring_write(rdev, cur_pages); | 776 | radeon_ring_write(rdev, num_gpu_pages); |
777 | radeon_ring_write(rdev, cur_pages); | 777 | radeon_ring_write(rdev, num_gpu_pages); |
778 | radeon_ring_write(rdev, cur_pages | (stride_pixels << 16)); | 778 | radeon_ring_write(rdev, cur_pages | (stride_pixels << 16)); |
779 | } | 779 | } |
780 | radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); | 780 | radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index c4b8741dbf58..bce63fd329d4 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -68,11 +68,11 @@ void radeon_connector_hotplug(struct drm_connector *connector) | |||
68 | if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { | 68 | if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { |
69 | int saved_dpms = connector->dpms; | 69 | int saved_dpms = connector->dpms; |
70 | 70 | ||
71 | if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) && | 71 | /* Only turn off the display it it's physically disconnected */ |
72 | radeon_dp_needs_link_train(radeon_connector)) | 72 | if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) |
73 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
74 | else | ||
75 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); | 73 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); |
74 | else if (radeon_dp_needs_link_train(radeon_connector)) | ||
75 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
76 | connector->dpms = saved_dpms; | 76 | connector->dpms = saved_dpms; |
77 | } | 77 | } |
78 | } | 78 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 3189a7efb2e9..fde25c0d65a0 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -208,23 +208,25 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, | |||
208 | int xorigin = 0, yorigin = 0; | 208 | int xorigin = 0, yorigin = 0; |
209 | int w = radeon_crtc->cursor_width; | 209 | int w = radeon_crtc->cursor_width; |
210 | 210 | ||
211 | if (x < 0) | ||
212 | xorigin = -x + 1; | ||
213 | if (y < 0) | ||
214 | yorigin = -y + 1; | ||
215 | if (xorigin >= CURSOR_WIDTH) | ||
216 | xorigin = CURSOR_WIDTH - 1; | ||
217 | if (yorigin >= CURSOR_HEIGHT) | ||
218 | yorigin = CURSOR_HEIGHT - 1; | ||
219 | |||
220 | if (ASIC_IS_AVIVO(rdev)) { | 211 | if (ASIC_IS_AVIVO(rdev)) { |
221 | int i = 0; | ||
222 | struct drm_crtc *crtc_p; | ||
223 | |||
224 | /* avivo cursor are offset into the total surface */ | 212 | /* avivo cursor are offset into the total surface */ |
225 | x += crtc->x; | 213 | x += crtc->x; |
226 | y += crtc->y; | 214 | y += crtc->y; |
227 | DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); | 215 | } |
216 | DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); | ||
217 | |||
218 | if (x < 0) { | ||
219 | xorigin = min(-x, CURSOR_WIDTH - 1); | ||
220 | x = 0; | ||
221 | } | ||
222 | if (y < 0) { | ||
223 | yorigin = min(-y, CURSOR_HEIGHT - 1); | ||
224 | y = 0; | ||
225 | } | ||
226 | |||
227 | if (ASIC_IS_AVIVO(rdev)) { | ||
228 | int i = 0; | ||
229 | struct drm_crtc *crtc_p; | ||
228 | 230 | ||
229 | /* avivo cursor image can't end on 128 pixel boundary or | 231 | /* avivo cursor image can't end on 128 pixel boundary or |
230 | * go past the end of the frame if both crtcs are enabled | 232 | * go past the end of the frame if both crtcs are enabled |
@@ -253,16 +255,12 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, | |||
253 | 255 | ||
254 | radeon_lock_cursor(crtc, true); | 256 | radeon_lock_cursor(crtc, true); |
255 | if (ASIC_IS_DCE4(rdev)) { | 257 | if (ASIC_IS_DCE4(rdev)) { |
256 | WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, | 258 | WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y); |
257 | ((xorigin ? 0 : x) << 16) | | ||
258 | (yorigin ? 0 : y)); | ||
259 | WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); | 259 | WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); |
260 | WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, | 260 | WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, |
261 | ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); | 261 | ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); |
262 | } else if (ASIC_IS_AVIVO(rdev)) { | 262 | } else if (ASIC_IS_AVIVO(rdev)) { |
263 | WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, | 263 | WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y); |
264 | ((xorigin ? 0 : x) << 16) | | ||
265 | (yorigin ? 0 : y)); | ||
266 | WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); | 264 | WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); |
267 | WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset, | 265 | WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset, |
268 | ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); | 266 | ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); |
@@ -276,8 +274,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, | |||
276 | | yorigin)); | 274 | | yorigin)); |
277 | WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset, | 275 | WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset, |
278 | (RADEON_CUR_LOCK | 276 | (RADEON_CUR_LOCK |
279 | | ((xorigin ? 0 : x) << 16) | 277 | | (x << 16) |
280 | | (yorigin ? 0 : y))); | 278 | | y)); |
281 | /* offset is from DISP(2)_BASE_ADDRESS */ | 279 | /* offset is from DISP(2)_BASE_ADDRESS */ |
282 | WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + | 280 | WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + |
283 | (yorigin * 256))); | 281 | (yorigin * 256))); |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 319d85d7e759..13690f3eb4a4 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1507,7 +1507,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1507 | switch (mode) { | 1507 | switch (mode) { |
1508 | case DRM_MODE_DPMS_ON: | 1508 | case DRM_MODE_DPMS_ON: |
1509 | args.ucAction = ATOM_ENABLE; | 1509 | args.ucAction = ATOM_ENABLE; |
1510 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 1510 | /* workaround for DVOOutputControl on some RS690 systems */ |
1511 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) { | ||
1512 | u32 reg = RREG32(RADEON_BIOS_3_SCRATCH); | ||
1513 | WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE); | ||
1514 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1515 | WREG32(RADEON_BIOS_3_SCRATCH, reg); | ||
1516 | } else | ||
1517 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1511 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 1518 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
1512 | args.ucAction = ATOM_LCD_BLON; | 1519 | args.ucAction = ATOM_LCD_BLON; |
1513 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 1520 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4720d000d440..b13c2eedc321 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -536,55 +536,6 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | |||
536 | return backend_map; | 536 | return backend_map; |
537 | } | 537 | } |
538 | 538 | ||
539 | static void rv770_program_channel_remap(struct radeon_device *rdev) | ||
540 | { | ||
541 | u32 tcp_chan_steer, mc_shared_chremap, tmp; | ||
542 | bool force_no_swizzle; | ||
543 | |||
544 | switch (rdev->family) { | ||
545 | case CHIP_RV770: | ||
546 | case CHIP_RV730: | ||
547 | force_no_swizzle = false; | ||
548 | break; | ||
549 | case CHIP_RV710: | ||
550 | case CHIP_RV740: | ||
551 | default: | ||
552 | force_no_swizzle = true; | ||
553 | break; | ||
554 | } | ||
555 | |||
556 | tmp = RREG32(MC_SHARED_CHMAP); | ||
557 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | ||
558 | case 0: | ||
559 | case 1: | ||
560 | default: | ||
561 | /* default mapping */ | ||
562 | mc_shared_chremap = 0x00fac688; | ||
563 | break; | ||
564 | case 2: | ||
565 | case 3: | ||
566 | if (force_no_swizzle) | ||
567 | mc_shared_chremap = 0x00fac688; | ||
568 | else | ||
569 | mc_shared_chremap = 0x00bbc298; | ||
570 | break; | ||
571 | } | ||
572 | |||
573 | if (rdev->family == CHIP_RV740) | ||
574 | tcp_chan_steer = 0x00ef2a60; | ||
575 | else | ||
576 | tcp_chan_steer = 0x00fac688; | ||
577 | |||
578 | /* RV770 CE has special chremap setup */ | ||
579 | if (rdev->pdev->device == 0x944e) { | ||
580 | tcp_chan_steer = 0x00b08b08; | ||
581 | mc_shared_chremap = 0x00b08b08; | ||
582 | } | ||
583 | |||
584 | WREG32(TCP_CHAN_STEER, tcp_chan_steer); | ||
585 | WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); | ||
586 | } | ||
587 | |||
588 | static void rv770_gpu_init(struct radeon_device *rdev) | 539 | static void rv770_gpu_init(struct radeon_device *rdev) |
589 | { | 540 | { |
590 | int i, j, num_qd_pipes; | 541 | int i, j, num_qd_pipes; |
@@ -785,8 +736,6 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
785 | WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); | 736 | WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); |
786 | WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); | 737 | WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); |
787 | 738 | ||
788 | rv770_program_channel_remap(rdev); | ||
789 | |||
790 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 739 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
791 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | 740 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
792 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | 741 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 411257676133..932383786642 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -36,17 +36,25 @@ | |||
36 | #include <linux/cpu.h> | 36 | #include <linux/cpu.h> |
37 | #include <linux/pci.h> | 37 | #include <linux/pci.h> |
38 | #include <linux/smp.h> | 38 | #include <linux/smp.h> |
39 | #include <linux/moduleparam.h> | ||
39 | #include <asm/msr.h> | 40 | #include <asm/msr.h> |
40 | #include <asm/processor.h> | 41 | #include <asm/processor.h> |
41 | 42 | ||
42 | #define DRVNAME "coretemp" | 43 | #define DRVNAME "coretemp" |
43 | 44 | ||
45 | /* | ||
46 | * force_tjmax only matters when TjMax can't be read from the CPU itself. | ||
47 | * When set, it replaces the driver's suboptimal heuristic. | ||
48 | */ | ||
49 | static int force_tjmax; | ||
50 | module_param_named(tjmax, force_tjmax, int, 0444); | ||
51 | MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); | ||
52 | |||
44 | #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ | 53 | #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ |
45 | #define NUM_REAL_CORES 16 /* Number of Real cores per cpu */ | 54 | #define NUM_REAL_CORES 16 /* Number of Real cores per cpu */ |
46 | #define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ | 55 | #define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ |
47 | #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ | 56 | #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ |
48 | #define MAX_THRESH_ATTRS 3 /* Maximum no of Threshold attrs */ | 57 | #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) |
49 | #define TOTAL_ATTRS (MAX_CORE_ATTRS + MAX_THRESH_ATTRS) | ||
50 | #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) | 58 | #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) |
51 | 59 | ||
52 | #ifdef CONFIG_SMP | 60 | #ifdef CONFIG_SMP |
@@ -69,8 +77,6 @@ | |||
69 | * This value is passed as "id" field to rdmsr/wrmsr functions. | 77 | * This value is passed as "id" field to rdmsr/wrmsr functions. |
70 | * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS, | 78 | * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS, |
71 | * from where the temperature values should be read. | 79 | * from where the temperature values should be read. |
72 | * @intrpt_reg: One of IA32_THERM_INTERRUPT or IA32_PACKAGE_THERM_INTERRUPT, | ||
73 | * from where the thresholds are read. | ||
74 | * @attr_size: Total number of pre-core attrs displayed in the sysfs. | 80 | * @attr_size: Total number of pre-core attrs displayed in the sysfs. |
75 | * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data. | 81 | * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data. |
76 | * Otherwise, temp_data holds coretemp data. | 82 | * Otherwise, temp_data holds coretemp data. |
@@ -79,13 +85,11 @@ | |||
79 | struct temp_data { | 85 | struct temp_data { |
80 | int temp; | 86 | int temp; |
81 | int ttarget; | 87 | int ttarget; |
82 | int tmin; | ||
83 | int tjmax; | 88 | int tjmax; |
84 | unsigned long last_updated; | 89 | unsigned long last_updated; |
85 | unsigned int cpu; | 90 | unsigned int cpu; |
86 | u32 cpu_core_id; | 91 | u32 cpu_core_id; |
87 | u32 status_reg; | 92 | u32 status_reg; |
88 | u32 intrpt_reg; | ||
89 | int attr_size; | 93 | int attr_size; |
90 | bool is_pkg_data; | 94 | bool is_pkg_data; |
91 | bool valid; | 95 | bool valid; |
@@ -143,19 +147,6 @@ static ssize_t show_crit_alarm(struct device *dev, | |||
143 | return sprintf(buf, "%d\n", (eax >> 5) & 1); | 147 | return sprintf(buf, "%d\n", (eax >> 5) & 1); |
144 | } | 148 | } |
145 | 149 | ||
146 | static ssize_t show_max_alarm(struct device *dev, | ||
147 | struct device_attribute *devattr, char *buf) | ||
148 | { | ||
149 | u32 eax, edx; | ||
150 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | ||
151 | struct platform_data *pdata = dev_get_drvdata(dev); | ||
152 | struct temp_data *tdata = pdata->core_data[attr->index]; | ||
153 | |||
154 | rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx); | ||
155 | |||
156 | return sprintf(buf, "%d\n", !!(eax & THERM_STATUS_THRESHOLD1)); | ||
157 | } | ||
158 | |||
159 | static ssize_t show_tjmax(struct device *dev, | 150 | static ssize_t show_tjmax(struct device *dev, |
160 | struct device_attribute *devattr, char *buf) | 151 | struct device_attribute *devattr, char *buf) |
161 | { | 152 | { |
@@ -174,83 +165,6 @@ static ssize_t show_ttarget(struct device *dev, | |||
174 | return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget); | 165 | return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget); |
175 | } | 166 | } |
176 | 167 | ||
177 | static ssize_t store_ttarget(struct device *dev, | ||
178 | struct device_attribute *devattr, | ||
179 | const char *buf, size_t count) | ||
180 | { | ||
181 | struct platform_data *pdata = dev_get_drvdata(dev); | ||
182 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | ||
183 | struct temp_data *tdata = pdata->core_data[attr->index]; | ||
184 | u32 eax, edx; | ||
185 | unsigned long val; | ||
186 | int diff; | ||
187 | |||
188 | if (strict_strtoul(buf, 10, &val)) | ||
189 | return -EINVAL; | ||
190 | |||
191 | /* | ||
192 | * THERM_MASK_THRESHOLD1 is 7 bits wide. Values are entered in terms | ||
193 | * of milli degree celsius. Hence don't accept val > (127 * 1000) | ||
194 | */ | ||
195 | if (val > tdata->tjmax || val > 127000) | ||
196 | return -EINVAL; | ||
197 | |||
198 | diff = (tdata->tjmax - val) / 1000; | ||
199 | |||
200 | mutex_lock(&tdata->update_lock); | ||
201 | rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx); | ||
202 | eax = (eax & ~THERM_MASK_THRESHOLD1) | | ||
203 | (diff << THERM_SHIFT_THRESHOLD1); | ||
204 | wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx); | ||
205 | tdata->ttarget = val; | ||
206 | mutex_unlock(&tdata->update_lock); | ||
207 | |||
208 | return count; | ||
209 | } | ||
210 | |||
211 | static ssize_t show_tmin(struct device *dev, | ||
212 | struct device_attribute *devattr, char *buf) | ||
213 | { | ||
214 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | ||
215 | struct platform_data *pdata = dev_get_drvdata(dev); | ||
216 | |||
217 | return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tmin); | ||
218 | } | ||
219 | |||
220 | static ssize_t store_tmin(struct device *dev, | ||
221 | struct device_attribute *devattr, | ||
222 | const char *buf, size_t count) | ||
223 | { | ||
224 | struct platform_data *pdata = dev_get_drvdata(dev); | ||
225 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | ||
226 | struct temp_data *tdata = pdata->core_data[attr->index]; | ||
227 | u32 eax, edx; | ||
228 | unsigned long val; | ||
229 | int diff; | ||
230 | |||
231 | if (strict_strtoul(buf, 10, &val)) | ||
232 | return -EINVAL; | ||
233 | |||
234 | /* | ||
235 | * THERM_MASK_THRESHOLD0 is 7 bits wide. Values are entered in terms | ||
236 | * of milli degree celsius. Hence don't accept val > (127 * 1000) | ||
237 | */ | ||
238 | if (val > tdata->tjmax || val > 127000) | ||
239 | return -EINVAL; | ||
240 | |||
241 | diff = (tdata->tjmax - val) / 1000; | ||
242 | |||
243 | mutex_lock(&tdata->update_lock); | ||
244 | rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx); | ||
245 | eax = (eax & ~THERM_MASK_THRESHOLD0) | | ||
246 | (diff << THERM_SHIFT_THRESHOLD0); | ||
247 | wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx); | ||
248 | tdata->tmin = val; | ||
249 | mutex_unlock(&tdata->update_lock); | ||
250 | |||
251 | return count; | ||
252 | } | ||
253 | |||
254 | static ssize_t show_temp(struct device *dev, | 168 | static ssize_t show_temp(struct device *dev, |
255 | struct device_attribute *devattr, char *buf) | 169 | struct device_attribute *devattr, char *buf) |
256 | { | 170 | { |
@@ -374,7 +288,6 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) | |||
374 | 288 | ||
375 | static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) | 289 | static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) |
376 | { | 290 | { |
377 | /* The 100C is default for both mobile and non mobile CPUs */ | ||
378 | int err; | 291 | int err; |
379 | u32 eax, edx; | 292 | u32 eax, edx; |
380 | u32 val; | 293 | u32 val; |
@@ -385,7 +298,8 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) | |||
385 | */ | 298 | */ |
386 | err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); | 299 | err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); |
387 | if (err) { | 300 | if (err) { |
388 | dev_warn(dev, "Unable to read TjMax from CPU.\n"); | 301 | if (c->x86_model > 0xe && c->x86_model != 0x1c) |
302 | dev_warn(dev, "Unable to read TjMax from CPU %u\n", id); | ||
389 | } else { | 303 | } else { |
390 | val = (eax >> 16) & 0xff; | 304 | val = (eax >> 16) & 0xff; |
391 | /* | 305 | /* |
@@ -393,11 +307,17 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) | |||
393 | * will be used | 307 | * will be used |
394 | */ | 308 | */ |
395 | if (val) { | 309 | if (val) { |
396 | dev_info(dev, "TjMax is %d C.\n", val); | 310 | dev_dbg(dev, "TjMax is %d degrees C\n", val); |
397 | return val * 1000; | 311 | return val * 1000; |
398 | } | 312 | } |
399 | } | 313 | } |
400 | 314 | ||
315 | if (force_tjmax) { | ||
316 | dev_notice(dev, "TjMax forced to %d degrees C by user\n", | ||
317 | force_tjmax); | ||
318 | return force_tjmax * 1000; | ||
319 | } | ||
320 | |||
401 | /* | 321 | /* |
402 | * An assumption is made for early CPUs and unreadable MSR. | 322 | * An assumption is made for early CPUs and unreadable MSR. |
403 | * NOTE: the calculated value may not be correct. | 323 | * NOTE: the calculated value may not be correct. |
@@ -414,21 +334,6 @@ static void __devinit get_ucode_rev_on_cpu(void *edx) | |||
414 | rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx); | 334 | rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx); |
415 | } | 335 | } |
416 | 336 | ||
417 | static int get_pkg_tjmax(unsigned int cpu, struct device *dev) | ||
418 | { | ||
419 | int err; | ||
420 | u32 eax, edx, val; | ||
421 | |||
422 | err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); | ||
423 | if (!err) { | ||
424 | val = (eax >> 16) & 0xff; | ||
425 | if (val) | ||
426 | return val * 1000; | ||
427 | } | ||
428 | dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu); | ||
429 | return 100000; /* Default TjMax: 100 degree celsius */ | ||
430 | } | ||
431 | |||
432 | static int create_name_attr(struct platform_data *pdata, struct device *dev) | 337 | static int create_name_attr(struct platform_data *pdata, struct device *dev) |
433 | { | 338 | { |
434 | sysfs_attr_init(&pdata->name_attr.attr); | 339 | sysfs_attr_init(&pdata->name_attr.attr); |
@@ -442,19 +347,14 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev, | |||
442 | int attr_no) | 347 | int attr_no) |
443 | { | 348 | { |
444 | int err, i; | 349 | int err, i; |
445 | static ssize_t (*rd_ptr[TOTAL_ATTRS]) (struct device *dev, | 350 | static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev, |
446 | struct device_attribute *devattr, char *buf) = { | 351 | struct device_attribute *devattr, char *buf) = { |
447 | show_label, show_crit_alarm, show_temp, show_tjmax, | 352 | show_label, show_crit_alarm, show_temp, show_tjmax, |
448 | show_max_alarm, show_ttarget, show_tmin }; | 353 | show_ttarget }; |
449 | static ssize_t (*rw_ptr[TOTAL_ATTRS]) (struct device *dev, | 354 | static const char *const names[TOTAL_ATTRS] = { |
450 | struct device_attribute *devattr, const char *buf, | ||
451 | size_t count) = { NULL, NULL, NULL, NULL, NULL, | ||
452 | store_ttarget, store_tmin }; | ||
453 | static const char *names[TOTAL_ATTRS] = { | ||
454 | "temp%d_label", "temp%d_crit_alarm", | 355 | "temp%d_label", "temp%d_crit_alarm", |
455 | "temp%d_input", "temp%d_crit", | 356 | "temp%d_input", "temp%d_crit", |
456 | "temp%d_max_alarm", "temp%d_max", | 357 | "temp%d_max" }; |
457 | "temp%d_max_hyst" }; | ||
458 | 358 | ||
459 | for (i = 0; i < tdata->attr_size; i++) { | 359 | for (i = 0; i < tdata->attr_size; i++) { |
460 | snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i], | 360 | snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i], |
@@ -462,10 +362,6 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev, | |||
462 | sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr); | 362 | sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr); |
463 | tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i]; | 363 | tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i]; |
464 | tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO; | 364 | tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO; |
465 | if (rw_ptr[i]) { | ||
466 | tdata->sd_attrs[i].dev_attr.attr.mode |= S_IWUSR; | ||
467 | tdata->sd_attrs[i].dev_attr.store = rw_ptr[i]; | ||
468 | } | ||
469 | tdata->sd_attrs[i].dev_attr.show = rd_ptr[i]; | 365 | tdata->sd_attrs[i].dev_attr.show = rd_ptr[i]; |
470 | tdata->sd_attrs[i].index = attr_no; | 366 | tdata->sd_attrs[i].index = attr_no; |
471 | err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr); | 367 | err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr); |
@@ -481,9 +377,9 @@ exit_free: | |||
481 | } | 377 | } |
482 | 378 | ||
483 | 379 | ||
484 | static int __devinit chk_ucode_version(struct platform_device *pdev) | 380 | static int __cpuinit chk_ucode_version(unsigned int cpu) |
485 | { | 381 | { |
486 | struct cpuinfo_x86 *c = &cpu_data(pdev->id); | 382 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
487 | int err; | 383 | int err; |
488 | u32 edx; | 384 | u32 edx; |
489 | 385 | ||
@@ -494,17 +390,15 @@ static int __devinit chk_ucode_version(struct platform_device *pdev) | |||
494 | */ | 390 | */ |
495 | if (c->x86_model == 0xe && c->x86_mask < 0xc) { | 391 | if (c->x86_model == 0xe && c->x86_mask < 0xc) { |
496 | /* check for microcode update */ | 392 | /* check for microcode update */ |
497 | err = smp_call_function_single(pdev->id, get_ucode_rev_on_cpu, | 393 | err = smp_call_function_single(cpu, get_ucode_rev_on_cpu, |
498 | &edx, 1); | 394 | &edx, 1); |
499 | if (err) { | 395 | if (err) { |
500 | dev_err(&pdev->dev, | 396 | pr_err("Cannot determine microcode revision of " |
501 | "Cannot determine microcode revision of " | 397 | "CPU#%u (%d)!\n", cpu, err); |
502 | "CPU#%u (%d)!\n", pdev->id, err); | ||
503 | return -ENODEV; | 398 | return -ENODEV; |
504 | } else if (edx < 0x39) { | 399 | } else if (edx < 0x39) { |
505 | dev_err(&pdev->dev, | 400 | pr_err("Errata AE18 not fixed, update BIOS or " |
506 | "Errata AE18 not fixed, update BIOS or " | 401 | "microcode of the CPU!\n"); |
507 | "microcode of the CPU!\n"); | ||
508 | return -ENODEV; | 402 | return -ENODEV; |
509 | } | 403 | } |
510 | } | 404 | } |
@@ -538,8 +432,6 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag) | |||
538 | 432 | ||
539 | tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS : | 433 | tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS : |
540 | MSR_IA32_THERM_STATUS; | 434 | MSR_IA32_THERM_STATUS; |
541 | tdata->intrpt_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_INTERRUPT : | ||
542 | MSR_IA32_THERM_INTERRUPT; | ||
543 | tdata->is_pkg_data = pkg_flag; | 435 | tdata->is_pkg_data = pkg_flag; |
544 | tdata->cpu = cpu; | 436 | tdata->cpu = cpu; |
545 | tdata->cpu_core_id = TO_CORE_ID(cpu); | 437 | tdata->cpu_core_id = TO_CORE_ID(cpu); |
@@ -548,11 +440,11 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag) | |||
548 | return tdata; | 440 | return tdata; |
549 | } | 441 | } |
550 | 442 | ||
551 | static int create_core_data(struct platform_data *pdata, | 443 | static int create_core_data(struct platform_device *pdev, |
552 | struct platform_device *pdev, | ||
553 | unsigned int cpu, int pkg_flag) | 444 | unsigned int cpu, int pkg_flag) |
554 | { | 445 | { |
555 | struct temp_data *tdata; | 446 | struct temp_data *tdata; |
447 | struct platform_data *pdata = platform_get_drvdata(pdev); | ||
556 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 448 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
557 | u32 eax, edx; | 449 | u32 eax, edx; |
558 | int err, attr_no; | 450 | int err, attr_no; |
@@ -588,25 +480,21 @@ static int create_core_data(struct platform_data *pdata, | |||
588 | goto exit_free; | 480 | goto exit_free; |
589 | 481 | ||
590 | /* We can access status register. Get Critical Temperature */ | 482 | /* We can access status register. Get Critical Temperature */ |
591 | if (pkg_flag) | 483 | tdata->tjmax = get_tjmax(c, cpu, &pdev->dev); |
592 | tdata->tjmax = get_pkg_tjmax(pdev->id, &pdev->dev); | ||
593 | else | ||
594 | tdata->tjmax = get_tjmax(c, cpu, &pdev->dev); | ||
595 | 484 | ||
596 | /* | 485 | /* |
597 | * Test if we can access the intrpt register. If so, increase the | 486 | * Read the still undocumented bits 8:15 of IA32_TEMPERATURE_TARGET. |
598 | * 'size' enough to have ttarget/tmin/max_alarm interfaces. | 487 | * The target temperature is available on older CPUs but not in this |
599 | * Initialize ttarget with bits 16:22 of MSR_IA32_THERM_INTERRUPT | 488 | * register. Atoms don't have the register at all. |
600 | */ | 489 | */ |
601 | err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx); | 490 | if (c->x86_model > 0xe && c->x86_model != 0x1c) { |
602 | if (!err) { | 491 | err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, |
603 | tdata->attr_size += MAX_THRESH_ATTRS; | 492 | &eax, &edx); |
604 | tdata->tmin = tdata->tjmax - | 493 | if (!err) { |
605 | ((eax & THERM_MASK_THRESHOLD0) >> | 494 | tdata->ttarget |
606 | THERM_SHIFT_THRESHOLD0) * 1000; | 495 | = tdata->tjmax - ((eax >> 8) & 0xff) * 1000; |
607 | tdata->ttarget = tdata->tjmax - | 496 | tdata->attr_size++; |
608 | ((eax & THERM_MASK_THRESHOLD1) >> | 497 | } |
609 | THERM_SHIFT_THRESHOLD1) * 1000; | ||
610 | } | 498 | } |
611 | 499 | ||
612 | pdata->core_data[attr_no] = tdata; | 500 | pdata->core_data[attr_no] = tdata; |
@@ -618,22 +506,20 @@ static int create_core_data(struct platform_data *pdata, | |||
618 | 506 | ||
619 | return 0; | 507 | return 0; |
620 | exit_free: | 508 | exit_free: |
509 | pdata->core_data[attr_no] = NULL; | ||
621 | kfree(tdata); | 510 | kfree(tdata); |
622 | return err; | 511 | return err; |
623 | } | 512 | } |
624 | 513 | ||
625 | static void coretemp_add_core(unsigned int cpu, int pkg_flag) | 514 | static void coretemp_add_core(unsigned int cpu, int pkg_flag) |
626 | { | 515 | { |
627 | struct platform_data *pdata; | ||
628 | struct platform_device *pdev = coretemp_get_pdev(cpu); | 516 | struct platform_device *pdev = coretemp_get_pdev(cpu); |
629 | int err; | 517 | int err; |
630 | 518 | ||
631 | if (!pdev) | 519 | if (!pdev) |
632 | return; | 520 | return; |
633 | 521 | ||
634 | pdata = platform_get_drvdata(pdev); | 522 | err = create_core_data(pdev, cpu, pkg_flag); |
635 | |||
636 | err = create_core_data(pdata, pdev, cpu, pkg_flag); | ||
637 | if (err) | 523 | if (err) |
638 | dev_err(&pdev->dev, "Adding Core %u failed\n", cpu); | 524 | dev_err(&pdev->dev, "Adding Core %u failed\n", cpu); |
639 | } | 525 | } |
@@ -657,11 +543,6 @@ static int __devinit coretemp_probe(struct platform_device *pdev) | |||
657 | struct platform_data *pdata; | 543 | struct platform_data *pdata; |
658 | int err; | 544 | int err; |
659 | 545 | ||
660 | /* Check the microcode version of the CPU */ | ||
661 | err = chk_ucode_version(pdev); | ||
662 | if (err) | ||
663 | return err; | ||
664 | |||
665 | /* Initialize the per-package data structures */ | 546 | /* Initialize the per-package data structures */ |
666 | pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL); | 547 | pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL); |
667 | if (!pdata) | 548 | if (!pdata) |
@@ -671,7 +552,7 @@ static int __devinit coretemp_probe(struct platform_device *pdev) | |||
671 | if (err) | 552 | if (err) |
672 | goto exit_free; | 553 | goto exit_free; |
673 | 554 | ||
674 | pdata->phys_proc_id = TO_PHYS_ID(pdev->id); | 555 | pdata->phys_proc_id = pdev->id; |
675 | platform_set_drvdata(pdev, pdata); | 556 | platform_set_drvdata(pdev, pdata); |
676 | 557 | ||
677 | pdata->hwmon_dev = hwmon_device_register(&pdev->dev); | 558 | pdata->hwmon_dev = hwmon_device_register(&pdev->dev); |
@@ -723,7 +604,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu) | |||
723 | 604 | ||
724 | mutex_lock(&pdev_list_mutex); | 605 | mutex_lock(&pdev_list_mutex); |
725 | 606 | ||
726 | pdev = platform_device_alloc(DRVNAME, cpu); | 607 | pdev = platform_device_alloc(DRVNAME, TO_PHYS_ID(cpu)); |
727 | if (!pdev) { | 608 | if (!pdev) { |
728 | err = -ENOMEM; | 609 | err = -ENOMEM; |
729 | pr_err("Device allocation failed\n"); | 610 | pr_err("Device allocation failed\n"); |
@@ -743,7 +624,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu) | |||
743 | } | 624 | } |
744 | 625 | ||
745 | pdev_entry->pdev = pdev; | 626 | pdev_entry->pdev = pdev; |
746 | pdev_entry->phys_proc_id = TO_PHYS_ID(cpu); | 627 | pdev_entry->phys_proc_id = pdev->id; |
747 | 628 | ||
748 | list_add_tail(&pdev_entry->list, &pdev_list); | 629 | list_add_tail(&pdev_entry->list, &pdev_list); |
749 | mutex_unlock(&pdev_list_mutex); | 630 | mutex_unlock(&pdev_list_mutex); |
@@ -804,6 +685,10 @@ static void __cpuinit get_core_online(unsigned int cpu) | |||
804 | return; | 685 | return; |
805 | 686 | ||
806 | if (!pdev) { | 687 | if (!pdev) { |
688 | /* Check the microcode version of the CPU */ | ||
689 | if (chk_ucode_version(cpu)) | ||
690 | return; | ||
691 | |||
807 | /* | 692 | /* |
808 | * Alright, we have DTS support. | 693 | * Alright, we have DTS support. |
809 | * We are bringing the _first_ core in this pkg | 694 | * We are bringing the _first_ core in this pkg |
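The coretemp hunks above derive both temperatures from MSR bit fields: TjMax comes from bits 23:16 of IA32_TEMPERATURE_TARGET (scaled to millidegrees), and the new ttarget path subtracts the offset in bits 15:8 from TjMax. A minimal userspace sketch of just that arithmetic follows; the EAX value is made up for illustration, not read from hardware.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical EAX value of MSR_IA32_TEMPERATURE_TARGET:
     * bits 23:16 hold TjMax in degrees C, bits 15:8 the target offset. */
    uint32_t eax = 0x00640A00;                         /* TjMax = 100 C, offset = 10 C */

    int tjmax   = ((eax >> 16) & 0xff) * 1000;         /* millidegrees C */
    int ttarget = tjmax - ((eax >> 8) & 0xff) * 1000;  /* TjMax minus offset */

    printf("tjmax=%d mC, ttarget=%d mC\n", tjmax, ttarget);
    return 0;
}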
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c index 257957c69d92..4f7c3fc40a89 100644 --- a/drivers/hwmon/ds620.c +++ b/drivers/hwmon/ds620.c | |||
@@ -72,7 +72,7 @@ struct ds620_data { | |||
72 | char valid; /* !=0 if following fields are valid */ | 72 | char valid; /* !=0 if following fields are valid */ |
73 | unsigned long last_updated; /* In jiffies */ | 73 | unsigned long last_updated; /* In jiffies */ |
74 | 74 | ||
75 | u16 temp[3]; /* Register values, word */ | 75 | s16 temp[3]; /* Register values, word */ |
76 | }; | 76 | }; |
77 | 77 | ||
78 | /* | 78 | /* |
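The ds620 change above turns the cached register words from u16 into s16 so negative readings sign-extend correctly when converted. A small standalone illustration of the difference; the raw word here is a simplified example, not the exact DS620 register format.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t raw = 0xF600;              /* register word encoding a negative reading */

    int as_u16 = raw;                   /* unsigned: 62976, nonsense once scaled */
    int as_s16 = (int16_t)raw;          /* signed: -2560, sign-extends as intended */

    printf("as u16: %d, as s16: %d\n", as_u16, as_s16);
    return 0;
}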
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c index 17cf1ab95521..8c2844e5691c 100644 --- a/drivers/hwmon/w83791d.c +++ b/drivers/hwmon/w83791d.c | |||
@@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_client *client, | |||
329 | struct i2c_board_info *info); | 329 | struct i2c_board_info *info); |
330 | static int w83791d_remove(struct i2c_client *client); | 330 | static int w83791d_remove(struct i2c_client *client); |
331 | 331 | ||
332 | static int w83791d_read(struct i2c_client *client, u8 register); | 332 | static int w83791d_read(struct i2c_client *client, u8 reg); |
333 | static int w83791d_write(struct i2c_client *client, u8 register, u8 value); | 333 | static int w83791d_write(struct i2c_client *client, u8 reg, u8 value); |
334 | static struct w83791d_data *w83791d_update_device(struct device *dev); | 334 | static struct w83791d_data *w83791d_update_device(struct device *dev); |
335 | 335 | ||
336 | #ifdef DEBUG | 336 | #ifdef DEBUG |
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 274798068a54..16f69be820c7 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c | |||
@@ -435,7 +435,12 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq) | |||
435 | if (!(rq->cmd_flags & REQ_FLUSH)) | 435 | if (!(rq->cmd_flags & REQ_FLUSH)) |
436 | return BLKPREP_OK; | 436 | return BLKPREP_OK; |
437 | 437 | ||
438 | cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); | 438 | if (rq->special) { |
439 | cmd = rq->special; | ||
440 | memset(cmd, 0, sizeof(*cmd)); | ||
441 | } else { | ||
442 | cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); | ||
443 | } | ||
439 | 444 | ||
440 | /* FIXME: map struct ide_taskfile on rq->cmd[] */ | 445 | /* FIXME: map struct ide_taskfile on rq->cmd[] */ |
441 | BUG_ON(cmd == NULL); | 446 | BUG_ON(cmd == NULL); |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 17bf9d95463c..6cd642aaa4de 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c | |||
@@ -287,7 +287,7 @@ void __free_ep(struct kref *kref) | |||
287 | if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { | 287 | if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { |
288 | cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); | 288 | cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); |
289 | dst_release(ep->dst); | 289 | dst_release(ep->dst); |
290 | l2t_release(L2DATA(ep->com.tdev), ep->l2t); | 290 | l2t_release(ep->com.tdev, ep->l2t); |
291 | } | 291 | } |
292 | kfree(ep); | 292 | kfree(ep); |
293 | } | 293 | } |
@@ -1178,7 +1178,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |||
1178 | release_tid(ep->com.tdev, GET_TID(rpl), NULL); | 1178 | release_tid(ep->com.tdev, GET_TID(rpl), NULL); |
1179 | cxgb3_free_atid(ep->com.tdev, ep->atid); | 1179 | cxgb3_free_atid(ep->com.tdev, ep->atid); |
1180 | dst_release(ep->dst); | 1180 | dst_release(ep->dst); |
1181 | l2t_release(L2DATA(ep->com.tdev), ep->l2t); | 1181 | l2t_release(ep->com.tdev, ep->l2t); |
1182 | put_ep(&ep->com); | 1182 | put_ep(&ep->com); |
1183 | return CPL_RET_BUF_DONE; | 1183 | return CPL_RET_BUF_DONE; |
1184 | } | 1184 | } |
@@ -1377,7 +1377,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |||
1377 | if (!child_ep) { | 1377 | if (!child_ep) { |
1378 | printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", | 1378 | printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", |
1379 | __func__); | 1379 | __func__); |
1380 | l2t_release(L2DATA(tdev), l2t); | 1380 | l2t_release(tdev, l2t); |
1381 | dst_release(dst); | 1381 | dst_release(dst); |
1382 | goto reject; | 1382 | goto reject; |
1383 | } | 1383 | } |
@@ -1956,7 +1956,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
1956 | if (!err) | 1956 | if (!err) |
1957 | goto out; | 1957 | goto out; |
1958 | 1958 | ||
1959 | l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t); | 1959 | l2t_release(h->rdev.t3cdev_p, ep->l2t); |
1960 | fail4: | 1960 | fail4: |
1961 | dst_release(ep->dst); | 1961 | dst_release(ep->dst); |
1962 | fail3: | 1962 | fail3: |
@@ -2127,7 +2127,7 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, | |||
2127 | PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new, | 2127 | PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new, |
2128 | l2t); | 2128 | l2t); |
2129 | dst_hold(new); | 2129 | dst_hold(new); |
2130 | l2t_release(L2DATA(ep->com.tdev), ep->l2t); | 2130 | l2t_release(ep->com.tdev, ep->l2t); |
2131 | ep->l2t = l2t; | 2131 | ep->l2t = l2t; |
2132 | dst_release(old); | 2132 | dst_release(old); |
2133 | ep->dst = new; | 2133 | ep->dst = new; |
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 0dc97ec15c28..9dea71849f40 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c | |||
@@ -1124,11 +1124,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, | |||
1124 | for (i = 0; i < 8; i++) | 1124 | for (i = 0; i < 8; i++) |
1125 | __set_bit(BTN_0 + i, input_dev->keybit); | 1125 | __set_bit(BTN_0 + i, input_dev->keybit); |
1126 | 1126 | ||
1127 | if (wacom_wac->features.type != WACOM_21UX2) { | 1127 | input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); |
1128 | input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); | 1128 | input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); |
1129 | input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); | ||
1130 | } | ||
1131 | |||
1132 | input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); | 1129 | input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); |
1133 | 1130 | ||
1134 | __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); | 1131 | __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 49da55c1528a..8c2a000cf3f5 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -1698,6 +1698,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
1698 | } | 1698 | } |
1699 | 1699 | ||
1700 | ti->num_flush_requests = 1; | 1700 | ti->num_flush_requests = 1; |
1701 | ti->discard_zeroes_data_unsupported = 1; | ||
1702 | |||
1701 | return 0; | 1703 | return 0; |
1702 | 1704 | ||
1703 | bad: | 1705 | bad: |
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 89f73ca22cfa..f84c08029b21 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c | |||
@@ -81,8 +81,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, | |||
81 | * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags> | 81 | * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags> |
82 | */ | 82 | */ |
83 | if (!strcasecmp(arg_name, "corrupt_bio_byte")) { | 83 | if (!strcasecmp(arg_name, "corrupt_bio_byte")) { |
84 | if (!argc) | 84 | if (!argc) { |
85 | ti->error = "Feature corrupt_bio_byte requires parameters"; | 85 | ti->error = "Feature corrupt_bio_byte requires parameters"; |
86 | return -EINVAL; | ||
87 | } | ||
86 | 88 | ||
87 | r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error); | 89 | r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error); |
88 | if (r) | 90 | if (r) |
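The dm-flakey fix above adds the missing error return (and the braces it requires): previously the error string was set but parsing fell through and continued with no arguments. A tiny standalone sketch of the corrected shape, using hypothetical names.

#include <stdio.h>

/* Hypothetical parser modelled on the hunk above. */
static int parse(int argc)
{
    if (!argc) {                        /* braces needed once there are two statements */
        fprintf(stderr, "feature requires parameters\n");
        return -1;                      /* without this return, parsing continued anyway */
    }

    printf("parsing %d arguments\n", argc);
    return 0;
}

int main(void)
{
    parse(0);
    parse(2);
    return 0;
}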
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index a002dd85db1e..86df8b2cf927 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
@@ -449,7 +449,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv, | |||
449 | rs->ti->error = "write_mostly option is only valid for RAID1"; | 449 | rs->ti->error = "write_mostly option is only valid for RAID1"; |
450 | return -EINVAL; | 450 | return -EINVAL; |
451 | } | 451 | } |
452 | if (value > rs->md.raid_disks) { | 452 | if (value >= rs->md.raid_disks) { |
453 | rs->ti->error = "Invalid write_mostly drive index given"; | 453 | rs->ti->error = "Invalid write_mostly drive index given"; |
454 | return -EINVAL; | 454 | return -EINVAL; |
455 | } | 455 | } |
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 986b8754bb08..bc04518e9d8b 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
@@ -1238,14 +1238,15 @@ static void dm_table_set_integrity(struct dm_table *t) | |||
1238 | return; | 1238 | return; |
1239 | 1239 | ||
1240 | template_disk = dm_table_get_integrity_disk(t, true); | 1240 | template_disk = dm_table_get_integrity_disk(t, true); |
1241 | if (!template_disk && | 1241 | if (template_disk) |
1242 | blk_integrity_is_initialized(dm_disk(t->md))) { | 1242 | blk_integrity_register(dm_disk(t->md), |
1243 | blk_get_integrity(template_disk)); | ||
1244 | else if (blk_integrity_is_initialized(dm_disk(t->md))) | ||
1243 | DMWARN("%s: device no longer has a valid integrity profile", | 1245 | DMWARN("%s: device no longer has a valid integrity profile", |
1244 | dm_device_name(t->md)); | 1246 | dm_device_name(t->md)); |
1245 | return; | 1247 | else |
1246 | } | 1248 | DMWARN("%s: unable to establish an integrity profile", |
1247 | blk_integrity_register(dm_disk(t->md), | 1249 | dm_device_name(t->md)); |
1248 | blk_get_integrity(template_disk)); | ||
1249 | } | 1250 | } |
1250 | 1251 | ||
1251 | static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, | 1252 | static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, |
@@ -1282,6 +1283,22 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush) | |||
1282 | return 0; | 1283 | return 0; |
1283 | } | 1284 | } |
1284 | 1285 | ||
1286 | static bool dm_table_discard_zeroes_data(struct dm_table *t) | ||
1287 | { | ||
1288 | struct dm_target *ti; | ||
1289 | unsigned i = 0; | ||
1290 | |||
1291 | /* Ensure that all targets support discard_zeroes_data. */ | ||
1292 | while (i < dm_table_get_num_targets(t)) { | ||
1293 | ti = dm_table_get_target(t, i++); | ||
1294 | |||
1295 | if (ti->discard_zeroes_data_unsupported) | ||
1296 | return 0; | ||
1297 | } | ||
1298 | |||
1299 | return 1; | ||
1300 | } | ||
1301 | |||
1285 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, | 1302 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, |
1286 | struct queue_limits *limits) | 1303 | struct queue_limits *limits) |
1287 | { | 1304 | { |
@@ -1304,6 +1321,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, | |||
1304 | } | 1321 | } |
1305 | blk_queue_flush(q, flush); | 1322 | blk_queue_flush(q, flush); |
1306 | 1323 | ||
1324 | if (!dm_table_discard_zeroes_data(t)) | ||
1325 | q->limits.discard_zeroes_data = 0; | ||
1326 | |||
1307 | dm_table_set_integrity(t); | 1327 | dm_table_set_integrity(t); |
1308 | 1328 | ||
1309 | /* | 1329 | /* |
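dm_table_discard_zeroes_data() above is effectively an AND across all targets: if any target sets discard_zeroes_data_unsupported (as dm-crypt now does earlier in this series), the whole table drops the flag and the queue limit is cleared. A minimal standalone model of that aggregation, with invented target data.

#include <stdbool.h>
#include <stdio.h>

struct target { bool discard_zeroes_data_unsupported; };

/* Table-wide capability is the AND of the per-target capabilities. */
static bool table_discard_zeroes_data(const struct target *t, int n)
{
    for (int i = 0; i < n; i++)
        if (t[i].discard_zeroes_data_unsupported)
            return false;
    return true;
}

int main(void)
{
    struct target tbl[] = { { false }, { true }, { false } };  /* one target opts out */

    printf("discard_zeroes_data: %d\n",
           table_discard_zeroes_data(tbl, 3));  /* prints 0 */
    return 0;
}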
diff --git a/drivers/md/md.c b/drivers/md/md.c index 5404b2295820..5c95ccb59500 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -61,6 +61,11 @@ | |||
61 | static void autostart_arrays(int part); | 61 | static void autostart_arrays(int part); |
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | /* pers_list is a list of registered personalities protected | ||
65 | * by pers_lock. | ||
66 | * pers_lock also protects accesses to mddev->thread | ||
67 | * when the mutex cannot be held. | ||
68 | */ | ||
64 | static LIST_HEAD(pers_list); | 69 | static LIST_HEAD(pers_list); |
65 | static DEFINE_SPINLOCK(pers_lock); | 70 | static DEFINE_SPINLOCK(pers_lock); |
66 | 71 | ||
@@ -739,7 +744,12 @@ static void mddev_unlock(mddev_t * mddev) | |||
739 | } else | 744 | } else |
740 | mutex_unlock(&mddev->reconfig_mutex); | 745 | mutex_unlock(&mddev->reconfig_mutex); |
741 | 746 | ||
747 | /* As we've dropped the mutex we need a spinlock to | ||
748 | * make sure the thread doesn't disappear | ||
749 | */ | ||
750 | spin_lock(&pers_lock); | ||
742 | md_wakeup_thread(mddev->thread); | 751 | md_wakeup_thread(mddev->thread); |
752 | spin_unlock(&pers_lock); | ||
743 | } | 753 | } |
744 | 754 | ||
745 | static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) | 755 | static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) |
@@ -6429,11 +6439,18 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, | |||
6429 | return thread; | 6439 | return thread; |
6430 | } | 6440 | } |
6431 | 6441 | ||
6432 | void md_unregister_thread(mdk_thread_t *thread) | 6442 | void md_unregister_thread(mdk_thread_t **threadp) |
6433 | { | 6443 | { |
6444 | mdk_thread_t *thread = *threadp; | ||
6434 | if (!thread) | 6445 | if (!thread) |
6435 | return; | 6446 | return; |
6436 | dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); | 6447 | dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); |
6448 | /* Locking ensures that mddev_unlock does not wake_up a | ||
6449 | * non-existent thread | ||
6450 | */ | ||
6451 | spin_lock(&pers_lock); | ||
6452 | *threadp = NULL; | ||
6453 | spin_unlock(&pers_lock); | ||
6437 | 6454 | ||
6438 | kthread_stop(thread->tsk); | 6455 | kthread_stop(thread->tsk); |
6439 | kfree(thread); | 6456 | kfree(thread); |
@@ -7340,8 +7357,7 @@ static void reap_sync_thread(mddev_t *mddev) | |||
7340 | mdk_rdev_t *rdev; | 7357 | mdk_rdev_t *rdev; |
7341 | 7358 | ||
7342 | /* resync has finished, collect result */ | 7359 | /* resync has finished, collect result */ |
7343 | md_unregister_thread(mddev->sync_thread); | 7360 | md_unregister_thread(&mddev->sync_thread); |
7344 | mddev->sync_thread = NULL; | ||
7345 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && | 7361 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && |
7346 | !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { | 7362 | !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { |
7347 | /* success...*/ | 7363 | /* success...*/ |
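The md changes above make md_unregister_thread() take a pointer to the thread pointer so it can clear the shared pointer under pers_lock before stopping the thread; mddev_unlock() now wakes the thread under the same lock, so it can never dereference a freed thread. Below is a rough userspace model of that idiom using pthreads; the names only loosely mirror the patch and the worker is, of course, not a kthread.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct worker {
    pthread_t tsk;
    int stop;
};

static pthread_mutex_t pers_lock = PTHREAD_MUTEX_INITIALIZER;
static struct worker *shared_thread;    /* plays the role of mddev->thread */

static void *worker_fn(void *arg)
{
    struct worker *w = arg;
    while (!__atomic_load_n(&w->stop, __ATOMIC_ACQUIRE))
        usleep(1000);
    return NULL;
}

/* Analogue of the md_wakeup_thread() call in mddev_unlock(): only touch the
 * shared pointer while holding the lock. */
static void wake_worker(void)
{
    pthread_mutex_lock(&pers_lock);
    if (shared_thread)
        printf("waking worker\n");
    pthread_mutex_unlock(&pers_lock);
}

/* Analogue of md_unregister_thread(&mddev->thread). */
static void unregister_worker(struct worker **wp)
{
    struct worker *w = *wp;
    if (!w)
        return;
    pthread_mutex_lock(&pers_lock);
    *wp = NULL;                         /* wakers now see NULL, never a stale pointer */
    pthread_mutex_unlock(&pers_lock);
    __atomic_store_n(&w->stop, 1, __ATOMIC_RELEASE);
    pthread_join(w->tsk, NULL);         /* kthread_stop() equivalent */
    free(w);
}

int main(void)
{
    shared_thread = calloc(1, sizeof(*shared_thread));
    pthread_create(&shared_thread->tsk, NULL, worker_fn, shared_thread);
    wake_worker();
    unregister_worker(&shared_thread);
    wake_worker();                      /* safe: pointer already cleared */
    return 0;
}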
diff --git a/drivers/md/md.h b/drivers/md/md.h index 1e586bb4452e..0a309dc29b45 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -560,7 +560,7 @@ extern int register_md_personality(struct mdk_personality *p); | |||
560 | extern int unregister_md_personality(struct mdk_personality *p); | 560 | extern int unregister_md_personality(struct mdk_personality *p); |
561 | extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev), | 561 | extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev), |
562 | mddev_t *mddev, const char *name); | 562 | mddev_t *mddev, const char *name); |
563 | extern void md_unregister_thread(mdk_thread_t *thread); | 563 | extern void md_unregister_thread(mdk_thread_t **threadp); |
564 | extern void md_wakeup_thread(mdk_thread_t *thread); | 564 | extern void md_wakeup_thread(mdk_thread_t *thread); |
565 | extern void md_check_recovery(mddev_t *mddev); | 565 | extern void md_check_recovery(mddev_t *mddev); |
566 | extern void md_write_start(mddev_t *mddev, struct bio *bi); | 566 | extern void md_write_start(mddev_t *mddev, struct bio *bi); |
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 3535c23af288..d5b5fb300171 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -514,8 +514,7 @@ static int multipath_stop (mddev_t *mddev) | |||
514 | { | 514 | { |
515 | multipath_conf_t *conf = mddev->private; | 515 | multipath_conf_t *conf = mddev->private; |
516 | 516 | ||
517 | md_unregister_thread(mddev->thread); | 517 | md_unregister_thread(&mddev->thread); |
518 | mddev->thread = NULL; | ||
519 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ | 518 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ |
520 | mempool_destroy(conf->pool); | 519 | mempool_destroy(conf->pool); |
521 | kfree(conf->multipaths); | 520 | kfree(conf->multipaths); |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f4622dd8fc59..d9587dffe533 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -2562,8 +2562,7 @@ static int stop(mddev_t *mddev) | |||
2562 | raise_barrier(conf); | 2562 | raise_barrier(conf); |
2563 | lower_barrier(conf); | 2563 | lower_barrier(conf); |
2564 | 2564 | ||
2565 | md_unregister_thread(mddev->thread); | 2565 | md_unregister_thread(&mddev->thread); |
2566 | mddev->thread = NULL; | ||
2567 | if (conf->r1bio_pool) | 2566 | if (conf->r1bio_pool) |
2568 | mempool_destroy(conf->r1bio_pool); | 2567 | mempool_destroy(conf->r1bio_pool); |
2569 | kfree(conf->mirrors); | 2568 | kfree(conf->mirrors); |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index d7a8468ddeab..0cd9672cf9cb 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -2955,7 +2955,7 @@ static int run(mddev_t *mddev) | |||
2955 | return 0; | 2955 | return 0; |
2956 | 2956 | ||
2957 | out_free_conf: | 2957 | out_free_conf: |
2958 | md_unregister_thread(mddev->thread); | 2958 | md_unregister_thread(&mddev->thread); |
2959 | if (conf->r10bio_pool) | 2959 | if (conf->r10bio_pool) |
2960 | mempool_destroy(conf->r10bio_pool); | 2960 | mempool_destroy(conf->r10bio_pool); |
2961 | safe_put_page(conf->tmppage); | 2961 | safe_put_page(conf->tmppage); |
@@ -2973,8 +2973,7 @@ static int stop(mddev_t *mddev) | |||
2973 | raise_barrier(conf, 0); | 2973 | raise_barrier(conf, 0); |
2974 | lower_barrier(conf); | 2974 | lower_barrier(conf); |
2975 | 2975 | ||
2976 | md_unregister_thread(mddev->thread); | 2976 | md_unregister_thread(&mddev->thread); |
2977 | mddev->thread = NULL; | ||
2978 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ | 2977 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ |
2979 | if (conf->r10bio_pool) | 2978 | if (conf->r10bio_pool) |
2980 | mempool_destroy(conf->r10bio_pool); | 2979 | mempool_destroy(conf->r10bio_pool); |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 43709fa6b6df..ac5e8b57e50f 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -4941,8 +4941,7 @@ static int run(mddev_t *mddev) | |||
4941 | 4941 | ||
4942 | return 0; | 4942 | return 0; |
4943 | abort: | 4943 | abort: |
4944 | md_unregister_thread(mddev->thread); | 4944 | md_unregister_thread(&mddev->thread); |
4945 | mddev->thread = NULL; | ||
4946 | if (conf) { | 4945 | if (conf) { |
4947 | print_raid5_conf(conf); | 4946 | print_raid5_conf(conf); |
4948 | free_conf(conf); | 4947 | free_conf(conf); |
@@ -4956,8 +4955,7 @@ static int stop(mddev_t *mddev) | |||
4956 | { | 4955 | { |
4957 | raid5_conf_t *conf = mddev->private; | 4956 | raid5_conf_t *conf = mddev->private; |
4958 | 4957 | ||
4959 | md_unregister_thread(mddev->thread); | 4958 | md_unregister_thread(&mddev->thread); |
4960 | mddev->thread = NULL; | ||
4961 | if (mddev->queue) | 4959 | if (mddev->queue) |
4962 | mddev->queue->backing_dev_info.congested_fn = NULL; | 4960 | mddev->queue->backing_dev_info.congested_fn = NULL; |
4963 | free_conf(conf); | 4961 | free_conf(conf); |
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c index b5ef36222440..b3a5ecdb33ac 100644 --- a/drivers/media/video/omap/omap_vout.c +++ b/drivers/media/video/omap/omap_vout.c | |||
@@ -2194,19 +2194,6 @@ static int __init omap_vout_probe(struct platform_device *pdev) | |||
2194 | "'%s' Display already enabled\n", | 2194 | "'%s' Display already enabled\n", |
2195 | def_display->name); | 2195 | def_display->name); |
2196 | } | 2196 | } |
2197 | /* set the update mode */ | ||
2198 | if (def_display->caps & | ||
2199 | OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { | ||
2200 | if (dssdrv->enable_te) | ||
2201 | dssdrv->enable_te(def_display, 0); | ||
2202 | if (dssdrv->set_update_mode) | ||
2203 | dssdrv->set_update_mode(def_display, | ||
2204 | OMAP_DSS_UPDATE_MANUAL); | ||
2205 | } else { | ||
2206 | if (dssdrv->set_update_mode) | ||
2207 | dssdrv->set_update_mode(def_display, | ||
2208 | OMAP_DSS_UPDATE_AUTO); | ||
2209 | } | ||
2210 | } | 2197 | } |
2211 | } | 2198 | } |
2212 | 2199 | ||
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c index 9d3459de04b2..80796eb0c53e 100644 --- a/drivers/media/video/omap3isp/ispccdc.c +++ b/drivers/media/video/omap3isp/ispccdc.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/dma-mapping.h> | 31 | #include <linux/dma-mapping.h> |
32 | #include <linux/mm.h> | 32 | #include <linux/mm.h> |
33 | #include <linux/sched.h> | 33 | #include <linux/sched.h> |
34 | #include <linux/slab.h> | ||
34 | #include <media/v4l2-event.h> | 35 | #include <media/v4l2-event.h> |
35 | 36 | ||
36 | #include "isp.h" | 37 | #include "isp.h" |
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c index d29f9c2d0854..e4100b1f68df 100644 --- a/drivers/media/video/uvc/uvc_driver.c +++ b/drivers/media/video/uvc/uvc_driver.c | |||
@@ -1961,7 +1961,7 @@ static int __uvc_resume(struct usb_interface *intf, int reset) | |||
1961 | 1961 | ||
1962 | list_for_each_entry(stream, &dev->streams, list) { | 1962 | list_for_each_entry(stream, &dev->streams, list) { |
1963 | if (stream->intf == intf) | 1963 | if (stream->intf == intf) |
1964 | return uvc_video_resume(stream); | 1964 | return uvc_video_resume(stream, reset); |
1965 | } | 1965 | } |
1966 | 1966 | ||
1967 | uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface " | 1967 | uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface " |
diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c index 48fea373c25a..29e239911d0e 100644 --- a/drivers/media/video/uvc/uvc_entity.c +++ b/drivers/media/video/uvc/uvc_entity.c | |||
@@ -49,7 +49,7 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain, | |||
49 | if (remote == NULL) | 49 | if (remote == NULL) |
50 | return -EINVAL; | 50 | return -EINVAL; |
51 | 51 | ||
52 | source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING) | 52 | source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING) |
53 | ? (remote->vdev ? &remote->vdev->entity : NULL) | 53 | ? (remote->vdev ? &remote->vdev->entity : NULL) |
54 | : &remote->subdev.entity; | 54 | : &remote->subdev.entity; |
55 | if (source == NULL) | 55 | if (source == NULL) |
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c index 8244167c8915..ffd1158628b6 100644 --- a/drivers/media/video/uvc/uvc_video.c +++ b/drivers/media/video/uvc/uvc_video.c | |||
@@ -1104,10 +1104,18 @@ int uvc_video_suspend(struct uvc_streaming *stream) | |||
1104 | * buffers, making sure userspace applications are notified of the problem | 1104 | * buffers, making sure userspace applications are notified of the problem |
1105 | * instead of waiting forever. | 1105 | * instead of waiting forever. |
1106 | */ | 1106 | */ |
1107 | int uvc_video_resume(struct uvc_streaming *stream) | 1107 | int uvc_video_resume(struct uvc_streaming *stream, int reset) |
1108 | { | 1108 | { |
1109 | int ret; | 1109 | int ret; |
1110 | 1110 | ||
1111 | /* If the bus has been reset on resume, set the alternate setting to 0. | ||
1112 | * This should be the default value, but some devices crash or otherwise | ||
1113 | * misbehave if they don't receive a SET_INTERFACE request before any | ||
1114 | * other video control request. | ||
1115 | */ | ||
1116 | if (reset) | ||
1117 | usb_set_interface(stream->dev->udev, stream->intfnum, 0); | ||
1118 | |||
1111 | stream->frozen = 0; | 1119 | stream->frozen = 0; |
1112 | 1120 | ||
1113 | ret = uvc_commit_video(stream, &stream->ctrl); | 1121 | ret = uvc_commit_video(stream, &stream->ctrl); |
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h index df32a43ca86a..cbdd49bf8b67 100644 --- a/drivers/media/video/uvc/uvcvideo.h +++ b/drivers/media/video/uvc/uvcvideo.h | |||
@@ -638,7 +638,7 @@ extern void uvc_mc_cleanup_entity(struct uvc_entity *entity); | |||
638 | /* Video */ | 638 | /* Video */ |
639 | extern int uvc_video_init(struct uvc_streaming *stream); | 639 | extern int uvc_video_init(struct uvc_streaming *stream); |
640 | extern int uvc_video_suspend(struct uvc_streaming *stream); | 640 | extern int uvc_video_suspend(struct uvc_streaming *stream); |
641 | extern int uvc_video_resume(struct uvc_streaming *stream); | 641 | extern int uvc_video_resume(struct uvc_streaming *stream, int reset); |
642 | extern int uvc_video_enable(struct uvc_streaming *stream, int enable); | 642 | extern int uvc_video_enable(struct uvc_streaming *stream, int enable); |
643 | extern int uvc_probe_video(struct uvc_streaming *stream, | 643 | extern int uvc_probe_video(struct uvc_streaming *stream, |
644 | struct uvc_streaming_control *probe); | 644 | struct uvc_streaming_control *probe); |
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c index 06f14008b346..d72156517726 100644 --- a/drivers/media/video/v4l2-dev.c +++ b/drivers/media/video/v4l2-dev.c | |||
@@ -173,6 +173,17 @@ static void v4l2_device_release(struct device *cd) | |||
173 | media_device_unregister_entity(&vdev->entity); | 173 | media_device_unregister_entity(&vdev->entity); |
174 | #endif | 174 | #endif |
175 | 175 | ||
176 | /* Do not call v4l2_device_put if there is no release callback set. | ||
177 | * Drivers that have no v4l2_device release callback might free the | ||
178 | * v4l2_dev instance in the video_device release callback below, so we | ||
179 | * must perform this check here. | ||
180 | * | ||
181 | * TODO: In the long run all drivers that use v4l2_device should use the | ||
182 | * v4l2_device release callback. This check will then be unnecessary. | ||
183 | */ | ||
184 | if (v4l2_dev->release == NULL) | ||
185 | v4l2_dev = NULL; | ||
186 | |||
176 | /* Release video_device and perform other | 187 | /* Release video_device and perform other |
177 | cleanups as needed. */ | 188 | cleanups as needed. */ |
178 | vdev->release(vdev); | 189 | vdev->release(vdev); |
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c index c72856c41434..e6a2c3b302d4 100644 --- a/drivers/media/video/v4l2-device.c +++ b/drivers/media/video/v4l2-device.c | |||
@@ -38,6 +38,7 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev) | |||
38 | mutex_init(&v4l2_dev->ioctl_lock); | 38 | mutex_init(&v4l2_dev->ioctl_lock); |
39 | v4l2_prio_init(&v4l2_dev->prio); | 39 | v4l2_prio_init(&v4l2_dev->prio); |
40 | kref_init(&v4l2_dev->ref); | 40 | kref_init(&v4l2_dev->ref); |
41 | get_device(dev); | ||
41 | v4l2_dev->dev = dev; | 42 | v4l2_dev->dev = dev; |
42 | if (dev == NULL) { | 43 | if (dev == NULL) { |
43 | /* If dev == NULL, then name must be filled in by the caller */ | 44 | /* If dev == NULL, then name must be filled in by the caller */ |
@@ -93,6 +94,7 @@ void v4l2_device_disconnect(struct v4l2_device *v4l2_dev) | |||
93 | 94 | ||
94 | if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev) | 95 | if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev) |
95 | dev_set_drvdata(v4l2_dev->dev, NULL); | 96 | dev_set_drvdata(v4l2_dev->dev, NULL); |
97 | put_device(v4l2_dev->dev); | ||
96 | v4l2_dev->dev = NULL; | 98 | v4l2_dev->dev = NULL; |
97 | } | 99 | } |
98 | EXPORT_SYMBOL_GPL(v4l2_device_disconnect); | 100 | EXPORT_SYMBOL_GPL(v4l2_device_disconnect); |
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c index 21131c7b0f1e..563654c9b19e 100644 --- a/drivers/mfd/jz4740-adc.c +++ b/drivers/mfd/jz4740-adc.c | |||
@@ -273,7 +273,7 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev) | |||
273 | ct->regs.ack = JZ_REG_ADC_STATUS; | 273 | ct->regs.ack = JZ_REG_ADC_STATUS; |
274 | ct->chip.irq_mask = irq_gc_mask_set_bit; | 274 | ct->chip.irq_mask = irq_gc_mask_set_bit; |
275 | ct->chip.irq_unmask = irq_gc_mask_clr_bit; | 275 | ct->chip.irq_unmask = irq_gc_mask_clr_bit; |
276 | ct->chip.irq_ack = irq_gc_ack; | 276 | ct->chip.irq_ack = irq_gc_ack_set_bit; |
277 | 277 | ||
278 | irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL); | 278 | irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL); |
279 | 279 | ||
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c index b928bc14e97b..8b51cd62d067 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d.c +++ b/drivers/misc/lis3lv02d/lis3lv02d.c | |||
@@ -375,12 +375,14 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3) | |||
375 | * both have been read. So the value read will always be correct. | 375 | * both have been read. So the value read will always be correct. |
376 | * Set BOOT bit to refresh factory tuning values. | 376 | * Set BOOT bit to refresh factory tuning values. |
377 | */ | 377 | */ |
378 | lis3->read(lis3, CTRL_REG2, ®); | 378 | if (lis3->pdata) { |
379 | if (lis3->whoami == WAI_12B) | 379 | lis3->read(lis3, CTRL_REG2, ®); |
380 | reg |= CTRL2_BDU | CTRL2_BOOT; | 380 | if (lis3->whoami == WAI_12B) |
381 | else | 381 | reg |= CTRL2_BDU | CTRL2_BOOT; |
382 | reg |= CTRL2_BOOT_8B; | 382 | else |
383 | lis3->write(lis3, CTRL_REG2, reg); | 383 | reg |= CTRL2_BOOT_8B; |
384 | lis3->write(lis3, CTRL_REG2, reg); | ||
385 | } | ||
384 | 386 | ||
385 | /* LIS3 power on delay is quite long */ | 387 | /* LIS3 power on delay is quite long */ |
386 | msleep(lis3->pwron_delay / lis3lv02d_get_odr()); | 388 | msleep(lis3->pwron_delay / lis3lv02d_get_odr()); |
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index a047eb973e3b..47b928ed08f8 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
@@ -2168,7 +2168,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work) | |||
2168 | } | 2168 | } |
2169 | 2169 | ||
2170 | re_arm: | 2170 | re_arm: |
2171 | queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); | 2171 | if (!bond->kill_timers) |
2172 | queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); | ||
2172 | out: | 2173 | out: |
2173 | read_unlock(&bond->lock); | 2174 | read_unlock(&bond->lock); |
2174 | } | 2175 | } |
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 7f8b20a34ee3..d4fbd2e62616 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -1440,7 +1440,8 @@ void bond_alb_monitor(struct work_struct *work) | |||
1440 | } | 1440 | } |
1441 | 1441 | ||
1442 | re_arm: | 1442 | re_arm: |
1443 | queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks); | 1443 | if (!bond->kill_timers) |
1444 | queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks); | ||
1444 | out: | 1445 | out: |
1445 | read_unlock(&bond->lock); | 1446 | read_unlock(&bond->lock); |
1446 | } | 1447 | } |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 1dcb07ce5263..6191e6337284 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -774,6 +774,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) | |||
774 | 774 | ||
775 | read_lock(&bond->lock); | 775 | read_lock(&bond->lock); |
776 | 776 | ||
777 | if (bond->kill_timers) | ||
778 | goto out; | ||
779 | |||
777 | /* rejoin all groups on bond device */ | 780 | /* rejoin all groups on bond device */ |
778 | __bond_resend_igmp_join_requests(bond->dev); | 781 | __bond_resend_igmp_join_requests(bond->dev); |
779 | 782 | ||
@@ -787,9 +790,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) | |||
787 | __bond_resend_igmp_join_requests(vlan_dev); | 790 | __bond_resend_igmp_join_requests(vlan_dev); |
788 | } | 791 | } |
789 | 792 | ||
790 | if (--bond->igmp_retrans > 0) | 793 | if ((--bond->igmp_retrans > 0) && !bond->kill_timers) |
791 | queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); | 794 | queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); |
792 | 795 | out: | |
793 | read_unlock(&bond->lock); | 796 | read_unlock(&bond->lock); |
794 | } | 797 | } |
795 | 798 | ||
@@ -2535,7 +2538,7 @@ void bond_mii_monitor(struct work_struct *work) | |||
2535 | } | 2538 | } |
2536 | 2539 | ||
2537 | re_arm: | 2540 | re_arm: |
2538 | if (bond->params.miimon) | 2541 | if (bond->params.miimon && !bond->kill_timers) |
2539 | queue_delayed_work(bond->wq, &bond->mii_work, | 2542 | queue_delayed_work(bond->wq, &bond->mii_work, |
2540 | msecs_to_jiffies(bond->params.miimon)); | 2543 | msecs_to_jiffies(bond->params.miimon)); |
2541 | out: | 2544 | out: |
@@ -2883,7 +2886,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work) | |||
2883 | } | 2886 | } |
2884 | 2887 | ||
2885 | re_arm: | 2888 | re_arm: |
2886 | if (bond->params.arp_interval) | 2889 | if (bond->params.arp_interval && !bond->kill_timers) |
2887 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); | 2890 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); |
2888 | out: | 2891 | out: |
2889 | read_unlock(&bond->lock); | 2892 | read_unlock(&bond->lock); |
@@ -3151,7 +3154,7 @@ void bond_activebackup_arp_mon(struct work_struct *work) | |||
3151 | bond_ab_arp_probe(bond); | 3154 | bond_ab_arp_probe(bond); |
3152 | 3155 | ||
3153 | re_arm: | 3156 | re_arm: |
3154 | if (bond->params.arp_interval) | 3157 | if (bond->params.arp_interval && !bond->kill_timers) |
3155 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); | 3158 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); |
3156 | out: | 3159 | out: |
3157 | read_unlock(&bond->lock); | 3160 | read_unlock(&bond->lock); |
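The bonding hunks above all add the same guard: a periodic work handler only re-queues itself while kill_timers is clear, so tearing the bond down stops the re-arm cycle. A single-threaded toy model of that guard follows; the one-slot "work queue" and the tick loop are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

static bool kill_timers;
static void (*pending_work)(void);      /* stands in for the delayed work queue */

static void queue_delayed_work(void (*fn)(void)) { pending_work = fn; }

static void bond_mii_monitor(void)
{
    printf("link check\n");
    /* re_arm: only reschedule if the bond is not being torn down */
    if (!kill_timers)
        queue_delayed_work(bond_mii_monitor);
}

int main(void)
{
    queue_delayed_work(bond_mii_monitor);
    for (int tick = 0; tick < 5; tick++) {
        void (*work)(void) = pending_work;
        pending_work = NULL;
        if (tick == 2)
            kill_timers = true;         /* bond_close() sets this in the driver */
        if (work)
            work();
    }
    return 0;
}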
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 0b9bd551580b..51bd7485ab18 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | |||
@@ -2123,6 +2123,7 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap) | |||
2123 | break; | 2123 | break; |
2124 | case DCB_CAP_ATTR_DCBX: | 2124 | case DCB_CAP_ATTR_DCBX: |
2125 | *cap = BNX2X_DCBX_CAPS; | 2125 | *cap = BNX2X_DCBX_CAPS; |
2126 | break; | ||
2126 | default: | 2127 | default: |
2127 | rval = -EINVAL; | 2128 | rval = -EINVAL; |
2128 | break; | 2129 | break; |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 28bde1610ffb..6486ab8c8fc8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -4937,7 +4937,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4937 | int igu_seg_id; | 4937 | int igu_seg_id; |
4938 | int port = BP_PORT(bp); | 4938 | int port = BP_PORT(bp); |
4939 | int func = BP_FUNC(bp); | 4939 | int func = BP_FUNC(bp); |
4940 | int reg_offset; | 4940 | int reg_offset, reg_offset_en5; |
4941 | u64 section; | 4941 | u64 section; |
4942 | int index; | 4942 | int index; |
4943 | struct hc_sp_status_block_data sp_sb_data; | 4943 | struct hc_sp_status_block_data sp_sb_data; |
@@ -4960,6 +4960,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4960 | 4960 | ||
4961 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : | 4961 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : |
4962 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); | 4962 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); |
4963 | reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : | ||
4964 | MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0); | ||
4963 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { | 4965 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { |
4964 | int sindex; | 4966 | int sindex; |
4965 | /* take care of sig[0]..sig[4] */ | 4967 | /* take care of sig[0]..sig[4] */ |
@@ -4974,7 +4976,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4974 | * and not 16 between the different groups | 4976 | * and not 16 between the different groups |
4975 | */ | 4977 | */ |
4976 | bp->attn_group[index].sig[4] = REG_RD(bp, | 4978 | bp->attn_group[index].sig[4] = REG_RD(bp, |
4977 | reg_offset + 0x10 + 0x4*index); | 4979 | reg_offset_en5 + 0x4*index); |
4978 | else | 4980 | else |
4979 | bp->attn_group[index].sig[4] = 0; | 4981 | bp->attn_group[index].sig[4] = 0; |
4980 | } | 4982 | } |
@@ -7619,8 +7621,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) | |||
7619 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; | 7621 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; |
7620 | u8 *mac_addr = bp->dev->dev_addr; | 7622 | u8 *mac_addr = bp->dev->dev_addr; |
7621 | u32 val; | 7623 | u32 val; |
7624 | u16 pmc; | ||
7625 | |||
7622 | /* The mac address is written to entries 1-4 to | 7626 | /* The mac address is written to entries 1-4 to |
7623 | preserve entry 0 which is used by the PMF */ | 7627 | * preserve entry 0 which is used by the PMF |
7628 | */ | ||
7624 | u8 entry = (BP_VN(bp) + 1)*8; | 7629 | u8 entry = (BP_VN(bp) + 1)*8; |
7625 | 7630 | ||
7626 | val = (mac_addr[0] << 8) | mac_addr[1]; | 7631 | val = (mac_addr[0] << 8) | mac_addr[1]; |
@@ -7630,6 +7635,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) | |||
7630 | (mac_addr[4] << 8) | mac_addr[5]; | 7635 | (mac_addr[4] << 8) | mac_addr[5]; |
7631 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); | 7636 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); |
7632 | 7637 | ||
7638 | /* Enable the PME and clear the status */ | ||
7639 | pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc); | ||
7640 | pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS; | ||
7641 | pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc); | ||
7642 | |||
7633 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; | 7643 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; |
7634 | 7644 | ||
7635 | } else | 7645 | } else |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 750e8445dac4..fc7bd0f23c0b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | |||
@@ -1384,6 +1384,18 @@ | |||
1384 | Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ | 1384 | Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ |
1385 | #define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 | 1385 | #define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 |
1386 | #define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 | 1386 | #define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 |
1387 | /* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped | ||
1388 | * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC | ||
1389 | * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] | ||
1390 | * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 | ||
1391 | * parity; [31-10] Reserved; */ | ||
1392 | #define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 0xa688 | ||
1393 | /* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped | ||
1394 | * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC | ||
1395 | * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] | ||
1396 | * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 | ||
1397 | * parity; [31-10] Reserved; */ | ||
1398 | #define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 0xa6b0 | ||
1387 | /* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu | 1399 | /* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu |
1388 | 128 bit vector */ | 1400 | 128 bit vector */ |
1389 | #define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 | 1401 | #define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 |
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c index 805076c54f1b..da5a5d9b8aff 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | |||
@@ -1146,12 +1146,14 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) | |||
1146 | if (te && te->ctx && te->client && te->client->redirect) { | 1146 | if (te && te->ctx && te->client && te->client->redirect) { |
1147 | update_tcb = te->client->redirect(te->ctx, old, new, e); | 1147 | update_tcb = te->client->redirect(te->ctx, old, new, e); |
1148 | if (update_tcb) { | 1148 | if (update_tcb) { |
1149 | rcu_read_lock(); | ||
1149 | l2t_hold(L2DATA(tdev), e); | 1150 | l2t_hold(L2DATA(tdev), e); |
1151 | rcu_read_unlock(); | ||
1150 | set_l2t_ix(tdev, tid, e); | 1152 | set_l2t_ix(tdev, tid, e); |
1151 | } | 1153 | } |
1152 | } | 1154 | } |
1153 | } | 1155 | } |
1154 | l2t_release(L2DATA(tdev), e); | 1156 | l2t_release(tdev, e); |
1155 | } | 1157 | } |
1156 | 1158 | ||
1157 | /* | 1159 | /* |
@@ -1264,7 +1266,7 @@ int cxgb3_offload_activate(struct adapter *adapter) | |||
1264 | goto out_free; | 1266 | goto out_free; |
1265 | 1267 | ||
1266 | err = -ENOMEM; | 1268 | err = -ENOMEM; |
1267 | L2DATA(dev) = t3_init_l2t(l2t_capacity); | 1269 | RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity)); |
1268 | if (!L2DATA(dev)) | 1270 | if (!L2DATA(dev)) |
1269 | goto out_free; | 1271 | goto out_free; |
1270 | 1272 | ||
@@ -1298,16 +1300,24 @@ int cxgb3_offload_activate(struct adapter *adapter) | |||
1298 | 1300 | ||
1299 | out_free_l2t: | 1301 | out_free_l2t: |
1300 | t3_free_l2t(L2DATA(dev)); | 1302 | t3_free_l2t(L2DATA(dev)); |
1301 | L2DATA(dev) = NULL; | 1303 | rcu_assign_pointer(dev->l2opt, NULL); |
1302 | out_free: | 1304 | out_free: |
1303 | kfree(t); | 1305 | kfree(t); |
1304 | return err; | 1306 | return err; |
1305 | } | 1307 | } |
1306 | 1308 | ||
1309 | static void clean_l2_data(struct rcu_head *head) | ||
1310 | { | ||
1311 | struct l2t_data *d = container_of(head, struct l2t_data, rcu_head); | ||
1312 | t3_free_l2t(d); | ||
1313 | } | ||
1314 | |||
1315 | |||
1307 | void cxgb3_offload_deactivate(struct adapter *adapter) | 1316 | void cxgb3_offload_deactivate(struct adapter *adapter) |
1308 | { | 1317 | { |
1309 | struct t3cdev *tdev = &adapter->tdev; | 1318 | struct t3cdev *tdev = &adapter->tdev; |
1310 | struct t3c_data *t = T3C_DATA(tdev); | 1319 | struct t3c_data *t = T3C_DATA(tdev); |
1320 | struct l2t_data *d; | ||
1311 | 1321 | ||
1312 | remove_adapter(adapter); | 1322 | remove_adapter(adapter); |
1313 | if (list_empty(&adapter_list)) | 1323 | if (list_empty(&adapter_list)) |
@@ -1315,8 +1325,11 @@ void cxgb3_offload_deactivate(struct adapter *adapter) | |||
1315 | 1325 | ||
1316 | free_tid_maps(&t->tid_maps); | 1326 | free_tid_maps(&t->tid_maps); |
1317 | T3C_DATA(tdev) = NULL; | 1327 | T3C_DATA(tdev) = NULL; |
1318 | t3_free_l2t(L2DATA(tdev)); | 1328 | rcu_read_lock(); |
1319 | L2DATA(tdev) = NULL; | 1329 | d = L2DATA(tdev); |
1330 | rcu_read_unlock(); | ||
1331 | rcu_assign_pointer(tdev->l2opt, NULL); | ||
1332 | call_rcu(&d->rcu_head, clean_l2_data); | ||
1320 | if (t->nofail_skb) | 1333 | if (t->nofail_skb) |
1321 | kfree_skb(t->nofail_skb); | 1334 | kfree_skb(t->nofail_skb); |
1322 | kfree(t); | 1335 | kfree(t); |
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c index f452c4003253..41540978a173 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c | |||
@@ -300,14 +300,21 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) | |||
300 | struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, | 300 | struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, |
301 | struct net_device *dev) | 301 | struct net_device *dev) |
302 | { | 302 | { |
303 | struct l2t_entry *e; | 303 | struct l2t_entry *e = NULL; |
304 | struct l2t_data *d = L2DATA(cdev); | 304 | struct l2t_data *d; |
305 | int hash; | ||
305 | u32 addr = *(u32 *) neigh->primary_key; | 306 | u32 addr = *(u32 *) neigh->primary_key; |
306 | int ifidx = neigh->dev->ifindex; | 307 | int ifidx = neigh->dev->ifindex; |
307 | int hash = arp_hash(addr, ifidx, d); | ||
308 | struct port_info *p = netdev_priv(dev); | 308 | struct port_info *p = netdev_priv(dev); |
309 | int smt_idx = p->port_id; | 309 | int smt_idx = p->port_id; |
310 | 310 | ||
311 | rcu_read_lock(); | ||
312 | d = L2DATA(cdev); | ||
313 | if (!d) | ||
314 | goto done_rcu; | ||
315 | |||
316 | hash = arp_hash(addr, ifidx, d); | ||
317 | |||
311 | write_lock_bh(&d->lock); | 318 | write_lock_bh(&d->lock); |
312 | for (e = d->l2tab[hash].first; e; e = e->next) | 319 | for (e = d->l2tab[hash].first; e; e = e->next) |
313 | if (e->addr == addr && e->ifindex == ifidx && | 320 | if (e->addr == addr && e->ifindex == ifidx && |
@@ -338,6 +345,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, | |||
338 | } | 345 | } |
339 | done: | 346 | done: |
340 | write_unlock_bh(&d->lock); | 347 | write_unlock_bh(&d->lock); |
348 | done_rcu: | ||
349 | rcu_read_unlock(); | ||
341 | return e; | 350 | return e; |
342 | } | 351 | } |
343 | 352 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h index 7a12d52ed4fc..c5f54796e2cb 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h | |||
@@ -76,6 +76,7 @@ struct l2t_data { | |||
76 | atomic_t nfree; /* number of free entries */ | 76 | atomic_t nfree; /* number of free entries */ |
77 | rwlock_t lock; | 77 | rwlock_t lock; |
78 | struct l2t_entry l2tab[0]; | 78 | struct l2t_entry l2tab[0]; |
79 | struct rcu_head rcu_head; /* to handle rcu cleanup */ | ||
79 | }; | 80 | }; |
80 | 81 | ||
81 | typedef void (*arp_failure_handler_func)(struct t3cdev * dev, | 82 | typedef void (*arp_failure_handler_func)(struct t3cdev * dev, |
@@ -99,7 +100,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb, | |||
99 | /* | 100 | /* |
100 | * Getting to the L2 data from an offload device. | 101 | * Getting to the L2 data from an offload device. |
101 | */ | 102 | */ |
102 | #define L2DATA(dev) ((dev)->l2opt) | 103 | #define L2DATA(cdev) (rcu_dereference((cdev)->l2opt)) |
103 | 104 | ||
104 | #define W_TCB_L2T_IX 0 | 105 | #define W_TCB_L2T_IX 0 |
105 | #define S_TCB_L2T_IX 7 | 106 | #define S_TCB_L2T_IX 7 |
@@ -126,15 +127,22 @@ static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb, | |||
126 | return t3_l2t_send_slow(dev, skb, e); | 127 | return t3_l2t_send_slow(dev, skb, e); |
127 | } | 128 | } |
128 | 129 | ||
129 | static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e) | 130 | static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e) |
130 | { | 131 | { |
131 | if (atomic_dec_and_test(&e->refcnt)) | 132 | struct l2t_data *d; |
133 | |||
134 | rcu_read_lock(); | ||
135 | d = L2DATA(t); | ||
136 | |||
137 | if (atomic_dec_and_test(&e->refcnt) && d) | ||
132 | t3_l2e_free(d, e); | 138 | t3_l2e_free(d, e); |
139 | |||
140 | rcu_read_unlock(); | ||
133 | } | 141 | } |
134 | 142 | ||
135 | static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) | 143 | static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) |
136 | { | 144 | { |
137 | if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ | 145 | if (d && atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ |
138 | atomic_dec(&d->nfree); | 146 | atomic_dec(&d->nfree); |
139 | } | 147 | } |
140 | 148 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 40b395f932cf..4c8f42afa3c6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
@@ -3715,6 +3715,9 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
3715 | setup_debugfs(adapter); | 3715 | setup_debugfs(adapter); |
3716 | } | 3716 | } |
3717 | 3717 | ||
3718 | /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ | ||
3719 | pdev->needs_freset = 1; | ||
3720 | |||
3718 | if (is_offload(adapter)) | 3721 | if (is_offload(adapter)) |
3719 | attach_ulds(adapter); | 3722 | attach_ulds(adapter); |
3720 | 3723 | ||
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 72b84de48756..4da972eaabb4 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c | |||
@@ -636,8 +636,8 @@ static int ibmveth_open(struct net_device *netdev) | |||
636 | netdev_err(netdev, "unable to request irq 0x%x, rc %d\n", | 636 | netdev_err(netdev, "unable to request irq 0x%x, rc %d\n", |
637 | netdev->irq, rc); | 637 | netdev->irq, rc); |
638 | do { | 638 | do { |
639 | rc = h_free_logical_lan(adapter->vdev->unit_address); | 639 | lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); |
640 | } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); | 640 | } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY)); |
641 | 641 | ||
642 | goto err_out; | 642 | goto err_out; |
643 | } | 643 | } |
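The ibmveth hunk above keeps the original request_irq() failure code intact: the cleanup retry loop used to overwrite rc with the hypervisor status, so the caller reported the wrong error. A small sketch of the pattern, assuming hypothetical stand-ins fake_request_irq() and fake_free_lan() in place of the real IRQ and hypercall interfaces:

#include <stdio.h>

#define EBUSY_HW 2              /* hypothetical "hardware busy" status */
#define OK_HW    0

static int fake_request_irq(void)    { return -22; }  /* fails permanently */
static int fake_free_lan(int *polls) { return --(*polls) ? EBUSY_HW : OK_HW; }

static int open_device(void)
{
    int rc, lpar_rc;
    int busy_polls = 3;

    rc = fake_request_irq();
    if (rc != 0) {
        /* Use a separate variable for the cleanup call so the original
         * failure code in 'rc' is what the caller eventually sees. */
        do {
            lpar_rc = fake_free_lan(&busy_polls);
        } while (lpar_rc == EBUSY_HW);
        goto err_out;
    }
    return 0;

err_out:
    return rc;                  /* still -22, not the last cleanup status */
}

int main(void)
{
    printf("open_device() -> %d\n", open_device());
    return 0;
}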
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 5dc61b4ef3cd..b89f3a684aec 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | |||
@@ -1198,6 +1198,8 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) | |||
1198 | iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), | 1198 | iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), |
1199 | &hw->reg->INT_EN); | 1199 | &hw->reg->INT_EN); |
1200 | pch_gbe_stop_receive(adapter); | 1200 | pch_gbe_stop_receive(adapter); |
1201 | int_st |= ioread32(&hw->reg->INT_ST); | ||
1202 | int_st = int_st & ioread32(&hw->reg->INT_EN); | ||
1201 | } | 1203 | } |
1202 | if (int_st & PCH_GBE_INT_RX_DMA_ERR) | 1204 | if (int_st & PCH_GBE_INT_RX_DMA_ERR) |
1203 | adapter->stats.intr_rx_dma_err_count++; | 1205 | adapter->stats.intr_rx_dma_err_count++; |
@@ -1217,14 +1219,11 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) | |||
1217 | /* Set Pause packet */ | 1219 | /* Set Pause packet */ |
1218 | pch_gbe_mac_set_pause_packet(hw); | 1220 | pch_gbe_mac_set_pause_packet(hw); |
1219 | } | 1221 | } |
1220 | if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) | ||
1221 | == 0) { | ||
1222 | return IRQ_HANDLED; | ||
1223 | } | ||
1224 | } | 1222 | } |
1225 | 1223 | ||
1226 | /* When request status is Receive interruption */ | 1224 | /* When request status is Receive interruption */ |
1227 | if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) { | 1225 | if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) || |
1226 | (adapter->rx_stop_flag == true)) { | ||
1228 | if (likely(napi_schedule_prep(&adapter->napi))) { | 1227 | if (likely(napi_schedule_prep(&adapter->napi))) { |
1229 | /* Enable only Rx Descriptor empty */ | 1228 | /* Enable only Rx Descriptor empty */ |
1230 | atomic_inc(&adapter->irq_sem); | 1229 | atomic_inc(&adapter->irq_sem); |
@@ -1384,7 +1383,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, | |||
1384 | struct sk_buff *skb; | 1383 | struct sk_buff *skb; |
1385 | unsigned int i; | 1384 | unsigned int i; |
1386 | unsigned int cleaned_count = 0; | 1385 | unsigned int cleaned_count = 0; |
1387 | bool cleaned = false; | 1386 | bool cleaned = true; |
1388 | 1387 | ||
1389 | pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); | 1388 | pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); |
1390 | 1389 | ||
@@ -1395,7 +1394,6 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, | |||
1395 | 1394 | ||
1396 | while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) { | 1395 | while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) { |
1397 | pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status); | 1396 | pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status); |
1398 | cleaned = true; | ||
1399 | buffer_info = &tx_ring->buffer_info[i]; | 1397 | buffer_info = &tx_ring->buffer_info[i]; |
1400 | skb = buffer_info->skb; | 1398 | skb = buffer_info->skb; |
1401 | 1399 | ||
@@ -1438,8 +1436,10 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, | |||
1438 | tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); | 1436 | tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); |
1439 | 1437 | ||
1440 | /* weight of a sort for tx, to avoid endless transmit cleanup */ | 1438 | /* weight of a sort for tx, to avoid endless transmit cleanup */ |
1441 | if (cleaned_count++ == PCH_GBE_TX_WEIGHT) | 1439 | if (cleaned_count++ == PCH_GBE_TX_WEIGHT) { |
1440 | cleaned = false; | ||
1442 | break; | 1441 | break; |
1442 | } | ||
1443 | } | 1443 | } |
1444 | pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n", | 1444 | pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n", |
1445 | cleaned_count); | 1445 | cleaned_count); |
@@ -2167,7 +2167,6 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) | |||
2167 | { | 2167 | { |
2168 | struct pch_gbe_adapter *adapter = | 2168 | struct pch_gbe_adapter *adapter = |
2169 | container_of(napi, struct pch_gbe_adapter, napi); | 2169 | container_of(napi, struct pch_gbe_adapter, napi); |
2170 | struct net_device *netdev = adapter->netdev; | ||
2171 | int work_done = 0; | 2170 | int work_done = 0; |
2172 | bool poll_end_flag = false; | 2171 | bool poll_end_flag = false; |
2173 | bool cleaned = false; | 2172 | bool cleaned = false; |
@@ -2175,33 +2174,32 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) | |||
2175 | 2174 | ||
2176 | pr_debug("budget : %d\n", budget); | 2175 | pr_debug("budget : %d\n", budget); |
2177 | 2176 | ||
2178 | /* Keep link state information with original netdev */ | 2177 | pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); |
2179 | if (!netif_carrier_ok(netdev)) { | 2178 | cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); |
2179 | |||
2180 | if (!cleaned) | ||
2181 | work_done = budget; | ||
2182 | /* If no Tx and not enough Rx work done, | ||
2183 | * exit the polling mode | ||
2184 | */ | ||
2185 | if (work_done < budget) | ||
2180 | poll_end_flag = true; | 2186 | poll_end_flag = true; |
2181 | } else { | 2187 | |
2182 | pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); | 2188 | if (poll_end_flag) { |
2189 | napi_complete(napi); | ||
2190 | if (adapter->rx_stop_flag) { | ||
2191 | adapter->rx_stop_flag = false; | ||
2192 | pch_gbe_start_receive(&adapter->hw); | ||
2193 | } | ||
2194 | pch_gbe_irq_enable(adapter); | ||
2195 | } else | ||
2183 | if (adapter->rx_stop_flag) { | 2196 | if (adapter->rx_stop_flag) { |
2184 | adapter->rx_stop_flag = false; | 2197 | adapter->rx_stop_flag = false; |
2185 | pch_gbe_start_receive(&adapter->hw); | 2198 | pch_gbe_start_receive(&adapter->hw); |
2186 | int_en = ioread32(&adapter->hw.reg->INT_EN); | 2199 | int_en = ioread32(&adapter->hw.reg->INT_EN); |
2187 | iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), | 2200 | iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), |
2188 | &adapter->hw.reg->INT_EN); | 2201 | &adapter->hw.reg->INT_EN); |
2189 | } | 2202 | } |
2190 | cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); | ||
2191 | |||
2192 | if (cleaned) | ||
2193 | work_done = budget; | ||
2194 | /* If no Tx and not enough Rx work done, | ||
2195 | * exit the polling mode | ||
2196 | */ | ||
2197 | if ((work_done < budget) || !netif_running(netdev)) | ||
2198 | poll_end_flag = true; | ||
2199 | } | ||
2200 | |||
2201 | if (poll_end_flag) { | ||
2202 | napi_complete(napi); | ||
2203 | pch_gbe_irq_enable(adapter); | ||
2204 | } | ||
2205 | 2203 | ||
2206 | pr_debug("poll_end_flag : %d work_done : %d budget : %d\n", | 2204 | pr_debug("poll_end_flag : %d work_done : %d budget : %d\n", |
2207 | poll_end_flag, work_done, budget); | 2205 | poll_end_flag, work_done, budget); |
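The pch_gbe NAPI rework above drops the carrier check, always runs RX and TX cleanup, forces work_done to the full budget when TX cleanup stopped early at its weight limit, and only completes NAPI (restarting reception if it was stopped, then re-enabling interrupts) when work_done stays under budget. The toy model below reproduces just that budget bookkeeping in plain C; napi_poll_model and its parameters are invented for illustration and the interrupt/DMA side is left out.

#include <stdbool.h>
#include <stdio.h>

/* 'tx_fully_cleaned' mirrors the return value of pch_gbe_clean_tx(),
 * 'rx_done' what pch_gbe_clean_rx() reported for this poll. */
static bool napi_poll_model(int budget, int rx_done, bool tx_fully_cleaned,
                            int *work_done)
{
    bool poll_end = false;

    *work_done = rx_done;
    if (!tx_fully_cleaned)
        *work_done = budget;    /* keep polling: TX hit its weight limit */
    if (*work_done < budget)
        poll_end = true;        /* napi_complete() + irq enable would go here */
    return poll_end;
}

int main(void)
{
    int wd;

    printf("rx=3,  tx done    -> complete=%d work=%d\n",
           napi_poll_model(64, 3, true, &wd), wd);
    printf("rx=3,  tx partial -> complete=%d work=%d\n",
           napi_poll_model(64, 3, false, &wd), wd);
    printf("rx=64, tx done    -> complete=%d work=%d\n",
           napi_poll_model(64, 64, true, &wd), wd);
    return 0;
}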
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index b100c90e8507..24cf942e1316 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -239,7 +239,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) | |||
239 | dest = macvlan_hash_lookup(port, eth->h_dest); | 239 | dest = macvlan_hash_lookup(port, eth->h_dest); |
240 | if (dest && dest->mode == MACVLAN_MODE_BRIDGE) { | 240 | if (dest && dest->mode == MACVLAN_MODE_BRIDGE) { |
241 | /* send to lowerdev first for its network taps */ | 241 | /* send to lowerdev first for its network taps */ |
242 | vlan->forward(vlan->lowerdev, skb); | 242 | dev_forward_skb(vlan->lowerdev, skb); |
243 | 243 | ||
244 | return NET_XMIT_SUCCESS; | 244 | return NET_XMIT_SUCCESS; |
245 | } | 245 | } |
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index be381c24c4b4..c588a162050f 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c | |||
@@ -686,7 +686,7 @@ static void decode_rxts(struct dp83640_private *dp83640, | |||
686 | prune_rx_ts(dp83640); | 686 | prune_rx_ts(dp83640); |
687 | 687 | ||
688 | if (list_empty(&dp83640->rxpool)) { | 688 | if (list_empty(&dp83640->rxpool)) { |
689 | pr_warning("dp83640: rx timestamp pool is empty\n"); | 689 | pr_debug("dp83640: rx timestamp pool is empty\n"); |
690 | goto out; | 690 | goto out; |
691 | } | 691 | } |
692 | rxts = list_first_entry(&dp83640->rxpool, struct rxts, list); | 692 | rxts = list_first_entry(&dp83640->rxpool, struct rxts, list); |
@@ -709,7 +709,7 @@ static void decode_txts(struct dp83640_private *dp83640, | |||
709 | skb = skb_dequeue(&dp83640->tx_queue); | 709 | skb = skb_dequeue(&dp83640->tx_queue); |
710 | 710 | ||
711 | if (!skb) { | 711 | if (!skb) { |
712 | pr_warning("dp83640: have timestamp but tx_queue empty\n"); | 712 | pr_debug("dp83640: have timestamp but tx_queue empty\n"); |
713 | return; | 713 | return; |
714 | } | 714 | } |
715 | ns = phy2txts(phy_txts); | 715 | ns = phy2txts(phy_txts); |
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 0ca86f9ec4ed..182562952c79 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
@@ -327,12 +327,12 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, | |||
327 | xenvif_get(vif); | 327 | xenvif_get(vif); |
328 | 328 | ||
329 | rtnl_lock(); | 329 | rtnl_lock(); |
330 | if (netif_running(vif->dev)) | ||
331 | xenvif_up(vif); | ||
332 | if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) | 330 | if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) |
333 | dev_set_mtu(vif->dev, ETH_DATA_LEN); | 331 | dev_set_mtu(vif->dev, ETH_DATA_LEN); |
334 | netdev_update_features(vif->dev); | 332 | netdev_update_features(vif->dev); |
335 | netif_carrier_on(vif->dev); | 333 | netif_carrier_on(vif->dev); |
334 | if (netif_running(vif->dev)) | ||
335 | xenvif_up(vif); | ||
336 | rtnl_unlock(); | 336 | rtnl_unlock(); |
337 | 337 | ||
338 | return 0; | 338 | return 0; |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 4e84fd4a4312..e9651f0a8817 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -77,7 +77,7 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; | |||
77 | unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; | 77 | unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; |
78 | unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; | 78 | unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; |
79 | 79 | ||
80 | enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE; | 80 | enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF; |
81 | 81 | ||
82 | /* | 82 | /* |
83 | * The default CLS is used if arch didn't set CLS explicitly and not | 83 | * The default CLS is used if arch didn't set CLS explicitly and not |
@@ -3568,10 +3568,14 @@ static int __init pci_setup(char *str) | |||
3568 | pci_hotplug_io_size = memparse(str + 9, &str); | 3568 | pci_hotplug_io_size = memparse(str + 9, &str); |
3569 | } else if (!strncmp(str, "hpmemsize=", 10)) { | 3569 | } else if (!strncmp(str, "hpmemsize=", 10)) { |
3570 | pci_hotplug_mem_size = memparse(str + 10, &str); | 3570 | pci_hotplug_mem_size = memparse(str + 10, &str); |
3571 | } else if (!strncmp(str, "pcie_bus_tune_off", 17)) { | ||
3572 | pcie_bus_config = PCIE_BUS_TUNE_OFF; | ||
3571 | } else if (!strncmp(str, "pcie_bus_safe", 13)) { | 3573 | } else if (!strncmp(str, "pcie_bus_safe", 13)) { |
3572 | pcie_bus_config = PCIE_BUS_SAFE; | 3574 | pcie_bus_config = PCIE_BUS_SAFE; |
3573 | } else if (!strncmp(str, "pcie_bus_perf", 13)) { | 3575 | } else if (!strncmp(str, "pcie_bus_perf", 13)) { |
3574 | pcie_bus_config = PCIE_BUS_PERFORMANCE; | 3576 | pcie_bus_config = PCIE_BUS_PERFORMANCE; |
3577 | } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) { | ||
3578 | pcie_bus_config = PCIE_BUS_PEER2PEER; | ||
3575 | } else { | 3579 | } else { |
3576 | printk(KERN_ERR "PCI: Unknown option `%s'\n", | 3580 | printk(KERN_ERR "PCI: Unknown option `%s'\n", |
3577 | str); | 3581 | str); |
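The pci.c hunk above adds "pcie_bus_tune_off" and "pcie_bus_peer2peer" keywords to the strncmp-based "pci=" option parser. A self-contained sketch of that prefix-matching pattern follows; the enum is reproduced only for the sketch (its exact ordering is an assumption), and the parser here is a userspace stand-in, not the kernel's pci_setup().

#include <stdio.h>
#include <string.h>

enum pcie_bus_config_types {
    PCIE_BUS_TUNE_OFF,
    PCIE_BUS_SAFE,
    PCIE_BUS_PERFORMANCE,
    PCIE_BUS_PEER2PEER,
};

static enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;

/* Prefix-match one "pci=" token the way the patched parser does. */
static void parse_pci_option(const char *str)
{
    if (!strncmp(str, "pcie_bus_tune_off", 17))
        pcie_bus_config = PCIE_BUS_TUNE_OFF;
    else if (!strncmp(str, "pcie_bus_safe", 13))
        pcie_bus_config = PCIE_BUS_SAFE;
    else if (!strncmp(str, "pcie_bus_perf", 13))
        pcie_bus_config = PCIE_BUS_PERFORMANCE;
    else if (!strncmp(str, "pcie_bus_peer2peer", 18))
        pcie_bus_config = PCIE_BUS_PEER2PEER;
    else
        printf("PCI: Unknown option `%s'\n", str);
}

int main(void)
{
    parse_pci_option("pcie_bus_peer2peer");
    printf("pcie_bus_config = %d\n", pcie_bus_config);
    parse_pci_option("pcie_bus_bogus");
    return 0;
}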
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index f3f94a5c068f..6ab6bd3df4b2 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -1458,12 +1458,24 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data) | |||
1458 | */ | 1458 | */ |
1459 | void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) | 1459 | void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) |
1460 | { | 1460 | { |
1461 | u8 smpss = mpss; | 1461 | u8 smpss; |
1462 | 1462 | ||
1463 | if (!pci_is_pcie(bus->self)) | 1463 | if (!pci_is_pcie(bus->self)) |
1464 | return; | 1464 | return; |
1465 | 1465 | ||
1466 | if (pcie_bus_config == PCIE_BUS_TUNE_OFF) | ||
1467 | return; | ||
1468 | |||
1469 | /* FIXME - Peer to peer DMA is possible, though the endpoint would need | ||
1470 | * to be aware of the MPS of the destination. To work around this, | ||
1471 | * simply force the MPS of the entire system to the smallest possible. | ||
1472 | */ | ||
1473 | if (pcie_bus_config == PCIE_BUS_PEER2PEER) | ||
1474 | smpss = 0; | ||
1475 | |||
1466 | if (pcie_bus_config == PCIE_BUS_SAFE) { | 1476 | if (pcie_bus_config == PCIE_BUS_SAFE) { |
1477 | smpss = mpss; | ||
1478 | |||
1467 | pcie_find_smpss(bus->self, &smpss); | 1479 | pcie_find_smpss(bus->self, &smpss); |
1468 | pci_walk_bus(bus, pcie_find_smpss, &smpss); | 1480 | pci_walk_bus(bus, pcie_find_smpss, &smpss); |
1469 | } | 1481 | } |
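The probe.c hunk above makes pcie_bus_configure_settings() return early for PCIE_BUS_TUNE_OFF, force the smallest MPS (encoded 0, i.e. 128 bytes) for PCIE_BUS_PEER2PEER, and only seed smpss from the root's mpss and scan the bus in the SAFE case. The sketch below models just that selection; pick_smpss and bus_min_mpss are invented names, and the PERFORMANCE branch is deliberately simplified since its handling lives outside this hunk.

#include <stdio.h>

enum pcie_bus_config_types {
    PCIE_BUS_TUNE_OFF,
    PCIE_BUS_SAFE,
    PCIE_BUS_PERFORMANCE,
    PCIE_BUS_PEER2PEER,
};

/* Returns the encoded MPS the bus would be programmed with, or -1 when the
 * routine bails out without touching anything.  bus_min_mpss stands in for
 * what pcie_find_smpss()/pci_walk_bus() would discover. */
static int pick_smpss(enum pcie_bus_config_types cfg, int mpss, int bus_min_mpss)
{
    int smpss;

    if (cfg == PCIE_BUS_TUNE_OFF)
        return -1;              /* leave firmware settings alone */
    if (cfg == PCIE_BUS_PEER2PEER)
        return 0;               /* encoded 0 == 128 bytes, safe for p2p DMA */

    smpss = mpss;               /* PERFORMANCE case simplified here */
    if (cfg == PCIE_BUS_SAFE)
        smpss = bus_min_mpss < mpss ? bus_min_mpss : mpss;
    return smpss;
}

int main(void)
{
    printf("tune_off  -> %d\n", pick_smpss(PCIE_BUS_TUNE_OFF, 2, 1));
    printf("peer2peer -> %d\n", pick_smpss(PCIE_BUS_PEER2PEER, 2, 1));
    printf("safe      -> %d\n", pick_smpss(PCIE_BUS_SAFE, 2, 1));
    return 0;
}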
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index cbde448f9947..eb3140ee821e 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -654,8 +654,8 @@ static struct io_subchannel_private console_priv; | |||
654 | static int console_subchannel_in_use; | 654 | static int console_subchannel_in_use; |
655 | 655 | ||
656 | /* | 656 | /* |
657 | * Use tpi to get a pending interrupt, call the interrupt handler and | 657 | * Use cio_tpi to get a pending interrupt and call the interrupt handler. |
658 | * return a pointer to the subchannel structure. | 658 | * Return non-zero if an interrupt was processed, zero otherwise. |
659 | */ | 659 | */ |
660 | static int cio_tpi(void) | 660 | static int cio_tpi(void) |
661 | { | 661 | { |
@@ -667,6 +667,10 @@ static int cio_tpi(void) | |||
667 | tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; | 667 | tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; |
668 | if (tpi(NULL) != 1) | 668 | if (tpi(NULL) != 1) |
669 | return 0; | 669 | return 0; |
670 | if (tpi_info->adapter_IO) { | ||
671 | do_adapter_IO(tpi_info->isc); | ||
672 | return 1; | ||
673 | } | ||
670 | irb = (struct irb *)&S390_lowcore.irb; | 674 | irb = (struct irb *)&S390_lowcore.irb; |
671 | /* Store interrupt response block to lowcore. */ | 675 | /* Store interrupt response block to lowcore. */ |
672 | if (tsch(tpi_info->schid, irb) != 0) | 676 | if (tsch(tpi_info->schid, irb) != 0) |
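The cio.c hunk above teaches cio_tpi() to classify the pending interrupt first: adapter interrupts are routed to do_adapter_IO() and the function reports success, instead of falling into the subchannel path. A trivial userspace model of that dispatch (tpi_model and the two handlers are illustrative stand-ins):

#include <stdbool.h>
#include <stdio.h>

struct tpi_info { bool adapter_IO; int isc; };

static void do_adapter_io(int isc) { printf("adapter interrupt, isc=%d\n", isc); }
static void do_subchannel_io(void) { printf("subchannel interrupt\n"); }

/* Classify the pending interrupt before the default handling; returns
 * non-zero when an interrupt was processed, as the patched comment says. */
static int tpi_model(const struct tpi_info *info)
{
    if (info->adapter_IO) {
        do_adapter_io(info->isc);
        return 1;
    }
    do_subchannel_io();
    return 1;
}

int main(void)
{
    struct tpi_info adapter = { .adapter_IO = true, .isc = 3 };
    struct tpi_info sch = { .adapter_IO = false };

    tpi_model(&adapter);
    tpi_model(&sch);
    return 0;
}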
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index b7bd5b0cc7aa..3868ab2397c6 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c | |||
@@ -1800,10 +1800,12 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ | |||
1800 | switch (retval) { | 1800 | switch (retval) { |
1801 | case SCSI_MLQUEUE_HOST_BUSY: | 1801 | case SCSI_MLQUEUE_HOST_BUSY: |
1802 | twa_free_request_id(tw_dev, request_id); | 1802 | twa_free_request_id(tw_dev, request_id); |
1803 | twa_unmap_scsi_data(tw_dev, request_id); | ||
1803 | break; | 1804 | break; |
1804 | case 1: | 1805 | case 1: |
1805 | tw_dev->state[request_id] = TW_S_COMPLETED; | 1806 | tw_dev->state[request_id] = TW_S_COMPLETED; |
1806 | twa_free_request_id(tw_dev, request_id); | 1807 | twa_free_request_id(tw_dev, request_id); |
1808 | twa_unmap_scsi_data(tw_dev, request_id); | ||
1807 | SCpnt->result = (DID_ERROR << 16); | 1809 | SCpnt->result = (DID_ERROR << 16); |
1808 | done(SCpnt); | 1810 | done(SCpnt); |
1809 | retval = 0; | 1811 | retval = 0; |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 8d9dae89f065..3878b7395081 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -837,6 +837,7 @@ config SCSI_ISCI | |||
837 | # (temporary): known alpha quality driver | 837 | # (temporary): known alpha quality driver |
838 | depends on EXPERIMENTAL | 838 | depends on EXPERIMENTAL |
839 | select SCSI_SAS_LIBSAS | 839 | select SCSI_SAS_LIBSAS |
840 | select SCSI_SAS_HOST_SMP | ||
840 | ---help--- | 841 | ---help--- |
841 | This driver supports the 6Gb/s SAS capabilities of the storage | 842 | This driver supports the 6Gb/s SAS capabilities of the storage |
842 | control unit found in the Intel(R) C600 series chipset. | 843 | control unit found in the Intel(R) C600 series chipset. |
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 3c08f5352b2d..6153a66a8a31 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile | |||
@@ -88,7 +88,7 @@ obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o | |||
88 | obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o | 88 | obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o |
89 | obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o | 89 | obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o |
90 | obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/ | 90 | obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/ |
91 | obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/ | 91 | obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/ |
92 | obj-$(CONFIG_SCSI_LPFC) += lpfc/ | 92 | obj-$(CONFIG_SCSI_LPFC) += lpfc/ |
93 | obj-$(CONFIG_SCSI_BFA_FC) += bfa/ | 93 | obj-$(CONFIG_SCSI_BFA_FC) += bfa/ |
94 | obj-$(CONFIG_SCSI_PAS16) += pas16.o | 94 | obj-$(CONFIG_SCSI_PAS16) += pas16.o |
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index e7d0d47b9185..e5f2d7d9002e 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c | |||
@@ -1283,6 +1283,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced) | |||
1283 | kfree(aac->queues); | 1283 | kfree(aac->queues); |
1284 | aac->queues = NULL; | 1284 | aac->queues = NULL; |
1285 | free_irq(aac->pdev->irq, aac); | 1285 | free_irq(aac->pdev->irq, aac); |
1286 | if (aac->msi) | ||
1287 | pci_disable_msi(aac->pdev); | ||
1286 | kfree(aac->fsa_dev); | 1288 | kfree(aac->fsa_dev); |
1287 | aac->fsa_dev = NULL; | 1289 | aac->fsa_dev = NULL; |
1288 | quirks = aac_get_driver_ident(index)->quirks; | 1290 | quirks = aac_get_driver_ident(index)->quirks; |
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index bd22041e2789..f58644850333 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | |||
@@ -913,7 +913,7 @@ static void l2t_put(struct cxgbi_sock *csk) | |||
913 | struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; | 913 | struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; |
914 | 914 | ||
915 | if (csk->l2t) { | 915 | if (csk->l2t) { |
916 | l2t_release(L2DATA(t3dev), csk->l2t); | 916 | l2t_release(t3dev, csk->l2t); |
917 | csk->l2t = NULL; | 917 | csk->l2t = NULL; |
918 | cxgbi_sock_put(csk); | 918 | cxgbi_sock_put(csk); |
919 | } | 919 | } |
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index f84084bba2f0..16ad97df5ba6 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c | |||
@@ -1721,7 +1721,7 @@ static int sas_find_bcast_dev(struct domain_device *dev, | |||
1721 | list_for_each_entry(ch, &ex->children, siblings) { | 1721 | list_for_each_entry(ch, &ex->children, siblings) { |
1722 | if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { | 1722 | if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { |
1723 | res = sas_find_bcast_dev(ch, src_dev); | 1723 | res = sas_find_bcast_dev(ch, src_dev); |
1724 | if (src_dev) | 1724 | if (*src_dev) |
1725 | return res; | 1725 | return res; |
1726 | } | 1726 | } |
1727 | } | 1727 | } |
@@ -1769,10 +1769,12 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent, | |||
1769 | sas_disable_routing(parent, phy->attached_sas_addr); | 1769 | sas_disable_routing(parent, phy->attached_sas_addr); |
1770 | } | 1770 | } |
1771 | memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); | 1771 | memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); |
1772 | sas_port_delete_phy(phy->port, phy->phy); | 1772 | if (phy->port) { |
1773 | if (phy->port->num_phys == 0) | 1773 | sas_port_delete_phy(phy->port, phy->phy); |
1774 | sas_port_delete(phy->port); | 1774 | if (phy->port->num_phys == 0) |
1775 | phy->port = NULL; | 1775 | sas_port_delete(phy->port); |
1776 | phy->port = NULL; | ||
1777 | } | ||
1776 | } | 1778 | } |
1777 | 1779 | ||
1778 | static int sas_discover_bfs_by_root_level(struct domain_device *root, | 1780 | static int sas_discover_bfs_by_root_level(struct domain_device *root, |
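The sas_expander hunk above wraps the port teardown in an "is the phy actually attached to a port?" guard and clears the back-pointer inside that guard, so a phy without a port (or a repeated teardown) no longer dereferences NULL. A minimal sketch of the guarded-teardown shape, using invented ex_port/ex_phy types rather than libsas structures:

#include <stdio.h>
#include <stdlib.h>

struct ex_port { int num_phys; };
struct ex_phy  { struct ex_port *port; };

static void port_delete_phy(struct ex_port *p) { p->num_phys--; }
static void port_delete(struct ex_port *p)     { free(p); printf("port freed\n"); }

/* Only touch the port if the phy is attached to one, and clear the
 * back-pointer inside the same guard so a second call is harmless. */
static void unregister_phy(struct ex_phy *phy)
{
    if (phy->port) {
        port_delete_phy(phy->port);
        if (phy->port->num_phys == 0)
            port_delete(phy->port);
        phy->port = NULL;
    }
}

int main(void)
{
    struct ex_phy phy = { .port = calloc(1, sizeof(struct ex_port)) };

    if (!phy.port)
        return 1;
    phy.port->num_phys = 1;

    unregister_phy(&phy);       /* deletes the last phy and frees the port */
    unregister_phy(&phy);       /* phy.port is NULL now: no-op, no crash   */
    return 0;
}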
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 646fc5263d50..8a7591f035e6 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -1507,8 +1507,8 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) | |||
1507 | 1507 | ||
1508 | if (k != blocks_done) { | 1508 | if (k != blocks_done) { |
1509 | qla_printk(KERN_WARNING, sp->fcport->vha->hw, | 1509 | qla_printk(KERN_WARNING, sp->fcport->vha->hw, |
1510 | "unexpected tag values tag:lba=%x:%lx)\n", | 1510 | "unexpected tag values tag:lba=%x:%llx)\n", |
1511 | e_ref_tag, lba_s); | 1511 | e_ref_tag, (unsigned long long)lba_s); |
1512 | return 1; | 1512 | return 1; |
1513 | } | 1513 | } |
1514 | 1514 | ||
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 4cace3f20c04..1e69527f1e4e 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -1328,10 +1328,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) | |||
1328 | qla2x00_sp_compl(ha, sp); | 1328 | qla2x00_sp_compl(ha, sp); |
1329 | } else { | 1329 | } else { |
1330 | ctx = sp->ctx; | 1330 | ctx = sp->ctx; |
1331 | if (ctx->type == SRB_LOGIN_CMD || | 1331 | if (ctx->type == SRB_ELS_CMD_RPT || |
1332 | ctx->type == SRB_LOGOUT_CMD) { | 1332 | ctx->type == SRB_ELS_CMD_HST || |
1333 | ctx->u.iocb_cmd->free(sp); | 1333 | ctx->type == SRB_CT_CMD) { |
1334 | } else { | ||
1335 | struct fc_bsg_job *bsg_job = | 1334 | struct fc_bsg_job *bsg_job = |
1336 | ctx->u.bsg_job; | 1335 | ctx->u.bsg_job; |
1337 | if (bsg_job->request->msgcode | 1336 | if (bsg_job->request->msgcode |
@@ -1343,6 +1342,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) | |||
1343 | kfree(sp->ctx); | 1342 | kfree(sp->ctx); |
1344 | mempool_free(sp, | 1343 | mempool_free(sp, |
1345 | ha->srb_mempool); | 1344 | ha->srb_mempool); |
1345 | } else { | ||
1346 | ctx->u.iocb_cmd->free(sp); | ||
1346 | } | 1347 | } |
1347 | } | 1348 | } |
1348 | } | 1349 | } |
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index d2407558773f..24cacff57786 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c | |||
@@ -825,6 +825,9 @@ static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) | |||
825 | { | 825 | { |
826 | struct device *dev = mspi->dev; | 826 | struct device *dev = mspi->dev; |
827 | 827 | ||
828 | if (!(mspi->flags & SPI_CPM_MODE)) | ||
829 | return; | ||
830 | |||
828 | dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE); | 831 | dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE); |
829 | dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); | 832 | dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); |
830 | cpm_muram_free(cpm_muram_offset(mspi->tx_bd)); | 833 | cpm_muram_free(cpm_muram_offset(mspi->tx_bd)); |
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 8ac6542aedcd..fa594d604aca 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c | |||
@@ -786,9 +786,11 @@ static int __devinit spi_imx_probe(struct platform_device *pdev) | |||
786 | int cs_gpio = of_get_named_gpio(np, "cs-gpios", i); | 786 | int cs_gpio = of_get_named_gpio(np, "cs-gpios", i); |
787 | if (cs_gpio < 0) | 787 | if (cs_gpio < 0) |
788 | cs_gpio = mxc_platform_info->chipselect[i]; | 788 | cs_gpio = mxc_platform_info->chipselect[i]; |
789 | |||
790 | spi_imx->chipselect[i] = cs_gpio; | ||
789 | if (cs_gpio < 0) | 791 | if (cs_gpio < 0) |
790 | continue; | 792 | continue; |
791 | spi_imx->chipselect[i] = cs_gpio; | 793 | |
792 | ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME); | 794 | ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME); |
793 | if (ret) { | 795 | if (ret) { |
794 | while (i > 0) { | 796 | while (i > 0) { |
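The spi-imx hunk above records cs_gpio into the chipselect table before the "negative means internal chipselect" skip, so entries without a GPIO are no longer left uninitialized. A small sketch of the record-then-skip ordering; fill_chipselect and the sample values are invented for illustration:

#include <stdio.h>

#define NUM_CS 4

/* Always record the chipselect value, even a negative one meaning
 * "internal CS, no GPIO to request"; only skip the request step. */
static void fill_chipselect(const int *dt_gpios, int *chipselect)
{
    for (int i = 0; i < NUM_CS; i++) {
        int cs_gpio = dt_gpios[i];

        chipselect[i] = cs_gpio;        /* recorded before the skip */
        if (cs_gpio < 0)
            continue;                   /* internal CS: nothing to request */
        printf("would gpio_request(%d)\n", cs_gpio);
    }
}

int main(void)
{
    int dt_gpios[NUM_CS] = { 17, -32, 21, -32 };
    int chipselect[NUM_CS];

    fill_chipselect(dt_gpios, chipselect);
    for (int i = 0; i < NUM_CS; i++)
        printf("chipselect[%d] = %d\n", i, chipselect[i]);
    return 0;
}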
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 1d23f3831866..6a80749391db 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c | |||
@@ -50,6 +50,8 @@ | |||
50 | #define PCH_RX_THOLD 7 | 50 | #define PCH_RX_THOLD 7 |
51 | #define PCH_RX_THOLD_MAX 15 | 51 | #define PCH_RX_THOLD_MAX 15 |
52 | 52 | ||
53 | #define PCH_TX_THOLD 2 | ||
54 | |||
53 | #define PCH_MAX_BAUDRATE 5000000 | 55 | #define PCH_MAX_BAUDRATE 5000000 |
54 | #define PCH_MAX_FIFO_DEPTH 16 | 56 | #define PCH_MAX_FIFO_DEPTH 16 |
55 | 57 | ||
@@ -58,6 +60,7 @@ | |||
58 | #define PCH_SLEEP_TIME 10 | 60 | #define PCH_SLEEP_TIME 10 |
59 | 61 | ||
60 | #define SSN_LOW 0x02U | 62 | #define SSN_LOW 0x02U |
63 | #define SSN_HIGH 0x03U | ||
61 | #define SSN_NO_CONTROL 0x00U | 64 | #define SSN_NO_CONTROL 0x00U |
62 | #define PCH_MAX_CS 0xFF | 65 | #define PCH_MAX_CS 0xFF |
63 | #define PCI_DEVICE_ID_GE_SPI 0x8816 | 66 | #define PCI_DEVICE_ID_GE_SPI 0x8816 |
@@ -316,16 +319,19 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val, | |||
316 | 319 | ||
317 | /* if transfer complete interrupt */ | 320 | /* if transfer complete interrupt */ |
318 | if (reg_spsr_val & SPSR_FI_BIT) { | 321 | if (reg_spsr_val & SPSR_FI_BIT) { |
319 | if (tx_index < bpw_len) | 322 | if ((tx_index == bpw_len) && (rx_index == tx_index)) { |
323 | /* disable interrupts */ | ||
324 | pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); | ||
325 | |||
326 | /* transfer is completed; | ||
327 | inform pch_spi_process_messages */ | ||
328 | data->transfer_complete = true; | ||
329 | data->transfer_active = false; | ||
330 | wake_up(&data->wait); | ||
331 | } else { | ||
320 | dev_err(&data->master->dev, | 332 | dev_err(&data->master->dev, |
321 | "%s : Transfer is not completed", __func__); | 333 | "%s : Transfer is not completed", __func__); |
322 | /* disable interrupts */ | 334 | } |
323 | pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); | ||
324 | |||
325 | /* transfer is completed;inform pch_spi_process_messages */ | ||
326 | data->transfer_complete = true; | ||
327 | data->transfer_active = false; | ||
328 | wake_up(&data->wait); | ||
329 | } | 335 | } |
330 | } | 336 | } |
331 | 337 | ||
@@ -348,16 +354,26 @@ static irqreturn_t pch_spi_handler(int irq, void *dev_id) | |||
348 | "%s returning due to suspend\n", __func__); | 354 | "%s returning due to suspend\n", __func__); |
349 | return IRQ_NONE; | 355 | return IRQ_NONE; |
350 | } | 356 | } |
351 | if (data->use_dma) | ||
352 | return IRQ_NONE; | ||
353 | 357 | ||
354 | io_remap_addr = data->io_remap_addr; | 358 | io_remap_addr = data->io_remap_addr; |
355 | spsr = io_remap_addr + PCH_SPSR; | 359 | spsr = io_remap_addr + PCH_SPSR; |
356 | 360 | ||
357 | reg_spsr_val = ioread32(spsr); | 361 | reg_spsr_val = ioread32(spsr); |
358 | 362 | ||
359 | if (reg_spsr_val & SPSR_ORF_BIT) | 363 | if (reg_spsr_val & SPSR_ORF_BIT) { |
360 | dev_err(&board_dat->pdev->dev, "%s Over run error", __func__); | 364 | dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__); |
365 | if (data->current_msg->complete != 0) { | ||
366 | data->transfer_complete = true; | ||
367 | data->current_msg->status = -EIO; | ||
368 | data->current_msg->complete(data->current_msg->context); | ||
369 | data->bcurrent_msg_processing = false; | ||
370 | data->current_msg = NULL; | ||
371 | data->cur_trans = NULL; | ||
372 | } | ||
373 | } | ||
374 | |||
375 | if (data->use_dma) | ||
376 | return IRQ_NONE; | ||
361 | 377 | ||
362 | /* Check if the interrupt is for SPI device */ | 378 | /* Check if the interrupt is for SPI device */ |
363 | if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { | 379 | if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { |
@@ -756,10 +772,6 @@ static void pch_spi_set_ir(struct pch_spi_data *data) | |||
756 | 772 | ||
757 | wait_event_interruptible(data->wait, data->transfer_complete); | 773 | wait_event_interruptible(data->wait, data->transfer_complete); |
758 | 774 | ||
759 | pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); | ||
760 | dev_dbg(&data->master->dev, | ||
761 | "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); | ||
762 | |||
763 | /* clear all interrupts */ | 775 | /* clear all interrupts */ |
764 | pch_spi_writereg(data->master, PCH_SPSR, | 776 | pch_spi_writereg(data->master, PCH_SPSR, |
765 | pch_spi_readreg(data->master, PCH_SPSR)); | 777 | pch_spi_readreg(data->master, PCH_SPSR)); |
@@ -815,10 +827,11 @@ static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw) | |||
815 | } | 827 | } |
816 | } | 828 | } |
817 | 829 | ||
818 | static void pch_spi_start_transfer(struct pch_spi_data *data) | 830 | static int pch_spi_start_transfer(struct pch_spi_data *data) |
819 | { | 831 | { |
820 | struct pch_spi_dma_ctrl *dma; | 832 | struct pch_spi_dma_ctrl *dma; |
821 | unsigned long flags; | 833 | unsigned long flags; |
834 | int rtn; | ||
822 | 835 | ||
823 | dma = &data->dma; | 836 | dma = &data->dma; |
824 | 837 | ||
@@ -833,19 +846,23 @@ static void pch_spi_start_transfer(struct pch_spi_data *data) | |||
833 | initiating the transfer. */ | 846 | initiating the transfer. */ |
834 | dev_dbg(&data->master->dev, | 847 | dev_dbg(&data->master->dev, |
835 | "%s:waiting for transfer to get over\n", __func__); | 848 | "%s:waiting for transfer to get over\n", __func__); |
836 | wait_event_interruptible(data->wait, data->transfer_complete); | 849 | rtn = wait_event_interruptible_timeout(data->wait, |
850 | data->transfer_complete, | ||
851 | msecs_to_jiffies(2 * HZ)); | ||
837 | 852 | ||
838 | dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent, | 853 | dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent, |
839 | DMA_FROM_DEVICE); | 854 | DMA_FROM_DEVICE); |
855 | |||
856 | dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent, | ||
857 | DMA_FROM_DEVICE); | ||
858 | memset(data->dma.tx_buf_virt, 0, PAGE_SIZE); | ||
859 | |||
840 | async_tx_ack(dma->desc_rx); | 860 | async_tx_ack(dma->desc_rx); |
841 | async_tx_ack(dma->desc_tx); | 861 | async_tx_ack(dma->desc_tx); |
842 | kfree(dma->sg_tx_p); | 862 | kfree(dma->sg_tx_p); |
843 | kfree(dma->sg_rx_p); | 863 | kfree(dma->sg_rx_p); |
844 | 864 | ||
845 | spin_lock_irqsave(&data->lock, flags); | 865 | spin_lock_irqsave(&data->lock, flags); |
846 | pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); | ||
847 | dev_dbg(&data->master->dev, | ||
848 | "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); | ||
849 | 866 | ||
850 | /* clear fifo threshold, disable interrupts, disable SPI transfer */ | 867 | /* clear fifo threshold, disable interrupts, disable SPI transfer */ |
851 | pch_spi_setclr_reg(data->master, PCH_SPCR, 0, | 868 | pch_spi_setclr_reg(data->master, PCH_SPCR, 0, |
@@ -858,6 +875,8 @@ static void pch_spi_start_transfer(struct pch_spi_data *data) | |||
858 | pch_spi_clear_fifo(data->master); | 875 | pch_spi_clear_fifo(data->master); |
859 | 876 | ||
860 | spin_unlock_irqrestore(&data->lock, flags); | 877 | spin_unlock_irqrestore(&data->lock, flags); |
878 | |||
879 | return rtn; | ||
861 | } | 880 | } |
862 | 881 | ||
863 | static void pch_dma_rx_complete(void *arg) | 882 | static void pch_dma_rx_complete(void *arg) |
@@ -1023,8 +1042,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) | |||
1023 | /* set receive fifo threshold and transmit fifo threshold */ | 1042 | /* set receive fifo threshold and transmit fifo threshold */ |
1024 | pch_spi_setclr_reg(data->master, PCH_SPCR, | 1043 | pch_spi_setclr_reg(data->master, PCH_SPCR, |
1025 | ((size - 1) << SPCR_RFIC_FIELD) | | 1044 | ((size - 1) << SPCR_RFIC_FIELD) | |
1026 | ((PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE) << | 1045 | (PCH_TX_THOLD << SPCR_TFIC_FIELD), |
1027 | SPCR_TFIC_FIELD), | ||
1028 | MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS); | 1046 | MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS); |
1029 | 1047 | ||
1030 | spin_unlock_irqrestore(&data->lock, flags); | 1048 | spin_unlock_irqrestore(&data->lock, flags); |
@@ -1035,13 +1053,20 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) | |||
1035 | /* offset, length setting */ | 1053 | /* offset, length setting */ |
1036 | sg = dma->sg_rx_p; | 1054 | sg = dma->sg_rx_p; |
1037 | for (i = 0; i < num; i++, sg++) { | 1055 | for (i = 0; i < num; i++, sg++) { |
1038 | if (i == 0) { | 1056 | if (i == (num - 2)) { |
1039 | sg->offset = 0; | 1057 | sg->offset = size * i; |
1058 | sg->offset = sg->offset * (*bpw / 8); | ||
1040 | sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem, | 1059 | sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem, |
1041 | sg->offset); | 1060 | sg->offset); |
1042 | sg_dma_len(sg) = rem; | 1061 | sg_dma_len(sg) = rem; |
1062 | } else if (i == (num - 1)) { | ||
1063 | sg->offset = size * (i - 1) + rem; | ||
1064 | sg->offset = sg->offset * (*bpw / 8); | ||
1065 | sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, | ||
1066 | sg->offset); | ||
1067 | sg_dma_len(sg) = size; | ||
1043 | } else { | 1068 | } else { |
1044 | sg->offset = rem + size * (i - 1); | 1069 | sg->offset = size * i; |
1045 | sg->offset = sg->offset * (*bpw / 8); | 1070 | sg->offset = sg->offset * (*bpw / 8); |
1046 | sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, | 1071 | sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, |
1047 | sg->offset); | 1072 | sg->offset); |
@@ -1065,6 +1090,16 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) | |||
1065 | dma->desc_rx = desc_rx; | 1090 | dma->desc_rx = desc_rx; |
1066 | 1091 | ||
1067 | /* TX */ | 1092 | /* TX */ |
1093 | if (data->bpw_len > PCH_DMA_TRANS_SIZE) { | ||
1094 | num = data->bpw_len / PCH_DMA_TRANS_SIZE; | ||
1095 | size = PCH_DMA_TRANS_SIZE; | ||
1096 | rem = 16; | ||
1097 | } else { | ||
1098 | num = 1; | ||
1099 | size = data->bpw_len; | ||
1100 | rem = data->bpw_len; | ||
1101 | } | ||
1102 | |||
1068 | dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC); | 1103 | dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC); |
1069 | sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */ | 1104 | sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */ |
1070 | /* offset, length setting */ | 1105 | /* offset, length setting */ |
@@ -1162,6 +1197,7 @@ static void pch_spi_process_messages(struct work_struct *pwork) | |||
1162 | if (data->use_dma) | 1197 | if (data->use_dma) |
1163 | pch_spi_request_dma(data, | 1198 | pch_spi_request_dma(data, |
1164 | data->current_msg->spi->bits_per_word); | 1199 | data->current_msg->spi->bits_per_word); |
1200 | pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); | ||
1165 | do { | 1201 | do { |
1166 | /* If we are already processing a message get the next | 1202 | /* If we are already processing a message get the next |
1167 | transfer structure from the message otherwise retrieve | 1203 | transfer structure from the message otherwise retrieve |
@@ -1184,7 +1220,8 @@ static void pch_spi_process_messages(struct work_struct *pwork) | |||
1184 | 1220 | ||
1185 | if (data->use_dma) { | 1221 | if (data->use_dma) { |
1186 | pch_spi_handle_dma(data, &bpw); | 1222 | pch_spi_handle_dma(data, &bpw); |
1187 | pch_spi_start_transfer(data); | 1223 | if (!pch_spi_start_transfer(data)) |
1224 | goto out; | ||
1188 | pch_spi_copy_rx_data_for_dma(data, bpw); | 1225 | pch_spi_copy_rx_data_for_dma(data, bpw); |
1189 | } else { | 1226 | } else { |
1190 | pch_spi_set_tx(data, &bpw); | 1227 | pch_spi_set_tx(data, &bpw); |
@@ -1222,6 +1259,8 @@ static void pch_spi_process_messages(struct work_struct *pwork) | |||
1222 | 1259 | ||
1223 | } while (data->cur_trans != NULL); | 1260 | } while (data->cur_trans != NULL); |
1224 | 1261 | ||
1262 | out: | ||
1263 | pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH); | ||
1225 | if (data->use_dma) | 1264 | if (data->use_dma) |
1226 | pch_spi_release_dma(data); | 1265 | pch_spi_release_dma(data); |
1227 | } | 1266 | } |
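Among the spi-topcliff-pch changes above, pch_spi_start_transfer() now waits for DMA completion with wait_event_interruptible_timeout() and returns the result, so the caller can abort the message (and deassert SSN via the out label) instead of touching buffers that never finished. The sketch below models only that timeout-guarded wait, using pthread condition variables as a userspace analogue of the kernel wait queue; the names are stand-ins.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool transfer_complete;

/* Wait for the completion flag, but give up after 'secs' seconds.
 * Returns non-zero on success and 0 on timeout, mirroring how the driver
 * now interprets wait_event_interruptible_timeout(). */
static int wait_for_transfer(int secs)
{
    struct timespec deadline;
    int rc = 1;

    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec += secs;

    pthread_mutex_lock(&lock);
    while (!transfer_complete) {
        if (pthread_cond_timedwait(&cond, &lock, &deadline) == ETIMEDOUT) {
            rc = 0;
            break;
        }
    }
    pthread_mutex_unlock(&lock);
    return rc;
}

int main(void)
{
    /* Nothing ever sets transfer_complete here, so this times out,
     * like a DMA transfer that never raises its completion interrupt. */
    printf("wait_for_transfer(1) -> %d\n", wait_for_transfer(1));
    return 0;
}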
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c index e0c2807b0970..181fa8158a8b 100644 --- a/drivers/zorro/zorro.c +++ b/drivers/zorro/zorro.c | |||
@@ -148,10 +148,10 @@ static int __init amiga_zorro_probe(struct platform_device *pdev) | |||
148 | } | 148 | } |
149 | platform_set_drvdata(pdev, bus); | 149 | platform_set_drvdata(pdev, bus); |
150 | 150 | ||
151 | /* Register all devices */ | ||
152 | pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n", | 151 | pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n", |
153 | zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s"); | 152 | zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s"); |
154 | 153 | ||
154 | /* First identify all devices ... */ | ||
155 | for (i = 0; i < zorro_num_autocon; i++) { | 155 | for (i = 0; i < zorro_num_autocon; i++) { |
156 | z = &zorro_autocon[i]; | 156 | z = &zorro_autocon[i]; |
157 | z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8); | 157 | z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8); |
@@ -172,6 +172,11 @@ static int __init amiga_zorro_probe(struct platform_device *pdev) | |||
172 | dev_set_name(&z->dev, "%02x", i); | 172 | dev_set_name(&z->dev, "%02x", i); |
173 | z->dev.parent = &bus->dev; | 173 | z->dev.parent = &bus->dev; |
174 | z->dev.bus = &zorro_bus_type; | 174 | z->dev.bus = &zorro_bus_type; |
175 | } | ||
176 | |||
177 | /* ... then register them */ | ||
178 | for (i = 0; i < zorro_num_autocon; i++) { | ||
179 | z = &zorro_autocon[i]; | ||
175 | error = device_register(&z->dev); | 180 | error = device_register(&z->dev); |
176 | if (error) { | 181 | if (error) { |
177 | dev_err(&bus->dev, "Error registering device %s\n", | 182 | dev_err(&bus->dev, "Error registering device %s\n", |
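The zorro.c hunk above splits probing into two passes: first every autocon entry is identified and its device fields filled in, and only then are the devices registered, so registration-time callbacks never see half-initialized siblings. A compact userspace sketch of that two-pass shape, with an invented zdev type standing in for struct zorro_dev:

#include <stdio.h>

#define NUM_DEV 3

struct zdev { int id; char name[8]; int registered; };

static struct zdev devices[NUM_DEV];

static void register_device(struct zdev *z)
{
    /* In the driver this may probe matching drivers immediately, which can
     * look at other autocon entries - hence the two-pass split. */
    z->registered = 1;
    printf("registered %s (id=%06x)\n", z->name, z->id);
}

int main(void)
{
    int i;

    /* First identify all devices ... */
    for (i = 0; i < NUM_DEV; i++) {
        devices[i].id = 0x1000 + i;
        snprintf(devices[i].name, sizeof(devices[i].name), "%02x", (unsigned)i);
    }

    /* ... then register them, once every entry is fully set up. */
    for (i = 0; i < NUM_DEV; i++)
        register_device(&devices[i]);

    return 0;
}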