Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpi_pad.c                             |   7
-rw-r--r--  drivers/acpi/apei/apei-base.c                       |  17
-rw-r--r--  drivers/acpi/apei/apei-internal.h                   |   9
-rw-r--r--  drivers/acpi/apei/ghes.c                            |   6
-rw-r--r--  drivers/acpi/processor_idle.c                       |  32
-rw-r--r--  drivers/acpi/sysfs.c                                |   4
-rw-r--r--  drivers/acpi/video.c                                |   2
-rw-r--r--  drivers/base/power/main.c                           |   6
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c                    |  11
-rw-r--r--  drivers/block/drbd/drbd_req.c                       |  66
-rw-r--r--  drivers/block/floppy.c                              |   1
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c                   | 166
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.h                   |   5
-rw-r--r--  drivers/block/umem.c                                |  40
-rw-r--r--  drivers/block/xen-blkback/common.h                  |   2
-rw-r--r--  drivers/block/xen-blkfront.c                        |  58
-rw-r--r--  drivers/clk/clk.c                                   |  28
-rw-r--r--  drivers/gpu/drm/drm_edid.c                          |  27
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c                     |  37
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c                |  13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c                 |  10
-rw-r--r--  drivers/gpu/drm/radeon/si.c                         |   4
-rw-r--r--  drivers/hwmon/coretemp.c                            |   4
-rw-r--r--  drivers/md/dm-thin.c                                |   7
-rw-r--r--  drivers/md/md.c                                     |   8
-rw-r--r--  drivers/md/multipath.c                              |   3
-rw-r--r--  drivers/md/persistent-data/dm-space-map-checker.c   |  54
-rw-r--r--  drivers/md/persistent-data/dm-space-map-disk.c      |  11
-rw-r--r--  drivers/md/persistent-data/dm-transaction-manager.c |  11
-rw-r--r--  drivers/md/raid1.c                                  |  13
-rw-r--r--  drivers/md/raid10.c                                 |  26
-rw-r--r--  drivers/md/raid5.c                                  |  67
-rw-r--r--  drivers/net/ethernet/intel/e1000e/defines.h         |   1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c          |  75
-rw-r--r--  drivers/net/ethernet/intel/igbvf/ethtool.c          |  29
-rw-r--r--  drivers/oprofile/oprofile_perf.c                    |   2
36 files changed, 585 insertions, 277 deletions
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index a43fa1a57d57..1502c50273b5 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -36,6 +36,7 @@
 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
 static DEFINE_MUTEX(isolated_cpus_lock);
+static DEFINE_MUTEX(round_robin_lock);

 static unsigned long power_saving_mwait_eax;

@@ -107,7 +108,7 @@ static void round_robin_cpu(unsigned int tsk_index)
 	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
 		return;

-	mutex_lock(&isolated_cpus_lock);
+	mutex_lock(&round_robin_lock);
 	cpumask_clear(tmp);
 	for_each_cpu(cpu, pad_busy_cpus)
 		cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
@@ -116,7 +117,7 @@ static void round_robin_cpu(unsigned int tsk_index)
 	if (cpumask_empty(tmp))
 		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
 	if (cpumask_empty(tmp)) {
-		mutex_unlock(&isolated_cpus_lock);
+		mutex_unlock(&round_robin_lock);
 		return;
 	}
 	for_each_cpu(cpu, tmp) {
@@ -131,7 +132,7 @@ static void round_robin_cpu(unsigned int tsk_index)
 	tsk_in_cpu[tsk_index] = preferred_cpu;
 	cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
 	cpu_weight[preferred_cpu]++;
-	mutex_unlock(&isolated_cpus_lock);
+	mutex_unlock(&round_robin_lock);

 	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
 }
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 5577762daee1..6686b1eaf13e 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -243,7 +243,7 @@ static int pre_map_gar_callback(struct apei_exec_context *ctx,
 	u8 ins = entry->instruction;

 	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
-		return acpi_os_map_generic_address(&entry->register_region);
+		return apei_map_generic_address(&entry->register_region);

 	return 0;
 }
@@ -276,7 +276,7 @@ static int post_unmap_gar_callback(struct apei_exec_context *ctx,
 	u8 ins = entry->instruction;

 	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
-		acpi_os_unmap_generic_address(&entry->register_region);
+		apei_unmap_generic_address(&entry->register_region);

 	return 0;
 }
@@ -606,6 +606,19 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
 	return 0;
 }

+int apei_map_generic_address(struct acpi_generic_address *reg)
+{
+	int rc;
+	u32 access_bit_width;
+	u64 address;
+
+	rc = apei_check_gar(reg, &address, &access_bit_width);
+	if (rc)
+		return rc;
+	return acpi_os_map_generic_address(reg);
+}
+EXPORT_SYMBOL_GPL(apei_map_generic_address);
+
 /* read GAR in interrupt (including NMI) or process context */
 int apei_read(u64 *val, struct acpi_generic_address *reg)
 {
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index cca240a33038..f220d642136e 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -7,6 +7,8 @@
 #define APEI_INTERNAL_H

 #include <linux/cper.h>
+#include <linux/acpi.h>
+#include <linux/acpi_io.h>

 struct apei_exec_context;

@@ -68,6 +70,13 @@ static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 actio
 /* IP has been set in instruction function */
 #define APEI_EXEC_SET_IP 1

+int apei_map_generic_address(struct acpi_generic_address *reg);
+
+static inline void apei_unmap_generic_address(struct acpi_generic_address *reg)
+{
+	acpi_os_unmap_generic_address(reg);
+}
+
 int apei_read(u64 *val, struct acpi_generic_address *reg);
 int apei_write(u64 val, struct acpi_generic_address *reg);

diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 9b3cac0abecc..1599566ed1fe 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -301,7 +301,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
 	if (!ghes)
 		return ERR_PTR(-ENOMEM);
 	ghes->generic = generic;
-	rc = acpi_os_map_generic_address(&generic->error_status_address);
+	rc = apei_map_generic_address(&generic->error_status_address);
 	if (rc)
 		goto err_free;
 	error_block_length = generic->error_block_length;
@@ -321,7 +321,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
 	return ghes;

 err_unmap:
-	acpi_os_unmap_generic_address(&generic->error_status_address);
+	apei_unmap_generic_address(&generic->error_status_address);
 err_free:
 	kfree(ghes);
 	return ERR_PTR(rc);
@@ -330,7 +330,7 @@ err_free:
 static void ghes_fini(struct ghes *ghes)
 {
 	kfree(ghes->estatus);
-	acpi_os_unmap_generic_address(&ghes->generic->error_status_address);
+	apei_unmap_generic_address(&ghes->generic->error_status_address);
 }

 enum {
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f3decb30223f..47a8caa89dbe 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -224,6 +224,7 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 /*
  * Suspend / resume control
  */
+static int acpi_idle_suspend;
 static u32 saved_bm_rld;

 static void acpi_idle_bm_rld_save(void)
@@ -242,13 +243,21 @@ static void acpi_idle_bm_rld_restore(void)

 int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
 {
+	if (acpi_idle_suspend == 1)
+		return 0;
+
 	acpi_idle_bm_rld_save();
+	acpi_idle_suspend = 1;
 	return 0;
 }

 int acpi_processor_resume(struct acpi_device * device)
 {
+	if (acpi_idle_suspend == 0)
+		return 0;
+
 	acpi_idle_bm_rld_restore();
+	acpi_idle_suspend = 0;
 	return 0;
 }

@@ -754,6 +763,12 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,

 	local_irq_disable();

+	if (acpi_idle_suspend) {
+		local_irq_enable();
+		cpu_relax();
+		return -EBUSY;
+	}
+
 	lapic_timer_state_broadcast(pr, cx, 1);
 	kt1 = ktime_get_real();
 	acpi_idle_do_entry(cx);
@@ -823,6 +838,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,

 	local_irq_disable();

+	if (acpi_idle_suspend) {
+		local_irq_enable();
+		cpu_relax();
+		return -EBUSY;
+	}
+
 	if (cx->entry_method != ACPI_CSTATE_FFH) {
 		current_thread_info()->status &= ~TS_POLLING;
 		/*
@@ -907,14 +928,21 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 				drv, drv->safe_state_index);
 		} else {
 			local_irq_disable();
-			acpi_safe_halt();
+			if (!acpi_idle_suspend)
+				acpi_safe_halt();
 			local_irq_enable();
-			return -EINVAL;
+			return -EBUSY;
 		}
 	}

 	local_irq_disable();

+	if (acpi_idle_suspend) {
+		local_irq_enable();
+		cpu_relax();
+		return -EBUSY;
+	}
+
 	if (cx->entry_method != ACPI_CSTATE_FFH) {
 		current_thread_info()->status &= ~TS_POLLING;
 		/*
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 9f66181c814e..240a24400976 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
 {
 	int result = 0;

-	if (!strncmp(val, "enable", strlen("enable") - 1)) {
+	if (!strncmp(val, "enable", strlen("enable"))) {
 		result = acpi_debug_trace(trace_method_name, trace_debug_level,
 					  trace_debug_layer, 0);
 		if (result)
@@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
 		goto exit;
 	}

-	if (!strncmp(val, "disable", strlen("disable") - 1)) {
+	if (!strncmp(val, "disable", strlen("disable"))) {
 		int name = 0;
 		result = acpi_debug_trace((char *)&name, trace_debug_level,
 					  trace_debug_layer, 0);
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index a576575617d7..1e0a9e17c31d 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -558,6 +558,8 @@ acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
 	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
 	struct acpi_object_list args = { 1, &arg0 };

+	if (!video->cap._DOS)
+		return 0;

 	if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1)
 		return -EINVAL;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e0fb5b0435a3..9cb845e49334 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1031,7 +1031,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	dpm_wait_for_children(dev, async);

 	if (async_error)
-		return 0;
+		goto Complete;

 	pm_runtime_get_noresume(dev);
 	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
@@ -1040,7 +1040,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	if (pm_wakeup_pending()) {
 		pm_runtime_put_sync(dev);
 		async_error = -EBUSY;
-		return 0;
+		goto Complete;
 	}

 	device_lock(dev);
@@ -1097,6 +1097,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	}

 	device_unlock(dev);
+
+ Complete:
 	complete_all(&dev->power.completion);

 	if (error) {
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index b5c5ff53cb57..fcb956bb4b4c 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1475,10 +1475,17 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
 		first_word = 0;
 		spin_lock_irq(&b->bm_lock);
 	}
-
 	/* last page (respectively only page, for first page == last page) */
 	last_word = MLPP(el >> LN2_BPL);
-	bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
+
+	/* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
+	 * ==> e = 32767, el = 32768, last_page = 2,
+	 * and now last_word = 0.
+	 * We do not want to touch last_page in this case,
+	 * as we did not allocate it, it is not present in bitmap->bm_pages.
+	 */
+	if (last_word)
+		bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);

 	/* possibly trailing bits.
 	 * example: (e & 63) == 63, el will be e+1.
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 9c5c84946b05..8e93a6ac9bb6 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -472,12 +472,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		req->rq_state |= RQ_LOCAL_COMPLETED;
 		req->rq_state &= ~RQ_LOCAL_PENDING;

-		D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+		if (req->rq_state & RQ_LOCAL_ABORTED) {
+			_req_may_be_done(req, m);
+			break;
+		}

 		__drbd_chk_io_error(mdev, false);

 	goto_queue_for_net_read:

+		D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+
 		/* no point in retrying if there is no good remote data,
 		 * or we have no connection. */
 		if (mdev->state.pdsk != D_UP_TO_DATE) {
@@ -765,6 +770,40 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s
 	return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
 }

+static void maybe_pull_ahead(struct drbd_conf *mdev)
+{
+	int congested = 0;
+
+	/* If I don't even have good local storage, we can not reasonably try
+	 * to pull ahead of the peer. We also need the local reference to make
+	 * sure mdev->act_log is there.
+	 * Note: caller has to make sure that net_conf is there.
+	 */
+	if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
+		return;
+
+	if (mdev->net_conf->cong_fill &&
+	    atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
+		dev_info(DEV, "Congestion-fill threshold reached\n");
+		congested = 1;
+	}
+
+	if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
+		dev_info(DEV, "Congestion-extents threshold reached\n");
+		congested = 1;
+	}
+
+	if (congested) {
+		queue_barrier(mdev); /* last barrier, after mirrored writes */
+
+		if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
+			_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
+		else  /*mdev->net_conf->on_congestion == OC_DISCONNECT */
+			_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
+	}
+	put_ldev(mdev);
+}
+
 static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
 {
 	const int rw = bio_rw(bio);
@@ -972,29 +1011,8 @@ allocate_barrier:
 		_req_mod(req, queue_for_send_oos);

 	if (remote &&
-	    mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
-		int congested = 0;
-
-		if (mdev->net_conf->cong_fill &&
-		    atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
-			dev_info(DEV, "Congestion-fill threshold reached\n");
-			congested = 1;
-		}
-
-		if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
-			dev_info(DEV, "Congestion-extents threshold reached\n");
-			congested = 1;
-		}
-
-		if (congested) {
-			queue_barrier(mdev); /* last barrier, after mirrored writes */
-
-			if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
-				_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
-			else  /*mdev->net_conf->on_congestion == OC_DISCONNECT */
-				_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
-		}
-	}
+	    mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
+		maybe_pull_ahead(mdev);

 	spin_unlock_irq(&mdev->req_lock);
 	kfree(b); /* if someone else has beaten us to it... */
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index cce7df367b79..553f43a90953 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -671,6 +671,7 @@ static void __reschedule_timeout(int drive, const char *message)

 	if (drive == current_reqD)
 		drive = current_drive;
+	__cancel_delayed_work(&fd_timeout);

 	if (drive < 0 || drive >= N_DRIVE) {
 		delay = 20UL * HZ;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 264bc77dcb91..a8fddeb3d638 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -37,6 +37,7 @@
 #include <linux/kthread.h>
 #include <../drivers/ata/ahci.h>
 #include <linux/export.h>
+#include <linux/debugfs.h>
 #include "mtip32xx.h"

 #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
@@ -85,6 +86,7 @@ static int instance;
  * allocated in mtip_init().
  */
 static int mtip_major;
+static struct dentry *dfs_parent;

 static DEFINE_SPINLOCK(rssd_index_lock);
 static DEFINE_IDA(rssd_index_ida);
@@ -2546,7 +2548,7 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
 }

 /*
- * Sysfs register/status dump.
+ * Sysfs status dump.
  *
  * @dev  Pointer to the device structure, passed by the kernrel.
  * @attr Pointer to the device_attribute structure passed by the kernel.
@@ -2555,45 +2557,68 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
  * return value
  *	The size, in bytes, of the data copied into buf.
  */
-static ssize_t mtip_hw_show_registers(struct device *dev,
+static ssize_t mtip_hw_show_status(struct device *dev,
 				struct device_attribute *attr,
 				char *buf)
 {
-	u32 group_allocated;
 	struct driver_data *dd = dev_to_disk(dev)->private_data;
 	int size = 0;
+
+	if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
+		size += sprintf(buf, "%s", "thermal_shutdown\n");
+	else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
+		size += sprintf(buf, "%s", "write_protect\n");
+	else
+		size += sprintf(buf, "%s", "online\n");
+
+	return size;
+}
+
+static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
+
+static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
+				  size_t len, loff_t *offset)
+{
+	struct driver_data *dd = (struct driver_data *)f->private_data;
+	char buf[MTIP_DFS_MAX_BUF_SIZE];
+	u32 group_allocated;
+	int size = *offset;
 	int n;

-	size += sprintf(&buf[size], "Hardware\n--------\n");
-	size += sprintf(&buf[size], "S ACTive : [ 0x");
+	if (!len || size)
+		return 0;
+
+	if (size < 0)
+		return -EINVAL;
+
+	size += sprintf(&buf[size], "H/ S ACTive : [ 0x");

 	for (n = dd->slot_groups-1; n >= 0; n--)
 		size += sprintf(&buf[size], "%08X ",
 					 readl(dd->port->s_active[n]));

 	size += sprintf(&buf[size], "]\n");
-	size += sprintf(&buf[size], "Command Issue : [ 0x");
+	size += sprintf(&buf[size], "H/ Command Issue : [ 0x");

 	for (n = dd->slot_groups-1; n >= 0; n--)
 		size += sprintf(&buf[size], "%08X ",
 					readl(dd->port->cmd_issue[n]));

 	size += sprintf(&buf[size], "]\n");
-	size += sprintf(&buf[size], "Completed : [ 0x");
+	size += sprintf(&buf[size], "H/ Completed : [ 0x");

 	for (n = dd->slot_groups-1; n >= 0; n--)
 		size += sprintf(&buf[size], "%08X ",
 				readl(dd->port->completed[n]));

 	size += sprintf(&buf[size], "]\n");
-	size += sprintf(&buf[size], "PORT IRQ STAT : [ 0x%08X ]\n",
+	size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n",
 				readl(dd->port->mmio + PORT_IRQ_STAT));
-	size += sprintf(&buf[size], "HOST IRQ STAT : [ 0x%08X ]\n",
+	size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n",
 				readl(dd->mmio + HOST_IRQ_STAT));
 	size += sprintf(&buf[size], "\n");

-	size += sprintf(&buf[size], "Local\n-----\n");
-	size += sprintf(&buf[size], "Allocated : [ 0x");
+	size += sprintf(&buf[size], "L/ Allocated : [ 0x");

 	for (n = dd->slot_groups-1; n >= 0; n--) {
 		if (sizeof(long) > sizeof(u32))
@@ -2605,7 +2630,7 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
 	}
 	size += sprintf(&buf[size], "]\n");

-	size += sprintf(&buf[size], "Commands in Q: [ 0x");
+	size += sprintf(&buf[size], "L/ Commands in Q : [ 0x");

 	for (n = dd->slot_groups-1; n >= 0; n--) {
 		if (sizeof(long) > sizeof(u32))
@@ -2617,44 +2642,53 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
 	}
 	size += sprintf(&buf[size], "]\n");

-	return size;
+	*offset = size <= len ? size : len;
+	size = copy_to_user(ubuf, buf, *offset);
+	if (size)
+		return -EFAULT;
+
+	return *offset;
 }

-static ssize_t mtip_hw_show_status(struct device *dev,
-				struct device_attribute *attr,
-				char *buf)
+static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
+				  size_t len, loff_t *offset)
 {
-	struct driver_data *dd = dev_to_disk(dev)->private_data;
-	int size = 0;
+	struct driver_data *dd = (struct driver_data *)f->private_data;
+	char buf[MTIP_DFS_MAX_BUF_SIZE];
+	int size = *offset;

-	if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
-		size += sprintf(buf, "%s", "thermal_shutdown\n");
-	else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
-		size += sprintf(buf, "%s", "write_protect\n");
-	else
-		size += sprintf(buf, "%s", "online\n");
-
-	return size;
-}
+	if (!len || size)
+		return 0;

-static ssize_t mtip_hw_show_flags(struct device *dev,
-				struct device_attribute *attr,
-				char *buf)
-{
-	struct driver_data *dd = dev_to_disk(dev)->private_data;
-	int size = 0;
+	if (size < 0)
+		return -EINVAL;

-	size += sprintf(&buf[size], "Flag in port struct : [ %08lX ]\n",
+	size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
 			dd->port->flags);
-	size += sprintf(&buf[size], "Flag in dd struct : [ %08lX ]\n",
+	size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n",
 			dd->dd_flag);

-	return size;
+	*offset = size <= len ? size : len;
+	size = copy_to_user(ubuf, buf, *offset);
+	if (size)
+		return -EFAULT;
+
+	return *offset;
 }

-static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
-static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
-static DEVICE_ATTR(flags, S_IRUGO, mtip_hw_show_flags, NULL);
+static const struct file_operations mtip_regs_fops = {
+	.owner  = THIS_MODULE,
+	.open   = simple_open,
+	.read   = mtip_hw_read_registers,
+	.llseek = no_llseek,
+};
+
+static const struct file_operations mtip_flags_fops = {
+	.owner  = THIS_MODULE,
+	.open   = simple_open,
+	.read   = mtip_hw_read_flags,
+	.llseek = no_llseek,
+};

 /*
  * Create the sysfs related attributes.
@@ -2671,15 +2705,9 @@ static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
 	if (!kobj || !dd)
 		return -EINVAL;

-	if (sysfs_create_file(kobj, &dev_attr_registers.attr))
-		dev_warn(&dd->pdev->dev,
-			"Error creating 'registers' sysfs entry\n");
 	if (sysfs_create_file(kobj, &dev_attr_status.attr))
 		dev_warn(&dd->pdev->dev,
 			"Error creating 'status' sysfs entry\n");
-	if (sysfs_create_file(kobj, &dev_attr_flags.attr))
-		dev_warn(&dd->pdev->dev,
-			"Error creating 'flags' sysfs entry\n");
 	return 0;
 }

@@ -2698,13 +2726,39 @@ static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
 	if (!kobj || !dd)
 		return -EINVAL;

-	sysfs_remove_file(kobj, &dev_attr_registers.attr);
 	sysfs_remove_file(kobj, &dev_attr_status.attr);
-	sysfs_remove_file(kobj, &dev_attr_flags.attr);

 	return 0;
 }

+static int mtip_hw_debugfs_init(struct driver_data *dd)
+{
+	if (!dfs_parent)
+		return -1;
+
+	dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
+	if (IS_ERR_OR_NULL(dd->dfs_node)) {
+		dev_warn(&dd->pdev->dev,
+			"Error creating node %s under debugfs\n",
+			dd->disk->disk_name);
+		dd->dfs_node = NULL;
+		return -1;
+	}
+
+	debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd,
+			    &mtip_flags_fops);
+	debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd,
+			    &mtip_regs_fops);
+
+	return 0;
+}
+
+static void mtip_hw_debugfs_exit(struct driver_data *dd)
+{
+	debugfs_remove_recursive(dd->dfs_node);
+}
+
+
 /*
  * Perform any init/resume time hardware setup
  *
@@ -3730,6 +3784,7 @@ skip_create_disk:
 		mtip_hw_sysfs_init(dd, kobj);
 		kobject_put(kobj);
 	}
+	mtip_hw_debugfs_init(dd);

 	if (dd->mtip_svc_handler) {
 		set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
@@ -3755,6 +3810,8 @@ start_service_thread:
 	return rv;

 kthread_run_error:
+	mtip_hw_debugfs_exit(dd);
+
 	/* Delete our gendisk. This also removes the device from /dev */
 	del_gendisk(dd->disk);

@@ -3805,6 +3862,7 @@ static int mtip_block_remove(struct driver_data *dd)
 			kobject_put(kobj);
 		}
 	}
+	mtip_hw_debugfs_exit(dd);

 	/*
 	 * Delete our gendisk structure. This also removes the device
@@ -4152,10 +4210,20 @@ static int __init mtip_init(void)
 	}
 	mtip_major = error;

+	if (!dfs_parent) {
+		dfs_parent = debugfs_create_dir("rssd", NULL);
+		if (IS_ERR_OR_NULL(dfs_parent)) {
+			printk(KERN_WARNING "Error creating debugfs parent\n");
+			dfs_parent = NULL;
+		}
+	}
+
 	/* Register our PCI operations. */
 	error = pci_register_driver(&mtip_pci_driver);
-	if (error)
+	if (error) {
+		debugfs_remove(dfs_parent);
 		unregister_blkdev(mtip_major, MTIP_DRV_NAME);
+	}

 	return error;
 }
@@ -4172,6 +4240,8 @@ static int __init mtip_init(void)
  */
 static void __exit mtip_exit(void)
 {
+	debugfs_remove_recursive(dfs_parent);
+
 	/* Release the allocated major block device number. */
 	unregister_blkdev(mtip_major, MTIP_DRV_NAME);

diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index b2c88da26b2a..f51fc23d17bb 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -26,7 +26,6 @@
 #include <linux/ata.h>
 #include <linux/interrupt.h>
 #include <linux/genhd.h>
-#include <linux/version.h>

 /* Offset of Subsystem Device ID in pci confoguration space */
 #define PCI_SUBSYSTEM_DEVICEID 0x2E
@@ -111,6 +110,8 @@
 	#define dbg_printk(format, arg...)
 #endif

+#define MTIP_DFS_MAX_BUF_SIZE 1024
+
 #define __force_bit2int (unsigned int __force)

 enum {
@@ -447,6 +448,8 @@ struct driver_data {
 	unsigned long dd_flag; /* NOTE: use atomic bit operations on this */

 	struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
+
+	struct dentry *dfs_node;
 };

 #endif
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index aa2712060bfb..9a72277a31df 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -513,6 +513,44 @@ static void process_page(unsigned long data)
 	}
 }

+struct mm_plug_cb {
+	struct blk_plug_cb cb;
+	struct cardinfo *card;
+};
+
+static void mm_unplug(struct blk_plug_cb *cb)
+{
+	struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb);
+
+	spin_lock_irq(&mmcb->card->lock);
+	activate(mmcb->card);
+	spin_unlock_irq(&mmcb->card->lock);
+	kfree(mmcb);
+}
+
+static int mm_check_plugged(struct cardinfo *card)
+{
+	struct blk_plug *plug = current->plug;
+	struct mm_plug_cb *mmcb;
+
+	if (!plug)
+		return 0;
+
+	list_for_each_entry(mmcb, &plug->cb_list, cb.list) {
+		if (mmcb->cb.callback == mm_unplug && mmcb->card == card)
+			return 1;
+	}
+	/* Not currently on the callback list */
+	mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC);
+	if (!mmcb)
+		return 0;
+
+	mmcb->card = card;
+	mmcb->cb.callback = mm_unplug;
+	list_add(&mmcb->cb.list, &plug->cb_list);
+	return 1;
+}
+
 static void mm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct cardinfo *card = q->queuedata;
@@ -523,6 +561,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
 	*card->biotail = bio;
 	bio->bi_next = NULL;
 	card->biotail = &bio->bi_next;
+	if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+		activate(card);
 	spin_unlock_irq(&card->lock);

 	return;
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 773cf27dc23f..9ad3b5ec1dc1 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -257,6 +257,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
 		break;
 	case BLKIF_OP_DISCARD:
 		dst->u.discard.flag = src->u.discard.flag;
+		dst->u.discard.id = src->u.discard.id;
 		dst->u.discard.sector_number = src->u.discard.sector_number;
 		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
 		break;
@@ -287,6 +288,7 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
 		break;
 	case BLKIF_OP_DISCARD:
 		dst->u.discard.flag = src->u.discard.flag;
+		dst->u.discard.id = src->u.discard.id;
 		dst->u.discard.sector_number = src->u.discard.sector_number;
 		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
 		break;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 60eed4bdd2e4..e4fb3374dcd2 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -141,14 +141,36 @@ static int get_id_from_freelist(struct blkfront_info *info)
 	return free;
 }

-static void add_id_to_freelist(struct blkfront_info *info,
+static int add_id_to_freelist(struct blkfront_info *info,
 			       unsigned long id)
 {
+	if (info->shadow[id].req.u.rw.id != id)
+		return -EINVAL;
+	if (info->shadow[id].request == NULL)
+		return -EINVAL;
 	info->shadow[id].req.u.rw.id = info->shadow_free;
 	info->shadow[id].request = NULL;
 	info->shadow_free = id;
+	return 0;
 }

+static const char *op_name(int op)
+{
+	static const char *const names[] = {
+		[BLKIF_OP_READ] = "read",
+		[BLKIF_OP_WRITE] = "write",
+		[BLKIF_OP_WRITE_BARRIER] = "barrier",
+		[BLKIF_OP_FLUSH_DISKCACHE] = "flush",
+		[BLKIF_OP_DISCARD] = "discard" };
+
+	if (op < 0 || op >= ARRAY_SIZE(names))
+		return "unknown";
+
+	if (!names[op])
+		return "reserved";
+
+	return names[op];
+}
 static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
 {
 	unsigned int end = minor + nr;
@@ -746,20 +768,36 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)

 		bret = RING_GET_RESPONSE(&info->ring, i);
 		id   = bret->id;
+		/*
+		 * The backend has messed up and given us an id that we would
+		 * never have given to it (we stamp it up to BLK_RING_SIZE -
+		 * look in get_id_from_freelist.
+		 */
+		if (id >= BLK_RING_SIZE) {
+			WARN(1, "%s: response to %s has incorrect id (%ld)\n",
+			     info->gd->disk_name, op_name(bret->operation), id);
+			/* We can't safely get the 'struct request' as
+			 * the id is busted. */
+			continue;
+		}
 		req  = info->shadow[id].request;

 		if (bret->operation != BLKIF_OP_DISCARD)
 			blkif_completion(&info->shadow[id]);

-		add_id_to_freelist(info, id);
+		if (add_id_to_freelist(info, id)) {
+			WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
+			     info->gd->disk_name, op_name(bret->operation), id);
+			continue;
+		}

 		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
 		switch (bret->operation) {
 		case BLKIF_OP_DISCARD:
 			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
 				struct request_queue *rq = info->rq;
-				printk(KERN_WARNING "blkfront: %s: discard op failed\n",
-					   info->gd->disk_name);
+				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+					   info->gd->disk_name, op_name(bret->operation));
 				error = -EOPNOTSUPP;
 				info->feature_discard = 0;
 				info->feature_secdiscard = 0;
@@ -771,18 +809,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 		case BLKIF_OP_FLUSH_DISKCACHE:
 		case BLKIF_OP_WRITE_BARRIER:
 			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
-				printk(KERN_WARNING "blkfront: %s: write %s op failed\n",
-				       info->flush_op == BLKIF_OP_WRITE_BARRIER ?
-				       "barrier" : "flush disk cache",
-				       info->gd->disk_name);
+				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+				       info->gd->disk_name, op_name(bret->operation));
 				error = -EOPNOTSUPP;
 			}
 			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
 				     info->shadow[id].req.u.rw.nr_segments == 0)) {
-				printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
-				       info->flush_op == BLKIF_OP_WRITE_BARRIER ?
-				       "barrier" : "flush disk cache",
-				       info->gd->disk_name);
+				printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
+				       info->gd->disk_name, op_name(bret->operation));
 				error = -EOPNOTSUPP;
 			}
 			if (unlikely(error)) {
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index dcbe05616090..9a1eb0cfa95f 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1067,26 +1067,24 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)

 	old_parent = clk->parent;

-	/* find index of new parent clock using cached parent ptrs */
-	if (clk->parents)
-		for (i = 0; i < clk->num_parents; i++)
-			if (clk->parents[i] == parent)
-				break;
-	else
+	if (!clk->parents)
 		clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
 								GFP_KERNEL);

 	/*
-	 * find index of new parent clock using string name comparison
-	 * also try to cache the parent to avoid future calls to __clk_lookup
+	 * find index of new parent clock using cached parent ptrs,
+	 * or if not yet cached, use string name comparison and cache
+	 * them now to avoid future calls to __clk_lookup.
 	 */
-	if (i == clk->num_parents)
-		for (i = 0; i < clk->num_parents; i++)
-			if (!strcmp(clk->parent_names[i], parent->name)) {
-				if (clk->parents)
-					clk->parents[i] = __clk_lookup(parent->name);
-				break;
-			}
+	for (i = 0; i < clk->num_parents; i++) {
+		if (clk->parents && clk->parents[i] == parent)
+			break;
+		else if (!strcmp(clk->parent_names[i], parent->name)) {
+			if (clk->parents)
+				clk->parents[i] = __clk_lookup(parent->name);
+			break;
+		}
+	}

 	if (i == clk->num_parents) {
 		pr_debug("%s: clock %s is not a possible parent of clock %s\n",
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5873e481e5d2..a8743c399e83 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1039,6 +1039,24 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
 	return true;
 }

+static bool valid_inferred_mode(const struct drm_connector *connector,
+				const struct drm_display_mode *mode)
+{
+	struct drm_display_mode *m;
+	bool ok = false;
+
+	list_for_each_entry(m, &connector->probed_modes, head) {
+		if (mode->hdisplay == m->hdisplay &&
+		    mode->vdisplay == m->vdisplay &&
+		    drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
+			return false; /* duplicated */
+		if (mode->hdisplay <= m->hdisplay &&
+		    mode->vdisplay <= m->vdisplay)
+			ok = true;
+	}
+	return ok;
+}
+
 static int
 drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
 			struct detailed_timing *timing)
@@ -1048,7 +1066,8 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
 	struct drm_device *dev = connector->dev;

 	for (i = 0; i < drm_num_dmt_modes; i++) {
-		if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
+		if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
+		    valid_inferred_mode(connector, drm_dmt_modes + i)) {
 			newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
 			if (newmode) {
 				drm_mode_probed_add(connector, newmode);
@@ -1088,7 +1107,8 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
 			return modes;

 		fixup_mode_1366x768(newmode);
-		if (!mode_in_range(newmode, edid, timing)) {
+		if (!mode_in_range(newmode, edid, timing) ||
+		    !valid_inferred_mode(connector, newmode)) {
 			drm_mode_destroy(dev, newmode);
 			continue;
 		}
@@ -1116,7 +1136,8 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
 			return modes;

 		fixup_mode_1366x768(newmode);
-		if (!mode_in_range(newmode, edid, timing)) {
+		if (!mode_in_range(newmode, edid, timing) ||
+		    !valid_inferred_mode(connector, newmode)) {
 			drm_mode_destroy(dev, newmode);
 			continue;
 		}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index f94792626b94..36822b924eb1 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1401,6 +1401,27 @@ i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
 	}
 }

+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+	struct apertures_struct *ap;
+	struct pci_dev *pdev = dev_priv->dev->pdev;
+	bool primary;
+
+	ap = alloc_apertures(1);
+	if (!ap)
+		return;
+
+	ap->ranges[0].base = dev_priv->dev->agp->base;
+	ap->ranges[0].size =
+		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+	primary =
+		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+
+	remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+
+	kfree(ap);
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1446,6 +1467,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto free_priv;
 	}

+	dev_priv->mm.gtt = intel_gtt_get();
+	if (!dev_priv->mm.gtt) {
+		DRM_ERROR("Failed to initialize GTT\n");
+		ret = -ENODEV;
+		goto put_bridge;
+	}
+
+	i915_kick_out_firmware_fb(dev_priv);
+
 	pci_set_master(dev->pdev);

 	/* overlay on gen2 is broken and can't address above 1G */
@@ -1471,13 +1501,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto put_bridge;
 	}

-	dev_priv->mm.gtt = intel_gtt_get();
-	if (!dev_priv->mm.gtt) {
-		DRM_ERROR("Failed to initialize GTT\n");
-		ret = -ENODEV;
-		goto out_rmmap;
-	}
-
 	aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

 	dev_priv->mm.gtt_mapping =
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 59d44937dd9f..84b648a7ddd8 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -289,8 +289,9 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
 	rdev->vm_manager.enabled = false;

 	/* mark first vm as always in use, it's the system one */
+	/* allocate enough for 2 full VM pts */
 	r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
-				      rdev->vm_manager.max_pfn * 8,
+				      rdev->vm_manager.max_pfn * 8 * 2,
 				      RADEON_GEM_DOMAIN_VRAM);
 	if (r) {
 		dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -633,7 +634,15 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 	mutex_init(&vm->mutex);
 	INIT_LIST_HEAD(&vm->list);
 	INIT_LIST_HEAD(&vm->va);
-	vm->last_pfn = 0;
+	/* SI requires equal sized PTs for all VMs, so always set
+	 * last_pfn to max_pfn.  cayman allows variable sized
+	 * pts so we can grow then as needed.  Once we switch
+	 * to two level pts we can unify this again.
+	 */
+	if (rdev->family >= CHIP_TAHITI)
+		vm->last_pfn = rdev->vm_manager.max_pfn;
+	else
+		vm->last_pfn = 0;
 	/* map the ib pool buffer at 0 in virtual address space, set
 	 * read only
 	 */
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index f28bd4b7ef98..21ec9f5653ce 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -292,6 +292,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *filp)
 {
+	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_busy *args = data;
 	struct drm_gem_object *gobj;
 	struct radeon_bo *robj;
@@ -317,13 +318,14 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 		break;
 	}
 	drm_gem_object_unreference_unlocked(gobj);
-	r = radeon_gem_handle_lockup(robj->rdev, r);
+	r = radeon_gem_handle_lockup(rdev, r);
 	return r;
 }

 int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *filp)
 {
+	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_wait_idle *args = data;
 	struct drm_gem_object *gobj;
 	struct radeon_bo *robj;
@@ -336,10 +338,10 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	robj = gem_to_radeon_bo(gobj);
 	r = radeon_bo_wait(robj, NULL, false);
 	/* callback hw specific functions if any */
-	if (robj->rdev->asic->ioctl_wait_idle)
-		robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
+	if (rdev->asic->ioctl_wait_idle)
+		robj->rdev->asic->ioctl_wait_idle(rdev, robj);
 	drm_gem_object_unreference_unlocked(gobj);
-	r = radeon_gem_handle_lockup(robj->rdev, r);
+	r = radeon_gem_handle_lockup(rdev, r);
 	return r;
 }

diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index c7b61f16ecfd..0b0279291a73 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2365,12 +2365,12 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
 	WREG32(0x15DC, 0);

 	/* empty context1-15 */
-	/* FIXME start with 1G, once using 2 level pt switch to full
+	/* FIXME start with 4G, once using 2 level pt switch to full
 	 * vm size space
 	 */
 	/* set vm size, must be a multiple of 4 */
 	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
-	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, (1 << 30) / RADEON_GPU_PAGE_SIZE);
+	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
 	for (i = 1; i < 16; i++) {
 		if (i < 8)
 			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 7f1feb2f467a..637c51c11b44 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -693,7 +693,7 @@ static void __cpuinit get_core_online(unsigned int cpu)
 	 * sensors. We check this bit only, all the early CPUs
 	 * without thermal sensors will be filtered out.
 	 */
-	if (!cpu_has(c, X86_FEATURE_DTS))
+	if (!cpu_has(c, X86_FEATURE_DTHERM))
 		return;

 	if (!pdev) {
@@ -794,7 +794,7 @@ static struct notifier_block coretemp_cpu_notifier __refdata = {
 };

 static const struct x86_cpu_id coretemp_ids[] = {
-	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTS },
+	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM },
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 37fdaf81bd1f..ce59824fb414 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2292,6 +2292,13 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
 	if (r)
 		return r;

+	r = dm_pool_commit_metadata(pool->pmd);
+	if (r) {
+		DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
+		      __func__, r);
+		return r;
+	}
+
 	r = dm_pool_reserve_metadata_snap(pool->pmd);
 	if (r)
 		DMWARN("reserve_metadata_snap message failed.");
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 1c2f9048e1ae..a4c219e3c859 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5784,8 +5784,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
 		super_types[mddev->major_version].
 			validate_super(mddev, rdev);
 		if ((info->state & (1<<MD_DISK_SYNC)) &&
-		    (!test_bit(In_sync, &rdev->flags) ||
-		     rdev->raid_disk != info->raid_disk)) {
+		    rdev->raid_disk != info->raid_disk) {
 			/* This was a hot-add request, but events doesn't
 			 * match, so reject it.
 			 */
@@ -6751,7 +6750,7 @@ struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev
 	thread->tsk = kthread_run(md_thread, thread,
 				  "%s_%s",
 				  mdname(thread->mddev),
-				  name ?: mddev->pers->name);
+				  name);
 	if (IS_ERR(thread->tsk)) {
 		kfree(thread);
 		return NULL;
@@ -7298,6 +7297,7 @@ void md_do_sync(struct mddev *mddev)
 	int skipped = 0;
 	struct md_rdev *rdev;
 	char *desc;
+	struct blk_plug plug;
 
 	/* just incase thread restarts... */
 	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
@@ -7447,6 +7447,7 @@ void md_do_sync(struct mddev *mddev)
 	}
 	mddev->curr_resync_completed = j;
 
+	blk_start_plug(&plug);
 	while (j < max_sectors) {
 		sector_t sectors;
 
@@ -7552,6 +7553,7 @@ void md_do_sync(struct mddev *mddev)
 	 * this also signals 'finished resyncing' to md_stop
 	 */
  out:
+	blk_finish_plug(&plug);
 	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
 	/* tell personality that we are finished */
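The md_do_sync() hunks wrap the entire resync loop in a block plug, so the stream of small resync bios can batch and merge before reaching the device queue. A minimal sketch of the plug API's scoped, on-stack usage (kernel context, mirroring the placement above):

    #include <linux/blkdev.h>

    static void resync_loop_sketch(void)
    {
    	struct blk_plug plug;		/* on-stack, like in md_do_sync() */

    	blk_start_plug(&plug);		/* bios now queue up per-task */
    	/* ... the while (j < max_sectors) loop submits its bios here,
    	 * where they can accumulate and merge ... */
    	blk_finish_plug(&plug);		/* flush the batch to the queue */
    }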
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 9339e67fcc79..61a1833ebaf3 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -474,7 +474,8 @@ static int multipath_run (struct mddev *mddev)
 	}
 
 	{
-		mddev->thread = md_register_thread(multipathd, mddev, NULL);
+		mddev->thread = md_register_thread(multipathd, mddev,
+						   "multipath");
 		if (!mddev->thread) {
 			printk(KERN_ERR "multipath: couldn't allocate thread"
 			       " for %s\n", mdname(mddev));
diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
index 50ed53bf4aa2..fc90c11620ad 100644
--- a/drivers/md/persistent-data/dm-space-map-checker.c
+++ b/drivers/md/persistent-data/dm-space-map-checker.c
@@ -8,6 +8,7 @@
 
 #include <linux/device-mapper.h>
 #include <linux/export.h>
+#include <linux/vmalloc.h>
 
 #ifdef CONFIG_DM_DEBUG_SPACE_MAPS
 
@@ -89,13 +90,23 @@ static int ca_create(struct count_array *ca, struct dm_space_map *sm)
 
 	ca->nr = nr_blocks;
 	ca->nr_free = nr_blocks;
-	ca->counts = kzalloc(sizeof(*ca->counts) * nr_blocks, GFP_KERNEL);
-	if (!ca->counts)
-		return -ENOMEM;
+
+	if (!nr_blocks)
+		ca->counts = NULL;
+	else {
+		ca->counts = vzalloc(sizeof(*ca->counts) * nr_blocks);
+		if (!ca->counts)
+			return -ENOMEM;
+	}
 
 	return 0;
 }
 
+static void ca_destroy(struct count_array *ca)
+{
+	vfree(ca->counts);
+}
+
 static int ca_load(struct count_array *ca, struct dm_space_map *sm)
 {
 	int r;
@@ -126,12 +137,14 @@ static int ca_load(struct count_array *ca, struct dm_space_map *sm)
 static int ca_extend(struct count_array *ca, dm_block_t extra_blocks)
 {
 	dm_block_t nr_blocks = ca->nr + extra_blocks;
-	uint32_t *counts = kzalloc(sizeof(*counts) * nr_blocks, GFP_KERNEL);
+	uint32_t *counts = vzalloc(sizeof(*counts) * nr_blocks);
 	if (!counts)
 		return -ENOMEM;
 
-	memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
-	kfree(ca->counts);
+	if (ca->counts) {
+		memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
+		ca_destroy(ca);
+	}
 	ca->nr = nr_blocks;
 	ca->nr_free += extra_blocks;
 	ca->counts = counts;
@@ -151,11 +164,6 @@ static int ca_commit(struct count_array *old, struct count_array *new)
 	return 0;
 }
 
-static void ca_destroy(struct count_array *ca)
-{
-	kfree(ca->counts);
-}
-
 /*----------------------------------------------------------------*/
 
 struct sm_checker {
@@ -343,25 +351,25 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
 	int r;
 	struct sm_checker *smc;
 
-	if (!sm)
-		return NULL;
+	if (IS_ERR_OR_NULL(sm))
+		return ERR_PTR(-EINVAL);
 
 	smc = kmalloc(sizeof(*smc), GFP_KERNEL);
 	if (!smc)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	memcpy(&smc->sm, &ops_, sizeof(smc->sm));
 	r = ca_create(&smc->old_counts, sm);
 	if (r) {
 		kfree(smc);
-		return NULL;
+		return ERR_PTR(r);
 	}
 
 	r = ca_create(&smc->counts, sm);
 	if (r) {
 		ca_destroy(&smc->old_counts);
 		kfree(smc);
-		return NULL;
+		return ERR_PTR(r);
 	}
 
 	smc->real_sm = sm;
@@ -371,7 +379,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
 		ca_destroy(&smc->counts);
 		ca_destroy(&smc->old_counts);
 		kfree(smc);
-		return NULL;
+		return ERR_PTR(r);
 	}
 
 	r = ca_commit(&smc->old_counts, &smc->counts);
@@ -379,7 +387,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
 		ca_destroy(&smc->counts);
 		ca_destroy(&smc->old_counts);
 		kfree(smc);
-		return NULL;
+		return ERR_PTR(r);
 	}
 
 	return &smc->sm;
@@ -391,25 +399,25 @@ struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
 	int r;
 	struct sm_checker *smc;
 
-	if (!sm)
-		return NULL;
+	if (IS_ERR_OR_NULL(sm))
+		return ERR_PTR(-EINVAL);
 
 	smc = kmalloc(sizeof(*smc), GFP_KERNEL);
 	if (!smc)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	memcpy(&smc->sm, &ops_, sizeof(smc->sm));
 	r = ca_create(&smc->old_counts, sm);
 	if (r) {
 		kfree(smc);
-		return NULL;
+		return ERR_PTR(r);
 	}
 
 	r = ca_create(&smc->counts, sm);
 	if (r) {
 		ca_destroy(&smc->old_counts);
 		kfree(smc);
-		return NULL;
+		return ERR_PTR(r);
 	}
 
 	smc->real_sm = sm;
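Two independent fixes land above: the per-block count arrays move from kzalloc() to vzalloc(), since their size scales with the block count and can exceed the kmalloc order limit, and the constructors switch from bare NULL returns to the ERR_PTR convention so callers can distinguish -EINVAL from -ENOMEM. The pointer-encoded error idiom, re-implemented in userspace purely for illustration:

    #include <stdio.h>
    #include <errno.h>

    /* Userspace stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR:
     * small negative errnos map into the top page of address space. */
    #define MAX_ERRNO	4095
    #define ERR_PTR(err)	((void *)(long)(err))
    #define PTR_ERR(ptr)	((long)(ptr))
    #define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    static int the_thing;

    static void *create_thing(int fail)
    {
    	if (fail)
    		return ERR_PTR(-ENOMEM);  /* caller learns the exact cause */
    	return &the_thing;
    }

    int main(void)
    {
    	void *p = create_thing(1);

    	if (IS_ERR(p))
    		printf("create failed: %ld\n", PTR_ERR(p));  /* prints -12 */
    	return 0;
    }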
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index fc469ba9f627..3d0ed5332883 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -290,7 +290,16 @@ struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
 				       dm_block_t nr_blocks)
 {
 	struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks);
-	return dm_sm_checker_create_fresh(sm);
+	struct dm_space_map *smc;
+
+	if (IS_ERR_OR_NULL(sm))
+		return sm;
+
+	smc = dm_sm_checker_create_fresh(sm);
+	if (IS_ERR(smc))
+		dm_sm_destroy(sm);
+
+	return smc;
 }
 EXPORT_SYMBOL_GPL(dm_sm_disk_create);
 
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index 400fe144c0cd..e5604b32d91f 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -138,6 +138,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
 
 void dm_tm_destroy(struct dm_transaction_manager *tm)
 {
+	if (!tm->is_clone)
+		wipe_shadow_table(tm);
+
 	kfree(tm);
 }
 EXPORT_SYMBOL_GPL(dm_tm_destroy);
@@ -344,8 +347,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
 		}
 
 		*sm = dm_sm_checker_create(inner);
-		if (!*sm)
+		if (IS_ERR(*sm)) {
+			r = PTR_ERR(*sm);
 			goto bad2;
+		}
 
 	} else {
 		r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
@@ -364,8 +369,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
 		}
 
 		*sm = dm_sm_checker_create(inner);
-		if (!*sm)
+		if (IS_ERR(*sm)) {
+			r = PTR_ERR(*sm);
 			goto bad2;
+		}
 	}
 
 	return 0;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a9c7981ddd24..8c2754f835ef 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -517,8 +517,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 		int bad_sectors;
 
 		int disk = start_disk + i;
-		if (disk >= conf->raid_disks)
-			disk -= conf->raid_disks;
+		if (disk >= conf->raid_disks * 2)
+			disk -= conf->raid_disks * 2;
 
 		rdev = rcu_dereference(conf->mirrors[disk].rdev);
 		if (r1_bio->bios[disk] == IO_BLOCKED
@@ -883,7 +883,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
 	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
 	struct md_rdev *blocked_rdev;
-	int plugged;
 	int first_clone;
 	int sectors_handled;
 	int max_sectors;
@@ -1034,7 +1033,6 @@ read_again:
 	 * the bad blocks. Each set of writes gets it's own r1bio
 	 * with a set of bios attached.
 	 */
-	plugged = mddev_check_plugged(mddev);
 
 	disks = conf->raid_disks * 2;
  retry_write:
@@ -1191,6 +1189,8 @@ read_again:
 		bio_list_add(&conf->pending_bio_list, mbio);
 		conf->pending_count++;
 		spin_unlock_irqrestore(&conf->device_lock, flags);
+		if (!mddev_check_plugged(mddev))
+			md_wakeup_thread(mddev->thread);
 	}
 	/* Mustn't call r1_bio_write_done before this next test,
 	 * as it could result in the bio being freed.
@@ -1213,9 +1213,6 @@ read_again:
 
 	/* In case raid1d snuck in to freeze_array */
 	wake_up(&conf->wait_barrier);
-
-	if (do_sync || !bitmap || !plugged)
-		md_wakeup_thread(mddev->thread);
 }
 
 static void status(struct seq_file *seq, struct mddev *mddev)
@@ -2621,7 +2618,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 		goto abort;
 	}
 	err = -ENOMEM;
-	conf->thread = md_register_thread(raid1d, mddev, NULL);
+	conf->thread = md_register_thread(raid1d, mddev, "raid1");
 	if (!conf->thread) {
 		printk(KERN_ERR
 		       "md/raid1:%s: couldn't allocate thread\n",
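The raid1 plugging rework drops the function-wide `plugged` flag in favor of a per-write decision: after each bio is queued on pending_bio_list, the daemon is woken immediately unless the submitting task holds an active plug, whose flush callback will do the wakeup later. The shape of that pattern, as a hedged kernel-context sketch (not standalone; types come from raid1.h):

    /* Sketch only: queue work, then wake the worker unless a plug
     * will flush on our behalf when the task unplugs. */
    static void queue_write_sketch(struct r1conf *conf, struct bio *mbio,
    			       struct mddev *mddev)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&conf->device_lock, flags);
    	bio_list_add(&conf->pending_bio_list, mbio);
    	conf->pending_count++;
    	spin_unlock_irqrestore(&conf->device_lock, flags);

    	if (!mddev_check_plugged(mddev))
    		md_wakeup_thread(mddev->thread);
    }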
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 99ae6068e456..8da6282254c3 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1039,7 +1039,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
 	unsigned long flags;
 	struct md_rdev *blocked_rdev;
-	int plugged;
 	int sectors_handled;
 	int max_sectors;
 	int sectors;
@@ -1239,7 +1238,6 @@ read_again:
 	 * of r10_bios is recored in bio->bi_phys_segments just as with
 	 * the read case.
 	 */
-	plugged = mddev_check_plugged(mddev);
 
 	r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
 	raid10_find_phys(conf, r10_bio);
@@ -1396,6 +1394,8 @@ retry_write:
 		bio_list_add(&conf->pending_bio_list, mbio);
 		conf->pending_count++;
 		spin_unlock_irqrestore(&conf->device_lock, flags);
+		if (!mddev_check_plugged(mddev))
+			md_wakeup_thread(mddev->thread);
 
 		if (!r10_bio->devs[i].repl_bio)
 			continue;
@@ -1423,6 +1423,8 @@ retry_write:
 		bio_list_add(&conf->pending_bio_list, mbio);
 		conf->pending_count++;
 		spin_unlock_irqrestore(&conf->device_lock, flags);
+		if (!mddev_check_plugged(mddev))
+			md_wakeup_thread(mddev->thread);
 	}
 
 	/* Don't remove the bias on 'remaining' (one_write_done) until
@@ -1448,9 +1450,6 @@ retry_write:
 
 	/* In case raid10d snuck in to freeze_array */
 	wake_up(&conf->wait_barrier);
-
-	if (do_sync || !mddev->bitmap || !plugged)
-		md_wakeup_thread(mddev->thread);
 }
 
 static void status(struct seq_file *seq, struct mddev *mddev)
@@ -2310,7 +2309,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 				if (r10_sync_page_io(rdev,
 						     r10_bio->devs[sl].addr +
 						     sect,
-						     s<<9, conf->tmppage, WRITE)
+						     s, conf->tmppage, WRITE)
 				    == 0) {
 					/* Well, this device is dead */
 					printk(KERN_NOTICE
@@ -2349,7 +2348,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 				switch (r10_sync_page_io(rdev,
 							 r10_bio->devs[sl].addr +
 							 sect,
-							 s<<9, conf->tmppage,
+							 s, conf->tmppage,
 							 READ)) {
 				case 0:
 					/* Well, this device is dead */
@@ -2512,7 +2511,7 @@ read_more:
 	slot = r10_bio->read_slot;
 	printk_ratelimited(
 		KERN_ERR
-		"md/raid10:%s: %s: redirecting"
+		"md/raid10:%s: %s: redirecting "
 		"sector %llu to another mirror\n",
 		mdname(mddev),
 		bdevname(rdev->bdev, b),
@@ -2661,7 +2660,8 @@ static void raid10d(struct mddev *mddev)
 	blk_start_plug(&plug);
 	for (;;) {
 
-		flush_pending_writes(conf);
+		if (atomic_read(&mddev->plug_cnt) == 0)
+			flush_pending_writes(conf);
 
 		spin_lock_irqsave(&conf->device_lock, flags);
 		if (list_empty(head)) {
@@ -2890,6 +2890,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 			/* want to reconstruct this device */
 			rb2 = r10_bio;
 			sect = raid10_find_virt(conf, sector_nr, i);
+			if (sect >= mddev->resync_max_sectors) {
+				/* last stripe is not complete - don't
+				 * try to recover this sector.
+				 */
+				continue;
+			}
 			/* Unless we are doing a full sync, or a replacement
 			 * we only need to recover the block if it is set in
 			 * the bitmap
@@ -3421,7 +3427,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 	spin_lock_init(&conf->resync_lock);
 	init_waitqueue_head(&conf->wait_barrier);
 
-	conf->thread = md_register_thread(raid10d, mddev, NULL);
+	conf->thread = md_register_thread(raid10d, mddev, "raid10");
 	if (!conf->thread)
 		goto out;
 
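Alongside the same plugging rework as raid1, the fix_read_error() hunks correct a units bug: r10_sync_page_io() takes a length in sectors, and the old `s<<9` pre-converted it to bytes one layer too early. The two unit systems in a standalone illustration (512-byte sectors assumed, as in the block layer):

    #include <stdio.h>

    int main(void)
    {
    	unsigned long s = 8;	/* transfer length in 512-byte sectors */

    	/* sectors -> bytes is a shift by 9, since 512 == 1 << 9;
    	 * passing s << 9 where sectors are expected inflates the
    	 * request 512-fold (8 sectors would read as 4096 sectors). */
    	printf("%lu sectors = %lu bytes\n", s, s << 9);
    	return 0;
    }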
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d26767246d26..04348d76bb30 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -196,12 +196,14 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
 		BUG_ON(!list_empty(&sh->lru));
 		BUG_ON(atomic_read(&conf->active_stripes)==0);
 		if (test_bit(STRIPE_HANDLE, &sh->state)) {
-			if (test_bit(STRIPE_DELAYED, &sh->state))
+			if (test_bit(STRIPE_DELAYED, &sh->state) &&
+			    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 				list_add_tail(&sh->lru, &conf->delayed_list);
 			else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
 				 sh->bm_seq - conf->seq_write > 0)
 				list_add_tail(&sh->lru, &conf->bitmap_list);
 			else {
+				clear_bit(STRIPE_DELAYED, &sh->state);
 				clear_bit(STRIPE_BIT_DELAY, &sh->state);
 				list_add_tail(&sh->lru, &conf->handle_list);
 			}
@@ -606,6 +608,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 					 * a chance*/
 					md_check_recovery(conf->mddev);
 				}
+				/*
+				 * Because md_wait_for_blocked_rdev
+				 * will dec nr_pending, we must
+				 * increment it first.
+				 */
+				atomic_inc(&rdev->nr_pending);
 				md_wait_for_blocked_rdev(rdev, conf->mddev);
 			} else {
 				/* Acknowledged bad block - skip the write */
@@ -1737,6 +1745,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
 	} else {
 		const char *bdn = bdevname(rdev->bdev, b);
 		int retry = 0;
+		int set_bad = 0;
 
 		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
 		atomic_inc(&rdev->read_errors);
@@ -1748,7 +1757,8 @@ static void raid5_end_read_request(struct bio * bi, int error)
 				mdname(conf->mddev),
 				(unsigned long long)s,
 				bdn);
-		else if (conf->mddev->degraded >= conf->max_degraded)
+		else if (conf->mddev->degraded >= conf->max_degraded) {
+			set_bad = 1;
 			printk_ratelimited(
 				KERN_WARNING
 				"md/raid:%s: read error not correctable "
@@ -1756,8 +1766,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
 				mdname(conf->mddev),
 				(unsigned long long)s,
 				bdn);
-		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
+		} else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
 			/* Oh, no!!! */
+			set_bad = 1;
 			printk_ratelimited(
 				KERN_WARNING
 				"md/raid:%s: read error NOT corrected!! "
@@ -1765,7 +1776,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
 				mdname(conf->mddev),
 				(unsigned long long)s,
 				bdn);
-		else if (atomic_read(&rdev->read_errors)
+		} else if (atomic_read(&rdev->read_errors)
 			 > conf->max_nr_stripes)
 			printk(KERN_WARNING
 			       "md/raid:%s: Too many read errors, failing device %s.\n",
@@ -1777,7 +1788,11 @@ static void raid5_end_read_request(struct bio * bi, int error)
 		else {
 			clear_bit(R5_ReadError, &sh->dev[i].flags);
 			clear_bit(R5_ReWrite, &sh->dev[i].flags);
-			md_error(conf->mddev, rdev);
+			if (!(set_bad
+			      && test_bit(In_sync, &rdev->flags)
+			      && rdev_set_badblocks(
+				      rdev, sh->sector, STRIPE_SECTORS, 0)))
+				md_error(conf->mddev, rdev);
 		}
 	}
 	rdev_dec_pending(rdev, conf->mddev);
@@ -3582,8 +3597,18 @@ static void handle_stripe(struct stripe_head *sh)
 
 finish:
 	/* wait for this device to become unblocked */
-	if (conf->mddev->external && unlikely(s.blocked_rdev))
-		md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
+	if (unlikely(s.blocked_rdev)) {
+		if (conf->mddev->external)
+			md_wait_for_blocked_rdev(s.blocked_rdev,
+						 conf->mddev);
+		else
+			/* Internal metadata will immediately
+			 * be written by raid5d, so we don't
+			 * need to wait here.
+			 */
+			rdev_dec_pending(s.blocked_rdev,
+					 conf->mddev);
+	}
 
 	if (s.handle_bad_blocks)
 		for (i = disks; i--; ) {
@@ -3881,8 +3906,6 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		raid_bio->bi_next = (void*)rdev;
 		align_bi->bi_bdev =  rdev->bdev;
 		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
-		/* No reshape active, so we can trust rdev->data_offset */
-		align_bi->bi_sector += rdev->data_offset;
 
 		if (!bio_fits_rdev(align_bi) ||
 		    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
@@ -3893,6 +3916,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 			return 0;
 		}
 
+		/* No reshape active, so we can trust rdev->data_offset */
+		align_bi->bi_sector += rdev->data_offset;
+
 		spin_lock_irq(&conf->device_lock);
 		wait_event_lock_irq(conf->wait_for_stripe,
 				    conf->quiesce == 0,
@@ -3971,7 +3997,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 	struct stripe_head *sh;
 	const int rw = bio_data_dir(bi);
 	int remaining;
-	int plugged;
 
 	if (unlikely(bi->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bi);
@@ -3990,7 +4015,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 	bi->bi_next = NULL;
 	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
 
-	plugged = mddev_check_plugged(mddev);
 	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
 		DEFINE_WAIT(w);
 		int previous;
@@ -4092,6 +4116,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 			if ((bi->bi_rw & REQ_SYNC) &&
 			    !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 				atomic_inc(&conf->preread_active_stripes);
+			mddev_check_plugged(mddev);
 			release_stripe(sh);
 		} else {
 			/* cannot get stripe for read-ahead, just give-up */
@@ -4099,10 +4124,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 			finish_wait(&conf->wait_for_overlap, &w);
 			break;
 		}
-
 	}
-	if (!plugged)
-		md_wakeup_thread(mddev->thread);
 
 	spin_lock_irq(&conf->device_lock);
 	remaining = raid5_dec_bi_phys_segments(bi);
@@ -4823,6 +4845,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
 	int raid_disk, memory, max_disks;
 	struct md_rdev *rdev;
 	struct disk_info *disk;
+	char pers_name[6];
 
 	if (mddev->new_level != 5
 	    && mddev->new_level != 4
@@ -4946,7 +4969,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
 	printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
 	       mdname(mddev), memory);
 
-	conf->thread = md_register_thread(raid5d, mddev, NULL);
+	sprintf(pers_name, "raid%d", mddev->new_level);
+	conf->thread = md_register_thread(raid5d, mddev, pers_name);
 	if (!conf->thread) {
 		printk(KERN_ERR
 		       "md/raid:%s: couldn't allocate thread.\n",
@@ -5465,10 +5489,9 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 	if (rdev->saved_raid_disk >= 0 &&
 	    rdev->saved_raid_disk >= first &&
 	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
-		disk = rdev->saved_raid_disk;
-	else
-		disk = first;
-	for ( ; disk <= last ; disk++) {
+		first = rdev->saved_raid_disk;
+
+	for (disk = first; disk <= last; disk++) {
 		p = conf->disks + disk;
 		if (p->rdev == NULL) {
 			clear_bit(In_sync, &rdev->flags);
@@ -5477,8 +5500,11 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 			if (rdev->saved_raid_disk != disk)
 				conf->fullsync = 1;
 			rcu_assign_pointer(p->rdev, rdev);
-			break;
+			goto out;
 		}
+	}
+	for (disk = first; disk <= last; disk++) {
+		p = conf->disks + disk;
 		if (test_bit(WantReplacement, &p->rdev->flags) &&
 		    p->replacement == NULL) {
 			clear_bit(In_sync, &rdev->flags);
@@ -5490,6 +5516,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 			break;
 		}
 	}
+out:
 	print_raid5_conf(conf);
 	return err;
 }
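Of the raid5 hunks, the ops_run_io() one is the subtle fix: md_wait_for_blocked_rdev() consumes a reference (it decrements rdev->nr_pending on completion), so the caller must take one first or the count goes negative. The take-before-handoff invariant, modeled with C11 atomics in userspace (an illustration of the refcount rule, not the kernel API):

    #include <stdio.h>
    #include <stdatomic.h>

    static atomic_int nr_pending = 1;	/* one reference held elsewhere */

    /* Stand-in for md_wait_for_blocked_rdev(): drops one reference. */
    static void wait_for_unblock(void)
    {
    	atomic_fetch_sub(&nr_pending, 1);
    }

    int main(void)
    {
    	/* Take a reference *before* calling the consuming helper,
    	 * as the added atomic_inc(&rdev->nr_pending) does above. */
    	atomic_fetch_add(&nr_pending, 1);
    	wait_for_unblock();

    	printf("nr_pending = %d\n", atomic_load(&nr_pending));	/* 1 */
    	return 0;
    }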
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 351a4097b2ba..76edbc1be33b 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -103,6 +103,7 @@
 #define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
 #define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
 #define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE       0x40    /* IP Checksum Error */
 #define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
 #define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
 
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index ba86b3f8a404..a166efc2fead 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -496,7 +496,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
  * @sk_buff: socket buffer with received data
  **/
 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
-			      __le16 csum, struct sk_buff *skb)
+			      struct sk_buff *skb)
 {
 	u16 status = (u16)status_err;
 	u8 errors = (u8)(status_err >> 24);
@@ -511,8 +511,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
 	if (status & E1000_RXD_STAT_IXSM)
 		return;
 
-	/* TCP/UDP checksum error bit is set */
-	if (errors & E1000_RXD_ERR_TCPE) {
+	/* TCP/UDP checksum error bit or IP checksum error bit is set */
+	if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
 		/* let the stack verify checksum errors */
 		adapter->hw_csum_err++;
 		return;
@@ -523,19 +523,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
 		return;
 
 	/* It must be a TCP or UDP packet with a valid checksum */
-	if (status & E1000_RXD_STAT_TCPCS) {
-		/* TCP checksum is good */
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	} else {
-		/*
-		 * IP fragment with UDP payload
-		 * Hardware complements the payload checksum, so we undo it
-		 * and then put the value in host order for further stack use.
-		 */
-		__sum16 sum = (__force __sum16)swab16((__force u16)csum);
-		skb->csum = csum_unfold(~sum);
-		skb->ip_summed = CHECKSUM_COMPLETE;
-	}
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
 	adapter->hw_csum_good++;
 }
 
@@ -954,8 +942,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
 		skb_put(skb, length);
 
 		/* Receive Checksum Offload */
-		e1000_rx_checksum(adapter, staterr,
-				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+		e1000_rx_checksum(adapter, staterr, skb);
 
 		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -1341,8 +1328,7 @@ copydone:
 		total_rx_bytes += skb->len;
 		total_rx_packets++;
 
-		e1000_rx_checksum(adapter, staterr,
-				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+		e1000_rx_checksum(adapter, staterr, skb);
 
 		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -1512,9 +1498,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
 			}
 		}
 
-	/* Receive Checksum Offload XXX recompute due to CRC strip? */
-	e1000_rx_checksum(adapter, staterr,
-			  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+	/* Receive Checksum Offload */
+	e1000_rx_checksum(adapter, staterr, skb);
 
 	e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -3098,19 +3083,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 
 	/* Enable Receive Checksum Offload for TCP and UDP */
 	rxcsum = er32(RXCSUM);
-	if (adapter->netdev->features & NETIF_F_RXCSUM) {
+	if (adapter->netdev->features & NETIF_F_RXCSUM)
 		rxcsum |= E1000_RXCSUM_TUOFL;
-
-		/*
-		 * IPv4 payload checksum for UDP fragments must be
-		 * used in conjunction with packet-split.
-		 */
-		if (adapter->rx_ps_pages)
-			rxcsum |= E1000_RXCSUM_IPPCSE;
-	} else {
+	else
 		rxcsum &= ~E1000_RXCSUM_TUOFL;
-		/* no need to clear IPPCSE as it defaults to 0 */
-	}
 	ew32(RXCSUM, rxcsum);
 
 	if (adapter->hw.mac.type == e1000_pch2lan) {
@@ -5241,22 +5217,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
 	/* Jumbo frame support */
-	if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
-		if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
-			e_err("Jumbo Frames not supported.\n");
-			return -EINVAL;
-		}
-
-		/*
-		 * IP payload checksum (enabled with jumbos/packet-split when
-		 * Rx checksum is enabled) and generation of RSS hash is
-		 * mutually exclusive in the hardware.
-		 */
-		if ((netdev->features & NETIF_F_RXCSUM) &&
-		    (netdev->features & NETIF_F_RXHASH)) {
-			e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled. Disable one of the receive offload features before enabling jumbos.\n");
-			return -EINVAL;
-		}
+	if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+	    !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+		e_err("Jumbo Frames not supported.\n");
+		return -EINVAL;
 	}
 
 	/* Supported frame sizes */
@@ -6030,17 +5994,6 @@ static int e1000_set_features(struct net_device *netdev,
 			 NETIF_F_RXALL)))
 		return 0;
 
-	/*
-	 * IP payload checksum (enabled with jumbos/packet-split when Rx
-	 * checksum is enabled) and generation of RSS hash is mutually
-	 * exclusive in the hardware.
-	 */
-	if (adapter->rx_ps_pages &&
-	    (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
-		e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames. Disable jumbos or enable only one of the receive offload features.\n");
-		return -EINVAL;
-	}
-
 	if (changed & NETIF_F_RXFCS) {
 		if (features & NETIF_F_RXFCS) {
 			adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
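With E1000_RXD_ERR_IPE defined, e1000_rx_checksum() no longer needs the descriptor's raw checksum word: a descriptor flagging either a TCP/UDP or an IP checksum error is handed to the stack for verification, everything else is marked CHECKSUM_UNNECESSARY, and the CHECKSUM_COMPLETE fix-up for UDP fragments disappears along with the IPPCSE and jumbo/RSS restrictions. The error test reduces to one mask check, e.g.:

    #include <stdio.h>
    #include <stdint.h>

    #define E1000_RXD_ERR_TCPE 0x20	/* TCP/UDP checksum error */
    #define E1000_RXD_ERR_IPE  0x40	/* IP checksum error (new bit) */

    int main(void)
    {
    	uint8_t errors = 0x40;	/* e.g. descriptor reports an IP error */

    	if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE))
    		printf("hw checksum bad: let the stack verify\n");
    	else
    		printf("hw checksum good: CHECKSUM_UNNECESSARY\n");
    	return 0;
    }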
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 8ce67064b9c5..90eef07943f4 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -357,21 +357,28 @@ static int igbvf_set_coalesce(struct net_device *netdev,
 	struct igbvf_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) ||
-	    ((ec->rx_coalesce_usecs > 3) &&
-	     (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) ||
-	    (ec->rx_coalesce_usecs == 2))
-		return -EINVAL;
-
-	/* convert to rate of irq's per second */
-	if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
+	if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) &&
+	    (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
+		adapter->current_itr = ec->rx_coalesce_usecs << 2;
+		adapter->requested_itr = 1000000000 /
+					(adapter->current_itr * 256);
+	} else if ((ec->rx_coalesce_usecs == 3) ||
+		   (ec->rx_coalesce_usecs == 2)) {
 		adapter->current_itr = IGBVF_START_ITR;
 		adapter->requested_itr = ec->rx_coalesce_usecs;
-	} else {
-		adapter->current_itr = ec->rx_coalesce_usecs << 2;
+	} else if (ec->rx_coalesce_usecs == 0) {
+		/*
+		 * The user's desire is to turn off interrupt throttling
+		 * altogether, but due to HW limitations, we can't do that.
+		 * Instead we set a very small value in EITR, which would
+		 * allow ~967k interrupts per second, but allow the adapter's
+		 * internal clocking to still function properly.
+		 */
+		adapter->current_itr = 4;
 		adapter->requested_itr = 1000000000 /
 					(adapter->current_itr * 256);
-	}
+	} else
+		return -EINVAL;
 
 	writel(adapter->current_itr,
 	       hw->hw_addr + adapter->rx_ring->itr_register);
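The rewritten igbvf_set_coalesce() converts between microseconds, the ITR register value, and an interrupts-per-second figure via 1000000000 / (itr * 256). Working the arithmetic for the new rx_coalesce_usecs == 0 case, where the driver clamps ITR to 4 instead of disabling throttling outright:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int itr = 4;	/* minimum EITR the driver programs */
    	unsigned int irqs_per_sec = 1000000000u / (itr * 256);

    	/* 1e9 / 1024 = 976562, i.e. on the order of the ~967k
    	 * interrupts per second the driver comment cites. */
    	printf("itr=%u -> ~%u interrupts/sec\n", itr, irqs_per_sec);
    	return 0;
    }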
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
index da14432806c6..efc4b7f308cf 100644
--- a/drivers/oprofile/oprofile_perf.c
+++ b/drivers/oprofile/oprofile_perf.c
@@ -25,7 +25,7 @@ static int oprofile_perf_enabled;
 static DEFINE_MUTEX(oprofile_perf_mutex);
 
 static struct op_counter_config *counter_config;
-static struct perf_event **perf_events[nr_cpumask_bits];
+static struct perf_event **perf_events[NR_CPUS];
 static int num_counters;
 
 /*
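The oprofile fix matters because, with CONFIG_CPUMASK_OFFSTACK, nr_cpumask_bits is defined as the runtime variable nr_cpu_ids and is therefore not a constant expression; a file-scope array needs the compile-time ceiling NR_CPUS. The distinction in miniature (standalone sketch with an assumed NR_CPUS value):

    #include <stdio.h>

    #define NR_CPUS 64	/* compile-time configuration ceiling (assumed) */

    static int *per_cpu_table[NR_CPUS];	/* OK: bound is a constant */

    int main(void)
    {
    	/* "static int *t[nr_cpu_ids];" would be a variable-length
    	 * array, which is not permitted at file scope -- hence the
    	 * build fix above. */
    	printf("table holds %zu slots\n",
    	       sizeof(per_cpu_table) / sizeof(per_cpu_table[0]));
    	return 0;
    }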